From 54242baefab9f5d60dd4a05fe9188b6b389c3550 Mon Sep 17 00:00:00 2001 From: edge20200 <126633394+edge20200@users.noreply.github.com> Date: Tue, 15 Aug 2023 20:06:56 -0400 Subject: [PATCH 001/741] Adds New Site OE + RF example Adds brand new site to script OnlyEncodes as well as adds the config for RF to example-config --- data/example-config.py | 11 ++- src/trackers/OE.py | 195 +++++++++++++++++++++++++++++++++++++++++ upload.py | 5 +- 3 files changed, 208 insertions(+), 3 deletions(-) create mode 100644 src/trackers/OE.py diff --git a/data/example-config.py b/data/example-config.py index ddf855ab8..b164cee40 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -164,7 +164,16 @@ # "anon" : "False" "announce_url" : "https://hdts-announce.ru/announce.php", #DO NOT EDIT THIS LINE }, - + "OE" : { + "api_key" : "OE api key", + "announce_url" : "https://onlyencodes.cc/announce/customannounceurl", + # "anon" : False + }, + "RF" : { + "api_key" : "RF api key", + "announce_url" : "https://reelflix.xyz/announce/customannounceurl", + # "anon" : False + }, "MANUAL" : { # Uncomment and replace link with filebrowser (https://github.com/filebrowser/filebrowser) link to the Upload-Assistant directory, this will link to your filebrowser instead of uploading to uguu.se # "filebrowser" : "https://domain.tld/filebrowser/files/Upload-Assistant/" diff --git a/src/trackers/OE.py b/src/trackers/OE.py new file mode 100644 index 000000000..217abe1f7 --- /dev/null +++ b/src/trackers/OE.py @@ -0,0 +1,195 @@ +# -*- coding: utf-8 -*- +# import discord +import asyncio +import requests +from difflib import SequenceMatcher +import distutils.util +import json +import os +import platform + +from src.trackers.COMMON import COMMON +from src.console import console + +class OE(): + """ + Edit for Tracker: + Edit BASE.torrent with announce and source + Check for duplicates + Set type/category IDs + Upload + """ + def __init__(self, config): + self.config = config + self.tracker = 'OE' + self.source_flag = 'OE' + self.search_url = 'https://onlyencodes.cc/api/torrents/filter' + self.upload_url = 'https://onlyencodes.cc/api/torrents/upload' + self.signature = f"\n[center][url=https://onlyencodes.cc/]Created by L4G's Upload Assistant[/url][/center]" + self.banned_groups = ['AROMA', 'EMBER', 'FGT', 'Hi10', 'LAMA'] + pass + + async def upload(self, meta): + common = COMMON(config=self.config) + await common.edit_torrent(meta, self.tracker, self.source_flag) + await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True) + cat_id = await self.get_cat_id(meta['category']) + type_id = await self.get_type_id(meta['video_codec']) + resolution_id = await self.get_res_id(meta['resolution']) + name = await self.edit_name(meta) + if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + anon = 0 + else: + anon = 1 + if meta['bdinfo'] != None: + mi_dump = None + bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + else: + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + bd_dump = None + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') + files = {'torrent': open_torrent} + data = { + 'name' : name, + 'description' : desc, + 'mediainfo' : mi_dump, + 'bdinfo' : bd_dump, 
+ 'category_id' : cat_id, + 'type_id' : type_id, + 'resolution_id' : resolution_id, + 'tmdb' : meta['tmdb'], + 'imdb' : meta['imdb_id'].replace('tt', ''), + 'tvdb' : meta['tvdb_id'], + 'mal' : meta['mal_id'], + 'igdb' : 0, + 'anonymous' : anon, + 'stream' : meta['stream'], + 'sd' : meta['sd'], + 'keywords' : meta['keywords'], + 'personal_release' : int(meta.get('personalrelease', False)), + 'internal' : 0, + 'featured' : 0, + 'free' : 0, + 'doubleup' : 0, + 'sticky' : 0, + } + headers = { + 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + } + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + } + + # Internal + if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + data['internal'] = 1 + + if meta.get('category') == "TV": + data['season_number'] = meta.get('season_int', '0') + data['episode_number'] = meta.get('episode_int', '0') + if meta['debug'] == False: + response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + try: + console.print(response.json()) + except: + console.print("It may have uploaded, go check") + return + else: + console.print(f"[cyan]Request Data:") + console.print(data) + open_torrent.close() + + + + async def edit_name(self, meta): + aither_name = meta['name'] + has_eng_audio = False + if meta['is_disc'] != "BDMV": + with open(f"{meta.get('base_dir')}/tmp/{meta.get('uuid')}/MediaInfo.json", 'r', encoding='utf-8') as f: + mi = json.load(f) + + for track in mi['media']['track']: + if track['@type'] == "Audio": + if track.get('Language', 'None').startswith('en'): + has_eng_audio = True + if not has_eng_audio: + audio_lang = mi['media']['track'][2].get('Language_String', "").upper() + if audio_lang != "": + aither_name = aither_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) + else: + for audio in meta['bdinfo']['audio']: + if audio['language'] == 'English': + has_eng_audio = True + if not has_eng_audio: + audio_lang = meta['bdinfo']['audio'][0]['language'].upper() + if audio_lang != "": + aither_name = aither_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) + # aither_name = aither_name.replace(meta.get('video_encode', meta.get('video_codec', "")), meta.get('video_encode', meta.get('video_codec', "")).replace('.', '')) + return aither_name + + async def get_cat_id(self, category_name): + category_id = { + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '0') + return category_id + + async def get_type_id(self, type): + type_id = { + 'HEVC': '10', + 'AV1': '14', + 'AVC': '15', + }.get(type, '0') + return type_id + + async def get_res_id(self, resolution): + resolution_id = { + '8640p':'10', + '4320p': '1', + '2160p': '2', + '1440p' : '3', + '1080p': '3', + '1080i':'4', + '720p': '5', + '576p': '6', + '576i': '7', + '480p': '8', + '480i': '9' + }.get(resolution, '10') + return resolution_id + + + + + + async def search_existing(self, meta): + dupes = [] + console.print("[yellow]Searching for existing torrents on site...") + params = { + 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId' : meta['tmdb'], + 'categories[]' : await self.get_cat_id(meta['category']), + 'types[]' : await self.get_type_id(meta['type']), + 'resolutions[]' : await self.get_res_id(meta['resolution']), + 'name' : "" + } + if meta['category'] == 'TV': + params['name'] = 
params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" + if meta.get('edition', "") != "": + params['name'] = params['name'] + f" {meta['edition']}" + + try: + response = requests.get(url=self.search_url, params=params) + response = response.json() + for each in response['data']: + result = [each][0]['attributes']['name'] + # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() + # if difference >= 0.05: + dupes.append(result) + except: + console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + await asyncio.sleep(5) + + return dupes diff --git a/upload.py b/upload.py index 2d0a1b742..51eb1a749 100644 --- a/upload.py +++ b/upload.py @@ -30,6 +30,7 @@ from src.trackers.TDC import TDC from src.trackers.HDT import HDT from src.trackers.RF import RF +from src.trackers.OE import OE import json from pathlib import Path import asyncio @@ -241,12 +242,12 @@ async def do_the_thing(base_dir): ####### Upload to Trackers ####### #################################### common = COMMON(config=config) - api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM','LCD','LST','HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC'] + api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM','LCD','LST','HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE'] http_trackers = ['HDB', 'TTG', 'FL', 'PTER', 'HDT', 'MTV'] tracker_class_map = { 'BLU' : BLU, 'BHD': BHD, 'AITHER' : AITHER, 'STC' : STC, 'R4E' : R4E, 'THR' : THR, 'STT' : STT, 'HP' : HP, 'PTP' : PTP, 'RF' : RF, 'SN' : SN, 'ACM' : ACM, 'HDB' : HDB, 'LCD': LCD, 'TTG' : TTG, 'LST' : LST, 'HUNO': HUNO, 'FL' : FL, 'LT' : LT, 'NBL' : NBL, 'ANT' : ANT, 'PTER': PTER, 'JPTV' : JPTV, - 'TL' : TL, 'TDC' : TDC, 'HDT' : HDT, 'MTV': MTV + 'TL' : TL, 'TDC' : TDC, 'HDT' : HDT, 'MTV': MTV, 'OE': OE } for tracker in trackers: From 18d041caea113fc060bc15ee0f1c5b1a09f1b929 Mon Sep 17 00:00:00 2001 From: edge20200 <126633394+edge20200@users.noreply.github.com> Date: Sun, 15 Oct 2023 17:11:11 -0400 Subject: [PATCH 002/741] Update OE Update OnlyEncodes to add current banned groups. Adds the correct options for what's allowed on site. 
--- src/trackers/OE.py | 97 ++++++++++++++++++++++------------------------ 1 file changed, 47 insertions(+), 50 deletions(-)

diff --git a/src/trackers/OE.py b/src/trackers/OE.py
index 217abe1f7..bb69a3e02 100644
--- a/src/trackers/OE.py
+++ b/src/trackers/OE.py
@@ -25,18 +25,18 @@ def __init__(self, config):
 self.source_flag = 'OE'
 self.search_url = 'https://onlyencodes.cc/api/torrents/filter'
 self.upload_url = 'https://onlyencodes.cc/api/torrents/upload'
- self.signature = f"\n[center][url=https://onlyencodes.cc/]Created by L4G's Upload Assistant[/url][/center]"
- self.banned_groups = ['AROMA', 'EMBER', 'FGT', 'Hi10', 'LAMA']
+ self.signature = f"\n[center][url=https://onlyencodes.cc/pages/1]OnlyEncodes Uploader - Powered by L4G's Upload Assistant[/url][/center]"
+ self.banned_groups = ['0neshot', '3LT0N', '4K4U', '4yEo', '$andra', '[Oj]', 'AFG', 'AkihitoSubs', 'AniHLS', 'Anime Time', 'AnimeRG', 'AniURL', 'AR', 'AROMA', 'ASW', 'aXXo', 'BakedFish', 'BiTOR', 'BHDStudio', 'BRrip', 'bonkai', 'Cleo', 'CM8', 'C4K', 'CrEwSaDe', 'core', 'd3g', 'DDR', 'DeadFish', 'DeeJayAhmed', 'DNL', 'ELiTE', 'EMBER', 'eSc', 'EVO', 'EZTV', 'FaNGDiNG0', 'FGT', 'fenix', 'FUM', 'FRDS', 'FROZEN', 'GalaxyTV', 'GalaxyRG', 'GERMini', 'Grym', 'GrymLegacy', 'HAiKU', 'HD2DVD', 'HDTime', 'Hi10', 'ION10', 'iPlanet', 'JacobSwaggedUp', 'JIVE', 'Judas', 'KiNGDOM', 'LAMA', 'Leffe', 'LiGaS', 'LOAD', 'LycanHD', 'MeGusta', 'MezRips', 'mHD', 'Mr.Deadpool', 'mSD', 'NemDiggers', 'neoHEVC', 'NeXus', 'NhaNc3', 'nHD', 'nikt0', 'nSD', 'NOIVTC', 'pahe.in', 'PlaySD', 'playXD', 'PRODJi', 'ProRes', 'project-gxs', 'PSA', 'QaS', 'Ranger', 'RAPiDCOWS', 'RARBG', 'Raze', 'RCDiVX', 'RDN', 'Reaktor', 'REsuRRecTioN', 'RMTeam', 'ROBOTS', 'rubix', 'SANTi', 'SHUTTERSHIT', 'SpaceFish', 'SPASM', 'SSA', 'TBS', 'Telly', 'Tenrai-Sensei', 'TERMiNAL', 'TM', 'topaz', 'TSP', 'TSPxL', 'Trix', 'URANiME', 'UTR', 'VipapkSudios', 'ViSION', 'WAF', 'Wardevil', 'x0r', 'xRed', 'XS', 'YakuboEncodes', 'YIFY', 'YTS', 'YuiSubs', 'ZKBL', 'ZmN', 'ZMNT']
 pass
 async def upload(self, meta):
 common = COMMON(config=self.config)
 await common.edit_torrent(meta, self.tracker, self.source_flag)
- await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True)
+ await common.unit3d_edit_desc(meta, self.tracker, self.signature)
 cat_id = await self.get_cat_id(meta['category'])
- type_id = await self.get_type_id(meta['video_codec'])
+ type_id = await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('video_codec'), meta.get('category', ""))
 resolution_id = await self.get_res_id(meta['resolution'])
- name = await self.edit_name(meta)
+ oe_name = await self.edit_name(meta)
 if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False:
 anon = 0
 else:
@@ -51,7 +51,7 @@ async def upload(self, meta):
 open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb')
 files = {'torrent': open_torrent}
 data = {
- 'name' : name,
+ 'name' : oe_name,
 'description' : desc,
 'mediainfo' : mi_dump,
 'bdinfo' : bd_dump,
@@ -74,27 +74,29 @@ async def upload(self, meta):
 'doubleup' : 0,
 'sticky' : 0,
 }
- headers = {
- 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})'
- }
- params = {
- 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip()
- }
-
 # Internal
 if self.config['TRACKERS'][self.tracker].get('internal', False) == True:
 if meta['tag'] != "" and (meta['tag'][1:] in 
self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - + if meta.get('category') == "TV": data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') + headers = { + 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + } + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + } + if meta['debug'] == False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: + console.print(response.json()) except: console.print("It may have uploaded, go check") + open_torrent.close() return else: console.print(f"[cyan]Request Data:") @@ -104,30 +106,8 @@ async def upload(self, meta): async def edit_name(self, meta): - aither_name = meta['name'] - has_eng_audio = False - if meta['is_disc'] != "BDMV": - with open(f"{meta.get('base_dir')}/tmp/{meta.get('uuid')}/MediaInfo.json", 'r', encoding='utf-8') as f: - mi = json.load(f) - - for track in mi['media']['track']: - if track['@type'] == "Audio": - if track.get('Language', 'None').startswith('en'): - has_eng_audio = True - if not has_eng_audio: - audio_lang = mi['media']['track'][2].get('Language_String', "").upper() - if audio_lang != "": - aither_name = aither_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) - else: - for audio in meta['bdinfo']['audio']: - if audio['language'] == 'English': - has_eng_audio = True - if not has_eng_audio: - audio_lang = meta['bdinfo']['audio'][0]['language'].upper() - if audio_lang != "": - aither_name = aither_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) - # aither_name = aither_name.replace(meta.get('video_encode', meta.get('video_codec', "")), meta.get('video_encode', meta.get('video_codec', "")).replace('.', '')) - return aither_name + oe_name = meta.get('name') + return oe_name async def get_cat_id(self, category_name): category_id = { @@ -136,12 +116,32 @@ async def get_cat_id(self, category_name): }.get(category_name, '0') return category_id - async def get_type_id(self, type): + async def get_type_id(self, type, tv_pack, video_codec, category): type_id = { - 'HEVC': '10', - 'AV1': '14', - 'AVC': '15', + 'DISC': '19', + 'REMUX': '20', + 'WEBDL': '21', }.get(type, '0') + if type == "WEBRIP": + if video_codec == "HEVC": + # x265 Encode + type_id = '10' + if video_codec == 'AV1': + # AV1 Encode + type_id = '14' + if video_codec == 'AVC': + # x264 Encode + type_id = '15' + if type == "ENCODE": + if video_codec == "HEVC": + # x265 Encode + type_id = '10' + if video_codec == 'AV1': + # AV1 Encode + type_id = '14' + if video_codec == 'AVC': + # x264 Encode + type_id = '15' return type_id async def get_res_id(self, resolution): @@ -161,7 +161,7 @@ async def get_res_id(self, resolution): return resolution_id - + async def search_existing(self, meta): @@ -171,25 +171,22 @@ async def search_existing(self, meta): 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'tmdbId' : meta['tmdb'], 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type']), + 'types[]' : await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('sd', 0), meta.get('category', "")), 'resolutions[]' : await self.get_res_id(meta['resolution']), 'name' : "" } if meta['category'] == 'TV': - params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" + params['name'] = f"{meta.get('season', 
'')}{meta.get('episode', '')}"
 if meta.get('edition', "") != "":
- params['name'] = params['name'] + f" {meta['edition']}"
-
+ params['name'] = params['name'] + meta['edition']
 try:
 response = requests.get(url=self.search_url, params=params)
 response = response.json()
 for each in response['data']:
 result = [each][0]['attributes']['name']
- # difference = SequenceMatcher(None, meta['clean_name'], result).ratio()
- # if difference >= 0.05:
 dupes.append(result)
 except:
 console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect')
 await asyncio.sleep(5)

- return dupes
+ return dupes
\ No newline at end of file

From ec151c98e4b75cb66c5e5c8d5916dac2f0e8955b Mon Sep 17 00:00:00 2001
From: mindset-tk
Date: Thu, 19 Oct 2023 23:30:30 -0700
Subject: [PATCH 003/741] fix genre tags including commas for MTV

--- src/trackers/MTV.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py
index d4de506d1..1b8f65cd8 100644
--- a/src/trackers/MTV.py
+++ b/src/trackers/MTV.py
@@ -340,7 +340,7 @@ async def get_origin_id(self, meta):
 async def get_tags(self, meta):
 tags = []
 # Genres
- tags.extend([x.strip().lower() for x in meta['genres'].split()])
+ tags.extend([x.strip(', ').lower() for x in meta['genres'].split()])
 # Resolution
 tags.append(meta['resolution'].lower())
 if meta['sd'] == 1:

From dc4e42108dffc6d4ef0f975e2a2eb83d2784e5e4 Mon Sep 17 00:00:00 2001
From: mindset-tk
Date: Fri, 20 Oct 2023 00:07:32 -0700
Subject: [PATCH 004/741] join two-word genres with a period

--- src/trackers/MTV.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py
index 1b8f65cd8..e3476c7ac 100644
--- a/src/trackers/MTV.py
+++ b/src/trackers/MTV.py
@@ -340,7 +340,7 @@ async def get_origin_id(self, meta):
 async def get_tags(self, meta):
 tags = []
 # Genres
- tags.extend([x.strip(', ').lower() for x in meta['genres'].split()])
+ tags.extend([x.strip(', ').lower().replace(' ', '.') for x in meta['genres'].split()])
 # Resolution
 tags.append(meta['resolution'].lower())
 if meta['sd'] == 1:

From 3d3685ae55b169986db7d0f49cb82727b5b50f09 Mon Sep 17 00:00:00 2001
From: mindset-tk
Date: Fri, 20 Oct 2023 00:11:44 -0700
Subject: [PATCH 005/741] MTV: Genre tag fix

--- src/trackers/MTV.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py
index e3476c7ac..c773f7560 100644
--- a/src/trackers/MTV.py
+++ b/src/trackers/MTV.py
@@ -340,7 +340,7 @@ async def get_origin_id(self, meta):
 async def get_tags(self, meta):
 tags = []
 # Genres
- tags.extend([x.strip(', ').lower().replace(' ', '.') for x in meta['genres'].split()])
+ tags.extend([x.strip(', ').lower().replace(' ', '.') for x in meta['genres'].split(',')])
 # Resolution
 tags.append(meta['resolution'].lower())
 if meta['sd'] == 1:

From 6e8e76c4602bb42dd44af47a7bb3006fb992d4f0 Mon Sep 17 00:00:00 2001
From: Mike
Date: Mon, 30 Oct 2023 13:30:14 -0700
Subject: [PATCH 006/741] Add files via upload

Add FNP.py for FearNoPeer tracker. 
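FNP is another UNIT3D-platform site, so the module added below follows the same upload flow as the OE and RF trackers above. A condensed sketch of that shared pattern (endpoint shape, api_token parameter, and payload fields as in the diff; the helper and base_url are illustrative):

    import requests

    # Minimal sketch of a UNIT3D-style upload, assuming a base_url such as
    # 'https://fearnopeer.com' and a prepared metadata dict.
    def unit3d_upload(base_url, api_key, torrent_path, data):
        with open(torrent_path, 'rb') as torrent:
            response = requests.post(
                url=f"{base_url}/api/torrents/upload",
                params={'api_token': api_key},  # auth goes in a query parameter
                files={'torrent': torrent},     # .torrent as a multipart file
                data=data,                      # name, category_id, type_id, ...
            )
        return response.json()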
--- src/trackers/FNP.py | 183 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 183 insertions(+) create mode 100644 src/trackers/FNP.py diff --git a/src/trackers/FNP.py b/src/trackers/FNP.py new file mode 100644 index 000000000..a01274d72 --- /dev/null +++ b/src/trackers/FNP.py @@ -0,0 +1,183 @@ +# -*- coding: utf-8 -*- +# import discord +import asyncio +import requests +import distutils.util +import os +import platform + +from src.trackers.COMMON import COMMON +from src.console import console + + +class FNP(): + """ + Edit for Tracker: + Edit BASE.torrent with announce and source + Check for duplicates + Set type/category IDs + Upload + """ + + ############################################################### + ######## EDIT ME ######## + ############################################################### + + # ALSO EDIT CLASS NAME ABOVE + + def __init__(self, config): + self.config = config + self.tracker = 'FNP' + self.source_flag = 'FnP' + self.upload_url = 'https://fearnopeer.com/api/torrents/upload' + self.search_url = 'https://fearnopeer.com/api/torrents/filter' + self.signature = 'This torrent was downloaded from FearNoPeer.' + self.banned_groups = [""] + pass + + async def get_cat_id(self, category_name): + category_id = { + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '0') + return category_id + + async def get_type_id(self, type): + type_id = { + 'DISC': '1', + 'REMUX': '2', + 'WEBDL': '4', + 'WEBRIP': '5', + 'HDTV': '6', + 'ENCODE': '3' + }.get(type, '0') + return type_id + + async def get_res_id(self, resolution): + resolution_id = { + '8640p':'10', + '4320p': '1', + '2160p': '2', + '1440p' : '3', + '1080p': '3', + '1080i':'4', + '720p': '5', + '576p': '6', + '576i': '7', + '480p': '8', + '480i': '9' + }.get(resolution, '10') + return resolution_id + + ############################################################### + ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### + ############################################################### + + async def upload(self, meta): + common = COMMON(config=self.config) + await common.edit_torrent(meta, self.tracker, self.source_flag) + cat_id = await self.get_cat_id(meta['category']) + type_id = await self.get_type_id(meta['type']) + resolution_id = await self.get_res_id(meta['resolution']) + await common.unit3d_edit_desc(meta, self.tracker, self.signature) + region_id = await common.unit3d_region_ids(meta.get('region')) + distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + anon = 0 + else: + anon = 1 + + if meta['bdinfo'] != None: + mi_dump = None + bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + else: + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + bd_dump = None + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') + files = {'torrent': open_torrent} + data = { + 'name' : meta['name'], + 'description' : desc, + 'mediainfo' : mi_dump, + 'bdinfo' : bd_dump, + 'category_id' : cat_id, + 'type_id' : type_id, + 'resolution_id' : resolution_id, + 'tmdb' : meta['tmdb'], + 'imdb' : meta['imdb_id'].replace('tt', ''), + 'tvdb' : meta['tvdb_id'], + 'mal' : meta['mal_id'], + 'igdb' : 0, + 'anonymous' : 
anon, + 'stream' : meta['stream'], + 'sd' : meta['sd'], + 'keywords' : meta['keywords'], + 'personal_release' : int(meta.get('personalrelease', False)), + 'internal' : 0, + 'featured' : 0, + 'free' : 0, + 'doubleup' : 0, + 'sticky' : 0, + } + # Internal + if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + data['internal'] = 1 + + if region_id != 0: + data['region_id'] = region_id + if distributor_id != 0: + data['distributor_id'] = distributor_id + if meta.get('category') == "TV": + data['season_number'] = meta.get('season_int', '0') + data['episode_number'] = meta.get('episode_int', '0') + headers = { + 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + } + params = { + 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + } + + if meta['debug'] == False: + response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + try: + console.print(response.json()) + except: + console.print("It may have uploaded, go check") + return + else: + console.print(f"[cyan]Request Data:") + console.print(data) + open_torrent.close() + + + + + + async def search_existing(self, meta): + dupes = [] + console.print("[yellow]Searching for existing torrents on site...") + params = { + 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId' : meta['tmdb'], + 'categories[]' : await self.get_cat_id(meta['category']), + 'types[]' : await self.get_type_id(meta['type']), + 'resolutions[]' : await self.get_res_id(meta['resolution']), + 'name' : "" + } + if meta.get('edition', "") != "": + params['name'] = params['name'] + f" {meta['edition']}" + try: + response = requests.get(url=self.search_url, params=params) + response = response.json() + for each in response['data']: + result = [each][0]['attributes']['name'] + # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() + # if difference >= 0.05: + dupes.append(result) + except: + console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') + await asyncio.sleep(5) + + return dupes \ No newline at end of file From e50c7dac4e7ccee138fa3d094002d3364341c38e Mon Sep 17 00:00:00 2001 From: Mike Date: Mon, 30 Oct 2023 13:32:36 -0700 Subject: [PATCH 007/741] add fnp tracker --- upload.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/upload.py b/upload.py index 0c7292865..4e58be733 100644 --- a/upload.py +++ b/upload.py @@ -33,6 +33,7 @@ from src.trackers.OE import OE from src.trackers.BHDTV import BHDTV from src.trackers.RTF import RTF +from src.trackers.FNP import FNP import json from pathlib import Path import asyncio @@ -246,12 +247,12 @@ async def do_the_thing(base_dir): ####### Upload to Trackers ####### #################################### common = COMMON(config=config) - api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM','LCD','LST','HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF'] + api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM','LCD','LST','HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'FNP'] http_trackers = ['HDB', 'TTG', 'FL', 'PTER', 'HDT', 'MTV'] tracker_class_map = { 'BLU' : BLU, 'BHD': BHD, 'AITHER' : AITHER, 'STC' : STC, 'R4E' : R4E, 'THR' : THR, 'STT' : STT, 'HP' : HP, 'PTP' : PTP, 'RF' : RF, 'SN' : SN, 'ACM' : ACM, 'HDB' : HDB, 'LCD': LCD, 'TTG' : TTG, 'LST' : LST, 'HUNO': HUNO, 'FL' : FL, 'LT' : LT, 'NBL' : NBL, 'ANT' : ANT, 'PTER': PTER, 'JPTV' : JPTV, - 'TL' : TL, 'TDC' : TDC, 'HDT' : HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF':RTF} + 'TL' : TL, 'TDC' : TDC, 'HDT' : HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF': RTF, 'FNP': FNP} for tracker in trackers: if meta['name'].endswith('DUPE?'): From 89ec42dad04dfd1099de2442baa60157c9c0b3da Mon Sep 17 00:00:00 2001 From: Mike Date: Mon, 30 Oct 2023 13:35:34 -0700 Subject: [PATCH 008/741] Update example-config.py to include FNP --- data/example-config.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/data/example-config.py b/data/example-config.py index c1b37e8dc..26f63d260 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -188,6 +188,11 @@ "announce_url" : "https://reelflix.xyz/announce/customannounceurl", # "anon" : False }, + "FNP" :{ + "api_key" : "FNP api key", + "announce_url" : "https://fearnopeer.com/announce/customannounceurl", + # "anon" : "False" + }, "MANUAL" : { # Uncomment and replace link with filebrowser (https://github.com/filebrowser/filebrowser) link to the Upload-Assistant directory, this will link to your filebrowser instead of uploading to uguu.se # "filebrowser" : "https://domain.tld/filebrowser/files/Upload-Assistant/" From de9a9f416210992e03428a6acfdf05f1f10b2686 Mon Sep 17 00:00:00 2001 From: edge20200 <126633394+edge20200@users.noreply.github.com> Date: Tue, 31 Oct 2023 23:22:05 -0400 Subject: [PATCH 009/741] Add TFM New Tracker --- data/example-config.py | 5 ++ src/trackers/TFM.py | 183 +++++++++++++++++++++++++++++++++++++++++ upload.py | 3 +- 3 files changed, 190 insertions(+), 1 deletion(-) create mode 100644 src/trackers/TFM.py diff --git a/data/example-config.py b/data/example-config.py index c1b37e8dc..500a76d88 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -188,6 +188,11 @@ "announce_url" : "https://reelflix.xyz/announce/customannounceurl", # "anon" : False }, + "TFM" : { + "api_key" : "TFM api key", + "announce_url" : "https://toonsfor.me/announce/customannounceurl", + # "anon" : False + }, "MANUAL" : { # Uncomment and 
replace link with filebrowser (https://github.com/filebrowser/filebrowser) link to the Upload-Assistant directory, this will link to your filebrowser instead of uploading to uguu.se # "filebrowser" : "https://domain.tld/filebrowser/files/Upload-Assistant/" diff --git a/src/trackers/TFM.py b/src/trackers/TFM.py new file mode 100644 index 000000000..12b5bb8a2 --- /dev/null +++ b/src/trackers/TFM.py @@ -0,0 +1,183 @@ +# -*- coding: utf-8 -*- +# import discord +import asyncio +import requests +import distutils.util +import os +import platform + +from src.trackers.COMMON import COMMON +from src.console import console + + +class TFM(): + """ + Edit for Tracker: + Edit BASE.torrent with announce and source + Check for duplicates + Set type/category IDs + Upload + """ + + ############################################################### + ######## EDIT ME ######## + ############################################################### + + # ALSO EDIT CLASS NAME ABOVE + + def __init__(self, config): + self.config = config + self.tracker = 'Abbreviated' + self.source_flag = 'Source flag for .torrent' + self.upload_url = 'https://toonsfor.me/api/torrents/upload' + self.search_url = 'https://toonsfor.me/api/torrents/filter' + self.signature = f"\n[center][url=https://github.com/L4GSP1KE/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" + self.banned_groups = [""] + pass + + async def get_cat_id(self, category_name): + category_id = { + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '0') + return category_id + + async def get_type_id(self, type): + type_id = { + 'DISC': '1', + 'REMUX': '2', + 'WEBDL': '4', + 'WEBRIP': '5', + 'HDTV': '6', + 'ENCODE': '3' + }.get(type, '0') + return type_id + + async def get_res_id(self, resolution): + resolution_id = { + '8640p':'10', + '4320p': '1', + '2160p': '2', + '1440p' : '3', + '1080p': '3', + '1080i':'4', + '720p': '5', + '576p': '6', + '576i': '7', + '480p': '8', + '480i': '9' + }.get(resolution, '10') + return resolution_id + + ############################################################### + ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### + ############################################################### + + async def upload(self, meta): + common = COMMON(config=self.config) + await common.edit_torrent(meta, self.tracker, self.source_flag) + cat_id = await self.get_cat_id(meta['category']) + type_id = await self.get_type_id(meta['type']) + resolution_id = await self.get_res_id(meta['resolution']) + await common.unit3d_edit_desc(meta, self.tracker, self.signature) + region_id = await common.unit3d_region_ids(meta.get('region')) + distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + anon = 0 + else: + anon = 1 + + if meta['bdinfo'] != None: + mi_dump = None + bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + else: + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + bd_dump = None + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') + files = {'torrent': open_torrent} + data = { + 'name' : meta['name'], + 'description' : desc, + 'mediainfo' : mi_dump, + 'bdinfo' : bd_dump, + 'category_id' : cat_id, + 'type_id' : 
type_id, + 'resolution_id' : resolution_id, + 'tmdb' : meta['tmdb'], + 'imdb' : meta['imdb_id'].replace('tt', ''), + 'tvdb' : meta['tvdb_id'], + 'mal' : meta['mal_id'], + 'igdb' : 0, + 'anonymous' : anon, + 'stream' : meta['stream'], + 'sd' : meta['sd'], + 'keywords' : meta['keywords'], + 'personal_release' : int(meta.get('personalrelease', False)), + 'internal' : 0, + 'featured' : 0, + 'free' : 0, + 'doubleup' : 0, + 'sticky' : 0, + } + # Internal + if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + data['internal'] = 1 + + if region_id != 0: + data['region_id'] = region_id + if distributor_id != 0: + data['distributor_id'] = distributor_id + if meta.get('category') == "TV": + data['season_number'] = meta.get('season_int', '0') + data['episode_number'] = meta.get('episode_int', '0') + headers = { + 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + } + params = { + 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + } + + if meta['debug'] == False: + response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + try: + console.print(response.json()) + except: + console.print("It may have uploaded, go check") + return + else: + console.print(f"[cyan]Request Data:") + console.print(data) + open_torrent.close() + + + + + + async def search_existing(self, meta): + dupes = [] + console.print("[yellow]Searching for existing torrents on site...") + params = { + 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId' : meta['tmdb'], + 'categories[]' : await self.get_cat_id(meta['category']), + 'types[]' : await self.get_type_id(meta['type']), + 'resolutions[]' : await self.get_res_id(meta['resolution']), + 'name' : "" + } + if meta.get('edition', "") != "": + params['name'] = params['name'] + f" {meta['edition']}" + try: + response = requests.get(url=self.search_url, params=params) + response = response.json() + for each in response['data']: + result = [each][0]['attributes']['name'] + # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() + # if difference >= 0.05: + dupes.append(result) + except: + console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect')
 await asyncio.sleep(5)

 return dupes
\ No newline at end of file

diff --git a/upload.py b/upload.py
index 0c7292865..a2956a001 100644
--- a/upload.py
+++ b/upload.py
@@ -33,6 +33,7 @@
 from src.trackers.OE import OE
 from src.trackers.BHDTV import BHDTV
 from src.trackers.RTF import RTF
+from src.trackers.TFM import TFM
 import json
 from pathlib import Path
 import asyncio
@@ -251,7 +252,7 @@ async def do_the_thing(base_dir):
 tracker_class_map = { 'BLU' : BLU, 'BHD': BHD, 'AITHER' : AITHER, 'STC' : STC, 'R4E' : R4E, 'THR' : THR, 'STT' : STT, 'HP' : HP, 'PTP' : PTP, 'RF' : RF, 'SN' : SN,
 'ACM' : ACM, 'HDB' : HDB, 'LCD': LCD, 'TTG' : TTG, 'LST' : LST, 'HUNO': HUNO, 'FL' : FL, 'LT' : LT, 'NBL' : NBL, 'ANT' : ANT, 'PTER': PTER, 'JPTV' : JPTV,
- 'TL' : TL, 'TDC' : TDC, 'HDT' : HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF':RTF}
+ 'TL' : TL, 'TDC' : TDC, 'HDT' : HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF':RTF, 'TFM': TFM}
 for tracker in trackers:

From a9addec697080bf3979a4f49cef805ec90666efe Mon Sep 17 00:00:00 2001
From: swannie-eire <57441681+swannie-eire@users.noreply.github.com>
Date: Thu, 9 Nov 2023 15:23:34 +0000
Subject: [PATCH 010/741] Update RTF.py

Will now auto-generate an API key if you have provided a username and password, and will add the torrent link to the .torrent file as a comment

--- src/trackers/RTF.py | 43 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 41 insertions(+), 2 deletions(-)

diff --git a/src/trackers/RTF.py b/src/trackers/RTF.py
index d67dc9bff..8ef181d6b 100644
--- a/src/trackers/RTF.py
+++ b/src/trackers/RTF.py
@@ -5,6 +5,7 @@
 import base64
 import re
 import datetime
+import json
 from src.trackers.COMMON import COMMON
 from src.console import console
@@ -86,6 +87,10 @@ async def upload(self, meta):
 response = requests.post(url=self.upload_url, json=json_data, headers=headers)
 try:
 console.print(response.json())
+
+ t_id = response.json()['torrent']['id']
+ await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://retroflix.club/browse/t/" + str(t_id))
+
 except:
 console.print("It may have uploaded, go check")
 return
@@ -106,7 +111,6 @@ async def search_existing(self, meta):
 'includingDead' : '1'
 }

- # search is intentionally vague and just uses IMDB if available as many releases are not named properly on site.
 if meta['imdb_id'] != "0":
 params['imdbId'] = meta['imdb_id'] if str(meta['imdb_id']).startswith("tt") else "tt" + meta['imdb_id']
 else:
@@ -122,4 +126,39 @@ async def search_existing(self, meta):
 console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect')
 await asyncio.sleep(5)

- return dupes
\ No newline at end of file
+ return dupes
+
+ # Tests whether the API key is still valid; the site's API keys expire every week, so a new one has to be generated. 
+ async def api_test(self, meta):
+ headers = {
+ 'accept': 'application/json',
+ 'Authorization': self.config['TRACKERS'][self.tracker]['api_key'].strip(),
+ }
+
+ response = requests.get('https://retroflix.club/api/test', headers=headers)
+
+ if response.status_code != 200:
+ console.print('[bold red]Your API key is invalid, so generating a new one')
+ await self.generate_new_api(meta)
+ else:
+ return
+
+ async def generate_new_api(self, meta):
+ headers = {
+ 'accept': 'application/json',
+ }
+
+ json_data = {
+ 'username': self.config['TRACKERS'][self.tracker]['username'],
+ 'password': self.config['TRACKERS'][self.tracker]['password'],
+ }
+
+ response = requests.post('https://retroflix.club/api/login', headers=headers, json=json_data)
+
+ if response.status_code == 201:
+ console.print('[bold green]Using new API key generated for this upload')
+ console.print(f'[bold green]Please update your L4G config with the RTF API key below for future uploads')
+ console.print(f'[bold yellow]{response.json()["token"]}')
+ self.config['TRACKERS'][self.tracker]['api_key'] = response.json()["token"]
+ else:
+ console.print(f'[bold red]Error getting new API key: got error code {response.status_code}. Please check the username and password in your config')

From 9139de14adc39b038742bd0ae1dcc019ff45bf4c Mon Sep 17 00:00:00 2001
From: swannie-eire <57441681+swannie-eire@users.noreply.github.com>
Date: Thu, 9 Nov 2023 15:36:07 +0000
Subject: [PATCH 011/741] Update upload.py

--- upload.py | 2 ++ 1 file changed, 2 insertions(+)

diff --git a/upload.py b/upload.py
index 0c7292865..4de06b843 100644
--- a/upload.py
+++ b/upload.py
@@ -272,6 +272,8 @@ async def do_the_thing(base_dir):
 console.print(f"Uploading to {tracker_class.tracker}")
 if check_banned_group(tracker_class.tracker, tracker_class.banned_groups, meta):
 continue
+ if tracker == "RTF":
+ await tracker_class.api_test(meta)
 dupes = await tracker_class.search_existing(meta)
 dupes = await common.filter_dupes(dupes, meta)
 # note BHDTV does not have search implemented. 
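Taken together, the two RTF patches above make the assistant probe the key before uploading and mint a fresh token when the site has expired it. A condensed sketch of that round trip (URLs, status codes, and field names from the diff; config plumbing simplified):

    import requests

    def ensure_rtf_key(api_key, username, password):
        # RetroFlix API keys expire weekly, so check /api/test first.
        test = requests.get('https://retroflix.club/api/test',
                            headers={'accept': 'application/json', 'Authorization': api_key})
        if test.status_code == 200:
            return api_key
        # Key rejected: log in via /api/login, which returns 201 plus a token.
        login = requests.post('https://retroflix.club/api/login',
                              headers={'accept': 'application/json'},
                              json={'username': username, 'password': password})
        if login.status_code != 201:
            raise RuntimeError(f"RTF login failed with status {login.status_code}")
        return login.json()['token']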
From 5c07361a98a25fdd6c57344f0e245b5fe2e44581 Mon Sep 17 00:00:00 2001
From: swannie-eire <57441681+swannie-eire@users.noreply.github.com>
Date: Thu, 9 Nov 2023 15:41:11 +0000
Subject: [PATCH 012/741] Update example-config.py

Update config for RTF username and password

--- data/example-config.py | 2 ++ 1 file changed, 2 insertions(+)

diff --git a/data/example-config.py b/data/example-config.py
index c1b37e8dc..cd4d4f07d 100644
--- a/data/example-config.py
+++ b/data/example-config.py
@@ -178,6 +178,8 @@
 # "anon" : False
 },
 "RTF": {
+ "username" : "username",
+ "password" : "password",
 "api_key": 'get_it_by_running_/api/ login command from https://retroflix.club/api/doc',
 "announce_url": "get from upload page",
 # "tag": "RetroFlix, nd",

From 27be98f20c74d11cae575086a58d5ebe9afdf894 Mon Sep 17 00:00:00 2001
From: edge20200 <126633394+edge20200@users.noreply.github.com>
Date: Mon, 29 Jan 2024 22:34:35 -0500
Subject: [PATCH 013/741] Fix broken API

Fix the placeholder TFM tracker and source flags and add TFM to the API tracker list

--- src/trackers/TFM.py | 4 ++-- upload.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/trackers/TFM.py b/src/trackers/TFM.py
index 12b5bb8a2..a797d5eb2 100644
--- a/src/trackers/TFM.py
+++ b/src/trackers/TFM.py
@@ -27,8 +27,8 @@ class TFM():
 def __init__(self, config):
 self.config = config
- self.tracker = 'Abbreviated'
- self.source_flag = 'Source flag for .torrent'
+ self.tracker = 'TFM'
+ self.source_flag = 'TFM'
 self.upload_url = 'https://toonsfor.me/api/torrents/upload'
 self.search_url = 'https://toonsfor.me/api/torrents/filter'
 self.signature = f"\n[center][url=https://github.com/L4GSP1KE/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]"
diff --git a/upload.py b/upload.py
index a2956a001..d0000432b 100644
--- a/upload.py
+++ b/upload.py
@@ -247,7 +247,7 @@ async def do_the_thing(base_dir):
 ####### Upload to Trackers #######
 ####################################
 common = COMMON(config=config)
- api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM','LCD','LST','HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF']
+ api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM','LCD','LST','HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'TFM']
 http_trackers = ['HDB', 'TTG', 'FL', 'PTER', 'HDT', 'MTV']
 tracker_class_map = { 'BLU' : BLU, 'BHD': BHD, 'AITHER' : AITHER, 'STC' : STC, 'R4E' : R4E, 'THR' : THR, 'STT' : STT, 'HP' : HP, 'PTP' : PTP, 'RF' : RF, 'SN' : SN,
 'ACM' : ACM, 'HDB' : HDB, 'LCD': LCD, 'TTG' : TTG, 'LST' : LST, 'HUNO': HUNO, 'FL' : FL, 'LT' : LT, 'NBL' : NBL, 'ANT' : ANT, 'PTER': PTER, 'JPTV' : JPTV,
 'TL' : TL, 'TDC' : TDC, 'HDT' : HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF':RTF, 'TFM': TFM}
 for tracker in trackers:

From b89d90f3888366c950d76c4eff9b68cb9611eebc Mon Sep 17 00:00:00 2001
From: sirius-sama
Date: Sat, 2 Mar 2024 13:54:57 +0600
Subject: [PATCH 014/741] Removing distutils, adding str2bool as a replacement

--- requirements.txt | 3 ++- src/prep.py | 4 ++-- src/trackers/ACM.py | 4 ++-- src/trackers/AITHER.py | 4 ++-- src/trackers/ANT.py | 4 ++-- src/trackers/BHD.py | 6 +++--- src/trackers/BHDTV.py | 4 ++-- src/trackers/BLU.py | 4 ++-- src/trackers/FL.py | 4 ++-- src/trackers/HDT.py | 4 ++-- src/trackers/HP.py | 4 ++-- src/trackers/HUNO.py | 4 ++-- src/trackers/JPTV.py | 4 ++-- src/trackers/LCD.py | 4 ++-- src/trackers/LST.py | 4 ++-- src/trackers/LT.py | 4 ++-- src/trackers/MTV.py | 4 ++-- src/trackers/NBL.py | 2 +- src/trackers/OE.py | 4 ++-- src/trackers/PTER.py | 4 ++-- src/trackers/PTP.py | 4 ++-- src/trackers/R4E.py | 4 ++-- src/trackers/RF.py | 4 ++-- src/trackers/STC.py | 4 ++-- src/trackers/STT.py | 4 ++-- src/trackers/TDC.py | 4 ++-- src/trackers/TTG.py | 4 ++-- src/trackers/UNIT3D_TEMPLATE.py | 4 ++-- 28 files changed, 56 insertions(+), 55 deletions(-)

diff --git 
a/requirements.txt b/requirements.txt index 19e7c5038..b20352a43 100644 --- a/requirements.txt +++ b/requirements.txt @@ -18,4 +18,5 @@ beautifulsoup4 pyoxipng rich Jinja2 -pyotp \ No newline at end of file +pyotp +str2bool \ No newline at end of file diff --git a/src/prep.py b/src/prep.py index 7bbbd970a..74ea7638d 100644 --- a/src/prep.py +++ b/src/prep.py @@ -17,8 +17,8 @@ import re import math import sys - import distutils.util import asyncio + from str2bool import str2bool from guessit import guessit import ntpath from pathlib import Path @@ -2772,7 +2772,7 @@ async def tag_override(self, meta): else: pass elif key == 'personalrelease': - meta[key] = bool(distutils.util.strtobool(str(value.get(key, 'False')))) + meta[key] = bool(str2bool(str(value.get(key, 'False')))) elif key == 'template': meta['desc_template'] = value.get(key) else: diff --git a/src/trackers/ACM.py b/src/trackers/ACM.py index 270fd25b3..4f75e8522 100644 --- a/src/trackers/ACM.py +++ b/src/trackers/ACM.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests -import distutils.util import os import platform +from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -207,7 +207,7 @@ async def upload(self, meta): region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) acm_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 89777724a..2911aeaa4 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -3,7 +3,7 @@ import asyncio import requests from difflib import SequenceMatcher -import distutils.util +from str2bool import str2bool import json import os import platform @@ -37,7 +37,7 @@ async def upload(self, meta): type_id = await self.get_type_id(meta['type']) resolution_id = await self.get_res_id(meta['resolution']) name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index 0bd5c40b8..ed9289c8f 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -3,8 +3,8 @@ import os import asyncio import requests -import distutils.util import platform +from str2bool import str2bool from pymediainfo import MediaInfo from src.trackers.COMMON import COMMON @@ -68,7 +68,7 @@ async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) flags = await self.get_flags(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index d6ce9bca1..d9e73acdf 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -3,7 +3,7 @@ import asyncio import requests from difflib import SequenceMatcher -import distutils.util +from 
str2bool import str2bool import urllib import os import platform @@ -39,7 +39,7 @@ async def upload(self, meta): tags = await self.get_tags(meta) custom, edition = await self.get_edition(meta, tags) bhd_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 @@ -263,7 +263,7 @@ async def search_existing(self, meta): async def get_live(self, meta): draft = self.config['TRACKERS'][self.tracker]['draft_default'].strip() - draft = bool(distutils.util.strtobool(str(draft))) #0 for send to draft, 1 for live + draft = bool(str2bool(str(draft))) #0 for send to draft, 1 for live if draft: draft_int = 0 else: diff --git a/src/trackers/BHDTV.py b/src/trackers/BHDTV.py index 97d0e1c8e..ea6f911c1 100644 --- a/src/trackers/BHDTV.py +++ b/src/trackers/BHDTV.py @@ -4,7 +4,7 @@ from torf import Torrent import requests from src.console import console -import distutils.util +from str2bool import str2bool from pprint import pprint import os import traceback @@ -54,7 +54,7 @@ async def upload(self, meta): # region_id = await common.unit3d_region_ids(meta.get('region')) # distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) if meta['anon'] == 0 and bool( - distutils.util.strtobool(self.config['TRACKERS'][self.tracker].get('anon', "False"))) == False: + str2bool(self.config['TRACKERS'][self.tracker].get('anon', "False"))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/BLU.py b/src/trackers/BLU.py index 33e03975b..1e302eff2 100644 --- a/src/trackers/BLU.py +++ b/src/trackers/BLU.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests -import distutils.util import os import platform +from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -49,7 +49,7 @@ async def upload(self, meta): resolution_id = await self.get_res_id(meta['resolution']) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/FL.py b/src/trackers/FL.py index 06cd4bb0b..8dc75071f 100644 --- a/src/trackers/FL.py +++ b/src/trackers/FL.py @@ -3,7 +3,7 @@ import re import os from pathlib import Path -import distutils.util +from str2bool import str2bool import json import glob import pickle @@ -161,7 +161,7 @@ async def upload(self, meta): if int(meta.get('imdb_id', '').replace('tt', '')) != 0: data['imdbid'] = meta.get('imdb_id', '').replace('tt', '') data['description'] = meta['imdb_info'].get('genres', '') - if self.uploader_name not in ("", None) and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if self.uploader_name not in ("", None) and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: data['epenis'] = self.uploader_name if has_ro_audio: data['materialro'] = 'on' diff --git a/src/trackers/HDT.py b/src/trackers/HDT.py index 6b9fa0320..63f7fa6f8 100644 --- a/src/trackers/HDT.py +++ b/src/trackers/HDT.py @@ -6,8 +6,8 @@ import 
glob import cli_ui import pickle -import distutils from pathlib import Path +from str2bool import str2bool from bs4 import BeautifulSoup from unidecode import unidecode from pymediainfo import MediaInfo @@ -173,7 +173,7 @@ async def upload(self, meta): data['season'] = 'false' # Anonymous check - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: data['anonymous'] = 'false' else: data['anonymous'] = 'true' diff --git a/src/trackers/HP.py b/src/trackers/HP.py index 250e9e851..7c11e0744 100644 --- a/src/trackers/HP.py +++ b/src/trackers/HP.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests -import distutils.util import os import platform +from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -78,7 +78,7 @@ async def upload(self, meta): await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index 0bd8c746d..e6015b7ea 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -3,7 +3,7 @@ import asyncio import requests from difflib import SequenceMatcher -import distutils.util +from str2bool import str2bool import os import re import platform @@ -37,7 +37,7 @@ async def upload(self, meta): cat_id = await self.get_cat_id(meta['category']) type_id = await self.get_type_id(meta) resolution_id = await self.get_res_id(meta['resolution']) - if meta['anon'] == 0 and bool(distutils.util.strtobool(self.config['TRACKERS']['HUNO'].get('anon', "False"))) == False: + if meta['anon'] == 0 and bool(str2bool(self.config['TRACKERS']['HUNO'].get('anon', "False"))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/JPTV.py b/src/trackers/JPTV.py index 354b1be1a..06253aebe 100644 --- a/src/trackers/JPTV.py +++ b/src/trackers/JPTV.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests -import distutils.util import os import platform +from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -92,7 +92,7 @@ async def upload(self, meta): region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) jptv_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/LCD.py b/src/trackers/LCD.py index 5c3f14309..a457a601a 100644 --- a/src/trackers/LCD.py +++ b/src/trackers/LCD.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests -import distutils.util import os import platform +from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -40,7 +40,7 @@ async def upload(self, meta): region_id = await 
common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/LST.py b/src/trackers/LST.py index 21368bd39..37e1db0e0 100644 --- a/src/trackers/LST.py +++ b/src/trackers/LST.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests -import distutils.util import os import platform +from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -87,7 +87,7 @@ async def upload(self, meta): await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/LT.py b/src/trackers/LT.py index 2e06a0df2..4bc6088b1 100644 --- a/src/trackers/LT.py +++ b/src/trackers/LT.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests -import distutils.util import os import platform +from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -88,7 +88,7 @@ async def upload(self, meta): region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) lt_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index d4de506d1..b1ded5e34 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -8,8 +8,8 @@ import cli_ui import pickle import re -import distutils.util from pathlib import Path +from str2bool import str2bool from src.trackers.COMMON import COMMON class MTV(): @@ -77,7 +77,7 @@ async def upload(self, meta): mtv_name = await self.edit_name(meta) # anon - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/NBL.py b/src/trackers/NBL.py index 489c21902..03c3db84f 100644 --- a/src/trackers/NBL.py +++ b/src/trackers/NBL.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests -import distutils.util import os from guessit import guessit +from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console diff --git a/src/trackers/OE.py b/src/trackers/OE.py index bb69a3e02..332fc6d7f 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -3,7 +3,7 @@ import asyncio import requests from difflib import SequenceMatcher -import distutils.util +from str2bool import str2bool import json import os import platform @@ -37,7 
+37,7 @@ async def upload(self, meta): type_id = await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('video_codec'), meta.get('category', "")) resolution_id = await self.get_res_id(meta['resolution']) oe_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/PTER.py b/src/trackers/PTER.py index b9fcecfa0..818f2779b 100644 --- a/src/trackers/PTER.py +++ b/src/trackers/PTER.py @@ -7,7 +7,7 @@ import traceback import json import glob -import distutils.util +from str2bool import str2bool import cli_ui import pickle from unidecode import unidecode @@ -288,7 +288,7 @@ async def pterimg_upload(self, meta): return image_list async def get_anon(self, anon): - if anon == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if anon == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 'no' else: anon = 'yes' diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 01ec975fd..b452b46e7 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -2,9 +2,9 @@ import requests import asyncio import re -import distutils.util import os from pathlib import Path +from str2bool import str2bool import time import traceback import json @@ -33,7 +33,7 @@ def __init__(self, config): self.announce_url = config['TRACKERS']['PTP'].get('announce_url', '').strip() self.username = config['TRACKERS']['PTP'].get('username', '').strip() self.password = config['TRACKERS']['PTP'].get('password', '').strip() - self.web_source = distutils.util.strtobool(str(config['TRACKERS']['PTP'].get('add_web_source_to_desc', True))) + self.web_source = str2bool(str(config['TRACKERS']['PTP'].get('add_web_source_to_desc', True))) self.user_agent = f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' self.banned_groups = ['aXXo', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'DNL', 'FaNGDiNG0', 'HD2DVD', 'HDTime', 'ION10', 'iPlanet', 'KiNGDOM', 'mHD', 'mSD', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'OFT', 'PRODJi', 'SANTi', 'STUTTERSHIT', 'ViSION', 'VXT', 'WAF', 'd3g', 'x0r', 'YIFY', 'BMDru'] diff --git a/src/trackers/R4E.py b/src/trackers/R4E.py index 67d33c997..0528c10cc 100644 --- a/src/trackers/R4E.py +++ b/src/trackers/R4E.py @@ -3,7 +3,7 @@ import asyncio import requests from difflib import SequenceMatcher -import distutils.util +from str2bool import str2bool import json import tmdbsimple as tmdb import os @@ -36,7 +36,7 @@ async def upload(self, meta): type_id = await self.get_type_id(meta['resolution']) await common.unit3d_edit_desc(meta, self.tracker, self.signature) name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS']['R4E'].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS']['R4E'].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/RF.py b/src/trackers/RF.py index ca94837b9..aa108340d 100644 --- a/src/trackers/RF.py +++ b/src/trackers/RF.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests -import distutils.util import os import platform +from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -41,7 +41,7 @@ async def 
upload(self, meta): type_id = await self.get_type_id(meta['type']) resolution_id = await self.get_res_id(meta['resolution']) stt_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/STC.py b/src/trackers/STC.py index 224e89889..71d70ce2f 100644 --- a/src/trackers/STC.py +++ b/src/trackers/STC.py @@ -2,7 +2,7 @@ import asyncio import requests from difflib import SequenceMatcher -import distutils.util +from str2bool import str2bool import json import os import platform @@ -36,7 +36,7 @@ async def upload(self, meta): type_id = await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('sd', 0), meta.get('category', "")) resolution_id = await self.get_res_id(meta['resolution']) stc_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/STT.py b/src/trackers/STT.py index 0a72f7eab..fdeed9e88 100644 --- a/src/trackers/STT.py +++ b/src/trackers/STT.py @@ -3,7 +3,7 @@ import asyncio import requests from difflib import SequenceMatcher -import distutils.util +from str2bool import str2bool import json import os import platform @@ -37,7 +37,7 @@ async def upload(self, meta): type_id = await self.get_type_id(meta['type']) resolution_id = await self.get_res_id(meta['resolution']) stt_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/TDC.py b/src/trackers/TDC.py index e201bcb83..b22ec6cd7 100644 --- a/src/trackers/TDC.py +++ b/src/trackers/TDC.py @@ -2,8 +2,8 @@ # import discord import asyncio import requests -import distutils.util import os +from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -77,7 +77,7 @@ async def upload(self, meta): await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/TTG.py b/src/trackers/TTG.py index 491a3bacc..21a1d12bd 100644 --- a/src/trackers/TTG.py +++ b/src/trackers/TTG.py @@ -7,8 +7,8 @@ from pathlib import Path import traceback import json -import distutils.util import cli_ui +from str2bool import str2bool from unidecode import unidecode from urllib.parse import urlparse, quote from src.trackers.COMMON import COMMON @@ -104,7 +104,7 @@ async def get_type_id(self, meta): return type_id async def get_anon(self, anon): - if anon == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: 
+ if anon == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 'no' else: anon = 'yes' diff --git a/src/trackers/UNIT3D_TEMPLATE.py b/src/trackers/UNIT3D_TEMPLATE.py index 405e2c9f1..c77e758a7 100644 --- a/src/trackers/UNIT3D_TEMPLATE.py +++ b/src/trackers/UNIT3D_TEMPLATE.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests -import distutils.util import os import platform +from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -82,7 +82,7 @@ async def upload(self, meta): await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 From 289debbf9a2f0003b863ac0a5421ee3f75264373 Mon Sep 17 00:00:00 2001 From: sirius-sama Date: Sat, 2 Mar 2024 14:51:13 +0600 Subject: [PATCH 015/741] Wrapping regex patterns in raw string literals --- src/prep.py | 31 +++++++++++++++---------------- src/trackers/FL.py | 4 ++-- src/trackers/HDB.py | 4 ++-- src/trackers/HDT.py | 2 +- src/trackers/MTV.py | 2 +- src/trackers/PTER.py | 2 +- src/trackers/RTF.py | 2 +- src/trackers/THR.py | 2 +- src/trackers/TTG.py | 2 +- 9 files changed, 25 insertions(+), 26 deletions(-) diff --git a/src/prep.py b/src/prep.py index 74ea7638d..a46fa14ec 100644 --- a/src/prep.py +++ b/src/prep.py @@ -97,7 +97,7 @@ async def gather_prep(self, meta, mode): meta['filelist'] = [] try: guess_name = bdinfo['title'].replace('-',' ') - filename = guessit(re.sub("[^0-9a-zA-Z\[\]]+", " ", guess_name), {"excludes" : ["country", "language"]})['title'] + filename = guessit(re.sub(r"[^0-9a-zA-Z\[\]]+", " ", guess_name), {"excludes" : ["country", "language"]})['title'] untouched_filename = bdinfo['title'] try: meta['search_year'] = guessit(bdinfo['title'])['year'] except Exception: meta['search_year'] = "" except Exception: guess_name = bdinfo['label'].replace('-',' ') - filename = guessit(re.sub("[^0-9a-zA-Z\[\]]+", " ", guess_name), {"excludes" : ["country", "language"]})['title'] + filename = guessit(re.sub(r"[^0-9a-zA-Z\[\]]+", " ", guess_name), {"excludes" : ["country", "language"]})['title'] untouched_filename = bdinfo['label'] try: meta['search_year'] = guessit(bdinfo['label'])['year'] except Exception: meta['search_year'] = "" @@ -164,7 +164,7 @@ async def gather_prep(self, meta, mode): videopath, meta['filelist'] = self.get_video(videoloc, meta.get('mode', 'discord')) video, meta['scene'], meta['imdb'] = self.is_scene(videopath, meta.get('imdb', None)) guess_name = ntpath.basename(video).replace('-',' ') - filename = guessit(re.sub("[^0-9a-zA-Z\[\]]+", " ", guess_name), {"excludes" : ["country", "language"]}).get("title", guessit(re.sub("[^0-9a-zA-Z]+", " ", guess_name), {"excludes" : ["country", "language"]})["title"]) + filename = guessit(re.sub(r"[^0-9a-zA-Z\[\]]+", " ", guess_name), {"excludes" : ["country", "language"]}).get("title", guessit(re.sub(r"[^0-9a-zA-Z]+", " ", guess_name), {"excludes" : ["country", "language"]})["title"]) untouched_filename = os.path.basename(video) try: meta['search_year'] = guessit(video)['year'] except Exception: meta['search_year'] = "" @@ -359,8 +359,8 @@ async def gather_prep(self, meta, mode): meta['edition'],
meta['repack'] = self.get_edition(meta['path'], bdinfo, meta['filelist'], meta.get('manual_edition')) if "REPACK" in meta.get('edition', ""): - meta['repack'] = re.search("REPACK[\d]?", meta['edition'])[0] - meta['edition'] = re.sub("REPACK[\d]?", "", meta['edition']).strip().replace(' ', ' ') + meta['repack'] = re.search(r"REPACK[\d]?", meta['edition'])[0] + meta['edition'] = re.sub(r"REPACK[\d]?", "", meta['edition']).strip().replace(' ', ' ') @@ -1369,7 +1369,7 @@ def get_romaji(self, tmdb_name, mal): result = {'title' : {}} difference = 0 for anime in media: - search_name = re.sub("[^0-9a-zA-Z\[\]]+", "", tmdb_name.lower().replace(' ', '')) + search_name = re.sub(r"[^0-9a-zA-Z\[\]]+", "", tmdb_name.lower().replace(' ', '')) for title in anime['title'].values(): if title != None: title = re.sub(u'[\u3000-\u303f\u3040-\u309f\u30a0-\u30ff\uff00-\uff9f\u4e00-\u9faf\u3400-\u4dbf]+ (?=[A-Za-z ]+–)', "", title.lower().replace(' ', ''), re.U) @@ -1906,7 +1906,7 @@ def get_edition(self, video, bdinfo, filelist, manual_edition): repack = "RERIP" # if "HYBRID" in video.upper() and "HYBRID" not in title.upper(): # edition = "Hybrid " + edition - edition = re.sub("(REPACK\d?)?(RERIP)?(PROPER)?", "", edition, flags=re.IGNORECASE).strip() + edition = re.sub(r"(REPACK\d?)?(RERIP)?(PROPER)?", "", edition, flags=re.IGNORECASE).strip() bad = ['internal', 'limited', 'retail'] if edition.lower() in bad: @@ -2010,7 +2010,7 @@ def torf_cb(self, torrent, filepath, pieces_done, pieces_total): cli_ui.info_progress("Hashing...", pieces_done, pieces_total) def create_random_torrents(self, base_dir, uuid, num, path): - manual_name = re.sub("[^0-9a-zA-Z\[\]\'\-]+", ".", os.path.basename(path)) + manual_name = re.sub(r"[^0-9a-zA-Z\[\]\'\-]+", ".", os.path.basename(path)) base_torrent = Torrent.read(f"{base_dir}/tmp/{uuid}/BASE.torrent") for i in range(1, int(num) + 1): new_torrent = base_torrent @@ -2020,7 +2020,6 @@ def create_random_torrents(self, base_dir, uuid, num, path): def create_base_from_existing_torrent(self, torrentpath, base_dir, uuid): if os.path.exists(torrentpath): base_torrent = Torrent.read(torrentpath) - base_torrent.creation_date = datetime.now() base_torrent.trackers = ['https://fake.tracker'] base_torrent.comment = "Created by L4G's Upload Assistant" base_torrent.created_by = "Created by L4G's Upload Assistant" @@ -2491,8 +2490,8 @@ async def get_season_episode(self, video, meta): for lang, names in values.items(): if lang == "jp": for name in names: - romaji_check = re.sub("[^0-9a-zA-Z\[\]]+", "", romaji.lower().replace(' ', '')) - name_check = re.sub("[^0-9a-zA-Z\[\]]+", "", name.lower().replace(' ', '')) + romaji_check = re.sub(r"[^0-9a-zA-Z\[\]]+", "", romaji.lower().replace(' ', '')) + name_check = re.sub(r"[^0-9a-zA-Z\[\]]+", "", name.lower().replace(' ', '')) diff = SequenceMatcher(None, romaji_check, name_check).ratio() if romaji_check in name_check: if diff >= difference: @@ -2505,8 +2504,8 @@ async def get_season_episode(self, video, meta): difference = diff if lang == "us": for name in names: - eng_check = re.sub("[^0-9a-zA-Z\[\]]+", "", eng_title.lower().replace(' ', '')) - name_check = re.sub("[^0-9a-zA-Z\[\]]+", "", name.lower().replace(' ', '')) + eng_check = re.sub(r"[^0-9a-zA-Z\[\]]+", "", eng_title.lower().replace(' ', '')) + name_check = re.sub(r"[^0-9a-zA-Z\[\]]+", "", name.lower().replace(' ', '')) diff = SequenceMatcher(None, eng_check, name_check).ratio() if eng_check in name_check: if diff >= difference: @@ -2623,7 +2622,7 @@ def get_service(self, video, tag, audio, 
guess_title): } - video_name = re.sub("[.()]", " ", video.replace(tag, '').replace(guess_title, '')) + video_name = re.sub(r"[.()]", " ", video.replace(tag, '').replace(guess_title, '')) if "DTS-HD MA" in audio: video_name = video_name.replace("DTS-HD.MA.", "").replace("DTS-HD MA ", "") for key, value in services.items(): @@ -2827,7 +2826,7 @@ async def package(self, meta): generic.write(f"\nThumbnail Image:\n") for each in meta['image_list']: generic.write(f"{each['img_url']}\n") - title = re.sub("[^0-9a-zA-Z\[\]]+", "", meta['title']) + title = re.sub(r"[^0-9a-zA-Z\[\]]+", "", meta['title']) archive = f"{meta['base_dir']}/tmp/{meta['uuid']}/{title}" torrent_files = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}","*.torrent") if isinstance(torrent_files, list) and len(torrent_files) > 1: @@ -2837,7 +2836,7 @@ async def package(self, meta): try: if os.path.exists(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent"): base_torrent = Torrent.read(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") - manual_name = re.sub("[^0-9a-zA-Z\[\]\'\-]+", ".", os.path.basename(meta['path'])) + manual_name = re.sub(r"[^0-9a-zA-Z\[\]\'\-]+", ".", os.path.basename(meta['path'])) Torrent.copy(base_torrent).write(f"{meta['base_dir']}/tmp/{meta['uuid']}/{manual_name}.torrent", overwrite=True) # shutil.copy(os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent"), os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['name'].replace(' ', '.')}.torrent").replace(' ', '.')) filebrowser = self.config['TRACKERS'].get('MANUAL', {}).get('filebrowser', None) diff --git a/src/trackers/FL.py b/src/trackers/FL.py index 8dc75071f..5813f469a 100644 --- a/src/trackers/FL.py +++ b/src/trackers/FL.py @@ -98,7 +98,7 @@ async def edit_name(self, meta): fl_name = fl_name.replace('DTS7.1', 'DTS').replace('DTS5.1', 'DTS').replace('DTS2.0', 'DTS').replace('DTS1.0', 'DTS') fl_name = fl_name.replace('Dubbed', '').replace('Dual-Audio', '') fl_name = ' '.join(fl_name.split()) - fl_name = re.sub("[^0-9a-zA-ZÀ-ÿ. &+'\-\[\]]+", "", fl_name) + fl_name = re.sub(r"[^0-9a-zA-ZÀ-ÿ. &+'\-\[\]]+", "", fl_name) fl_name = fl_name.replace(' ', '.').replace('..', '.') return fl_name @@ -319,7 +319,7 @@ async def edit_desc(self, meta): desc = bbcode.convert_code_to_quote(desc) desc = bbcode.convert_comparison_to_centered(desc, 900) desc = desc.replace('[img]', '[img]').replace('[/img]', '[/img]') - desc = re.sub("(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) + desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) if meta['is_disc'] != 'BDMV': url = "https://up.img4k.net/api/description" data = { diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py index 7fab14991..b8eafcb7c 100644 --- a/src/trackers/HDB.py +++ b/src/trackers/HDB.py @@ -192,7 +192,7 @@ async def edit_name(self, meta): hdb_name = hdb_name.replace('Dubbed', '').replace('Dual-Audio', '') hdb_name = hdb_name.replace('REMUX', 'Remux') hdb_name = ' '.join(hdb_name.split()) - hdb_name = re.sub("[^0-9a-zA-ZÀ-ÿ. :&+'\-\[\]]+", "", hdb_name) + hdb_name = re.sub(r"[^0-9a-zA-ZÀ-ÿ. 
:&+'\-\[\]]+", "", hdb_name) hdb_name = hdb_name.replace(' .', '.').replace('..', '.') return hdb_name @@ -431,7 +431,7 @@ async def edit_desc(self, meta): desc = bbcode.convert_code_to_quote(desc) desc = bbcode.convert_spoiler_to_hide(desc) desc = bbcode.convert_comparison_to_centered(desc, 1000) - desc = re.sub("(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) + desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) descfile.write(desc) if self.rehost_images == True: console.print("[green]Rehosting Images...") diff --git a/src/trackers/HDT.py b/src/trackers/HDT.py index 63f7fa6f8..6bcf76964 100644 --- a/src/trackers/HDT.py +++ b/src/trackers/HDT.py @@ -105,7 +105,7 @@ async def edit_name(self, meta): hdt_name = hdt_name.replace(' DV ', ' DoVi ') hdt_name = ' '.join(hdt_name.split()) - hdt_name = re.sub("[^0-9a-zA-ZÀ-ÿ. &+'\-\[\]]+", "", hdt_name) + hdt_name = re.sub(r"[^0-9a-zA-ZÀ-ÿ. &+'\-\[\]]+", "", hdt_name) hdt_name = hdt_name.replace(':', '').replace('..', ' ').replace(' ', ' ') return hdt_name diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index b1ded5e34..9ac64f3f1 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -223,7 +223,7 @@ async def edit_name(self, meta): if meta['tag'] == "": mtv_name = f"{mtv_name}-NoGrp" mtv_name = ' '.join(mtv_name.split()) - mtv_name = re.sub("[^0-9a-zA-ZÀ-ÿ. &+'\-\[\]]+", "", mtv_name) + mtv_name = re.sub(r"[^0-9a-zA-ZÀ-ÿ. &+'\-\[\]]+", "", mtv_name) mtv_name = mtv_name.replace(' ', '.').replace('..', '.') return mtv_name diff --git a/src/trackers/PTER.py b/src/trackers/PTER.py index 818f2779b..71eb1c2a1 100644 --- a/src/trackers/PTER.py +++ b/src/trackers/PTER.py @@ -187,7 +187,7 @@ async def edit_desc(self, meta): desc = bbcode.convert_spoiler_to_hide(desc) desc = bbcode.convert_comparison_to_centered(desc, 1000) desc = desc.replace('[img]', '[img]') - desc = re.sub("(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) + desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) descfile.write(desc) if self.rehost_images == True: diff --git a/src/trackers/RTF.py b/src/trackers/RTF.py index d67dc9bff..f8e527c8f 100644 --- a/src/trackers/RTF.py +++ b/src/trackers/RTF.py @@ -53,7 +53,7 @@ async def upload(self, meta): # 'description' : meta['overview'] + "\n\n" + desc + "\n\n" + "Uploaded by L4G Upload Assistant", 'description': "this is a description", # editing mediainfo so that instead of 1 080p its 1,080p as site mediainfo parser wont work other wise. - 'mediaInfo': re.sub("(\d+)\s+(\d+)", r"\1,\2", mi_dump) if bd_dump == None else f"{bd_dump}", + 'mediaInfo': re.sub(r"(\d+)\s+(\d+)", r"\1,\2", mi_dump) if bd_dump == None else f"{bd_dump}", "nfo": "", "url": "https://www.imdb.com/title/" + (meta['imdb_id'] if str(meta['imdb_id']).startswith("tt") else "tt" + meta['imdb_id']) + "/", # auto pulled from IMDB diff --git a/src/trackers/THR.py b/src/trackers/THR.py index 3080ae581..f1f74e15b 100644 --- a/src/trackers/THR.py +++ b/src/trackers/THR.py @@ -49,7 +49,7 @@ async def upload(self, session, meta): return else: thr_name = thr_name_manually - torrent_name = re.sub("[^0-9a-zA-Z. '\-\[\]]+", " ", thr_name) + torrent_name = re.sub(r"[^0-9a-zA-Z. 
'\-\[\]]+", " ", thr_name) if meta.get('is_disc', '') == 'BDMV': diff --git a/src/trackers/TTG.py b/src/trackers/TTG.py index 21a1d12bd..6795d13cc 100644 --- a/src/trackers/TTG.py +++ b/src/trackers/TTG.py @@ -327,7 +327,7 @@ async def edit_desc(self, meta): desc = bbcode.convert_spoiler_to_hide(desc) desc = bbcode.convert_comparison_to_centered(desc, 1000) desc = desc.replace('[img]', '[img]') - desc = re.sub("(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) + desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) descfile.write(desc) images = meta['image_list'] if len(images) > 0: From 11dfc2a0d36ba6fb50645a05bbf492cae1275a21 Mon Sep 17 00:00:00 2001 From: sirius-sama Date: Sat, 2 Mar 2024 14:55:54 +0600 Subject: [PATCH 016/741] Adding CHD to the banned release group --- src/trackers/BLU.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/BLU.py b/src/trackers/BLU.py index 1e302eff2..1c6c94868 100644 --- a/src/trackers/BLU.py +++ b/src/trackers/BLU.py @@ -26,7 +26,7 @@ def __init__(self, config): self.upload_url = 'https://blutopia.cc/api/torrents/upload' self.signature = f"\n[center][url=https://blutopia.cc/forums/topics/3087]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = [ - '[Oj]', '3LTON', '4yEo', 'AFG', 'AniHLS', 'AnimeRG', 'AniURL', 'AROMA', 'aXXo', 'Brrip', 'CM8', 'CrEwSaDe', 'd3g', 'DeadFish', 'DNL', 'ELiTE', 'eSc', 'FaNGDiNG0', 'FGT', 'Flights', + '[Oj]', '3LTON', '4yEo', 'AFG', 'AniHLS', 'AnimeRG', 'AniURL', 'AROMA', 'aXXo', 'Brrip', 'CHD', 'CM8', 'CrEwSaDe', 'd3g', 'DeadFish', 'DNL', 'ELiTE', 'eSc', 'FaNGDiNG0', 'FGT', 'Flights', 'FRDS', 'FUM', 'HAiKU', 'HD2DVD', 'HDS', 'HDTime', 'Hi10', 'ION10', 'iPlanet', 'JIVE', 'KiNGDOM', 'Leffe', 'LEGi0N', 'LOAD', 'MeGusta', 'mHD', 'mSD', 'NhaNc3', 'nHD', 'nikt0', 'NOIVTC', 'nSD', 'PiRaTeS', 'playBD', 'PlaySD', 'playXD', 'PRODJi', 'RAPiDCOWS', 'RARBG', 'RDN', 'REsuRRecTioN', 'RMTeam', 'SANTi', 'SicFoI', 'SPASM', 'STUTTERSHIT', 'Telly', 'TM', 'TRiToN', 'UPiNSMOKE', 'URANiME', 'WAF', 'x0r', 'xRed', 'XS', 'YIFY', 'ZKBL', 'ZmN', 'ZMNT', From 8bf640c9eb1f217764ab6c9399d2798eda0b183b Mon Sep 17 00:00:00 2001 From: edge20200 <126633394+edge20200@users.noreply.github.com> Date: Tue, 5 Mar 2024 19:37:08 -0500 Subject: [PATCH 017/741] OldToonsWorld Add OldToonsWorld --- data/example-config.py | 6 +++--- src/trackers/{TFM.py => OTW.py} | 10 +++++----- upload.py | 6 +++--- 3 files changed, 11 insertions(+), 11 deletions(-) rename src/trackers/{TFM.py => OTW.py} (96%) diff --git a/data/example-config.py b/data/example-config.py index 500a76d88..49371e329 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -188,9 +188,9 @@ "announce_url" : "https://reelflix.xyz/announce/customannounceurl", # "anon" : False }, - "TFM" : { - "api_key" : "TFM api key", - "announce_url" : "https://toonsfor.me/announce/customannounceurl", + "OTW" : { + "api_key" : "OTW api key", + "announce_url" : "https://oldtoons.world/announce/customannounceurl", # "anon" : False }, "MANUAL" : { diff --git a/src/trackers/TFM.py b/src/trackers/OTW.py similarity index 96% rename from src/trackers/TFM.py rename to src/trackers/OTW.py index a797d5eb2..0425b3924 100644 --- a/src/trackers/TFM.py +++ b/src/trackers/OTW.py @@ -10,7 +10,7 @@ from src.console import console -class TFM(): +class OTW(): """ Edit for Tracker: Edit BASE.torrent with announce and source @@ -27,10 +27,10 @@ class TFM(): def __init__(self, config): self.config = config - self.tracker = 'TFM' - self.source_flag = 'TFM' - self.upload_url = 
'https://toonsfor.me/api/torrents/upload' - self.search_url = 'https://toonsfor.me/api/torrents/filter' + self.tracker = 'OTW' + self.source_flag = 'OTW' + self.upload_url = 'https://oldtoons.world/api/torrents/upload' + self.search_url = 'https://oldtoons.world/api/torrents/filter' self.signature = f"\n[center][url=https://github.com/L4GSP1KE/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = [""] pass diff --git a/upload.py b/upload.py index d0000432b..c2045152c 100644 --- a/upload.py +++ b/upload.py @@ -33,7 +33,7 @@ from src.trackers.OE import OE from src.trackers.BHDTV import BHDTV from src.trackers.RTF import RTF -from src.trackers.TFM import TFM +from src.trackers.OTW import OTW import json from pathlib import Path import asyncio @@ -247,12 +247,12 @@ async def do_the_thing(base_dir): ####### Upload to Trackers ####### #################################### common = COMMON(config=config) - api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM','LCD','LST','HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'TFM'] + api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM','LCD','LST','HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'OTW'] http_trackers = ['HDB', 'TTG', 'FL', 'PTER', 'HDT', 'MTV'] tracker_class_map = { 'BLU' : BLU, 'BHD': BHD, 'AITHER' : AITHER, 'STC' : STC, 'R4E' : R4E, 'THR' : THR, 'STT' : STT, 'HP' : HP, 'PTP' : PTP, 'RF' : RF, 'SN' : SN, 'ACM' : ACM, 'HDB' : HDB, 'LCD': LCD, 'TTG' : TTG, 'LST' : LST, 'HUNO': HUNO, 'FL' : FL, 'LT' : LT, 'NBL' : NBL, 'ANT' : ANT, 'PTER': PTER, 'JPTV' : JPTV, 'TL' : TL, 'TDC' : TDC, 'HDT' : HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF':RTF, 'OTW': OTW} for tracker in trackers: if meta['name'].endswith('DUPE?'): From 83d47993bb96bf3004362fac8a9e21681a1c548d Mon Sep 17 00:00:00 2001 From: AN1MATEK <15142596+AN1MATEK@users.noreply.github.com> Date: Thu, 21 Mar 2024 13:58:52 +0100 Subject: [PATCH 018/741] Update clients.py Simplify adding the torrent to a single torrents_add() API call that applies the tag and sets the paused state to False directly.
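For context, the single call carries everything the old add/wait/resume/tag sequence did in one request. A minimal standalone sketch of the same pattern with qbittorrent-api — the host, credentials, save path, category and tag below are illustrative placeholders, not values from the real Upload-Assistant config:

    import qbittorrentapi

    # Hypothetical connection details, for illustration only.
    qbt_client = qbittorrentapi.Client(host="http://localhost:8080",
                                       username="admin", password="adminadmin")
    qbt_client.auth_log_in()

    with open("example.torrent", "rb") as f:
        # One torrents_add() call adds the torrent unpaused, skips hash
        # checking, and applies the category and tag in the same request.
        qbt_client.torrents_add(
            torrent_files=f.read(),
            save_path="/downloads/example",       # placeholder save location
            use_auto_torrent_management=False,
            is_skip_checking=True,
            is_paused=False,                      # start seeding immediately
            content_layout="Original",
            category="uploads",                   # placeholder category
            tags="upload-assistant",              # placeholder tag
        )

Note that the consolidation also drops the old 30-second wait-and-resume loop (which worked around qbt's async race) and the per-upload meta tag, so the tag is now sourced from the client config alone.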
--- src/clients.py | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/src/clients.py b/src/clients.py index c8d5fcba1..5bf4d1f42 100644 --- a/src/clients.py +++ b/src/clients.py @@ -309,18 +309,7 @@ async def qbittorrent(self, path, torrent, local_path, remote_path, client, is_d content_layout = client.get('content_layout', 'Original') - qbt_client.torrents_add(torrent_files=torrent.dump(), save_path=path, use_auto_torrent_management=auto_management, is_skip_checking=True, content_layout=content_layout, category=qbt_category) - # Wait for up to 30 seconds for qbit to actually return the download - # there's an async race conditiion within qbt that it will return ok before the torrent is actually added - for _ in range(0, 30): - if len(qbt_client.torrents_info(torrent_hashes=torrent.infohash)) > 0: - break - await asyncio.sleep(1) - qbt_client.torrents_resume(torrent.infohash) - if client.get('qbit_tag', None) != None: - qbt_client.torrents_add_tags(tags=client.get('qbit_tag'), torrent_hashes=torrent.infohash) - if meta.get('qbit_tag') != None: - qbt_client.torrents_add_tags(tags=meta.get('qbit_tag'), torrent_hashes=torrent.infohash) + qbt_client.torrents_add(torrent_files=torrent.dump(), save_path=path, use_auto_torrent_management=auto_management, is_skip_checking=True, is_paused=False, content_layout=content_layout, category=qbt_category, tags=client.get('qbit_tag')) console.print(f"Added to: {path}") @@ -411,4 +400,4 @@ async def remote_path_map(self, meta): if local_path.endswith(os.sep): remote_path = remote_path + os.sep - return local_path, remote_path \ No newline at end of file + return local_path, remote_path From a0ebdd58543887f524ebf95138e69015860f27f1 Mon Sep 17 00:00:00 2001 From: harpiacbr <166330660+harpiacbr@users.noreply.github.com> Date: Sun, 7 Apr 2024 16:52:30 -0300 Subject: [PATCH 019/741] add support for CBR --- README.md | 2 +- cogs/commands.py | 23 ++++- data/example-config.py | 7 +- src/trackers/CBR.py | 191 +++++++++++++++++++++++++++++++++++++++++ upload.py | 5 +- 5 files changed, 221 insertions(+), 7 deletions(-) create mode 100644 src/trackers/CBR.py diff --git a/README.md b/README.md index 596a3303a..4ede718ea 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ A simple tool to take the work out of uploading. - Can re-use existing torrents instead of hashing new - Generates proper name for your upload using Mediainfo/BDInfo and TMDb/IMDb conforming to site rules - Checks for existing releases already on site - - Uploads to PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN + - Uploads to PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN/CBR - Adds to your client with fast resume, seeding instantly (rtorrent/qbittorrent/deluge/watch folder) - ALL WITH MINIMAL INPUT! 
- Currently works with .mkv/.mp4/Blu-ray/DVD/HD-DVDs diff --git a/cogs/commands.py b/cogs/commands.py index 52efe6ac2..a67fe93ca 100644 --- a/cogs/commands.py +++ b/cogs/commands.py @@ -8,6 +8,7 @@ from src.trackers.AITHER import AITHER from src.trackers.STC import STC from src.trackers.LCD import LCD +from src.trackers.CBR import CBR from data.config import config import discord @@ -419,7 +420,10 @@ async def send_embed_and_upload(self,ctx,meta): await asyncio.sleep(0.3) if "LCD" in each.replace(' ', ''): await message.add_reaction(config['DISCORD']['discord_emojis']['LCD']) - await asyncio.sleep(0.3) + await asyncio.sleep(0.3) + if "CBR" in each.replace(' ', ''): + await message.add_reaction(config['DISCORD']['discord_emojis']['CBR']) + await asyncio.sleep(0.3) await message.add_reaction(config['DISCORD']['discord_emojis']['MANUAL']) await asyncio.sleep(0.3) await message.add_reaction(config['DISCORD']['discord_emojis']['CANCEL']) @@ -511,7 +515,10 @@ def check(reaction, user): await stc.edit_desc(meta) if manual_tracker.upper() == "LCD": lcd = LCD(config=config) - await lcd.edit_desc(meta) + await lcd.edit_desc(meta) + if manual_tracker.upper() == "CBR": + cbr = CBR(config=config) + await cbr.edit_desc(meta) archive_url = await prep.package(meta) upload_embed_description = upload_embed_description.replace('MANUAL', '~~MANUAL~~') if archive_url == False: @@ -571,7 +578,17 @@ def check(reaction, user): await client.add_to_client(meta, "LCD") upload_embed_description = upload_embed_description.replace('LCD', '~~LCD~~') upload_embed = discord.Embed(title=f"Uploaded `{meta['name']}` to:", description=upload_embed_description, color=0x00ff40) - await msg.edit(embed=upload_embed) + await msg.edit(embed=upload_embed) + if "CBR" in tracker_list: + cbr = CBR(config=config) + dupes = await cbr.search_existing(meta) + meta = await self.dupe_embed(dupes, meta, tracker_emojis, channel) + if meta['upload'] == True: + await cbr.upload(meta) + await client.add_to_client(meta, "CBR") + upload_embed_description = upload_embed_description.replace('CBR', '~~CBR~~') + upload_embed = discord.Embed(title=f"Uploaded `{meta['name']}` to:", description=upload_embed_description, color=0x00ff40) + await msg.edit(embed=upload_embed) return None diff --git a/data/example-config.py b/data/example-config.py index c1b37e8dc..feecf382d 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -34,7 +34,7 @@ "TRACKERS" : { # Which trackers do you want to upload to? 
- "default_trackers" : "BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV", + "default_trackers" : "BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR", "BLU" : { "useAPI" : False, # Set to True if using BLU @@ -139,6 +139,11 @@ "announce_url" : "https://locadora.cc/announce/customannounceurl", # "anon" : False }, + "CBR" : { + "api_key" : "CBR api key", + "announce_url" : "https://capybarabr.com/announce/customannounceurl", + # "anon" : False + }, "LST" : { "api_key" : "LST api key", "announce_url" : "https://lst.gg/announce/customannounceurl", diff --git a/src/trackers/CBR.py b/src/trackers/CBR.py new file mode 100644 index 000000000..af73c36da --- /dev/null +++ b/src/trackers/CBR.py @@ -0,0 +1,191 @@ +# -*- coding: utf-8 -*- +# import discord +import asyncio +import requests +import distutils.util +import os +import platform + +from src.trackers.COMMON import COMMON +from src.console import console + + + +class CBR(): + """ + Edit for Tracker: + Edit BASE.torrent with announce and source + Check for duplicates + Set type/category IDs + Upload + """ + def __init__(self, config): + self.config = config + self.tracker = 'CBR' + self.source_flag = 'CapybaraBR' + self.search_url = 'https://capybarabr.com/api/torrents/filter' + self.torrent_url = 'https://capybarabr.com/api/torrents/' + self.upload_url = 'https://capybarabr.com/api/torrents/upload' + self.signature = f"\n[center][img]https://i.ibb.co/tYNzwgd/thanks-cbr.png[/img][/center]" + self.banned_groups = [""] + pass + + async def upload(self, meta): + common = COMMON(config=self.config) + await common.edit_torrent(meta, self.tracker, self.source_flag) + await common.unit3d_edit_desc(meta, self.tracker, self.signature) + cat_id = await self.get_cat_id(meta['category'], meta.get('edition', ''), meta) + type_id = await self.get_type_id(meta['type']) + resolution_id = await self.get_res_id(meta['resolution']) + region_id = await common.unit3d_region_ids(meta.get('region')) + distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + name = await self.edit_name(meta) + if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + anon = 0 + else: + anon = 1 + + if meta['bdinfo'] != None: + mi_dump = None + bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + else: + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + bd_dump = None + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[CBR]DESCRIPTION.txt", 'r').read() + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[CBR]{meta['clean_name']}.torrent", 'rb') + files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")} + data = { + 'name' : name, + 'description' : desc, + 'mediainfo' : mi_dump, + 'bdinfo' : bd_dump, + 'category_id' : cat_id, + 'type_id' : type_id, + 'resolution_id' : resolution_id, + 'tmdb' : meta['tmdb'], + 'imdb' : meta['imdb_id'].replace('tt', ''), + 'tvdb' : meta['tvdb_id'], + 'mal' : meta['mal_id'], + 'igdb' : 0, + 'anonymous' : anon, + 'stream' : meta['stream'], + 'sd' : meta['sd'], + 'keywords' : meta['keywords'], + 'personal_release' : int(meta.get('personalrelease', False)), + 'internal' : 0, + 'featured' : 0, + 'free' : 0, + 'doubleup' : 0, + 'sticky' : 0, + } + # Internal + if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if 
meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + data['internal'] = 1 + + if region_id != 0: + data['region_id'] = region_id + if distributor_id != 0: + data['distributor_id'] = distributor_id + if meta.get('category') == "TV": + data['season_number'] = meta.get('season_int', '0') + data['episode_number'] = meta.get('episode_int', '0') + headers = { + 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + } + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + } + + if meta['debug'] == False: + response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + try: + console.print(response.json()) + except: + console.print("It may have uploaded, go check") + return + else: + console.print(f"[cyan]Request Data:") + console.print(data) + open_torrent.close() + + + + + + async def get_cat_id(self, category_name, edition, meta): + category_id = { + 'MOVIE': '1', + 'TV': '2', + 'ANIMES': '4' + }.get(category_name, '0') + if meta['anime'] == True and category_id == '2': + category_id = '4' + return category_id + + async def get_type_id(self, type): + type_id = { + 'DISC': '1', + 'REMUX': '2', + 'ENCODE': '3', + 'WEBDL': '4', + 'WEBRIP': '5', + 'HDTV': '6' + }.get(type, '0') + return type_id + + async def get_res_id(self, resolution): + resolution_id = { +# '8640p':'10', + '4320p': '1', + '2160p': '2', +# '1440p' : '2', + '1080p': '3', + '1080i':'34', + '720p': '5', + '576p': '6', + '576i': '7', + '480p': '8', + '480i': '9', + 'Other': '10', + }.get(resolution, '10') + return resolution_id + + + + + async def search_existing(self, meta): + dupes = [] + console.print("[yellow]Buscando por duplicatas no tracker...") + params = { + 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId' : meta['tmdb'], + 'categories[]' : await self.get_cat_id(meta['category'], meta.get('edition', ''), meta), + 'types[]' : await self.get_type_id(meta['type']), + 'resolutions[]' : await self.get_res_id(meta['resolution']), + 'name' : "" + } + if meta['category'] == 'TV': + params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" + if meta.get('edition', "") != "": + params['name'] = params['name'] + f" {meta['edition']}" + try: + response = requests.get(url=self.search_url, params=params) + response = response.json() + for each in response['data']: + result = [each][0]['attributes']['name'] + # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() + # if difference >= 0.05: + dupes.append(result) + except: + console.print('[bold red]Não foi possivel buscar no tracker torrents duplicados. 
O tracker está offline ou sua api está incorreta') + await asyncio.sleep(5) + + return dupes + + async def edit_name(self, meta): + + + name = meta['uuid'].replace('.mkv','').replace('.mp4','').replace(".", " ").replace("DDP2 0","DDP2.0").replace("DDP5 1","DDP5.1").replace("H 264","H.264").replace("H 265","H.265").replace("DD+7 1","DDP7.1").replace("AAC2 0","AAC2.0").replace('DD5 1','DD5.1').replace('DD2 0','DD2.0').replace('TrueHD 7 1','TrueHD 7.1').replace('DTS-HD MA 7 1','DTS-HD MA 7.1') + + return name diff --git a/upload.py b/upload.py index 0c7292865..f626388a4 100644 --- a/upload.py +++ b/upload.py @@ -33,6 +33,7 @@ from src.trackers.OE import OE from src.trackers.BHDTV import BHDTV from src.trackers.RTF import RTF +from src.trackers.CBR import CBR import json from pathlib import Path import asyncio @@ -246,12 +247,12 @@ async def do_the_thing(base_dir): ####### Upload to Trackers ####### #################################### common = COMMON(config=config) - api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM','LCD','LST','HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF'] + api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM','LCD','LST','HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'CBR'] http_trackers = ['HDB', 'TTG', 'FL', 'PTER', 'HDT', 'MTV'] tracker_class_map = { 'BLU' : BLU, 'BHD': BHD, 'AITHER' : AITHER, 'STC' : STC, 'R4E' : R4E, 'THR' : THR, 'STT' : STT, 'HP' : HP, 'PTP' : PTP, 'RF' : RF, 'SN' : SN, 'ACM' : ACM, 'HDB' : HDB, 'LCD': LCD, 'TTG' : TTG, 'LST' : LST, 'HUNO': HUNO, 'FL' : FL, 'LT' : LT, 'NBL' : NBL, 'ANT' : ANT, 'PTER': PTER, 'JPTV' : JPTV, - 'TL' : TL, 'TDC' : TDC, 'HDT' : HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF':RTF} + 'TL' : TL, 'TDC' : TDC, 'HDT' : HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF':RTF,'CBR':CBR} for tracker in trackers: if meta['name'].endswith('DUPE?'): From 24ae4df1f5cbcc8b8a862facacb531b26d59939d Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 13 Apr 2024 21:57:19 +1000 Subject: [PATCH 020/741] (Update) BLU banned groups --- src/trackers/BLU.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/trackers/BLU.py b/src/trackers/BLU.py index 33e03975b..a9e0f226a 100644 --- a/src/trackers/BLU.py +++ b/src/trackers/BLU.py @@ -26,10 +26,10 @@ def __init__(self, config): self.upload_url = 'https://blutopia.cc/api/torrents/upload' self.signature = f"\n[center][url=https://blutopia.cc/forums/topics/3087]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = [ - '[Oj]', '3LTON', '4yEo', 'AFG', 'AniHLS', 'AnimeRG', 'AniURL', 'AROMA', 'aXXo', 'Brrip', 'CM8', 'CrEwSaDe', 'd3g', 'DeadFish', 'DNL', 'ELiTE', 'eSc', 'FaNGDiNG0', 'FGT', 'Flights', + '[Oj]', '3LTON', '4yEo', 'ADE', 'AFG', 'AniHLS', 'AnimeRG', 'AniURL', 'AROMA', 'aXXo', 'Brrip', 'CHD', 'CM8', 'CrEwSaDe', 'd3g', 'DeadFish', 'DNL', 'ELiTE', 'eSc', 'FaNGDiNG0', 'FGT', 'Flights', 'FRDS', 'FUM', 'HAiKU', 'HD2DVD', 'HDS', 'HDTime', 'Hi10', 'ION10', 'iPlanet', 'JIVE', 'KiNGDOM', 'Leffe', 'LEGi0N', 'LOAD', 'MeGusta', 'mHD', 'mSD', 'NhaNc3', 'nHD', 'nikt0', 'NOIVTC', - 'nSD', 'PiRaTeS', 'playBD', 'PlaySD', 'playXD', 'PRODJi', 'RAPiDCOWS', 'RARBG', 'RDN', 'REsuRRecTioN', 'RMTeam', 'SANTi', 'SicFoI', 'SPASM', 'STUTTERSHIT', 'Telly', 'TM', 'TRiToN', 'UPiNSMOKE', - 'URANiME', 'WAF', 'x0r', 'xRed', 'XS', 'YIFY', 'ZKBL', 'ZmN', 'ZMNT', + 'nSD', 'PiRaTeS', 'playBD', 'PlaySD', 'playXD', 'PRODJi', 'RAPiDCOWS', 'RARBG', 'RetroPeeps', 'RDN', 'REsuRRecTioN', 'RMTeam', 'SANTi', 'SicFoI', 'SPASM', 
'SPDVD', 'STUTTERSHIT', 'Telly', 'TM', + 'TRiToN', 'UPiNSMOKE', 'URANiME', 'WAF', 'x0r', 'xRed', 'XS', 'YIFY', 'ZKBL', 'ZmN', 'ZMNT', 'AOC', // AOC requires prior approval first, do manually. ['EVO', 'Raw Content Only'], ['TERMiNAL', 'Raw Content Only'], ['ViSION', 'Note the capitalization and characters used'], ['CMRG', 'Raw Content Only'] ] From 937ba822011e3567b80a72a25e43dbeb8235d631 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 13 Apr 2024 22:05:04 +1000 Subject: [PATCH 021/741] (Update) Aither banned groups --- src/trackers/AITHER.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 89777724a..7930871eb 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -26,7 +26,9 @@ def __init__(self, config): self.search_url = 'https://aither.cc/api/torrents/filter' self.upload_url = 'https://aither.cc/api/torrents/upload' self.signature = f"\n[center][url=https://aither.cc/forums/topics/1349]Created by L4G's Upload Assistant[/url][/center]" - self.banned_groups = ['4K4U', 'AROMA', 'EMBER', 'FGT', 'Hi10', 'ION10', 'Judas', 'LAMA', 'MeGusta', 'QxR', 'RARBG', 'SPDVD', 'STUTTERSHIT', 'SWTYBLZ', 'Sicario', 'TAoE', 'TGx', 'TSP', 'TSPxL', 'Tigole', 'Weasley[HONE]', 'Will1869', 'YIFY', 'd3g', 'nikt0', 'x0r'] + self.banned_groups = ['4K4U', 'AROMA', 'd3g', 'edge2020', 'EMBER', 'EVO', 'FGT', 'FreetheFish', 'Hi10', 'HiQVE', 'ION10', 'iVy', 'Judas', 'LAMA', 'MeGusta', 'nikt0', 'OEPlus', 'OFT', 'OsC', 'PYC', + 'QxR', 'Ralphy', 'RARBG', 'RetroPeeps', 'SAMPA', 'Sicario', 'Silence', 'SkipTT', 'SPDVD', 'STUTTERSHIT', 'SWTYBLZ', 'TAoE', 'TGx', 'Tigole', 'TSP', 'TSPxL', 'VXT', 'Weasley[HONE]', + 'Will1869', 'x0r', 'YIFY'] pass async def upload(self, meta): From e0065c0b5106d9d7d13f751e1a86ced3d84d318a Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 13 Apr 2024 22:13:11 +1000 Subject: [PATCH 022/741] OTW in README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 596a3303a..b29838c85 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ A simple tool to take the work out of uploading. - Can re-use existing torrents instead of hashing new - Generates proper name for your upload using Mediainfo/BDInfo and TMDb/IMDb conforming to site rules - Checks for existing releases already on site - - Uploads to PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN + - Uploads to PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN/OTW - Adds to your client with fast resume, seeding instantly (rtorrent/qbittorrent/deluge/watch folder) - ALL WITH MINIMAL INPUT! 
- Currently works with .mkv/.mp4/Blu-ray/DVD/HD-DVDs From b7cdd4ba8d5e26c3ae625ffb796f3dc5e6db96aa Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 13 Apr 2024 22:26:04 +1000 Subject: [PATCH 023/741] (Update) MTV banned groups --- src/trackers/MTV.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index c773f7560..aba9b3b5d 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -29,11 +29,9 @@ def __init__(self, config): self.forum_link = 'https://www.morethantv.me/wiki.php?action=article&id=73' self.search_url = 'https://www.morethantv.me/api/torznab' self.banned_groups = [ - '3LTON', 'mRS', 'CM8', 'BRrip', 'Leffe', 'aXXo', 'FRDS', 'XS', 'KiNGDOM', 'WAF', 'nHD', - 'h65', 'CrEwSaDe', 'TM', 'ViSiON', 'x0r', 'PandaRG', 'HD2DVD', 'iPlanet', 'JIVE', 'ELiTE', - 'nikt0', 'STUTTERSHIT', 'ION10', 'RARBG', 'FaNGDiNG0', 'YIFY', 'FUM', 'ViSION', 'NhaNc3', - 'nSD', 'PRODJi', 'DNL', 'DeadFish', 'HDTime', 'mHD', 'TERMiNAL', - '[Oj]', 'QxR', 'ZmN', 'RDN', 'mSD', 'LOAD', 'BDP', 'SANTi', 'ZKBL', ['EVO', 'WEB-DL Only'] + 'aXXo', 'BRrip', 'CM8', 'CrEwSaDe', 'DNL', 'FaNGDiNG0', 'FRDS', 'HD2DVD', 'HDTime', 'iPlanet', + 'KiNGDOM', 'Leffe', 'mHD', 'mSD', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'PRODJi', 'RDN', 'SANTi', + 'STUTTERSHIT', 'TERMiNAL', 'ViSION', 'WAF', 'x0r', 'YIFY', ['EVO', 'WEB-DL Only'] ] pass From a8b233baaae506cd2e9f51a3aa08e007ea1fd642 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 13 Apr 2024 22:32:22 +1000 Subject: [PATCH 024/741] (Update) NBL banned groups formatting --- src/trackers/NBL.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/trackers/NBL.py b/src/trackers/NBL.py index 489c21902..1da1c6baa 100644 --- a/src/trackers/NBL.py +++ b/src/trackers/NBL.py @@ -32,7 +32,13 @@ def __init__(self, config): self.upload_url = 'https://nebulance.io/upload.php' self.search_url = 'https://nebulance.io/api.php' self.api_key = self.config['TRACKERS'][self.tracker]['api_key'].strip() - self.banned_groups = ['0neshot', '3LTON', '4yEo', '[Oj]', 'AFG', 'AkihitoSubs', 'AniHLS', 'Anime', 'Time', 'AnimeRG', 'AniURL', 'ASW', 'BakedFish', 'bonkai77', 'Cleo', 'DeadFish', 'DeeJayAhmed', 'ELiTE', 'EMBER', 'eSc', 'FGT', 'FUM', 'GERMini', 'HAiKU', 'Hi10', 'ION10', 'JacobSwaggedUp', 'JIVE', 'Judas', 'LOAD', 'MeGusta', 'Mr.Deadpool', 'mSD', 'NemDiggers', 'neoHEVC', 'NhaNc3', 'NOIVTC', 'PlaySD', 'playXD', 'project-gxs', 'PSA', 'QaS', 'Ranger', 'RAPiDCOWS', 'Raze', 'Reaktor', 'REsuRRecTioN', 'RMTeam', 'SpaceFish', 'SPASM', 'SSA', 'Telly', 'Tenrai-Sensei', 'TM', 'Trix', 'URANiME', 'VipapkStudios', 'ViSiON', 'Wardevil', 'xRed', 'XS', 'YakuboEncodes', 'YuiSubs', 'ZKBL', 'ZmN', 'ZMNT'] + self.banned_groups = ['0neshot', '3LTON', '4yEo', '[Oj]', 'AFG', 'AkihitoSubs', 'AniHLS', 'Anime Time', 'AnimeRG', 'AniURL', 'ASW', 'BakedFish', + 'bonkai77', 'Cleo', 'DeadFish', 'DeeJayAhmed', 'ELiTE', 'EMBER', 'eSc', 'EVO', 'FGT', 'FUM', 'GERMini', 'HAiKU', 'Hi10', 'ION10', + 'JacobSwaggedUp', 'JIVE', 'Judas', 'LOAD', 'MeGusta', 'Mr.Deadpool', 'mSD', 'NemDiggers', 'neoHEVC', 'NhaNc3', 'NOIVTC', + 'PlaySD', 'playXD', 'project-gxs', 'PSA', 'QaS', 'Ranger', 'RAPiDCOWS', 'Raze', 'Reaktor', 'REsuRRecTioN', 'RMTeam', 'ROBOTS', + 'SpaceFish', 'SPASM', 'SSA', 'Telly', 'Tenrai-Sensei', 'TM', 'Trix', 'URANiME', 'VipapkStudios', 'ViSiON', 'Wardevil', 'xRed', + 'XS', 'YakuboEncodes', 'YuiSubs', 'ZKBL', 'ZmN', 'ZMNT'] + // EVO and ROBOTS are allowed on condition. Do manually. 
pass From 8a261d5ddc99e7eb800fb488e417df0b60738a1a Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 13 Apr 2024 22:36:30 +1000 Subject: [PATCH 025/741] (Update) PTP banned groups --- src/trackers/PTP.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 01ec975fd..7ededcac7 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -35,7 +35,9 @@ def __init__(self, config): self.password = config['TRACKERS']['PTP'].get('password', '').strip() self.web_source = distutils.util.strtobool(str(config['TRACKERS']['PTP'].get('add_web_source_to_desc', True))) self.user_agent = f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' - self.banned_groups = ['aXXo', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'DNL', 'FaNGDiNG0', 'HD2DVD', 'HDTime', 'ION10', 'iPlanet', 'KiNGDOM', 'mHD', 'mSD', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'OFT', 'PRODJi', 'SANTi', 'STUTTERSHIT', 'ViSION', 'VXT', 'WAF', 'd3g', 'x0r', 'YIFY', 'BMDru'] + self.banned_groups = ['aXXo', 'BMDru', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'd3g', 'DNL', 'FaNGDiNG0', 'HD2DVD', 'HDTime', 'ION10', 'iPlanet', + 'KiNGDOM', 'mHD', 'mSD', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'OFT', 'PRODJi', 'SANTi', 'SPiRiT', 'STUTTERSHIT', 'ViSION', 'VXT', + 'WAF', 'x0r', 'YIFY',] self.sub_lang_map = { ("Arabic", "ara", "ar") : 22, From 8acfbbebad1e96804144e75283f25f2de282f198 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 13 Apr 2024 23:09:48 +1000 Subject: [PATCH 026/741] (Fix) BLU banned groups --- src/trackers/BLU.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/BLU.py b/src/trackers/BLU.py index a9e0f226a..57d750110 100644 --- a/src/trackers/BLU.py +++ b/src/trackers/BLU.py @@ -29,7 +29,7 @@ def __init__(self, config): '[Oj]', '3LTON', '4yEo', 'ADE', 'AFG', 'AniHLS', 'AnimeRG', 'AniURL', 'AROMA', 'aXXo', 'Brrip', 'CHD', 'CM8', 'CrEwSaDe', 'd3g', 'DeadFish', 'DNL', 'ELiTE', 'eSc', 'FaNGDiNG0', 'FGT', 'Flights', 'FRDS', 'FUM', 'HAiKU', 'HD2DVD', 'HDS', 'HDTime', 'Hi10', 'ION10', 'iPlanet', 'JIVE', 'KiNGDOM', 'Leffe', 'LEGi0N', 'LOAD', 'MeGusta', 'mHD', 'mSD', 'NhaNc3', 'nHD', 'nikt0', 'NOIVTC', 'nSD', 'PiRaTeS', 'playBD', 'PlaySD', 'playXD', 'PRODJi', 'RAPiDCOWS', 'RARBG', 'RetroPeeps', 'RDN', 'REsuRRecTioN', 'RMTeam', 'SANTi', 'SicFoI', 'SPASM', 'SPDVD', 'STUTTERSHIT', 'Telly', 'TM', - 'TRiToN', 'UPiNSMOKE', 'URANiME', 'WAF', 'x0r', 'xRed', 'XS', 'YIFY', 'ZKBL', 'ZmN', 'ZMNT', 'AOC', // AOC requires prior approval first, do manually. 
+ 'TRiToN', 'UPiNSMOKE', 'URANiME', 'WAF', 'x0r', 'xRed', 'XS', 'YIFY', 'ZKBL', 'ZmN', 'ZMNT', 'AOC', ['EVO', 'Raw Content Only'], ['TERMiNAL', 'Raw Content Only'], ['ViSION', 'Note the capitalization and characters used'], ['CMRG', 'Raw Content Only'] ] From 4a9bab6a3924dacdfa0c96ce301ab5c85431d19c Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 13 Apr 2024 23:11:07 +1000 Subject: [PATCH 027/741] (Fix) NBL banned groups --- src/trackers/NBL.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/NBL.py b/src/trackers/NBL.py index 1da1c6baa..813cb871b 100644 --- a/src/trackers/NBL.py +++ b/src/trackers/NBL.py @@ -38,7 +38,7 @@ def __init__(self, config): 'PlaySD', 'playXD', 'project-gxs', 'PSA', 'QaS', 'Ranger', 'RAPiDCOWS', 'Raze', 'Reaktor', 'REsuRRecTioN', 'RMTeam', 'ROBOTS', 'SpaceFish', 'SPASM', 'SSA', 'Telly', 'Tenrai-Sensei', 'TM', 'Trix', 'URANiME', 'VipapkStudios', 'ViSiON', 'Wardevil', 'xRed', 'XS', 'YakuboEncodes', 'YuiSubs', 'ZKBL', 'ZmN', 'ZMNT'] - // EVO and ROBOTS are allowed on condition. Do manually. + pass From bb99f1055847d51d887fb1d38e4e968f4108791d Mon Sep 17 00:00:00 2001 From: harpiacbr <166330660+harpiacbr@users.noreply.github.com> Date: Tue, 16 Apr 2024 00:15:54 -0300 Subject: [PATCH 028/741] Update CBR.py update replaces --- src/trackers/CBR.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/CBR.py b/src/trackers/CBR.py index af73c36da..d4ed5d38e 100644 --- a/src/trackers/CBR.py +++ b/src/trackers/CBR.py @@ -186,6 +186,6 @@ async def search_existing(self, meta): async def edit_name(self, meta): - name = meta['uuid'].replace('.mkv','').replace('.mp4','').replace(".", " ").replace("DDP2 0","DDP2.0").replace("DDP5 1","DDP5.1").replace("H 264","H.264").replace("H 265","H.265").replace("DD+7 1","DDP7.1").replace("AAC2 0","AAC2.0").replace('DD5 1','DD5.1').replace('DD2 0','DD2.0').replace('TrueHD 7 1','TrueHD 7.1').replace('DTS-HD MA 7 1','DTS-HD MA 7.1') + name = meta['uuid'].replace('.mkv','').replace('.mp4','').replace(".", " ").replace("DDP2 0","DDP2.0").replace("DDP5 1","DDP5.1").replace("H 264","H.264").replace("H 265","H.265").replace("DD+7 1","DDP7.1").replace("AAC2 0","AAC2.0").replace('DD5 1','DD5.1').replace('DD2 0','DD2.0').replace('TrueHD 7 1','TrueHD 7.1').replace('DTS-HD MA 7 1','DTS-HD MA 7.1').replace('DTS-HD MA 5 1','DTS-HD MA 5.1').replace("TrueHD 5 1","TrueHD 5.1").replace("DTS-X 7 1","DTS-X 7.1").replace("DTS-X 5 1","DTS-X 5.1").replace("FLAC 2 0","FLAC 2.0").replace("FLAC 2 0","FLAC 2.0").replace("FLAC 5 1","FLAC 5.1").replace("DD1 0","DD1.0").replace("DTS ES 5 1","DTS ES 5.1") return name From 2886733d22d5bf0a7dd1f2c7f8fe84cdc5e2505f Mon Sep 17 00:00:00 2001 From: harpiacbr <166330660+harpiacbr@users.noreply.github.com> Date: Fri, 19 Apr 2024 02:48:34 -0300 Subject: [PATCH 029/741] Update CBR.py replace DTS 5.1 --- src/trackers/CBR.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/CBR.py b/src/trackers/CBR.py index d4ed5d38e..5f7717c31 100644 --- a/src/trackers/CBR.py +++ b/src/trackers/CBR.py @@ -186,6 +186,6 @@ async def search_existing(self, meta): async def edit_name(self, meta): - name = meta['uuid'].replace('.mkv','').replace('.mp4','').replace(".", " ").replace("DDP2 0","DDP2.0").replace("DDP5 1","DDP5.1").replace("H 264","H.264").replace("H 265","H.265").replace("DD+7 1","DDP7.1").replace("AAC2 0","AAC2.0").replace('DD5 1','DD5.1').replace('DD2 0','DD2.0').replace('TrueHD 7 1','TrueHD 7.1').replace('DTS-HD MA 7 1','DTS-HD MA 7.1').replace('DTS-HD MA 
5 1','DTS-HD MA 5.1').replace("TrueHD 5 1","TrueHD 5.1").replace("DTS-X 7 1","DTS-X 7.1").replace("DTS-X 5 1","DTS-X 5.1").replace("FLAC 2 0","FLAC 2.0").replace("FLAC 2 0","FLAC 2.0").replace("FLAC 5 1","FLAC 5.1").replace("DD1 0","DD1.0").replace("DTS ES 5 1","DTS ES 5.1") + name = meta['uuid'].replace('.mkv','').replace('.mp4','').replace(".", " ").replace("DDP2 0","DDP2.0").replace("DDP5 1","DDP5.1").replace("H 264","H.264").replace("H 265","H.265").replace("DD+7 1","DDP7.1").replace("AAC2 0","AAC2.0").replace('DD5 1','DD5.1').replace('DD2 0','DD2.0').replace('TrueHD 7 1','TrueHD 7.1').replace('DTS-HD MA 7 1','DTS-HD MA 7.1').replace('DTS-HD MA 5 1','DTS-HD MA 5.1').replace("TrueHD 5 1","TrueHD 5.1").replace("DTS-X 7 1","DTS-X 7.1").replace("DTS-X 5 1","DTS-X 5.1").replace("FLAC 2 0","FLAC 2.0").replace("FLAC 2 0","FLAC 2.0").replace("FLAC 5 1","FLAC 5.1").replace("DD1 0","DD1.0").replace("DTS ES 5 1","DTS ES 5.1").replace("DTS5 1","DTS 5.1") return name From 0ab21e0e874275af5a998a07cb165657de9fa686 Mon Sep 17 00:00:00 2001 From: harpiacbr <166330660+harpiacbr@users.noreply.github.com> Date: Fri, 19 Apr 2024 16:39:30 -0300 Subject: [PATCH 030/741] Update CBR.py replace AAC1.0 --- src/trackers/CBR.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/CBR.py b/src/trackers/CBR.py index 5f7717c31..7c755fc00 100644 --- a/src/trackers/CBR.py +++ b/src/trackers/CBR.py @@ -186,6 +186,6 @@ async def search_existing(self, meta): async def edit_name(self, meta): - name = meta['uuid'].replace('.mkv','').replace('.mp4','').replace(".", " ").replace("DDP2 0","DDP2.0").replace("DDP5 1","DDP5.1").replace("H 264","H.264").replace("H 265","H.265").replace("DD+7 1","DDP7.1").replace("AAC2 0","AAC2.0").replace('DD5 1','DD5.1').replace('DD2 0','DD2.0').replace('TrueHD 7 1','TrueHD 7.1').replace('DTS-HD MA 7 1','DTS-HD MA 7.1').replace('DTS-HD MA 5 1','DTS-HD MA 5.1').replace("TrueHD 5 1","TrueHD 5.1").replace("DTS-X 7 1","DTS-X 7.1").replace("DTS-X 5 1","DTS-X 5.1").replace("FLAC 2 0","FLAC 2.0").replace("FLAC 2 0","FLAC 2.0").replace("FLAC 5 1","FLAC 5.1").replace("DD1 0","DD1.0").replace("DTS ES 5 1","DTS ES 5.1").replace("DTS5 1","DTS 5.1") + name = meta['uuid'].replace('.mkv','').replace('.mp4','').replace(".", " ").replace("DDP2 0","DDP2.0").replace("DDP5 1","DDP5.1").replace("H 264","H.264").replace("H 265","H.265").replace("DD+7 1","DDP7.1").replace("AAC2 0","AAC2.0").replace('DD5 1','DD5.1').replace('DD2 0','DD2.0').replace('TrueHD 7 1','TrueHD 7.1').replace('DTS-HD MA 7 1','DTS-HD MA 7.1').replace('DTS-HD MA 5 1','DTS-HD MA 5.1').replace("TrueHD 5 1","TrueHD 5.1").replace("DTS-X 7 1","DTS-X 7.1").replace("DTS-X 5 1","DTS-X 5.1").replace("FLAC 2 0","FLAC 2.0").replace("FLAC 2 0","FLAC 2.0").replace("FLAC 5 1","FLAC 5.1").replace("DD1 0","DD1.0").replace("DTS ES 5 1","DTS ES 5.1").replace("DTS5 1","DTS 5.1").replace("AAC1 0","AAC1.0") return name From 26471c900d78a2d03cd947664bd2c556e9d7aa4c Mon Sep 17 00:00:00 2001 From: harpiacbr <166330660+harpiacbr@users.noreply.github.com> Date: Fri, 26 Apr 2024 15:53:08 -0300 Subject: [PATCH 031/741] Update CBR.py update replace dd+ --- src/trackers/CBR.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/CBR.py b/src/trackers/CBR.py index 7c755fc00..73ca0154a 100644 --- a/src/trackers/CBR.py +++ b/src/trackers/CBR.py @@ -186,6 +186,6 @@ async def search_existing(self, meta): async def edit_name(self, meta): - name = meta['uuid'].replace('.mkv','').replace('.mp4','').replace(".", " ").replace("DDP2 
0","DDP2.0").replace("DDP5 1","DDP5.1").replace("H 264","H.264").replace("H 265","H.265").replace("DD+7 1","DDP7.1").replace("AAC2 0","AAC2.0").replace('DD5 1','DD5.1').replace('DD2 0','DD2.0').replace('TrueHD 7 1','TrueHD 7.1').replace('DTS-HD MA 7 1','DTS-HD MA 7.1').replace('DTS-HD MA 5 1','DTS-HD MA 5.1').replace("TrueHD 5 1","TrueHD 5.1").replace("DTS-X 7 1","DTS-X 7.1").replace("DTS-X 5 1","DTS-X 5.1").replace("FLAC 2 0","FLAC 2.0").replace("FLAC 2 0","FLAC 2.0").replace("FLAC 5 1","FLAC 5.1").replace("DD1 0","DD1.0").replace("DTS ES 5 1","DTS ES 5.1").replace("DTS5 1","DTS 5.1").replace("AAC1 0","AAC1.0") + name = meta['uuid'].replace('.mkv','').replace('.mp4','').replace(".", " ").replace("DDP2 0","DDP2.0").replace("DDP5 1","DDP5.1").replace("H 264","H.264").replace("H 265","H.265").replace("DD+7 1","DDP7.1").replace("AAC2 0","AAC2.0").replace('DD5 1','DD5.1').replace('DD2 0','DD2.0').replace('TrueHD 7 1','TrueHD 7.1').replace('DTS-HD MA 7 1','DTS-HD MA 7.1').replace('DTS-HD MA 5 1','DTS-HD MA 5.1').replace("TrueHD 5 1","TrueHD 5.1").replace("DTS-X 7 1","DTS-X 7.1").replace("DTS-X 5 1","DTS-X 5.1").replace("FLAC 2 0","FLAC 2.0").replace("FLAC 2 0","FLAC 2.0").replace("FLAC 5 1","FLAC 5.1").replace("DD1 0","DD1.0").replace("DTS ES 5 1","DTS ES 5.1").replace("DTS5 1","DTS 5.1").replace("AAC1 0","AAC1.0").replace("DD+5 1","DDP5.1").replace("DD+2 0","DDP2.0").replace("DD+1 0","DDP1.0") return name From 731164452eacc1c981cf8be6825faa48fee2a211 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 27 Apr 2024 14:05:54 +1000 Subject: [PATCH 032/741] FNP - original signature --- src/trackers/FNP.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/FNP.py b/src/trackers/FNP.py index a01274d72..c13995604 100644 --- a/src/trackers/FNP.py +++ b/src/trackers/FNP.py @@ -31,7 +31,7 @@ def __init__(self, config): self.source_flag = 'FnP' self.upload_url = 'https://fearnopeer.com/api/torrents/upload' self.search_url = 'https://fearnopeer.com/api/torrents/filter' - self.signature = 'This torrent was downloaded from FearNoPeer.' + self.signature = f"\n[center][url=https://blutopia.cc/forums/topics/3087]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = [""] pass From 997c729a304fb6fa0c1475966a263d50d8506b47 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 27 Apr 2024 14:13:04 +1000 Subject: [PATCH 033/741] README update --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b29838c85..d767465e7 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ A simple tool to take the work out of uploading. - Can re-use existing torrents instead of hashing new - Generates proper name for your upload using Mediainfo/BDInfo and TMDb/IMDb conforming to site rules - Checks for existing releases already on site - - Uploads to PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN/OTW + - Uploads to PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN/RTF/OTW/FNP - Adds to your client with fast resume, seeding instantly (rtorrent/qbittorrent/deluge/watch folder) - ALL WITH MINIMAL INPUT! 
- Currently works with .mkv/.mp4/Blu-ray/DVD/HD-DVDs From 3e8822e72f81a1e35d81350cab9fb28d3ad54725 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 27 Apr 2024 20:24:13 +1000 Subject: [PATCH 034/741] Remove API key in config file --- data/example-config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/example-config.py b/data/example-config.py index 6739f0336..778091673 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -98,7 +98,7 @@ # "anon" : False }, "SN": { - "api_key": "6Z1tMrXzcYpIeSdGZueQWqb3BowlS6YuIoZLHe3dvIqkSfY0Ws5SHx78oGSTazG0jQ1agduSqe07FPPE8sdWTg", + "api_key": "SN", "announce_url": "https://tracker.swarmazon.club:8443//announce", }, "HP" :{ From d65f8b2fb09a72d4e222f9f24601579f0aa68f60 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 27 Apr 2024 23:10:56 +1000 Subject: [PATCH 035/741] Update - signature URL's --- src/trackers/FNP.py | 2 +- src/trackers/HUNO.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/trackers/FNP.py b/src/trackers/FNP.py index c13995604..f255b2c43 100644 --- a/src/trackers/FNP.py +++ b/src/trackers/FNP.py @@ -31,7 +31,7 @@ def __init__(self, config): self.source_flag = 'FnP' self.upload_url = 'https://fearnopeer.com/api/torrents/upload' self.search_url = 'https://fearnopeer.com/api/torrents/filter' - self.signature = f"\n[center][url=https://blutopia.cc/forums/topics/3087]Created by L4G's Upload Assistant[/url][/center]" + self.signature = f"\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = [""] pass diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index 0bd8c746d..32ee4d2c6 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -25,7 +25,7 @@ def __init__(self, config): self.source_flag = 'HUNO' self.search_url = 'https://hawke.uno/api/torrents/filter' self.upload_url = 'https://hawke.uno/api/torrents/upload' - self.signature = "\n[center][url=https://github.com/L4GSP1KE/Upload-Assistant]Created by HUNO's Upload Assistant[/url][/center]" + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = [""] pass From 8f59da357689bd1b85071fd447c6d4af21c405b4 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 27 Apr 2024 23:30:30 +1000 Subject: [PATCH 036/741] Update - bump image optimization --- src/prep.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index 7bbbd970a..998e31751 100644 --- a/src/prep.py +++ b/src/prep.py @@ -997,10 +997,10 @@ def optimize_images(self, image): pyver = platform.python_version_tuple() if int(pyver[0]) == 3 and int(pyver[1]) >= 7: import oxipng - if os.path.getsize(image) >= 31000000: + if os.path.getsize(image) >= 16000000: oxipng.optimize(image, level=6) else: - oxipng.optimize(image, level=1) + oxipng.optimize(image, level=3) except: pass return From bab5f7ce48f406fef9b1707752887f3e947a9e75 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 27 Apr 2024 23:47:42 +1000 Subject: [PATCH 037/741] Update - add all trackers to default Also add the list to a comment. --- data/example-config.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/data/example-config.py b/data/example-config.py index 29f64762c..1dc303b78 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -34,7 +34,8 @@ "TRACKERS" : { # Which trackers do you want to upload to? 
- "default_trackers" : "BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR", + # Available tracker: BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP + "default_trackers" : "BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP", "BLU" : { "useAPI" : False, # Set to True if using BLU From 0a9a1e42539c6797fe7a8c63a1074f88ecac40f2 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 28 Apr 2024 00:19:07 +1000 Subject: [PATCH 038/741] Update - Readme links --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 0979c4c82..20b982e3a 100644 --- a/README.md +++ b/README.md @@ -32,16 +32,16 @@ A simple tool to take the work out of uploading. - Also needs MediaInfo and ffmpeg installed on your system - On Windows systems, ffmpeg must be added to PATH (https://windowsloop.com/install-ffmpeg-windows-10/) - On linux systems, get it from your favorite package manager - - Clone the repo to your system `git clone https://github.com/L4GSP1KE/Upload-Assistant.git` + - Clone the repo to your system `git clone https://github.com/Audionut/Upload-Assistant.git` - Copy and Rename `data/example-config.py` to `data/config.py` - - Edit `config.py` to use your information (more detailed information in the [wiki](https://github.com/L4GSP1KE/Upload-Assistant/wiki)) + - Edit `config.py` to use your information (more detailed information in the [wiki](https://github.com/Audionut/Upload-Assistant/wiki)) - tmdb_api (v3) key can be obtained from https://developers.themoviedb.org/3/getting-started/introduction - image host api keys can be obtained from their respective sites - Install necessary python modules `pip3 install --user -U -r requirements.txt` - **Additional Resources are found in the [wiki](https://github.com/L4GSP1KE/Upload-Assistant/wiki)** + **Additional Resources are found in the [wiki](https://github.com/Audionut/Upload-Assistant/wiki)** Feel free to contact me if you need help, I'm not that hard to find. @@ -55,4 +55,4 @@ A simple tool to take the work out of uploading. Args are OPTIONAL, for a list of acceptable args, pass `--help` ## **Docker Usage:** - Visit our wonderful [docker usage wiki page](https://github.com/L4GSP1KE/Upload-Assistant/wiki/Docker) + Visit our wonderful [docker usage wiki page](https://github.com/Audionut/Upload-Assistant/wiki/Docker) From d5ceb7b38031ff29e403700f4ac0310ea8d4f33a Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 28 Apr 2024 00:46:42 +1000 Subject: [PATCH 039/741] ADD - UTP Pulled from https://github.com/utpto/Upload-Assistant --- README.md | 2 +- data/example-config.py | 9 ++- src/trackers/UTP.py | 174 +++++++++++++++++++++++++++++++++++++++++ upload.py | 5 +- 4 files changed, 185 insertions(+), 5 deletions(-) create mode 100644 src/trackers/UTP.py diff --git a/README.md b/README.md index 20b982e3a..f8f8dab5d 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ A simple tool to take the work out of uploading. 
- Can re-use existing torrents instead of hashing new - Generates proper name for your upload using Mediainfo/BDInfo and TMDb/IMDb conforming to site rules - Checks for existing releases already on site - - Uploads to PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN/RTF/OTW/FNP/CBR + - Uploads to PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN/RTF/OTW/FNP/CBR/UTP - Adds to your client with fast resume, seeding instantly (rtorrent/qbittorrent/deluge/watch folder) - ALL WITH MINIMAL INPUT! - Currently works with .mkv/.mp4/Blu-ray/DVD/HD-DVDs diff --git a/data/example-config.py b/data/example-config.py index 1dc303b78..507d3b2ca 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -34,8 +34,8 @@ "TRACKERS" : { # Which trackers do you want to upload to? - # Available tracker: BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP - "default_trackers" : "BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP", + # Available tracker: BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, URP + "default_trackers" : "BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP", "BLU" : { "useAPI" : False, # Set to True if using BLU @@ -206,6 +206,11 @@ "announce_url" : "https://fearnopeer.com/announce/customannounceurl", # "anon" : "False" }, + "UTP" : { + "api_key" : "UTP api key", + "announce_url" : "https://UTP/announce/customannounceurl", + # "anon" : False + }, "MANUAL" : { # Uncomment and replace link with filebrowser (https://github.com/filebrowser/filebrowser) link to the Upload-Assistant directory, this will link to your filebrowser instead of uploading to uguu.se # "filebrowser" : "https://domain.tld/filebrowser/files/Upload-Assistant/" diff --git a/src/trackers/UTP.py b/src/trackers/UTP.py new file mode 100644 index 000000000..9b3476cc3 --- /dev/null +++ b/src/trackers/UTP.py @@ -0,0 +1,174 @@ +# -*- coding: utf-8 -*- +# import discord +import asyncio +import requests +import distutils.util +import os +import platform + +from src.trackers.COMMON import COMMON +from src.console import console + +class UTP(): + """ + Edit for Tracker: + Edit BASE.torrent with announce and source + Check for duplicates + Set type/category IDs + Upload + """ + def __init__(self, config): + self.config = config + self.tracker = 'UTP' + self.source_flag = 'UTOPIA' + self.search_url = 'https://utp.to/api/torrents/filter' + self.torrent_url = 'https://utp.to/api/torrents/' + self.upload_url = 'https://utp.to/api/torrents/upload' + self.signature = f"\n[center][url=https://utp.to/forums/topics/76]Created by UTOPIA Upload Assistant[/url][/center]" + self.banned_groups = [] + pass + + async def upload(self, meta): + common = COMMON(config=self.config) + await common.edit_torrent(meta, self.tracker, self.source_flag) + await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True) + cat_id = await self.get_cat_id(meta['category'], meta.get('edition', '')) + type_id = await self.get_type_id(meta['type']) + resolution_id = await self.get_res_id(meta['resolution']) + region_id = await common.unit3d_region_ids(meta.get('region')) + 
distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + anon = 0 + else: + anon = 1 + + if meta['bdinfo'] != None: + mi_dump = None + bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + else: + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + bd_dump = None + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[UTOPIA]DESCRIPTION.txt", 'r').read() + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[UTOPIA]{meta['clean_name']}.torrent", 'rb') + files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")} + data = { + 'name' : meta['name'], + 'description' : desc, + 'mediainfo' : mi_dump, + 'bdinfo' : bd_dump, + 'category_id' : cat_id, + 'type_id' : type_id, + 'resolution_id' : resolution_id, + 'tmdb' : meta['tmdb'], + 'imdb' : meta['imdb_id'].replace('tt', ''), + 'tvdb' : meta['tvdb_id'], + 'mal' : meta['mal_id'], + 'igdb' : 0, + 'anonymous' : anon, + 'stream' : meta['stream'], + 'sd' : meta['sd'], + 'keywords' : meta['keywords'], + 'personal_release' : int(meta.get('personalrelease', False)), + 'internal' : 0, + 'featured' : 0, + 'free' : 0, + 'doubleup' : 0, + 'sticky' : 0, + } + # Internal + if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + data['internal'] = 1 + + if region_id != 0: + data['region_id'] = region_id + if distributor_id != 0: + data['distributor_id'] = distributor_id + if meta.get('category') == "TV": + data['season_number'] = meta.get('season_int', '0') + data['episode_number'] = meta.get('episode_int', '0') + headers = { + 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + } + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + } + + if meta['debug'] == False: + response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + try: + console.print(response.json()) + except: + console.print("It may have uploaded, go check") + + return + else: + console.print(f"[cyan]Request Data:") + console.print(data) + open_torrent.close() + + + + + + async def get_cat_id(self, category_name, edition): + category_id = { + 'MOVIE': '1', + 'TV': '2', + 'FANRES': '3' + }.get(category_name, '0') + if category_name == 'MOVIE' and 'FANRES' in edition: + category_id = '3' + return category_id + + async def get_type_id(self, type): + type_id = { + 'DISC': '1', + 'REMUX': '2', + 'WEBDL': '4', + 'WEBRIP': '5', + 'HDTV': '6', + 'ENCODE': '3' + }.get(type, '0') + return type_id + + async def get_res_id(self, resolution): + resolution_id = { + '4320p': '1', + '2160p': '2', + '1080p': '3', + '1080i': '4' + }.get(resolution, '1') + return resolution_id + + + + + async def search_existing(self, meta): + dupes = [] + console.print("[yellow]Searching for existing torrents on site...") + params = { + 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId' : meta['tmdb'], + 'categories[]' : await self.get_cat_id(meta['category'], meta.get('edition', '')), + 'types[]' : await self.get_type_id(meta['type']), + 'resolutions[]' : await self.get_res_id(meta['resolution']), + 'name' : "" + } + if meta['category'] == 'TV': + params['name'] = 
params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" + if meta.get('edition', "") != "": + params['name'] = params['name'] + f" {meta['edition']}" + try: + response = requests.get(url=self.search_url, params=params) + response = response.json() + for each in response['data']: + result = [each][0]['attributes']['name'] + # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() + # if difference >= 0.05: + dupes.append(result) + except: + console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + await asyncio.sleep(5) + + return dupes diff --git a/upload.py b/upload.py index 0032a1b9f..511e40753 100644 --- a/upload.py +++ b/upload.py @@ -36,6 +36,7 @@ from src.trackers.OTW import OTW from src.trackers.FNP import FNP from src.trackers.CBR import CBR +from src.trackers.UTP import UTP import json from pathlib import Path import asyncio @@ -249,12 +250,12 @@ async def do_the_thing(base_dir): ####### Upload to Trackers ####### #################################### common = COMMON(config=config) - api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM','LCD','LST','HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'OTW', 'FNP', 'CBR'] + api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM','LCD','LST','HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'OTW', 'FNP', 'CBR', 'UTP'] http_trackers = ['HDB', 'TTG', 'FL', 'PTER', 'HDT', 'MTV'] tracker_class_map = { 'BLU' : BLU, 'BHD': BHD, 'AITHER' : AITHER, 'STC' : STC, 'R4E' : R4E, 'THR' : THR, 'STT' : STT, 'HP' : HP, 'PTP' : PTP, 'RF' : RF, 'SN' : SN, 'ACM' : ACM, 'HDB' : HDB, 'LCD': LCD, 'TTG' : TTG, 'LST' : LST, 'HUNO': HUNO, 'FL' : FL, 'LT' : LT, 'NBL' : NBL, 'ANT' : ANT, 'PTER': PTER, 'JPTV' : JPTV, - 'TL' : TL, 'TDC' : TDC, 'HDT' : HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF':RTF, 'OTW': OTW, 'FNP': FNP, 'CBR': CBR} + 'TL' : TL, 'TDC' : TDC, 'HDT' : HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF':RTF, 'OTW': OTW, 'FNP': FNP, 'CBR': CBR, 'UTP': UTP} for tracker in trackers: if meta['name'].endswith('DUPE?'): From dfc12d7a53b073cdcadb646ca916820da3ec8ae2 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 28 Apr 2024 00:47:47 +1000 Subject: [PATCH 040/741] Spelling --- data/example-config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/example-config.py b/data/example-config.py index 507d3b2ca..6f6dd1957 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -34,7 +34,7 @@ "TRACKERS" : { # Which trackers do you want to upload to? 
- # Available tracker: BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, URP + # Available tracker: BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP "default_trackers" : "BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP", "BLU" : { From dc0d95a46869a2015d7b6851f812d159197e41fe Mon Sep 17 00:00:00 2001 From: harpiacbr <166330660+harpiacbr@users.noreply.github.com> Date: Wed, 1 May 2024 12:17:45 -0300 Subject: [PATCH 041/741] Update CBR.py update id 1080i --- src/trackers/CBR.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/trackers/CBR.py b/src/trackers/CBR.py index 73ca0154a..aedb30937 100644 --- a/src/trackers/CBR.py +++ b/src/trackers/CBR.py @@ -136,12 +136,10 @@ async def get_type_id(self, type): async def get_res_id(self, resolution): resolution_id = { -# '8640p':'10', '4320p': '1', '2160p': '2', -# '1440p' : '2', '1080p': '3', - '1080i':'34', + '1080i':'4', '720p': '5', '576p': '6', '576i': '7', From 7b07df8d54eef4b76b50a7f291eccc8898fc3fad Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 13 May 2024 19:26:29 +1000 Subject: [PATCH 042/741] Revert "Merge branch 'pr/319'" This reverts commit 1c2578eaa626ee715597f803fc547779a44ff0ed, reversing changes made to dfc12d7a53b073cdcadb646ca916820da3ec8ae2. --- requirements.txt | 3 +-- src/prep.py | 35 +++++++++++++++++---------------- src/trackers/ACM.py | 4 ++-- src/trackers/AITHER.py | 4 ++-- src/trackers/ANT.py | 4 ++-- src/trackers/BHD.py | 6 +++--- src/trackers/BHDTV.py | 4 ++-- src/trackers/BLU.py | 4 ++-- src/trackers/FL.py | 8 ++++---- src/trackers/HDB.py | 4 ++-- src/trackers/HDT.py | 6 +++--- src/trackers/HP.py | 4 ++-- src/trackers/HUNO.py | 4 ++-- src/trackers/JPTV.py | 4 ++-- src/trackers/LCD.py | 4 ++-- src/trackers/LST.py | 4 ++-- src/trackers/LT.py | 4 ++-- src/trackers/MTV.py | 6 +++--- src/trackers/NBL.py | 2 +- src/trackers/OE.py | 4 ++-- src/trackers/PTER.py | 6 +++--- src/trackers/PTP.py | 4 ++-- src/trackers/R4E.py | 4 ++-- src/trackers/RF.py | 4 ++-- src/trackers/RTF.py | 2 +- src/trackers/STC.py | 4 ++-- src/trackers/STT.py | 4 ++-- src/trackers/TDC.py | 4 ++-- src/trackers/THR.py | 2 +- src/trackers/TTG.py | 6 +++--- src/trackers/UNIT3D_TEMPLATE.py | 4 ++-- 31 files changed, 81 insertions(+), 81 deletions(-) diff --git a/requirements.txt b/requirements.txt index b20352a43..19e7c5038 100644 --- a/requirements.txt +++ b/requirements.txt @@ -18,5 +18,4 @@ beautifulsoup4 pyoxipng rich Jinja2 -pyotp -str2bool \ No newline at end of file +pyotp \ No newline at end of file diff --git a/src/prep.py b/src/prep.py index ed7915038..998e31751 100644 --- a/src/prep.py +++ b/src/prep.py @@ -17,8 +17,8 @@ import re import math import sys + import distutils.util import asyncio - from str2bool import str2bool from guessit import guessit import ntpath from pathlib import Path @@ -97,7 +97,7 @@ async def gather_prep(self, meta, mode): meta['filelist'] = [] try: guess_name = bdinfo['title'].replace('-',' ') - filename = guessit(re.sub(r"[^0-9a-zA-Z\[\]]+", " ", guess_name), {"excludes" : ["country", "language"]})['title'] + filename = guessit(re.sub("[^0-9a-zA-Z\[\]]+", " ", guess_name), {"excludes" : ["country", "language"]})['title'] untouched_filename = bdinfo['title'] try: meta['search_year'] = 
guessit(bdinfo['title'])['year'] @@ -105,7 +105,7 @@ async def gather_prep(self, meta, mode): meta['search_year'] = "" except Exception: guess_name = bdinfo['label'].replace('-',' ') - filename = guessit(re.sub(r"[^0-9a-zA-Z\[\]]+", " ", guess_name), {"excludes" : ["country", "language"]})['title'] + filename = guessit(re.sub("[^0-9a-zA-Z\[\]]+", " ", guess_name), {"excludes" : ["country", "language"]})['title'] untouched_filename = bdinfo['label'] try: meta['search_year'] = guessit(bdinfo['label'])['year'] @@ -164,7 +164,7 @@ async def gather_prep(self, meta, mode): videopath, meta['filelist'] = self.get_video(videoloc, meta.get('mode', 'discord')) video, meta['scene'], meta['imdb'] = self.is_scene(videopath, meta.get('imdb', None)) guess_name = ntpath.basename(video).replace('-',' ') - filename = guessit(re.sub(r"[^0-9a-zA-Z\[\]]+", " ", guess_name), {"excludes" : ["country", "language"]}).get("title", guessit(re.sub(r"[^0-9a-zA-Z]+", " ", guess_name), {"excludes" : ["country", "language"]})["title"]) + filename = guessit(re.sub("[^0-9a-zA-Z\[\]]+", " ", guess_name), {"excludes" : ["country", "language"]}).get("title", guessit(re.sub("[^0-9a-zA-Z]+", " ", guess_name), {"excludes" : ["country", "language"]})["title"]) untouched_filename = os.path.basename(video) try: meta['search_year'] = guessit(video)['year'] @@ -359,8 +359,8 @@ async def gather_prep(self, meta, mode): meta['edition'], meta['repack'] = self.get_edition(meta['path'], bdinfo, meta['filelist'], meta.get('manual_edition')) if "REPACK" in meta.get('edition', ""): - meta['repack'] = re.search(r"REPACK[\d]?", meta['edition'])[0] - meta['edition'] = re.sub(r"REPACK[\d]?", "", meta['edition']).strip().replace(' ', ' ') + meta['repack'] = re.search("REPACK[\d]?", meta['edition'])[0] + meta['edition'] = re.sub("REPACK[\d]?", "", meta['edition']).strip().replace(' ', ' ') @@ -1369,7 +1369,7 @@ def get_romaji(self, tmdb_name, mal): result = {'title' : {}} difference = 0 for anime in media: - search_name = re.sub(r"[^0-9a-zA-Z\[\]]+", "", tmdb_name.lower().replace(' ', '')) + search_name = re.sub("[^0-9a-zA-Z\[\]]+", "", tmdb_name.lower().replace(' ', '')) for title in anime['title'].values(): if title != None: title = re.sub(u'[\u3000-\u303f\u3040-\u309f\u30a0-\u30ff\uff00-\uff9f\u4e00-\u9faf\u3400-\u4dbf]+ (?=[A-Za-z ]+–)', "", title.lower().replace(' ', ''), re.U) @@ -1906,7 +1906,7 @@ def get_edition(self, video, bdinfo, filelist, manual_edition): repack = "RERIP" # if "HYBRID" in video.upper() and "HYBRID" not in title.upper(): # edition = "Hybrid " + edition - edition = re.sub(r"(REPACK\d?)?(RERIP)?(PROPER)?", "", edition, flags=re.IGNORECASE).strip() + edition = re.sub("(REPACK\d?)?(RERIP)?(PROPER)?", "", edition, flags=re.IGNORECASE).strip() bad = ['internal', 'limited', 'retail'] if edition.lower() in bad: @@ -2010,7 +2010,7 @@ def torf_cb(self, torrent, filepath, pieces_done, pieces_total): cli_ui.info_progress("Hashing...", pieces_done, pieces_total) def create_random_torrents(self, base_dir, uuid, num, path): - manual_name = re.sub(r"[^0-9a-zA-Z\[\]\'\-]+", ".", os.path.basename(path)) + manual_name = re.sub("[^0-9a-zA-Z\[\]\'\-]+", ".", os.path.basename(path)) base_torrent = Torrent.read(f"{base_dir}/tmp/{uuid}/BASE.torrent") for i in range(1, int(num) + 1): new_torrent = base_torrent @@ -2020,6 +2020,7 @@ def create_random_torrents(self, base_dir, uuid, num, path): def create_base_from_existing_torrent(self, torrentpath, base_dir, uuid): if os.path.exists(torrentpath): base_torrent = Torrent.read(torrentpath) + 
base_torrent.creation_date = datetime.now() base_torrent.trackers = ['https://fake.tracker'] base_torrent.comment = "Created by L4G's Upload Assistant" base_torrent.created_by = "Created by L4G's Upload Assistant" @@ -2490,8 +2491,8 @@ async def get_season_episode(self, video, meta): for lang, names in values.items(): if lang == "jp": for name in names: - romaji_check = re.sub(r"[^0-9a-zA-Z\[\]]+", "", romaji.lower().replace(' ', '')) - name_check = re.sub(r"[^0-9a-zA-Z\[\]]+", "", name.lower().replace(' ', '')) + romaji_check = re.sub("[^0-9a-zA-Z\[\]]+", "", romaji.lower().replace(' ', '')) + name_check = re.sub("[^0-9a-zA-Z\[\]]+", "", name.lower().replace(' ', '')) diff = SequenceMatcher(None, romaji_check, name_check).ratio() if romaji_check in name_check: if diff >= difference: @@ -2504,8 +2505,8 @@ async def get_season_episode(self, video, meta): difference = diff if lang == "us": for name in names: - eng_check = re.sub(r"[^0-9a-zA-Z\[\]]+", "", eng_title.lower().replace(' ', '')) - name_check = re.sub(r"[^0-9a-zA-Z\[\]]+", "", name.lower().replace(' ', '')) + eng_check = re.sub("[^0-9a-zA-Z\[\]]+", "", eng_title.lower().replace(' ', '')) + name_check = re.sub("[^0-9a-zA-Z\[\]]+", "", name.lower().replace(' ', '')) diff = SequenceMatcher(None, eng_check, name_check).ratio() if eng_check in name_check: if diff >= difference: @@ -2622,7 +2623,7 @@ def get_service(self, video, tag, audio, guess_title): } - video_name = re.sub(r"[.()]", " ", video.replace(tag, '').replace(guess_title, '')) + video_name = re.sub("[.()]", " ", video.replace(tag, '').replace(guess_title, '')) if "DTS-HD MA" in audio: video_name = video_name.replace("DTS-HD.MA.", "").replace("DTS-HD MA ", "") for key, value in services.items(): @@ -2771,7 +2772,7 @@ async def tag_override(self, meta): else: pass elif key == 'personalrelease': - meta[key] = bool(str2bool(str(value.get(key, 'False')))) + meta[key] = bool(distutils.util.strtobool(str(value.get(key, 'False')))) elif key == 'template': meta['desc_template'] = value.get(key) else: @@ -2826,7 +2827,7 @@ async def package(self, meta): generic.write(f"\nThumbnail Image:\n") for each in meta['image_list']: generic.write(f"{each['img_url']}\n") - title = re.sub(r"[^0-9a-zA-Z\[\]]+", "", meta['title']) + title = re.sub("[^0-9a-zA-Z\[\]]+", "", meta['title']) archive = f"{meta['base_dir']}/tmp/{meta['uuid']}/{title}" torrent_files = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}","*.torrent") if isinstance(torrent_files, list) and len(torrent_files) > 1: @@ -2836,7 +2837,7 @@ async def package(self, meta): try: if os.path.exists(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent"): base_torrent = Torrent.read(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") - manual_name = re.sub(r"[^0-9a-zA-Z\[\]\'\-]+", ".", os.path.basename(meta['path'])) + manual_name = re.sub("[^0-9a-zA-Z\[\]\'\-]+", ".", os.path.basename(meta['path'])) Torrent.copy(base_torrent).write(f"{meta['base_dir']}/tmp/{meta['uuid']}/{manual_name}.torrent", overwrite=True) # shutil.copy(os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent"), os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['name'].replace(' ', '.')}.torrent").replace(' ', '.')) filebrowser = self.config['TRACKERS'].get('MANUAL', {}).get('filebrowser', None) diff --git a/src/trackers/ACM.py b/src/trackers/ACM.py index 4f75e8522..270fd25b3 100644 --- a/src/trackers/ACM.py +++ b/src/trackers/ACM.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests +import distutils.util import os import 
platform -from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -207,7 +207,7 @@ async def upload(self, meta): region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) acm_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 308383a5f..7930871eb 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -3,7 +3,7 @@ import asyncio import requests from difflib import SequenceMatcher -from str2bool import str2bool +import distutils.util import json import os import platform @@ -39,7 +39,7 @@ async def upload(self, meta): type_id = await self.get_type_id(meta['type']) resolution_id = await self.get_res_id(meta['resolution']) name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index ed9289c8f..0bd5c40b8 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -3,8 +3,8 @@ import os import asyncio import requests +import distutils.util import platform -from str2bool import str2bool from pymediainfo import MediaInfo from src.trackers.COMMON import COMMON @@ -68,7 +68,7 @@ async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) flags = await self.get_flags(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index d9e73acdf..d6ce9bca1 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -3,7 +3,7 @@ import asyncio import requests from difflib import SequenceMatcher -from str2bool import str2bool +import distutils.util import urllib import os import platform @@ -39,7 +39,7 @@ async def upload(self, meta): tags = await self.get_tags(meta) custom, edition = await self.get_edition(meta, tags) bhd_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 @@ -263,7 +263,7 @@ async def search_existing(self, meta): async def get_live(self, meta): draft = self.config['TRACKERS'][self.tracker]['draft_default'].strip() - draft = bool(str2bool(str(draft))) #0 for send to draft, 1 for live + draft = bool(distutils.util.strtobool(str(draft))) #0 for send to draft, 1 for live if draft: draft_int = 0 else: diff --git a/src/trackers/BHDTV.py b/src/trackers/BHDTV.py index ea6f911c1..97d0e1c8e 100644 --- a/src/trackers/BHDTV.py +++ b/src/trackers/BHDTV.py @@ -4,7 +4,7 @@ from torf import Torrent import requests from src.console import console 
-from str2bool import str2bool +import distutils.util from pprint import pprint import os import traceback @@ -54,7 +54,7 @@ async def upload(self, meta): # region_id = await common.unit3d_region_ids(meta.get('region')) # distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) if meta['anon'] == 0 and bool( - str2bool(self.config['TRACKERS'][self.tracker].get('anon', "False"))) == False: + distutils.util.strtobool(self.config['TRACKERS'][self.tracker].get('anon', "False"))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/BLU.py b/src/trackers/BLU.py index d3efe8ef3..57d750110 100644 --- a/src/trackers/BLU.py +++ b/src/trackers/BLU.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests +import distutils.util import os import platform -from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -49,7 +49,7 @@ async def upload(self, meta): resolution_id = await self.get_res_id(meta['resolution']) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/FL.py b/src/trackers/FL.py index 5813f469a..06cd4bb0b 100644 --- a/src/trackers/FL.py +++ b/src/trackers/FL.py @@ -3,7 +3,7 @@ import re import os from pathlib import Path -from str2bool import str2bool +import distutils.util import json import glob import pickle @@ -98,7 +98,7 @@ async def edit_name(self, meta): fl_name = fl_name.replace('DTS7.1', 'DTS').replace('DTS5.1', 'DTS').replace('DTS2.0', 'DTS').replace('DTS1.0', 'DTS') fl_name = fl_name.replace('Dubbed', '').replace('Dual-Audio', '') fl_name = ' '.join(fl_name.split()) - fl_name = re.sub(r"[^0-9a-zA-ZÀ-ÿ. &+'\-\[\]]+", "", fl_name) + fl_name = re.sub("[^0-9a-zA-ZÀ-ÿ. 
&+'\-\[\]]+", "", fl_name) fl_name = fl_name.replace(' ', '.').replace('..', '.') return fl_name @@ -161,7 +161,7 @@ async def upload(self, meta): if int(meta.get('imdb_id', '').replace('tt', '')) != 0: data['imdbid'] = meta.get('imdb_id', '').replace('tt', '') data['description'] = meta['imdb_info'].get('genres', '') - if self.uploader_name not in ("", None) and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if self.uploader_name not in ("", None) and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: data['epenis'] = self.uploader_name if has_ro_audio: data['materialro'] = 'on' @@ -319,7 +319,7 @@ async def edit_desc(self, meta): desc = bbcode.convert_code_to_quote(desc) desc = bbcode.convert_comparison_to_centered(desc, 900) desc = desc.replace('[img]', '[img]').replace('[/img]', '[/img]') - desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) + desc = re.sub("(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) if meta['is_disc'] != 'BDMV': url = "https://up.img4k.net/api/description" data = { diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py index b8eafcb7c..7fab14991 100644 --- a/src/trackers/HDB.py +++ b/src/trackers/HDB.py @@ -192,7 +192,7 @@ async def edit_name(self, meta): hdb_name = hdb_name.replace('Dubbed', '').replace('Dual-Audio', '') hdb_name = hdb_name.replace('REMUX', 'Remux') hdb_name = ' '.join(hdb_name.split()) - hdb_name = re.sub(r"[^0-9a-zA-ZÀ-ÿ. :&+'\-\[\]]+", "", hdb_name) + hdb_name = re.sub("[^0-9a-zA-ZÀ-ÿ. :&+'\-\[\]]+", "", hdb_name) hdb_name = hdb_name.replace(' .', '.').replace('..', '.') return hdb_name @@ -431,7 +431,7 @@ async def edit_desc(self, meta): desc = bbcode.convert_code_to_quote(desc) desc = bbcode.convert_spoiler_to_hide(desc) desc = bbcode.convert_comparison_to_centered(desc, 1000) - desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) + desc = re.sub("(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) descfile.write(desc) if self.rehost_images == True: console.print("[green]Rehosting Images...") diff --git a/src/trackers/HDT.py b/src/trackers/HDT.py index 6bcf76964..6b9fa0320 100644 --- a/src/trackers/HDT.py +++ b/src/trackers/HDT.py @@ -6,8 +6,8 @@ import glob import cli_ui import pickle +import distutils from pathlib import Path -from str2bool import str2bool from bs4 import BeautifulSoup from unidecode import unidecode from pymediainfo import MediaInfo @@ -105,7 +105,7 @@ async def edit_name(self, meta): hdt_name = hdt_name.replace(' DV ', ' DoVi ') hdt_name = ' '.join(hdt_name.split()) - hdt_name = re.sub(r"[^0-9a-zA-ZÀ-ÿ. &+'\-\[\]]+", "", hdt_name) + hdt_name = re.sub("[^0-9a-zA-ZÀ-ÿ. 
&+'\-\[\]]+", "", hdt_name) hdt_name = hdt_name.replace(':', '').replace('..', ' ').replace(' ', ' ') return hdt_name @@ -173,7 +173,7 @@ async def upload(self, meta): data['season'] = 'false' # Anonymous check - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: data['anonymous'] = 'false' else: data['anonymous'] = 'true' diff --git a/src/trackers/HP.py b/src/trackers/HP.py index 7c11e0744..250e9e851 100644 --- a/src/trackers/HP.py +++ b/src/trackers/HP.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests +import distutils.util import os import platform -from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -78,7 +78,7 @@ async def upload(self, meta): await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index 881a0c1a9..32ee4d2c6 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -3,7 +3,7 @@ import asyncio import requests from difflib import SequenceMatcher -from str2bool import str2bool +import distutils.util import os import re import platform @@ -37,7 +37,7 @@ async def upload(self, meta): cat_id = await self.get_cat_id(meta['category']) type_id = await self.get_type_id(meta) resolution_id = await self.get_res_id(meta['resolution']) - if meta['anon'] == 0 and bool(str2bool(self.config['TRACKERS']['HUNO'].get('anon', "False"))) == False: + if meta['anon'] == 0 and bool(distutils.util.strtobool(self.config['TRACKERS']['HUNO'].get('anon', "False"))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/JPTV.py b/src/trackers/JPTV.py index 06253aebe..354b1be1a 100644 --- a/src/trackers/JPTV.py +++ b/src/trackers/JPTV.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests +import distutils.util import os import platform -from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -92,7 +92,7 @@ async def upload(self, meta): region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) jptv_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/LCD.py b/src/trackers/LCD.py index a457a601a..5c3f14309 100644 --- a/src/trackers/LCD.py +++ b/src/trackers/LCD.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests +import distutils.util import os import platform -from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -40,7 +40,7 @@ async def upload(self, meta): region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await 
common.unit3d_distributor_ids(meta.get('distributor')) name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/LST.py b/src/trackers/LST.py index 37e1db0e0..21368bd39 100644 --- a/src/trackers/LST.py +++ b/src/trackers/LST.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests +import distutils.util import os import platform -from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -87,7 +87,7 @@ async def upload(self, meta): await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/LT.py b/src/trackers/LT.py index 4bc6088b1..2e06a0df2 100644 --- a/src/trackers/LT.py +++ b/src/trackers/LT.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests +import distutils.util import os import platform -from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -88,7 +88,7 @@ async def upload(self, meta): region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) lt_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index cee1de771..aba9b3b5d 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -8,8 +8,8 @@ import cli_ui import pickle import re +import distutils.util from pathlib import Path -from str2bool import str2bool from src.trackers.COMMON import COMMON class MTV(): @@ -75,7 +75,7 @@ async def upload(self, meta): mtv_name = await self.edit_name(meta) # anon - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 @@ -221,7 +221,7 @@ async def edit_name(self, meta): if meta['tag'] == "": mtv_name = f"{mtv_name}-NoGrp" mtv_name = ' '.join(mtv_name.split()) - mtv_name = re.sub(r"[^0-9a-zA-ZÀ-ÿ. &+'\-\[\]]+", "", mtv_name) + mtv_name = re.sub("[^0-9a-zA-ZÀ-ÿ. 
&+'\-\[\]]+", "", mtv_name) mtv_name = mtv_name.replace(' ', '.').replace('..', '.') return mtv_name diff --git a/src/trackers/NBL.py b/src/trackers/NBL.py index 56e01a671..813cb871b 100644 --- a/src/trackers/NBL.py +++ b/src/trackers/NBL.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests +import distutils.util import os from guessit import guessit -from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console diff --git a/src/trackers/OE.py b/src/trackers/OE.py index 332fc6d7f..bb69a3e02 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -3,7 +3,7 @@ import asyncio import requests from difflib import SequenceMatcher -from str2bool import str2bool +import distutils.util import json import os import platform @@ -37,7 +37,7 @@ async def upload(self, meta): type_id = await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('video_codec'), meta.get('category', "")) resolution_id = await self.get_res_id(meta['resolution']) oe_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/PTER.py b/src/trackers/PTER.py index 71eb1c2a1..b9fcecfa0 100644 --- a/src/trackers/PTER.py +++ b/src/trackers/PTER.py @@ -7,7 +7,7 @@ import traceback import json import glob -from str2bool import str2bool +import distutils.util import cli_ui import pickle from unidecode import unidecode @@ -187,7 +187,7 @@ async def edit_desc(self, meta): desc = bbcode.convert_spoiler_to_hide(desc) desc = bbcode.convert_comparison_to_centered(desc, 1000) desc = desc.replace('[img]', '[img]') - desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) + desc = re.sub("(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) descfile.write(desc) if self.rehost_images == True: @@ -288,7 +288,7 @@ async def pterimg_upload(self, meta): return image_list async def get_anon(self, anon): - if anon == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if anon == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 'no' else: anon = 'yes' diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 91944df34..7ededcac7 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -2,9 +2,9 @@ import requests import asyncio import re +import distutils.util import os from pathlib import Path -from str2bool import str2bool import time import traceback import json @@ -33,7 +33,7 @@ def __init__(self, config): self.announce_url = config['TRACKERS']['PTP'].get('announce_url', '').strip() self.username = config['TRACKERS']['PTP'].get('username', '').strip() self.password = config['TRACKERS']['PTP'].get('password', '').strip() - self.web_source = str2bool(str(config['TRACKERS']['PTP'].get('add_web_source_to_desc', True))) + self.web_source = distutils.util.strtobool(str(config['TRACKERS']['PTP'].get('add_web_source_to_desc', True))) self.user_agent = f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' self.banned_groups = ['aXXo', 'BMDru', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'd3g', 'DNL', 'FaNGDiNG0', 'HD2DVD', 'HDTime', 'ION10', 'iPlanet', 'KiNGDOM', 'mHD', 'mSD', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'OFT', 'PRODJi', 'SANTi', 'SPiRiT', 'STUTTERSHIT', 'ViSION', 'VXT', diff 
--git a/src/trackers/R4E.py b/src/trackers/R4E.py index 0528c10cc..67d33c997 100644 --- a/src/trackers/R4E.py +++ b/src/trackers/R4E.py @@ -3,7 +3,7 @@ import asyncio import requests from difflib import SequenceMatcher -from str2bool import str2bool +import distutils.util import json import tmdbsimple as tmdb import os @@ -36,7 +36,7 @@ async def upload(self, meta): type_id = await self.get_type_id(meta['resolution']) await common.unit3d_edit_desc(meta, self.tracker, self.signature) name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS']['R4E'].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS']['R4E'].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/RF.py b/src/trackers/RF.py index aa108340d..ca94837b9 100644 --- a/src/trackers/RF.py +++ b/src/trackers/RF.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests +import distutils.util import os import platform -from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -41,7 +41,7 @@ async def upload(self, meta): type_id = await self.get_type_id(meta['type']) resolution_id = await self.get_res_id(meta['resolution']) stt_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/RTF.py b/src/trackers/RTF.py index 28ce55924..8ef181d6b 100644 --- a/src/trackers/RTF.py +++ b/src/trackers/RTF.py @@ -54,7 +54,7 @@ async def upload(self, meta): # 'description' : meta['overview'] + "\n\n" + desc + "\n\n" + "Uploaded by L4G Upload Assistant", 'description': "this is a description", # editing mediainfo so that instead of 1 080p its 1,080p as site mediainfo parser wont work other wise. 
- 'mediaInfo': re.sub(r"(\d+)\s+(\d+)", r"\1,\2", mi_dump) if bd_dump == None else f"{bd_dump}", + 'mediaInfo': re.sub("(\d+)\s+(\d+)", r"\1,\2", mi_dump) if bd_dump == None else f"{bd_dump}", "nfo": "", "url": "https://www.imdb.com/title/" + (meta['imdb_id'] if str(meta['imdb_id']).startswith("tt") else "tt" + meta['imdb_id']) + "/", # auto pulled from IMDB diff --git a/src/trackers/STC.py b/src/trackers/STC.py index 71d70ce2f..224e89889 100644 --- a/src/trackers/STC.py +++ b/src/trackers/STC.py @@ -2,7 +2,7 @@ import asyncio import requests from difflib import SequenceMatcher -from str2bool import str2bool +import distutils.util import json import os import platform @@ -36,7 +36,7 @@ async def upload(self, meta): type_id = await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('sd', 0), meta.get('category', "")) resolution_id = await self.get_res_id(meta['resolution']) stc_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/STT.py b/src/trackers/STT.py index fdeed9e88..0a72f7eab 100644 --- a/src/trackers/STT.py +++ b/src/trackers/STT.py @@ -3,7 +3,7 @@ import asyncio import requests from difflib import SequenceMatcher -from str2bool import str2bool +import distutils.util import json import os import platform @@ -37,7 +37,7 @@ async def upload(self, meta): type_id = await self.get_type_id(meta['type']) resolution_id = await self.get_res_id(meta['resolution']) stt_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/TDC.py b/src/trackers/TDC.py index b22ec6cd7..e201bcb83 100644 --- a/src/trackers/TDC.py +++ b/src/trackers/TDC.py @@ -2,8 +2,8 @@ # import discord import asyncio import requests +import distutils.util import os -from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -77,7 +77,7 @@ async def upload(self, meta): await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/THR.py b/src/trackers/THR.py index f1f74e15b..3080ae581 100644 --- a/src/trackers/THR.py +++ b/src/trackers/THR.py @@ -49,7 +49,7 @@ async def upload(self, session, meta): return else: thr_name = thr_name_manually - torrent_name = re.sub(r"[^0-9a-zA-Z. '\-\[\]]+", " ", thr_name) + torrent_name = re.sub("[^0-9a-zA-Z. 
'\-\[\]]+", " ", thr_name) if meta.get('is_disc', '') == 'BDMV': diff --git a/src/trackers/TTG.py b/src/trackers/TTG.py index 6795d13cc..491a3bacc 100644 --- a/src/trackers/TTG.py +++ b/src/trackers/TTG.py @@ -7,8 +7,8 @@ from pathlib import Path import traceback import json +import distutils.util import cli_ui -from str2bool import str2bool from unidecode import unidecode from urllib.parse import urlparse, quote from src.trackers.COMMON import COMMON @@ -104,7 +104,7 @@ async def get_type_id(self, meta): return type_id async def get_anon(self, anon): - if anon == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if anon == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 'no' else: anon = 'yes' @@ -327,7 +327,7 @@ async def edit_desc(self, meta): desc = bbcode.convert_spoiler_to_hide(desc) desc = bbcode.convert_comparison_to_centered(desc, 1000) desc = desc.replace('[img]', '[img]') - desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) + desc = re.sub("(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) descfile.write(desc) images = meta['image_list'] if len(images) > 0: diff --git a/src/trackers/UNIT3D_TEMPLATE.py b/src/trackers/UNIT3D_TEMPLATE.py index c77e758a7..405e2c9f1 100644 --- a/src/trackers/UNIT3D_TEMPLATE.py +++ b/src/trackers/UNIT3D_TEMPLATE.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests +import distutils.util import os import platform -from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -82,7 +82,7 @@ async def upload(self, meta): await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 From 0d97e7c49e2214beccd7b4144314859dd5237f4a Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 13 May 2024 20:02:00 +1000 Subject: [PATCH 043/741] Update - HUNO banned groups --- src/trackers/HUNO.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index 32ee4d2c6..3cb5ea642 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -26,7 +26,7 @@ def __init__(self, config): self.search_url = 'https://hawke.uno/api/torrents/filter' self.upload_url = 'https://hawke.uno/api/torrents/upload' self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" - self.banned_groups = [""] + self.banned_groups = ["4K4U, Bearfish, BiTOR, BONE, D3FiL3R, d3g, DTR, ELiTE, EVO, eztv, EzzRips, FGT, HashMiner, HETeam, HEVCBay, HiQVE, HR-DR, iFT, ION265, iVy, JATT, Joy, LAMA, m3th, MeGusta, MRN, Musafirboy, OEPlus, Pahe.in, PHOCiS, PSA, RARBG, RMTeam, ShieldBearer, SiQ, TBD, Telly, TSP, VXT, WKS, YAWNiX, YIFY, YTS"] pass From 935b1d697492d58e6d363f30da66d835cf310473 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 13 May 2024 20:06:07 +1000 Subject: [PATCH 044/741] Remove blank line --- upload.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/upload.py b/upload.py index 511e40753..4b1845a7d 100644 --- a/upload.py +++ b/upload.py @@ -572,5 
+572,4 @@ def get_missing(meta): loop = asyncio.get_event_loop() loop.run_until_complete(do_the_thing(base_dir)) else: - asyncio.run(do_the_thing(base_dir)) - + asyncio.run(do_the_thing(base_dir)) \ No newline at end of file From a1d2763413e54ae8aa4d76c480a7e784869be0ff Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 13 May 2024 20:36:06 +1000 Subject: [PATCH 045/741] Update docker for my VS install Seems to work --- Dockerfile | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/Dockerfile b/Dockerfile index 4c3e3ccba..1d051748e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,19 +1,27 @@ FROM alpine:latest -# add mono repo and mono +# Add mono repository and install mono RUN apk add --no-cache mono --repository http://dl-cdn.alpinelinux.org/alpine/edge/testing -# install requirements -RUN apk add --no-cache --upgrade ffmpeg mediainfo python3 git py3-pip python3-dev g++ cargo mktorrent rust -RUN pip3 install wheel +# Install system dependencies including Python and tools +RUN apk add --no-cache --upgrade ffmpeg mediainfo python3 git py3-pip python3-dev g++ cargo mktorrent rust -WORKDIR Upload-Assistant +# Set up a virtual environment to isolate our Python dependencies +RUN python3 -m venv /venv +ENV PATH="/venv/bin:$PATH" -# install reqs +# Install wheel and other Python dependencies +RUN pip install wheel + +# Set the working directory in the container +WORKDIR /Upload-Assistant + +# Copy the Python requirements file and install Python dependencies COPY requirements.txt . -RUN pip3 install -r requirements.txt +RUN pip install -r requirements.txt -# copy everything +# Copy the rest of the application's code COPY . . +# Set the entry point for the container ENTRYPOINT ["python3", "/Upload-Assistant/upload.py"] \ No newline at end of file From eab4a8b7906ad49c448d76dc286bfc1930634092 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 13 May 2024 22:32:37 +1000 Subject: [PATCH 046/741] Update - docker-image --- .github/workflows/docker-image.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index 8a5d04241..195a62ed6 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -54,4 +54,4 @@ jobs: tags: ${{ steps.meta.outputs.tags }}, ${{ env.REGISTRY }}/${{ env.LOWER_CASE_REPO_NAME }}:${{ env.SHA_SHORT }} labels: ${{ steps.meta.outputs.labels }} cache-from: type=gha - cache-to: type=gha,mode=max + cache-to: type=gha,mode=max \ No newline at end of file From 65b62c458af3d71a31798807799010deb69ef675 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 13 May 2024 12:46:03 +0000 Subject: [PATCH 047/741] Update README.md --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index f8f8dab5d..b997ac3ea 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,5 @@ +[![Create and publish a Docker image](https://github.com/Audionut/Upload-Assistant/actions/workflows/docker-image.yml/badge.svg?branch=master)](https://github.com/Audionut/Upload-Assistant/actions/workflows/docker-image.yml) + # L4G's Upload Assistant A simple tool to take the work out of uploading. 
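(For context on the Dockerfile rework above: it moves all Python dependencies into a dedicated virtualenv at /venv and prepends it to PATH. A quick, hypothetical sanity check, not part of any patch in this series, to confirm at runtime that the container really runs out of that venv:

    import sys

    # When launched through the Dockerfile's ENTRYPOINT, the interpreter
    # should resolve to the venv created by `python -m venv /venv` above.
    print(sys.prefix)                      # expected: /venv
    print(sys.prefix != sys.base_prefix)   # True only inside a virtualenv

sys.base_prefix points at the interpreter the venv was created from, so the comparison is a reliable in-venv test.)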
From 2893aea1419a62dd503ff68debdc001ee3f5928c Mon Sep 17 00:00:00 2001 From: CubedRoot Date: Thu, 16 May 2024 10:56:51 -0400 Subject: [PATCH 048/741] Added OFT to BLU Banned groups --- src/trackers/BLU.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/trackers/BLU.py b/src/trackers/BLU.py index 57d750110..44dbaf85f 100644 --- a/src/trackers/BLU.py +++ b/src/trackers/BLU.py @@ -27,7 +27,7 @@ def __init__(self, config): self.signature = f"\n[center][url=https://blutopia.cc/forums/topics/3087]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = [ '[Oj]', '3LTON', '4yEo', 'ADE', 'AFG', 'AniHLS', 'AnimeRG', 'AniURL', 'AROMA', 'aXXo', 'Brrip', 'CHD', 'CM8', 'CrEwSaDe', 'd3g', 'DeadFish', 'DNL', 'ELiTE', 'eSc', 'FaNGDiNG0', 'FGT', 'Flights', - 'FRDS', 'FUM', 'HAiKU', 'HD2DVD', 'HDS', 'HDTime', 'Hi10', 'ION10', 'iPlanet', 'JIVE', 'KiNGDOM', 'Leffe', 'LEGi0N', 'LOAD', 'MeGusta', 'mHD', 'mSD', 'NhaNc3', 'nHD', 'nikt0', 'NOIVTC', + 'FRDS', 'FUM', 'HAiKU', 'HD2DVD', 'HDS', 'HDTime', 'Hi10', 'ION10', 'iPlanet', 'JIVE', 'KiNGDOM', 'Leffe', 'LEGi0N', 'LOAD', 'MeGusta', 'mHD', 'mSD', 'NhaNc3', 'nHD', 'nikt0', 'NOIVTC', 'OFT', 'nSD', 'PiRaTeS', 'playBD', 'PlaySD', 'playXD', 'PRODJi', 'RAPiDCOWS', 'RARBG', 'RetroPeeps', 'RDN', 'REsuRRecTioN', 'RMTeam', 'SANTi', 'SicFoI', 'SPASM', 'SPDVD', 'STUTTERSHIT', 'Telly', 'TM', 'TRiToN', 'UPiNSMOKE', 'URANiME', 'WAF', 'x0r', 'xRed', 'XS', 'YIFY', 'ZKBL', 'ZmN', 'ZMNT', 'AOC', ['EVO', 'Raw Content Only'], ['TERMiNAL', 'Raw Content Only'], ['ViSION', 'Note the capitalization and characters used'], ['CMRG', 'Raw Content Only'] @@ -214,4 +214,4 @@ async def search_existing(self, meta): console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes \ No newline at end of file + return dupes From 1f6493273b64476111cd41bf1d0fd7f6e62e54a5 Mon Sep 17 00:00:00 2001 From: sedrini Date: Sun, 2 Jun 2024 21:22:37 -0600 Subject: [PATCH 049/741] Update LT.py 1.- Added 'ANIME' and 'TELENOVELAS' categories. 2.- Edit lt_name to append '[SUBS]' at the end if the content does not have Spanish audio. 3.- Removed 'region_id' because LatTeam does not have region options for FULL DISC. 4.- Kept the 'Turcas o Doramas' category determination method, although its effectiveness is uncertain. It may be useful for some users. 
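For orientation before the diff itself: point 2 boils down to reading the general track's Audio_Language_List out of the MediaInfo dict and appending the tag when Spanish audio is absent. A condensed, self-contained sketch of that rule (dict shape and key name taken from the diff below; the sample values are made up):

    # Sketch of the point-2 rule: tag releases without Spanish audio.
    def needs_subs_tag(mediainfo: dict) -> bool:
        general_track = mediainfo['media']['track'][0]
        langs = general_track.get('Audio_Language_List', '')
        return 'Spanish' not in langs

    meta = {'media': {'track': [{'Audio_Language_List': 'English / Japanese'}]}}
    lt_name = 'Movie.2024.1080p.WEB-DL'
    if needs_subs_tag(meta):
        lt_name += ' [SUBS]'
    print(lt_name)  # Movie.2024.1080p.WEB-DL [SUBS]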
--- src/trackers/LT.py | 47 +++++++++++++++++++++++++++++++--------------- 1 file changed, 32 insertions(+), 15 deletions(-) diff --git a/src/trackers/LT.py b/src/trackers/LT.py index 2e06a0df2..aeb6b3550 100644 --- a/src/trackers/LT.py +++ b/src/trackers/LT.py @@ -35,11 +35,23 @@ def __init__(self, config): self.banned_groups = [""] pass - async def get_cat_id(self, category_name): + async def get_cat_id(self, category_name, meta): category_id = { 'MOVIE': '1', - 'TV': '2', + 'TV': '2', + 'ANIME': '5', + 'TELENOVELAS': '8', + 'Doramas & Turcas': '20', }.get(category_name, '0') + #if is anime + if meta['anime'] == True and category_id == '2': + category_id = '5' + #elif is telenovela + elif category_id == '2' and ("telenovela" in meta['keywords'] or "telenovela" in meta['overview']): + category_id = '8' + #if is TURCAS o Doramas + #elif meta["original_language"] in ['ja', 'ko', 'tr'] and category_id == '2' and 'Drama' in meta['genres'] : + #category_id = '20' return category_id async def get_type_id(self, type): @@ -70,10 +82,18 @@ async def get_res_id(self, resolution): return resolution_id async def edit_name(self, meta): - lt_name = meta['name'] - lt_name = lt_name.replace('Dubbed', '').replace('Dual-Audio', '') + lt_name = meta['name'].replace('Dubbed', '').replace('Dual-Audio', '').replace(' ', ' ').strip() + # Check if audio Spanish exists, if not append [SUBS] at the end + if meta['type'] != 'DISC': #DISC don't have mediainfo + audio_language_list = meta['mediainfo']['media']['track'][0].get('Audio_Language_List', '') + if 'Spanish' not in audio_language_list and '[SUBS]' not in lt_name: + if not meta['tag']: + lt_name += " [SUBS]" + else: + lt_name = lt_name.replace(meta['tag'], f" [SUBS]{meta['tag']}") return lt_name + ############################################################### ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### ############################################################### @@ -81,11 +101,11 @@ async def edit_name(self, meta): async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) + cat_id = await self.get_cat_id(meta['category'], meta) type_id = await self.get_type_id(meta['type']) resolution_id = await self.get_res_id(meta['resolution']) await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) + #region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) lt_name = await self.edit_name(meta) if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: @@ -130,21 +150,20 @@ async def upload(self, meta): if self.config['TRACKERS'][self.tracker].get('internal', False) == True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - - if region_id != 0: - data['region_id'] = region_id + + if distributor_id != 0: data['distributor_id'] = distributor_id if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') + data['season_number'] = int(meta.get('season_int', '0')) + data['episode_number'] = int(meta.get('episode_int', '0')) headers = { 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } params = { 'api_token' : 
self.config['TRACKERS'][self.tracker]['api_key'].strip() } - + if meta['debug'] == False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: @@ -158,8 +177,6 @@ async def upload(self, meta): open_torrent.close() - - async def search_existing(self, meta): dupes = [] @@ -167,7 +184,7 @@ async def search_existing(self, meta): params = { 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), + 'categories[]' : await self.get_cat_id(meta['category'], meta), 'types[]' : await self.get_type_id(meta['type']), 'resolutions[]' : await self.get_res_id(meta['resolution']), 'name' : "" From 9f53d4bf831c730d6f202bc83fa02ad1c0883cc6 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 8 Jun 2024 22:34:05 +1000 Subject: [PATCH 050/741] LST - banned groups --- src/trackers/LST.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/trackers/LST.py b/src/trackers/LST.py index 21368bd39..5d42b1ba2 100644 --- a/src/trackers/LST.py +++ b/src/trackers/LST.py @@ -32,7 +32,9 @@ def __init__(self, config): self.upload_url = 'https://lst.gg/api/torrents/upload' self.search_url = 'https://lst.gg/api/torrents/filter' self.signature = f"\n[center]Created by L4G's Upload Assistant[/center]" - self.banned_groups = [""] + self.banned_groups = ['aXXo', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'DNL', 'FaNGDiNG0', 'HD2DVD', 'HDTime', 'ION10', 'iPlanet', 'KiNGDOM', + 'mHD', 'mSD', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'OFT', 'PRODJi', 'SANTi', 'STUTTERSHIT', 'ViSION', 'VXT', 'WAF', + 'x0r', 'YIFY', 'Sicario', 'RARBG', 'MeGusta', 'TSP', 'TSPxL', 'GalaxyTV', 'TGALAXY', 'TORRENTGALAXY'] pass async def get_cat_id(self, category_name, keywords, service): From 3795c74726f00f5cf050b475e9b3988c1887d22b Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 20 Jun 2024 14:11:23 +1000 Subject: [PATCH 051/741] Revert "Update clients.py" This reverts commit 83d47993bb96bf3004362fac8a9e21681a1c548d. 
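The restored hunk below works around a race in qbittorrent's WebUI API: torrents_add() can return OK before the torrent is actually queryable, so the code polls torrents_info() for the infohash (for up to 30 seconds) before resuming and tagging it. Reduced to its essence, a sketch assuming `qbt_client` is a logged-in qbittorrentapi.Client and `torrent` a torf.Torrent, as in clients.py:

    import asyncio

    async def add_and_wait(qbt_client, torrent, save_path, timeout=30):
        # Add the torrent; the call may return before it is visible in the session.
        qbt_client.torrents_add(torrent_files=torrent.dump(), save_path=save_path,
                                is_skip_checking=True)
        for _ in range(timeout):
            # Poll until the infohash shows up in the session.
            if qbt_client.torrents_info(torrent_hashes=torrent.infohash):
                return True
            await asyncio.sleep(1)
        return False  # never appeared; the caller can log or retry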
---
 src/clients.py | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/src/clients.py b/src/clients.py
index 5bf4d1f42..c8d5fcba1 100644
--- a/src/clients.py
+++ b/src/clients.py
@@ -309,7 +309,18 @@ async def qbittorrent(self, path, torrent, local_path, remote_path, client, is_d
 
         content_layout = client.get('content_layout', 'Original')
 
-        qbt_client.torrents_add(torrent_files=torrent.dump(), save_path=path, use_auto_torrent_management=auto_management, is_skip_checking=True, is_paused=False, content_layout=content_layout, category=qbt_category, tags=client.get('qbit_tag'))
+        qbt_client.torrents_add(torrent_files=torrent.dump(), save_path=path, use_auto_torrent_management=auto_management, is_skip_checking=True, content_layout=content_layout, category=qbt_category)
+        # Wait for up to 30 seconds for qbit to actually return the download
+        # there's an async race condition within qbt that it will return ok before the torrent is actually added
+        for _ in range(0, 30):
+            if len(qbt_client.torrents_info(torrent_hashes=torrent.infohash)) > 0:
+                break
+            await asyncio.sleep(1)
+        qbt_client.torrents_resume(torrent.infohash)
+        if client.get('qbit_tag', None) != None:
+            qbt_client.torrents_add_tags(tags=client.get('qbit_tag'), torrent_hashes=torrent.infohash)
+        if meta.get('qbit_tag') != None:
+            qbt_client.torrents_add_tags(tags=meta.get('qbit_tag'), torrent_hashes=torrent.infohash)
         console.print(f"Added to: {path}")
 
 
@@ -400,4 +411,4 @@ async def remote_path_map(self, meta):
         if local_path.endswith(os.sep):
             remote_path = remote_path + os.sep
 
-        return local_path, remote_path
+        return local_path, remote_path
\ No newline at end of file

From 259e80a8c447a992e10728b7825ba7af47bd94e9 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Fri, 21 Jun 2024 12:07:25 +1000
Subject: [PATCH 052/741] Specify python version

---
 Dockerfile | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index 1d051748e..0bc07928c 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -3,8 +3,8 @@ FROM alpine:latest
 # Add mono repository and install mono
 RUN apk add --no-cache mono --repository http://dl-cdn.alpinelinux.org/alpine/edge/testing
 
-# Install system dependencies including Python and tools
-RUN apk add --no-cache --upgrade ffmpeg mediainfo python3 git py3-pip python3-dev g++ cargo mktorrent rust
+# Install system dependencies including Python 3.11 and tools
+RUN apk add --no-cache --upgrade ffmpeg mediainfo python3=3.11.5-r0 git py3-pip python3-dev=3.11.5-r0 g++ cargo mktorrent rust
 
 # Set up a virtual environment to isolate our Python dependencies
 RUN python3 -m venv /venv

From 2ae760bf15078caf0e07bb31868152d84c7f98fc Mon Sep 17 00:00:00 2001
From: Audionut
Date: Fri, 21 Jun 2024 12:12:13 +1000
Subject: [PATCH 053/741] Attempt python version again

---
 Dockerfile | 27 +++++++++++++++++----------
 1 file changed, 17 insertions(+), 10 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index 0bc07928c..61a00d426 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,17 +1,24 @@
-FROM alpine:latest
-
-# Add mono repository and install mono
-RUN apk add --no-cache mono --repository http://dl-cdn.alpinelinux.org/alpine/edge/testing
-
-# Install system dependencies including Python 3.11 and tools
-RUN apk add --no-cache --upgrade ffmpeg mediainfo python3=3.11.5-r0 git py3-pip python3-dev=3.11.5-r0 g++ cargo mktorrent rust
+FROM python:3.11
+
+# Update the package list and install system dependencies including mono
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    
ffmpeg \ + mediainfo \ + git \ + g++ \ + cargo \ + mktorrent \ + rustc \ + mono-complete && \ + rm -rf /var/lib/apt/lists/* # Set up a virtual environment to isolate our Python dependencies -RUN python3 -m venv /venv +RUN python -m venv /venv ENV PATH="/venv/bin:$PATH" # Install wheel and other Python dependencies -RUN pip install wheel +RUN pip install --upgrade pip wheel # Set the working directory in the container WORKDIR /Upload-Assistant @@ -24,4 +31,4 @@ RUN pip install -r requirements.txt COPY . . # Set the entry point for the container -ENTRYPOINT ["python3", "/Upload-Assistant/upload.py"] \ No newline at end of file +ENTRYPOINT ["python", "/Upload-Assistant/upload.py"] \ No newline at end of file From 6185f81da35084d029190abd7d250793d7d4bb9d Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 21 Jun 2024 13:12:55 +1000 Subject: [PATCH 054/741] Update - ANT banned group list --- src/trackers/ANT.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index 0bd5c40b8..5e925152a 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -32,9 +32,13 @@ def __init__(self, config): self.source_flag = 'ANT' self.search_url = 'https://anthelion.me/api.php' self.upload_url = 'https://anthelion.me/api.php' - self.banned_groups = ['Ozlem', 'RARBG', 'FGT', 'STUTTERSHIT', 'LiGaS', 'DDR', 'Zeus', 'TBS', 'aXXo', 'CrEwSaDe', 'DNL', 'EVO', - 'FaNGDiNG0', 'HD2DVD', 'HDTime', 'iPlanet', 'KiNGDOM', 'NhaNc3', 'PRoDJi', 'SANTi', 'ViSiON', 'WAF', 'YIFY', - 'YTS', 'MkvCage', 'mSD'] + self.banned_groups = ['3LTON', '4yEo', 'ADE', 'AFG', 'AniHLS', 'AnimeRG', 'AniURL', 'AROMA', 'aXXo', 'Brrip', 'CHD', 'CM8', + 'CrEwSaDe', 'd3g', 'DDR', 'DNL', 'DeadFish', 'ELiTE', 'eSc', 'FaNGDiNG0', 'FGT', 'Flights', 'FRDS', + 'FUM', 'HAiKU', 'HD2DVD', 'HDS', 'HDTime', 'Hi10', 'ION10', 'iPlanet', 'JIVE', 'KiNGDOM', 'Leffe', + 'LiGaS', 'LOAD', 'MeGusta', 'MkvCage', 'mHD', 'mSD', 'NhaNc3', 'nHD', 'NOIVTC', 'nSD', 'Oj', 'Ozlem', + 'PiRaTeS', 'PRoDJi', 'RAPiDCOWS', 'RARBG', 'RetroPeeps', 'RDN', 'REsuRRecTioN', 'RMTeam', 'SANTi', + 'SicFoI', 'SPASM', 'SPDVD', 'STUTTERSHIT', 'TBS', 'Telly', 'TM', 'UPiNSMOKE', 'URANiME', 'WAF', 'xRed', + 'XS', 'YIFY', 'YTS', 'Zeus', 'ZKBL', 'ZmN', 'ZMNT'] self.signature = None pass From e76f00a56e278c88d493934d09fc9f84a47bd4e2 Mon Sep 17 00:00:00 2001 From: Gloft Date: Fri, 19 Jul 2024 19:05:21 +0300 Subject: [PATCH 055/741] Implement --hardcoded-subs to HUNO --- src/trackers/HUNO.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index 3cb5ea642..9a7f6ad55 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -133,6 +133,7 @@ async def get_name(self, meta): # It was much easier to build the name from scratch than to alter the existing name. 
basename = self.get_basename(meta) + hc = meta.get('hardcoded-subs') type = meta.get('type', "") title = meta.get('title',"") alt_title = meta.get('aka', "") @@ -206,6 +207,8 @@ async def get_name(self, meta): elif type == "HDTV": #HDTV name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} HDTV {hybrid} {video_encode} {audio} {tag}) {repack}" + if hc: + name = re.sub(r'((\([0-9]{4}\)))', r'\1 Ensubbed', name) return ' '.join(name.split()).replace(": ", " - ") From 8df4f685fa93c9727a1f4c3199b4a868fe2f8440 Mon Sep 17 00:00:00 2001 From: ptscreens Date: Wed, 24 Jul 2024 20:16:25 +0100 Subject: [PATCH 056/741] Update args.py Add PT Screens image host --- src/args.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/args.py b/src/args.py index ff93c8e0e..413b63b25 100644 --- a/src/args.py +++ b/src/args.py @@ -51,7 +51,7 @@ def parse(self, args, meta): parser.add_argument('-d', '--desc', nargs='*', required=False, help="Custom Description (string)") parser.add_argument('-pb', '--desclink', nargs='*', required=False, help="Custom Description (link to hastebin/pastebin)") parser.add_argument('-df', '--descfile', nargs='*', required=False, help="Custom Description (path to file)") - parser.add_argument('-ih', '--imghost', nargs='*', required=False, help="Image Host", choices=['imgbb', 'ptpimg', 'imgbox', 'pixhost', 'lensdump']) + parser.add_argument('-ih', '--imghost', nargs='*', required=False, help="Image Host", choices=['imgbb', 'ptpimg', 'imgbox', 'pixhost', 'lensdump', 'ptscreens']) parser.add_argument('-siu', '--skip-imagehost-upload', dest='skip_imghost_upload', action='store_true', required=False, help="Skip Uploading to an image host") parser.add_argument('-th', '--torrenthash', nargs='*', required=False, help="Torrent Hash to re-use from your client's session directory") parser.add_argument('-nfo', '--nfo', action='store_true', required=False, help="Use .nfo in directory for description") From 55c954ba72ed89b14e2574efd220169141a5f0dd Mon Sep 17 00:00:00 2001 From: ptscreens Date: Wed, 24 Jul 2024 20:23:15 +0100 Subject: [PATCH 057/741] Add PT Screens image host --- src/prep.py | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index 998e31751..e543af3aa 100644 --- a/src/prep.py +++ b/src/prep.py @@ -851,6 +851,8 @@ def _is_vob_good(n, loops, num_screens): i += 1 elif self.img_host == "lensdump": i += 1 + elif self.img_host == "ptscreens": + i += 1 else: console.print("[red]Image too large for your image host, retaking") retake = True @@ -950,7 +952,7 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non i += 1 elif os.path.getsize(Path(image)) <= 10000000 and self.img_host in ["imgbox", 'pixhost'] and retake == False: i += 1 - elif self.img_host in ["ptpimg", "lensdump"] and retake == False: + elif self.img_host in ["ptpimg", "lensdump", "ptscreens"] and retake == False: i += 1 elif self.img_host == "freeimage.host": console.print("[bold red]Support for freeimage.host has been removed. 
Please remove from your config") @@ -2170,6 +2172,26 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i progress.console.print("[yellow]lensdump failed, trying next image host") progress.stop() newhost_list, i = self.upload_screens(meta, screens - i , img_host_num + 1, i, total_screens, [], return_dict) + elif img_host == "ptscreens": + url = "https://ptscreens.com/api/1/upload" + data = { + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + headers = { + 'X-API-Key': self.config['DEFAULT']['ptscreens_api'], + } + try: + response = requests.post(url, data=data, headers=headers, timeout=timeout) + response = response.json() + if response.get('status_code') != 200: + progress.console.print(response) + img_url = response['data'].get('medium', response['data']['image'])['url'] + web_url = response['data']['url_viewer'] + raw_url = response['data']['image']['url'] + except Exception: + progress.console.print("[yellow]PT Screens failed, trying next image host") + progress.stop() + newhost_list, i = self.upload_screens(meta, screens - i , img_host_num + 1, i, total_screens, [], return_dict) else: console.print("[bold red]Please choose a supported image host in your config") exit() From 533d0d63668ad7e68b2c419c9abce83a95f0d03e Mon Sep 17 00:00:00 2001 From: ptscreens Date: Wed, 24 Jul 2024 20:25:04 +0100 Subject: [PATCH 058/741] Add PT Screens image host --- data/example-config.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/data/example-config.py b/data/example-config.py index 6f6dd1957..ce95b4834 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -10,12 +10,13 @@ "imgbb_api" : "imgbb api key", "ptpimg_api" : "ptpimg api key", "lensdump_api" : "lensdump api key", + "ptscreens_api" : "ptscreens api key", # Order of image hosts, and backup image hosts "img_host_1": "imgbb", "img_host_2": "ptpimg", "img_host_3": "imgbox", - "img_host_4": "pixhost", + "img_host_4": "pixhost", "img_host_5": "lensdump", From bddd15a3f53a97b904a66e5afaabe67cc4339496 Mon Sep 17 00:00:00 2001 From: Gloft Date: Thu, 8 Aug 2024 22:48:04 +0300 Subject: [PATCH 059/741] Correctly name silent content --- src/trackers/HUNO.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index 9a7f6ad55..9b69e74e3 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -121,7 +121,8 @@ def get_audio(self, meta): elif 'mediainfo' in meta: language = next(x for x in meta["mediainfo"]["media"]["track"] if x["@type"] == "Audio").get('Language_String', "English") language = re.sub(r'\(.+\)', '', language) - + if language == "zxx": + language = "Silent" return f'{codec} {channels} {language}' def get_basename(self, meta): From d5415faa6b3cdfb3172007a4a99870816e76b9bb Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 10 Aug 2024 21:50:16 +1000 Subject: [PATCH 060/741] FIX - case sensitive mediainfo for m2ts at ANT --- src/trackers/ANT.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index 5e925152a..06f492cc0 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -81,7 +81,8 @@ async def upload(self, meta): bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() bd_dump = f'[spoiler=BDInfo][pre]{bd_dump}[/pre][/spoiler]' path = os.path.join(meta['bdinfo']['path'], 'STREAM') - m2ts = os.path.join(path, meta['bdinfo']['files'][0]['file']) + file_name = 
meta['bdinfo']['files'][0]['file'].lower() + m2ts = os.path.join(path, file_name) media_info_output = str(MediaInfo.parse(m2ts, output="text", full=False)) mi_dump = media_info_output.replace('\r\n', '\n') else: From 3479b14f01b5a066495c63e170f0f989382bcdd7 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 10 Aug 2024 22:50:10 +1000 Subject: [PATCH 061/741] FIX - acm url --- src/trackers/ACM.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/trackers/ACM.py b/src/trackers/ACM.py index 270fd25b3..121c60cbc 100644 --- a/src/trackers/ACM.py +++ b/src/trackers/ACM.py @@ -29,8 +29,8 @@ def __init__(self, config): self.config = config self.tracker = 'ACM' self.source_flag = 'AsianCinema' - self.upload_url = 'https://asiancinema.me/api/torrents/upload' - self.search_url = 'https://asiancinema.me/api/torrents/filter' + self.upload_url = 'https://eiga.moi/api/torrents/upload' + self.search_url = 'https://eiga.moi/api/torrents/filter' self.signature = None self.banned_groups = [""] pass From efd644ff56aaff8200127c9de656df1fb58b5708 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 10 Aug 2024 23:42:51 +1000 Subject: [PATCH 062/741] FIX - manual edition --- data/example-config.py | 5 ++-- src/prep.py | 54 +++++++++++++++++++++--------------------- 2 files changed, 30 insertions(+), 29 deletions(-) diff --git a/data/example-config.py b/data/example-config.py index ce95b4834..1a26a307b 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -10,14 +10,15 @@ "imgbb_api" : "imgbb api key", "ptpimg_api" : "ptpimg api key", "lensdump_api" : "lensdump api key", - "ptscreens_api" : "ptscreens api key", + "ptscreens_api" : "ptscreens api key", # Order of image hosts, and backup image hosts "img_host_1": "imgbb", "img_host_2": "ptpimg", "img_host_3": "imgbox", - "img_host_4": "pixhost", + "img_host_4": "pixhost", "img_host_5": "lensdump", + "img_host_6": "ptscreens", "screens" : "6", diff --git a/src/prep.py b/src/prep.py index e543af3aa..f4e731cf0 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1866,61 +1866,61 @@ def get_video_encode(self, mi, type, bdinfo): def get_edition(self, video, bdinfo, filelist, manual_edition): if video.lower().startswith('dc'): video = video.replace('dc', '', 1) + guess = guessit(video) tag = guess.get('release_group', 'NOGROUP') repack = "" edition = "" - if bdinfo != None: + + if bdinfo is not None: try: edition = guessit(bdinfo['label'])['edition'] - except: + except Exception as e: + print(f"BDInfo Edition Guess Error: {e}") edition = "" else: try: - edition = guess['edition'] - except: + edition = guess.get('edition', "") + except Exception as e: + print(f"Video Edition Guess Error: {e}") edition = "" + if isinstance(edition, list): - # time.sleep(2) edition = " ".join(edition) + if len(filelist) == 1: video = os.path.basename(video) - video = video.upper().replace('.', ' ').replace(tag, '').replace('-', '') + video = video.upper().replace('.', ' ').replace(tag.upper(), '').replace('-', '') if "OPEN MATTE" in video: - edition = edition + "Open Matte" + edition = edition + " Open Matte" - if manual_edition != None: - if isinstance(manual_edition, list): - manual_edition = " ".join(manual_edition) + if manual_edition: edition = str(manual_edition) - - if " REPACK " in (video or edition) or "V2" in video: + + print(f"Edition After Manual Edition: {edition}") + + if "REPACK" in edition.upper() or "V2" in video: repack = "REPACK" - if " REPACK2 " in (video or edition) or "V3" in video: + if "REPACK2" in edition.upper() or "V3" in video: 
repack = "REPACK2" - if " REPACK3 " in (video or edition) or "V4" in video: + if "REPACK3" in edition.upper() or "V4" in video: repack = "REPACK3" - if " PROPER " in (video or edition): + if "PROPER" in edition.upper(): repack = "PROPER" - if " RERIP " in (video.upper() or edition): + if "RERIP" in edition.upper(): repack = "RERIP" - # if "HYBRID" in video.upper() and "HYBRID" not in title.upper(): - # edition = "Hybrid " + edition - edition = re.sub("(REPACK\d?)?(RERIP)?(PROPER)?", "", edition, flags=re.IGNORECASE).strip() + + print(f"Repack after Checks: {repack}") + + # Only remove REPACK, RERIP, or PROPER from edition if they're not part of manual_edition + edition = re.sub(r"(\bREPACK\d?\b|\bRERIP\b|\bPROPER\b)", "", edition, flags=re.IGNORECASE).strip() bad = ['internal', 'limited', 'retail'] if edition.lower() in bad: edition = "" - # try: - # other = guess['other'] - # except: - # other = "" - # if " 3D " in other: - # edition = edition + " 3D " - # if edition == None or edition == None: - # edition = "" + return edition, repack From 483f98901fceaba64f4fc17f9615eb00c9b442f8 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 11 Aug 2024 00:14:11 +1000 Subject: [PATCH 063/741] Revert 319 again Update trackers added since PR --- requirements.txt | 3 ++- src/prep.py | 33 ++++++++++++++++----------------- src/trackers/ACM.py | 4 ++-- src/trackers/AITHER.py | 4 ++-- src/trackers/ANT.py | 4 ++-- src/trackers/BHD.py | 6 +++--- src/trackers/BHDTV.py | 4 ++-- src/trackers/BLU.py | 4 ++-- src/trackers/CBR.py | 4 ++-- src/trackers/FL.py | 8 ++++---- src/trackers/FNP.py | 4 ++-- src/trackers/HDB.py | 4 ++-- src/trackers/HDT.py | 6 +++--- src/trackers/HP.py | 4 ++-- src/trackers/HUNO.py | 4 ++-- src/trackers/JPTV.py | 4 ++-- src/trackers/LCD.py | 4 ++-- src/trackers/LST.py | 4 ++-- src/trackers/LT.py | 4 ++-- src/trackers/MTV.py | 6 +++--- src/trackers/NBL.py | 2 +- src/trackers/OE.py | 4 ++-- src/trackers/OTW.py | 4 ++-- src/trackers/PTER.py | 6 +++--- src/trackers/PTP.py | 4 ++-- src/trackers/R4E.py | 4 ++-- src/trackers/RF.py | 4 ++-- src/trackers/RTF.py | 2 +- src/trackers/STC.py | 4 ++-- src/trackers/STT.py | 4 ++-- src/trackers/TDC.py | 4 ++-- src/trackers/THR.py | 2 +- src/trackers/TTG.py | 6 +++--- src/trackers/UNIT3D_TEMPLATE.py | 4 ++-- src/trackers/UTP.py | 4 ++-- 35 files changed, 88 insertions(+), 88 deletions(-) diff --git a/requirements.txt b/requirements.txt index 19e7c5038..b20352a43 100644 --- a/requirements.txt +++ b/requirements.txt @@ -18,4 +18,5 @@ beautifulsoup4 pyoxipng rich Jinja2 -pyotp \ No newline at end of file +pyotp +str2bool \ No newline at end of file diff --git a/src/prep.py b/src/prep.py index f4e731cf0..e53ae336d 100644 --- a/src/prep.py +++ b/src/prep.py @@ -17,7 +17,7 @@ import re import math import sys - import distutils.util + from str2bool import str2bool import asyncio from guessit import guessit import ntpath @@ -97,7 +97,7 @@ async def gather_prep(self, meta, mode): meta['filelist'] = [] try: guess_name = bdinfo['title'].replace('-',' ') - filename = guessit(re.sub("[^0-9a-zA-Z\[\]]+", " ", guess_name), {"excludes" : ["country", "language"]})['title'] + filename = guessit(re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), {"excludes" : ["country", "language"]})['title'] untouched_filename = bdinfo['title'] try: meta['search_year'] = guessit(bdinfo['title'])['year'] @@ -105,7 +105,7 @@ async def gather_prep(self, meta, mode): meta['search_year'] = "" except Exception: guess_name = bdinfo['label'].replace('-',' ') - filename = 
guessit(re.sub("[^0-9a-zA-Z\[\]]+", " ", guess_name), {"excludes" : ["country", "language"]})['title'] + filename = guessit(re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), {"excludes" : ["country", "language"]})['title'] untouched_filename = bdinfo['label'] try: meta['search_year'] = guessit(bdinfo['label'])['year'] @@ -164,7 +164,7 @@ async def gather_prep(self, meta, mode): videopath, meta['filelist'] = self.get_video(videoloc, meta.get('mode', 'discord')) video, meta['scene'], meta['imdb'] = self.is_scene(videopath, meta.get('imdb', None)) guess_name = ntpath.basename(video).replace('-',' ') - filename = guessit(re.sub("[^0-9a-zA-Z\[\]]+", " ", guess_name), {"excludes" : ["country", "language"]}).get("title", guessit(re.sub("[^0-9a-zA-Z]+", " ", guess_name), {"excludes" : ["country", "language"]})["title"]) + filename = guessit(re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), {"excludes" : ["country", "language"]}).get("title", guessit(re.sub("[^0-9a-zA-Z]+", " ", guess_name), {"excludes" : ["country", "language"]})["title"]) untouched_filename = os.path.basename(video) try: meta['search_year'] = guessit(video)['year'] @@ -359,8 +359,8 @@ async def gather_prep(self, meta, mode): meta['edition'], meta['repack'] = self.get_edition(meta['path'], bdinfo, meta['filelist'], meta.get('manual_edition')) if "REPACK" in meta.get('edition', ""): - meta['repack'] = re.search("REPACK[\d]?", meta['edition'])[0] - meta['edition'] = re.sub("REPACK[\d]?", "", meta['edition']).strip().replace(' ', ' ') + meta['repack'] = re.search(r"REPACK[\d]?", meta['edition'])[0] + meta['edition'] = re.sub(r"REPACK[\d]?", "", meta['edition']).strip().replace(' ', ' ') @@ -1371,7 +1371,7 @@ def get_romaji(self, tmdb_name, mal): result = {'title' : {}} difference = 0 for anime in media: - search_name = re.sub("[^0-9a-zA-Z\[\]]+", "", tmdb_name.lower().replace(' ', '')) + search_name = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", tmdb_name.lower().replace(' ', '')) for title in anime['title'].values(): if title != None: title = re.sub(u'[\u3000-\u303f\u3040-\u309f\u30a0-\u30ff\uff00-\uff9f\u4e00-\u9faf\u3400-\u4dbf]+ (?=[A-Za-z ]+–)', "", title.lower().replace(' ', ''), re.U) @@ -2012,7 +2012,7 @@ def torf_cb(self, torrent, filepath, pieces_done, pieces_total): cli_ui.info_progress("Hashing...", pieces_done, pieces_total) def create_random_torrents(self, base_dir, uuid, num, path): - manual_name = re.sub("[^0-9a-zA-Z\[\]\'\-]+", ".", os.path.basename(path)) + manual_name = re.sub(r"[^0-9a-zA-Z\[\]\'\-]+", ".", os.path.basename(path)) base_torrent = Torrent.read(f"{base_dir}/tmp/{uuid}/BASE.torrent") for i in range(1, int(num) + 1): new_torrent = base_torrent @@ -2022,7 +2022,6 @@ def create_random_torrents(self, base_dir, uuid, num, path): def create_base_from_existing_torrent(self, torrentpath, base_dir, uuid): if os.path.exists(torrentpath): base_torrent = Torrent.read(torrentpath) - base_torrent.creation_date = datetime.now() base_torrent.trackers = ['https://fake.tracker'] base_torrent.comment = "Created by L4G's Upload Assistant" base_torrent.created_by = "Created by L4G's Upload Assistant" @@ -2513,8 +2512,8 @@ async def get_season_episode(self, video, meta): for lang, names in values.items(): if lang == "jp": for name in names: - romaji_check = re.sub("[^0-9a-zA-Z\[\]]+", "", romaji.lower().replace(' ', '')) - name_check = re.sub("[^0-9a-zA-Z\[\]]+", "", name.lower().replace(' ', '')) + romaji_check = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", romaji.lower().replace(' ', '')) + name_check = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", 
name.lower().replace(' ', '')) diff = SequenceMatcher(None, romaji_check, name_check).ratio() if romaji_check in name_check: if diff >= difference: @@ -2527,8 +2526,8 @@ async def get_season_episode(self, video, meta): difference = diff if lang == "us": for name in names: - eng_check = re.sub("[^0-9a-zA-Z\[\]]+", "", eng_title.lower().replace(' ', '')) - name_check = re.sub("[^0-9a-zA-Z\[\]]+", "", name.lower().replace(' ', '')) + eng_check = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", eng_title.lower().replace(' ', '')) + name_check = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", name.lower().replace(' ', '')) diff = SequenceMatcher(None, eng_check, name_check).ratio() if eng_check in name_check: if diff >= difference: @@ -2645,7 +2644,7 @@ def get_service(self, video, tag, audio, guess_title): } - video_name = re.sub("[.()]", " ", video.replace(tag, '').replace(guess_title, '')) + video_name = re.sub(r"[.()]", " ", video.replace(tag, '').replace(guess_title, '')) if "DTS-HD MA" in audio: video_name = video_name.replace("DTS-HD.MA.", "").replace("DTS-HD MA ", "") for key, value in services.items(): @@ -2794,7 +2793,7 @@ async def tag_override(self, meta): else: pass elif key == 'personalrelease': - meta[key] = bool(distutils.util.strtobool(str(value.get(key, 'False')))) + meta[key] = bool(str2bool(str(value.get(key, 'False')))) elif key == 'template': meta['desc_template'] = value.get(key) else: @@ -2849,7 +2848,7 @@ async def package(self, meta): generic.write(f"\nThumbnail Image:\n") for each in meta['image_list']: generic.write(f"{each['img_url']}\n") - title = re.sub("[^0-9a-zA-Z\[\]]+", "", meta['title']) + title = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", meta['title']) archive = f"{meta['base_dir']}/tmp/{meta['uuid']}/{title}" torrent_files = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}","*.torrent") if isinstance(torrent_files, list) and len(torrent_files) > 1: @@ -2859,7 +2858,7 @@ async def package(self, meta): try: if os.path.exists(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent"): base_torrent = Torrent.read(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") - manual_name = re.sub("[^0-9a-zA-Z\[\]\'\-]+", ".", os.path.basename(meta['path'])) + manual_name = re.sub(r"[^0-9a-zA-Z\[\]\'\-]+", ".", os.path.basename(meta['path'])) Torrent.copy(base_torrent).write(f"{meta['base_dir']}/tmp/{meta['uuid']}/{manual_name}.torrent", overwrite=True) # shutil.copy(os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent"), os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['name'].replace(' ', '.')}.torrent").replace(' ', '.')) filebrowser = self.config['TRACKERS'].get('MANUAL', {}).get('filebrowser', None) diff --git a/src/trackers/ACM.py b/src/trackers/ACM.py index 121c60cbc..194a2d0a2 100644 --- a/src/trackers/ACM.py +++ b/src/trackers/ACM.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests -import distutils.util import os import platform +from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -207,7 +207,7 @@ async def upload(self, meta): region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) acm_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git 
a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 7930871eb..308383a5f 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -3,7 +3,7 @@ import asyncio import requests from difflib import SequenceMatcher -import distutils.util +from str2bool import str2bool import json import os import platform @@ -39,7 +39,7 @@ async def upload(self, meta): type_id = await self.get_type_id(meta['type']) resolution_id = await self.get_res_id(meta['resolution']) name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index 06f492cc0..1297f45ab 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -3,8 +3,8 @@ import os import asyncio import requests -import distutils.util import platform +from str2bool import str2bool from pymediainfo import MediaInfo from src.trackers.COMMON import COMMON @@ -72,7 +72,7 @@ async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) flags = await self.get_flags(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index d6ce9bca1..d9e73acdf 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -3,7 +3,7 @@ import asyncio import requests from difflib import SequenceMatcher -import distutils.util +from str2bool import str2bool import urllib import os import platform @@ -39,7 +39,7 @@ async def upload(self, meta): tags = await self.get_tags(meta) custom, edition = await self.get_edition(meta, tags) bhd_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 @@ -263,7 +263,7 @@ async def search_existing(self, meta): async def get_live(self, meta): draft = self.config['TRACKERS'][self.tracker]['draft_default'].strip() - draft = bool(distutils.util.strtobool(str(draft))) #0 for send to draft, 1 for live + draft = bool(str2bool(str(draft))) #0 for send to draft, 1 for live if draft: draft_int = 0 else: diff --git a/src/trackers/BHDTV.py b/src/trackers/BHDTV.py index 97d0e1c8e..ea6f911c1 100644 --- a/src/trackers/BHDTV.py +++ b/src/trackers/BHDTV.py @@ -4,7 +4,7 @@ from torf import Torrent import requests from src.console import console -import distutils.util +from str2bool import str2bool from pprint import pprint import os import traceback @@ -54,7 +54,7 @@ async def upload(self, meta): # region_id = await common.unit3d_region_ids(meta.get('region')) # distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) if meta['anon'] == 0 and bool( - distutils.util.strtobool(self.config['TRACKERS'][self.tracker].get('anon', "False"))) == False: + str2bool(self.config['TRACKERS'][self.tracker].get('anon', "False"))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/BLU.py b/src/trackers/BLU.py index 44dbaf85f..9f2757327 100644 --- 
a/src/trackers/BLU.py
+++ b/src/trackers/BLU.py
@@ -2,9 +2,9 @@
 # import discord
 import asyncio
 import requests
-import distutils.util
 import os
 import platform
+from str2bool import str2bool
 
 from src.trackers.COMMON import COMMON
 from src.console import console
@@ -49,7 +49,7 @@ async def upload(self, meta):
         resolution_id = await self.get_res_id(meta['resolution'])
         region_id = await common.unit3d_region_ids(meta.get('region'))
         distributor_id = await common.unit3d_distributor_ids(meta.get('distributor'))
-        if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False:
+        if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False:
             anon = 0
         else:
             anon = 1
diff --git a/src/trackers/CBR.py b/src/trackers/CBR.py
index aedb30937..445f4e9d5 100644
--- a/src/trackers/CBR.py
+++ b/src/trackers/CBR.py
@@ -2,7 +2,7 @@
 # import discord
 import asyncio
 import requests
-import distutils.util
+from str2bool import str2bool
 import os
 import platform
 
@@ -40,7 +40,7 @@ async def upload(self, meta):
         region_id = await common.unit3d_region_ids(meta.get('region'))
         distributor_id = await common.unit3d_distributor_ids(meta.get('distributor'))
         name = await self.edit_name(meta)
-        if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False:
+        if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False:
             anon = 0
         else:
             anon = 1
diff --git a/src/trackers/FL.py b/src/trackers/FL.py
index 06cd4bb0b..5813f469a 100644
--- a/src/trackers/FL.py
+++ b/src/trackers/FL.py
@@ -3,7 +3,7 @@
 import re
 import os
 from pathlib import Path
-import distutils.util
+from str2bool import str2bool
 import json
 import glob
 import pickle
@@ -98,7 +98,7 @@ async def edit_name(self, meta):
         fl_name = fl_name.replace('DTS7.1', 'DTS').replace('DTS5.1', 'DTS').replace('DTS2.0', 'DTS').replace('DTS1.0', 'DTS')
         fl_name = fl_name.replace('Dubbed', '').replace('Dual-Audio', '')
         fl_name = ' '.join(fl_name.split())
-        fl_name = re.sub("[^0-9a-zA-ZÀ-ÿ. &+'\-\[\]]+", "", fl_name)
+        fl_name = re.sub(r"[^0-9a-zA-ZÀ-ÿ. 
&+'\-\[\]]+", "", fl_name) fl_name = fl_name.replace(' ', '.').replace('..', '.') return fl_name @@ -161,7 +161,7 @@ async def upload(self, meta): if int(meta.get('imdb_id', '').replace('tt', '')) != 0: data['imdbid'] = meta.get('imdb_id', '').replace('tt', '') data['description'] = meta['imdb_info'].get('genres', '') - if self.uploader_name not in ("", None) and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if self.uploader_name not in ("", None) and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: data['epenis'] = self.uploader_name if has_ro_audio: data['materialro'] = 'on' @@ -319,7 +319,7 @@ async def edit_desc(self, meta): desc = bbcode.convert_code_to_quote(desc) desc = bbcode.convert_comparison_to_centered(desc, 900) desc = desc.replace('[img]', '[img]').replace('[/img]', '[/img]') - desc = re.sub("(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) + desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) if meta['is_disc'] != 'BDMV': url = "https://up.img4k.net/api/description" data = { diff --git a/src/trackers/FNP.py b/src/trackers/FNP.py index f255b2c43..8c7ecd0fe 100644 --- a/src/trackers/FNP.py +++ b/src/trackers/FNP.py @@ -2,7 +2,7 @@ # import discord import asyncio import requests -import distutils.util +from str2bool import str2bool import os import platform @@ -82,7 +82,7 @@ async def upload(self, meta): await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py index 7fab14991..b8eafcb7c 100644 --- a/src/trackers/HDB.py +++ b/src/trackers/HDB.py @@ -192,7 +192,7 @@ async def edit_name(self, meta): hdb_name = hdb_name.replace('Dubbed', '').replace('Dual-Audio', '') hdb_name = hdb_name.replace('REMUX', 'Remux') hdb_name = ' '.join(hdb_name.split()) - hdb_name = re.sub("[^0-9a-zA-ZÀ-ÿ. :&+'\-\[\]]+", "", hdb_name) + hdb_name = re.sub(r"[^0-9a-zA-ZÀ-ÿ. :&+'\-\[\]]+", "", hdb_name) hdb_name = hdb_name.replace(' .', '.').replace('..', '.') return hdb_name @@ -431,7 +431,7 @@ async def edit_desc(self, meta): desc = bbcode.convert_code_to_quote(desc) desc = bbcode.convert_spoiler_to_hide(desc) desc = bbcode.convert_comparison_to_centered(desc, 1000) - desc = re.sub("(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) + desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) descfile.write(desc) if self.rehost_images == True: console.print("[green]Rehosting Images...") diff --git a/src/trackers/HDT.py b/src/trackers/HDT.py index 6b9fa0320..6bcf76964 100644 --- a/src/trackers/HDT.py +++ b/src/trackers/HDT.py @@ -6,8 +6,8 @@ import glob import cli_ui import pickle -import distutils from pathlib import Path +from str2bool import str2bool from bs4 import BeautifulSoup from unidecode import unidecode from pymediainfo import MediaInfo @@ -105,7 +105,7 @@ async def edit_name(self, meta): hdt_name = hdt_name.replace(' DV ', ' DoVi ') hdt_name = ' '.join(hdt_name.split()) - hdt_name = re.sub("[^0-9a-zA-ZÀ-ÿ. &+'\-\[\]]+", "", hdt_name) + hdt_name = re.sub(r"[^0-9a-zA-ZÀ-ÿ. 
&+'\-\[\]]+", "", hdt_name) hdt_name = hdt_name.replace(':', '').replace('..', ' ').replace(' ', ' ') return hdt_name @@ -173,7 +173,7 @@ async def upload(self, meta): data['season'] = 'false' # Anonymous check - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: data['anonymous'] = 'false' else: data['anonymous'] = 'true' diff --git a/src/trackers/HP.py b/src/trackers/HP.py index 250e9e851..7c11e0744 100644 --- a/src/trackers/HP.py +++ b/src/trackers/HP.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests -import distutils.util import os import platform +from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -78,7 +78,7 @@ async def upload(self, meta): await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index 9b69e74e3..ee13c0338 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -3,7 +3,7 @@ import asyncio import requests from difflib import SequenceMatcher -import distutils.util +from str2bool import str2bool import os import re import platform @@ -37,7 +37,7 @@ async def upload(self, meta): cat_id = await self.get_cat_id(meta['category']) type_id = await self.get_type_id(meta) resolution_id = await self.get_res_id(meta['resolution']) - if meta['anon'] == 0 and bool(distutils.util.strtobool(self.config['TRACKERS']['HUNO'].get('anon', "False"))) == False: + if meta['anon'] == 0 and bool(str2bool(self.config['TRACKERS']['HUNO'].get('anon', "False"))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/JPTV.py b/src/trackers/JPTV.py index 354b1be1a..06253aebe 100644 --- a/src/trackers/JPTV.py +++ b/src/trackers/JPTV.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests -import distutils.util import os import platform +from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -92,7 +92,7 @@ async def upload(self, meta): region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) jptv_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/LCD.py b/src/trackers/LCD.py index 5c3f14309..a457a601a 100644 --- a/src/trackers/LCD.py +++ b/src/trackers/LCD.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests -import distutils.util import os import platform +from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -40,7 +40,7 @@ async def upload(self, meta): region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await 
common.unit3d_distributor_ids(meta.get('distributor')) name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/LST.py b/src/trackers/LST.py index 5d42b1ba2..d748fd939 100644 --- a/src/trackers/LST.py +++ b/src/trackers/LST.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests -import distutils.util import os import platform +from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -89,7 +89,7 @@ async def upload(self, meta): await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/LT.py b/src/trackers/LT.py index aeb6b3550..b5965c843 100644 --- a/src/trackers/LT.py +++ b/src/trackers/LT.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests -import distutils.util import os import platform +from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -108,7 +108,7 @@ async def upload(self, meta): #region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) lt_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index aba9b3b5d..cee1de771 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -8,8 +8,8 @@ import cli_ui import pickle import re -import distutils.util from pathlib import Path +from str2bool import str2bool from src.trackers.COMMON import COMMON class MTV(): @@ -75,7 +75,7 @@ async def upload(self, meta): mtv_name = await self.edit_name(meta) # anon - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 @@ -221,7 +221,7 @@ async def edit_name(self, meta): if meta['tag'] == "": mtv_name = f"{mtv_name}-NoGrp" mtv_name = ' '.join(mtv_name.split()) - mtv_name = re.sub("[^0-9a-zA-ZÀ-ÿ. &+'\-\[\]]+", "", mtv_name) + mtv_name = re.sub(r"[^0-9a-zA-ZÀ-ÿ. 
&+'\-\[\]]+", "", mtv_name) mtv_name = mtv_name.replace(' ', '.').replace('..', '.') return mtv_name diff --git a/src/trackers/NBL.py b/src/trackers/NBL.py index 813cb871b..56e01a671 100644 --- a/src/trackers/NBL.py +++ b/src/trackers/NBL.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests -import distutils.util import os from guessit import guessit +from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console diff --git a/src/trackers/OE.py b/src/trackers/OE.py index bb69a3e02..332fc6d7f 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -3,7 +3,7 @@ import asyncio import requests from difflib import SequenceMatcher -import distutils.util +from str2bool import str2bool import json import os import platform @@ -37,7 +37,7 @@ async def upload(self, meta): type_id = await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('video_codec'), meta.get('category', "")) resolution_id = await self.get_res_id(meta['resolution']) oe_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/OTW.py b/src/trackers/OTW.py index 0425b3924..697a2197f 100644 --- a/src/trackers/OTW.py +++ b/src/trackers/OTW.py @@ -2,7 +2,7 @@ # import discord import asyncio import requests -import distutils.util +from str2bool import str2bool import os import platform @@ -82,7 +82,7 @@ async def upload(self, meta): await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/PTER.py b/src/trackers/PTER.py index b9fcecfa0..71eb1c2a1 100644 --- a/src/trackers/PTER.py +++ b/src/trackers/PTER.py @@ -7,7 +7,7 @@ import traceback import json import glob -import distutils.util +from str2bool import str2bool import cli_ui import pickle from unidecode import unidecode @@ -187,7 +187,7 @@ async def edit_desc(self, meta): desc = bbcode.convert_spoiler_to_hide(desc) desc = bbcode.convert_comparison_to_centered(desc, 1000) desc = desc.replace('[img]', '[img]') - desc = re.sub("(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) + desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) descfile.write(desc) if self.rehost_images == True: @@ -288,7 +288,7 @@ async def pterimg_upload(self, meta): return image_list async def get_anon(self, anon): - if anon == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if anon == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 'no' else: anon = 'yes' diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 7ededcac7..91944df34 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -2,9 +2,9 @@ import requests import asyncio import re -import distutils.util import os from pathlib import Path +from str2bool import str2bool import time import traceback import json @@ -33,7 +33,7 @@ def 
__init__(self, config): self.announce_url = config['TRACKERS']['PTP'].get('announce_url', '').strip() self.username = config['TRACKERS']['PTP'].get('username', '').strip() self.password = config['TRACKERS']['PTP'].get('password', '').strip() - self.web_source = distutils.util.strtobool(str(config['TRACKERS']['PTP'].get('add_web_source_to_desc', True))) + self.web_source = str2bool(str(config['TRACKERS']['PTP'].get('add_web_source_to_desc', True))) self.user_agent = f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' self.banned_groups = ['aXXo', 'BMDru', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'd3g', 'DNL', 'FaNGDiNG0', 'HD2DVD', 'HDTime', 'ION10', 'iPlanet', 'KiNGDOM', 'mHD', 'mSD', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'OFT', 'PRODJi', 'SANTi', 'SPiRiT', 'STUTTERSHIT', 'ViSION', 'VXT', diff --git a/src/trackers/R4E.py b/src/trackers/R4E.py index 67d33c997..0528c10cc 100644 --- a/src/trackers/R4E.py +++ b/src/trackers/R4E.py @@ -3,7 +3,7 @@ import asyncio import requests from difflib import SequenceMatcher -import distutils.util +from str2bool import str2bool import json import tmdbsimple as tmdb import os @@ -36,7 +36,7 @@ async def upload(self, meta): type_id = await self.get_type_id(meta['resolution']) await common.unit3d_edit_desc(meta, self.tracker, self.signature) name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS']['R4E'].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS']['R4E'].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/RF.py b/src/trackers/RF.py index ca94837b9..aa108340d 100644 --- a/src/trackers/RF.py +++ b/src/trackers/RF.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests -import distutils.util import os import platform +from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -41,7 +41,7 @@ async def upload(self, meta): type_id = await self.get_type_id(meta['type']) resolution_id = await self.get_res_id(meta['resolution']) stt_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/RTF.py b/src/trackers/RTF.py index 8ef181d6b..28ce55924 100644 --- a/src/trackers/RTF.py +++ b/src/trackers/RTF.py @@ -54,7 +54,7 @@ async def upload(self, meta): # 'description' : meta['overview'] + "\n\n" + desc + "\n\n" + "Uploaded by L4G Upload Assistant", 'description': "this is a description", # editing mediainfo so that instead of 1 080p its 1,080p as site mediainfo parser wont work other wise. 
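# Editor's sketch, not part of any patch here: this series swaps distutils.util.strtobool
# (removed along with the rest of distutils in Python 3.12) for the PyPI str2bool package.
# Assuming that package's default behaviour, strtobool("junk") raised ValueError, while
# str2bool("junk") returns None, so the bool(str2bool(str(value))) pattern used throughout
# these hunks falls back to False on unrecognised config values instead of crashing:
from str2bool import str2bool

for value in ("True", "false", "1", "junk"):
    print(value, bool(str2bool(str(value))))  # "junk" -> False rather than an exception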
- 'mediaInfo': re.sub("(\d+)\s+(\d+)", r"\1,\2", mi_dump) if bd_dump == None else f"{bd_dump}", + 'mediaInfo': re.sub(r"(\d+)\s+(\d+)", r"\1,\2", mi_dump) if bd_dump == None else f"{bd_dump}", "nfo": "", "url": "https://www.imdb.com/title/" + (meta['imdb_id'] if str(meta['imdb_id']).startswith("tt") else "tt" + meta['imdb_id']) + "/", # auto pulled from IMDB diff --git a/src/trackers/STC.py b/src/trackers/STC.py index 224e89889..71d70ce2f 100644 --- a/src/trackers/STC.py +++ b/src/trackers/STC.py @@ -2,7 +2,7 @@ import asyncio import requests from difflib import SequenceMatcher -import distutils.util +from str2bool import str2bool import json import os import platform @@ -36,7 +36,7 @@ async def upload(self, meta): type_id = await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('sd', 0), meta.get('category', "")) resolution_id = await self.get_res_id(meta['resolution']) stc_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/STT.py b/src/trackers/STT.py index 0a72f7eab..fdeed9e88 100644 --- a/src/trackers/STT.py +++ b/src/trackers/STT.py @@ -3,7 +3,7 @@ import asyncio import requests from difflib import SequenceMatcher -import distutils.util +from str2bool import str2bool import json import os import platform @@ -37,7 +37,7 @@ async def upload(self, meta): type_id = await self.get_type_id(meta['type']) resolution_id = await self.get_res_id(meta['resolution']) stt_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/TDC.py b/src/trackers/TDC.py index e201bcb83..b22ec6cd7 100644 --- a/src/trackers/TDC.py +++ b/src/trackers/TDC.py @@ -2,8 +2,8 @@ # import discord import asyncio import requests -import distutils.util import os +from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -77,7 +77,7 @@ async def upload(self, meta): await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/THR.py b/src/trackers/THR.py index 3080ae581..f1f74e15b 100644 --- a/src/trackers/THR.py +++ b/src/trackers/THR.py @@ -49,7 +49,7 @@ async def upload(self, session, meta): return else: thr_name = thr_name_manually - torrent_name = re.sub("[^0-9a-zA-Z. '\-\[\]]+", " ", thr_name) + torrent_name = re.sub(r"[^0-9a-zA-Z. 
'\-\[\]]+", " ", thr_name) if meta.get('is_disc', '') == 'BDMV': diff --git a/src/trackers/TTG.py b/src/trackers/TTG.py index 491a3bacc..6795d13cc 100644 --- a/src/trackers/TTG.py +++ b/src/trackers/TTG.py @@ -7,8 +7,8 @@ from pathlib import Path import traceback import json -import distutils.util import cli_ui +from str2bool import str2bool from unidecode import unidecode from urllib.parse import urlparse, quote from src.trackers.COMMON import COMMON @@ -104,7 +104,7 @@ async def get_type_id(self, meta): return type_id async def get_anon(self, anon): - if anon == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if anon == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 'no' else: anon = 'yes' @@ -327,7 +327,7 @@ async def edit_desc(self, meta): desc = bbcode.convert_spoiler_to_hide(desc) desc = bbcode.convert_comparison_to_centered(desc, 1000) desc = desc.replace('[img]', '[img]') - desc = re.sub("(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) + desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) descfile.write(desc) images = meta['image_list'] if len(images) > 0: diff --git a/src/trackers/UNIT3D_TEMPLATE.py b/src/trackers/UNIT3D_TEMPLATE.py index 405e2c9f1..c77e758a7 100644 --- a/src/trackers/UNIT3D_TEMPLATE.py +++ b/src/trackers/UNIT3D_TEMPLATE.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests -import distutils.util import os import platform +from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -82,7 +82,7 @@ async def upload(self, meta): await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 diff --git a/src/trackers/UTP.py b/src/trackers/UTP.py index 9b3476cc3..5ba904811 100644 --- a/src/trackers/UTP.py +++ b/src/trackers/UTP.py @@ -2,7 +2,7 @@ # import discord import asyncio import requests -import distutils.util +from str2bool import str2bool import os import platform @@ -37,7 +37,7 @@ async def upload(self, meta): resolution_id = await self.get_res_id(meta['resolution']) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2obool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: anon = 0 else: anon = 1 From f1c352eab676db2440af89eb63162ddd91c8189b Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 17 Aug 2024 19:27:52 +1000 Subject: [PATCH 064/741] Update docker building for different branches --- .github/workflows/docker-image.yml | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index 195a62ed6..665023fcf 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -1,8 +1,11 @@ -name: Create and publish a Docker image +name: Create and 
publish Docker images on: push: - branches: ['master'] + branches: + - master + - develop + - qbit-torrent-check env: REGISTRY: ghcr.io @@ -45,13 +48,25 @@ jobs: id: get_short_commit_id run: | echo "SHA_SHORT=$(git rev-parse --short HEAD)" >> $GITHUB_ENV + + - name: Set image name based on branch + id: set_image_name + run: | + if [ "${{ github.ref_name }}" == "master" ]; then + IMAGE_TAG="latest" + elif [ "${{ github.ref_name }}" == "develop" ]; then + IMAGE_TAG="develop" + else + IMAGE_TAG="${{ github.ref_name }}" + fi + echo "IMAGE_TAG=${IMAGE_TAG}" >> $GITHUB_ENV - name: Build and push Docker image uses: docker/build-push-action@v3 with: context: . push: true - tags: ${{ steps.meta.outputs.tags }}, ${{ env.REGISTRY }}/${{ env.LOWER_CASE_REPO_NAME }}:${{ env.SHA_SHORT }} + tags: ${{ env.REGISTRY }}/${{ env.LOWER_CASE_REPO_NAME }}:${{ env.IMAGE_TAG }}, ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} cache-from: type=gha cache-to: type=gha,mode=max \ No newline at end of file From 4c173a31f6d6b05d5118eb2d25c16cc336aaf62e Mon Sep 17 00:00:00 2001 From: Khakis Date: Sat, 17 Aug 2024 23:37:43 -0500 Subject: [PATCH 065/741] Update prep.py Added the ability to support proper identification of Dubbed/Dual audio for Spanish audio tracks. --- src/prep.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index e53ae336d..e5e333813 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1480,8 +1480,8 @@ def get_audio_v2(self, mi, meta, bdinfo): # Check for original Language Track if audio_language == meta['original_language'] and "commentary" not in t.get('Title', '').lower(): orig = True - # Catch Chinese / Norwegian variants - variants = ['zh', 'cn', 'cmn', 'no', 'nb'] + # Catch Chinese / Norwegian / Spanish variants + variants = ['zh', 'cn', 'cmn', 'no', 'nb', 'es-419', 'es-ES', 'es'] if audio_language in variants and meta['original_language'] in variants: orig = True # Check for additional, bloated Tracks From 2cf16c706c391e2c0b493afe384dd007a45dd4b0 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 18 Aug 2024 19:40:57 +1000 Subject: [PATCH 066/741] Update links --- README.md | 2 +- src/trackers/AITHER.py | 2 +- src/trackers/LCD.py | 2 +- src/trackers/LST.py | 2 +- src/trackers/OE.py | 2 +- src/trackers/OTW.py | 2 +- src/trackers/TDC.py | 2 +- src/trackers/UTP.py | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index b997ac3ea..076b66d09 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,7 @@ A simple tool to take the work out of uploading. 
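A quick sketch of the dual/dubbed audio check from patch 065 above (hypothetical standalone helper; the real check lives inline in get_audio_v2 and compares against meta['original_language']):

VARIANTS = {'zh', 'cn', 'cmn', 'no', 'nb', 'es-419', 'es-ES', 'es'}

def is_original_audio(audio_language, original_language):
    # An exact match, or two codes that both sit in the variant list,
    # counts as original audio rather than a dub.
    if audio_language == original_language:
        return True
    return audio_language in VARIANTS and original_language in VARIANTS

print(is_original_audio('es-419', 'es'))  # True: regional Spanish is not treated as a dub
print(is_original_audio('es', 'zh'))      # also True: the flat list matches across languages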
## **Setup:** - - **REQUIRES AT LEAST PYTHON 3.7 AND PIP3** + - **REQUIRES AT LEAST PYTHON 3.12 AND PIP3** - Needs [mono](https://www.mono-project.com/) on linux systems for BDInfo - Also needs MediaInfo and ffmpeg installed on your system - On Windows systems, ffmpeg must be added to PATH (https://windowsloop.com/install-ffmpeg-windows-10/) diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 308383a5f..382ae92d2 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -25,7 +25,7 @@ def __init__(self, config): self.source_flag = 'Aither' self.search_url = 'https://aither.cc/api/torrents/filter' self.upload_url = 'https://aither.cc/api/torrents/upload' - self.signature = f"\n[center][url=https://aither.cc/forums/topics/1349]Created by L4G's Upload Assistant[/url][/center]" + self.signature = f"\n[center][url=https://aither.cc/forums/topics/1349/posts/24958]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = ['4K4U', 'AROMA', 'd3g', 'edge2020', 'EMBER', 'EVO', 'FGT', 'FreetheFish', 'Hi10', 'HiQVE', 'ION10', 'iVy', 'Judas', 'LAMA', 'MeGusta', 'nikt0', 'OEPlus', 'OFT', 'OsC', 'PYC', 'QxR', 'Ralphy', 'RARBG', 'RetroPeeps', 'SAMPA', 'Sicario', 'Silence', 'SkipTT', 'SPDVD', 'STUTTERSHIT', 'SWTYBLZ', 'TAoE', 'TGx', 'Tigole', 'TSP', 'TSPxL', 'VXT', 'Weasley[HONE]', 'Will1869', 'x0r', 'YIFY'] diff --git a/src/trackers/LCD.py b/src/trackers/LCD.py index a457a601a..8e02f8d77 100644 --- a/src/trackers/LCD.py +++ b/src/trackers/LCD.py @@ -26,7 +26,7 @@ def __init__(self, config): self.search_url = 'https://locadora.cc/api/torrents/filter' self.torrent_url = 'https://locadora.cc/api/torrents/' self.upload_url = 'https://locadora.cc/api/torrents/upload' - self.signature = f"\n[center]Criado usando L4G's Upload Assistant[/center]" + self.signature = f"\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = [""] pass diff --git a/src/trackers/LST.py b/src/trackers/LST.py index d748fd939..4fe525b09 100644 --- a/src/trackers/LST.py +++ b/src/trackers/LST.py @@ -31,7 +31,7 @@ def __init__(self, config): self.source_flag = 'LST.GG' self.upload_url = 'https://lst.gg/api/torrents/upload' self.search_url = 'https://lst.gg/api/torrents/filter' - self.signature = f"\n[center]Created by L4G's Upload Assistant[/center]" + self.signature = f"\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = ['aXXo', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'DNL', 'FaNGDiNG0', 'HD2DVD', 'HDTime', 'ION10', 'iPlanet', 'KiNGDOM', 'mHD', 'mSD', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'OFT', 'PRODJi', 'SANTi', 'STUTTERSHIT', 'ViSION', 'VXT', 'WAF', 'x0r', 'YIFY', 'Sicario', 'RARBG', 'MeGusta', 'TSP', 'TSPxL', 'GalaxyTV', 'TGALAXY', 'TORRENTGALAXY'] diff --git a/src/trackers/OE.py b/src/trackers/OE.py index 332fc6d7f..a9f6c6d09 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -25,7 +25,7 @@ def __init__(self, config): self.source_flag = 'OE' self.search_url = 'https://onlyencodes.cc/api/torrents/filter' self.upload_url = 'https://onlyencodes.cc/api/torrents/upload' - self.signature = f"\n[center][url=https://onlyencodes.cc/pages/1]OnlyEncodes Uploader - Powered by L4G's Upload Assistant[/url][/center]" + self.signature = f"\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = ['0neshot', '3LT0N', '4K4U', '4yEo', '$andra', '[Oj]', 'AFG', 'AkihitoSubs', 'AniHLS', 'Anime 
Time', 'AnimeRG', 'AniURL', 'AR', 'AROMA', 'ASW', 'aXXo', 'BakedFish', 'BiTOR', 'BHDStudio', 'BRrip', 'bonkai', 'Cleo', 'CM8', 'C4K', 'CrEwSaDe', 'core', 'd3g', 'DDR', 'DeadFish', 'DeeJayAhmed', 'DNL', 'ELiTE', 'EMBER', 'eSc', 'EVO', 'EZTV', 'FaNGDiNG0', 'FGT', 'fenix', 'FUM', 'FRDS', 'FROZEN', 'GalaxyTV', 'GalaxyRG', 'GERMini', 'Grym', 'GrymLegacy', 'HAiKU', 'HD2DVD', 'HDTime', 'Hi10', 'ION10', 'iPlanet', 'JacobSwaggedUp', 'JIVE', 'Judas', 'KiNGDOM', 'LAMA', 'Leffe', 'LiGaS', 'LOAD', 'LycanHD', 'MeGusta,' 'MezRips,' 'mHD,' 'Mr.Deadpool', 'mSD', 'NemDiggers', 'neoHEVC', 'NeXus', 'NhaNc3', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'NOIVTC', 'pahe.in', 'PlaySD', 'playXD', 'PRODJi', 'ProRes', 'project-gxs', 'PSA', 'QaS', 'Ranger', 'RAPiDCOWS', 'RARBG', 'Raze', 'RCDiVX', 'RDN', 'Reaktor', 'REsuRRecTioN', 'RMTeam', 'ROBOTS', 'rubix', 'SANTi', 'SHUTTERSHIT', 'SpaceFish', 'SPASM', 'SSA', 'TBS', 'Telly,' 'Tenrai-Sensei,' 'TERMiNAL,' 'TM', 'topaz', 'TSP', 'TSPxL', 'Trix', 'URANiME', 'UTR', 'VipapkSudios', 'ViSION', 'WAF', 'Wardevil', 'x0r', 'xRed', 'XS', 'YakuboEncodes', 'YIFY', 'YTS', 'YuiSubs', 'ZKBL', 'ZmN', 'ZMNT'] pass diff --git a/src/trackers/OTW.py b/src/trackers/OTW.py index 697a2197f..8834f5510 100644 --- a/src/trackers/OTW.py +++ b/src/trackers/OTW.py @@ -31,7 +31,7 @@ def __init__(self, config): self.source_flag = 'OTW' self.upload_url = 'https://oldtoons.world/api/torrents/upload' self.search_url = 'https://oldtoons.world/api/torrents/filter' - self.signature = f"\n[center][url=https://github.com/L4GSP1KE/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" + self.signature = f"\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = [""] pass diff --git a/src/trackers/TDC.py b/src/trackers/TDC.py index b22ec6cd7..74801157a 100644 --- a/src/trackers/TDC.py +++ b/src/trackers/TDC.py @@ -26,7 +26,7 @@ def __init__(self, config): self.source_flag = 'TDC' self.upload_url = 'https://thedarkcommunity.cc/api/torrents/upload' self.search_url = 'https://thedarkcommunity.cc/api/torrents/filter' - self.signature = "Created by L4G's Upload Assistant" + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = [""] pass diff --git a/src/trackers/UTP.py b/src/trackers/UTP.py index 5ba904811..496a52fd3 100644 --- a/src/trackers/UTP.py +++ b/src/trackers/UTP.py @@ -24,7 +24,7 @@ def __init__(self, config): self.search_url = 'https://utp.to/api/torrents/filter' self.torrent_url = 'https://utp.to/api/torrents/' self.upload_url = 'https://utp.to/api/torrents/upload' - self.signature = f"\n[center][url=https://utp.to/forums/topics/76]Created by UTOPIA Upload Assistant[/url][/center]" + self.signature = f"\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = [] pass From b05e8ed9e23f4662b85372a5d21d73d731ad1e92 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 18 Aug 2024 19:43:13 +1000 Subject: [PATCH 067/741] Revert docker tag for master branch --- .github/workflows/docker-image.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index 665023fcf..006845314 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -53,7 +53,7 @@ jobs: id: set_image_name run: | if [ "${{ github.ref_name }}" == "master" ]; then - IMAGE_TAG="latest" + 
IMAGE_TAG="master" elif [ "${{ github.ref_name }}" == "develop" ]; then IMAGE_TAG="develop" else From 761ccbf52a57a4077061a29565b24be330ffd67a Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 19 Aug 2024 07:38:11 +1000 Subject: [PATCH 068/741] Squashed commit of the following: commit e24d3979fd326f7cbe20a2c6e6e817210f5e2ce7 Author: Audionut Date: Sun Aug 18 14:45:09 2024 +1000 Filter the pymediainfo output instead commit 83c08827d750ad29efcc6e6f2f3bd4caa8f8798f Author: Audionut Date: Sun Aug 18 14:29:15 2024 +1000 Remove installation specific commit a5c19db6bfbf03e17287de378c4bf556a2c908ea Author: Audionut Date: Sun Aug 18 14:26:18 2024 +1000 Use command line mediainfo commit dd708db9af709cb4e34574fbb73d3c1f7a3e2b10 Author: Audionut Date: Sat Aug 17 21:21:39 2024 +1000 Clean mediainfo of unneeded lines --- src/prep.py | 151 +++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 139 insertions(+), 12 deletions(-) diff --git a/src/prep.py b/src/prep.py index e5e333813..a912ccbad 100644 --- a/src/prep.py +++ b/src/prep.py @@ -476,26 +476,153 @@ def get_video(self, videoloc, mode): Get and parse mediainfo """ def exportInfo(self, video, isdir, folder_id, base_dir, export_text): - if os.path.exists(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt") == False and export_text != False: + def filter_mediainfo(data): + filtered = { + "creatingLibrary": data.get("creatingLibrary"), + "media": { + "@ref": data["media"]["@ref"], + "track": [] + } + } + + for track in data["media"]["track"]: + if track["@type"] == "General": + filtered["media"]["track"].append({ + "@type": track["@type"], + "VideoCount": track.get("VideoCount"), + "AudioCount": track.get("AudioCount"), + "MenuCount": track.get("MenuCount"), + "FileExtension": track.get("FileExtension"), + "Format": track.get("Format"), + "Format_Version": track.get("Format_Version"), + "FileSize": track.get("FileSize"), + "Duration": track.get("Duration"), + "OverallBitRate": track.get("OverallBitRate"), + "FrameRate": track.get("FrameRate"), + "FrameCount": track.get("FrameCount"), + "StreamSize": track.get("StreamSize"), + "IsStreamable": track.get("IsStreamable"), + "File_Created_Date": track.get("File_Created_Date"), + "File_Created_Date_Local": track.get("File_Created_Date_Local"), + "File_Modified_Date": track.get("File_Modified_Date"), + "File_Modified_Date_Local": track.get("File_Modified_Date_Local"), + "Encoded_Application": track.get("Encoded_Application"), + "Encoded_Library": track.get("Encoded_Library"), + }) + elif track["@type"] == "Video": + filtered["media"]["track"].append({ + "@type": track["@type"], + "StreamOrder": track.get("StreamOrder"), + "ID": track.get("ID"), + "UniqueID": track.get("UniqueID"), + "Format": track.get("Format"), + "Format_Profile": track.get("Format_Profile"), + "Format_Level": track.get("Format_Level"), + "Format_Settings_CABAC": track.get("Format_Settings_CABAC"), + "Format_Settings_RefFrames": track.get("Format_Settings_RefFrames"), + "CodecID": track.get("CodecID"), + "Duration": track.get("Duration"), + "BitRate": track.get("BitRate"), + "Width": track.get("Width"), + "Height": track.get("Height"), + "Sampled_Width": track.get("Sampled_Width"), + "Sampled_Height": track.get("Sampled_Height"), + "PixelAspectRatio": track.get("PixelAspectRatio"), + "DisplayAspectRatio": track.get("DisplayAspectRatio"), + "FrameRate_Mode": track.get("FrameRate_Mode"), + "FrameRate": track.get("FrameRate"), + "FrameCount": track.get("FrameCount"), + "ColorSpace": track.get("ColorSpace"), + "ChromaSubsampling": 
track.get("ChromaSubsampling"), + "BitDepth": track.get("BitDepth"), + "ScanType": track.get("ScanType"), + "Delay": track.get("Delay"), + "Delay_Source": track.get("Delay_Source"), + "StreamSize": track.get("StreamSize"), + "Encoded_Library": track.get("Encoded_Library"), + "Encoded_Library_Name": track.get("Encoded_Library_Name"), + "Encoded_Library_Version": track.get("Encoded_Library_Version"), + "Encoded_Library_Settings": track.get("Encoded_Library_Settings"), + "Language": track.get("Language"), + "Default": track.get("Default"), + "Forced": track.get("Forced"), + }) + elif track["@type"] == "Audio": + filtered["media"]["track"].append({ + "@type": track["@type"], + "StreamOrder": track.get("StreamOrder"), + "ID": track.get("ID"), + "UniqueID": track.get("UniqueID"), + "Format": track.get("Format"), + "Format_Settings_Mode": track.get("Format_Settings_Mode"), + "Format_Settings_Endianness": track.get("Format_Settings_Endianness"), + "CodecID": track.get("CodecID"), + "Duration": track.get("Duration"), + "BitRate_Mode": track.get("BitRate_Mode"), + "BitRate": track.get("BitRate"), + "Channels": track.get("Channels"), + "ChannelPositions": track.get("ChannelPositions"), + "ChannelLayout": track.get("ChannelLayout"), + "SamplesPerFrame": track.get("SamplesPerFrame"), + "SamplingRate": track.get("SamplingRate"), + "SamplingCount": track.get("SamplingCount"), + "FrameRate": track.get("FrameRate"), + "BitDepth": track.get("BitDepth"), + "Compression_Mode": track.get("Compression_Mode"), + "Delay": track.get("Delay"), + "Delay_Source": track.get("Delay_Source"), + "Video_Delay": track.get("Video_Delay"), + "StreamSize": track.get("StreamSize"), + "Language": track.get("Language"), + "Default": track.get("Default"), + "Forced": track.get("Forced"), + }) + elif track["@type"] == "Text": + filtered["media"]["track"].append({ + "@type": track["@type"], + "@typeorder": track.get("@typeorder"), + "StreamOrder": track.get("StreamOrder"), + "ID": track.get("ID"), + "UniqueID": track.get("UniqueID"), + "Format": track.get("Format"), + "CodecID": track.get("CodecID"), + "Duration": track.get("Duration"), + "BitRate": track.get("BitRate"), + "FrameRate": track.get("FrameRate"), + "FrameCount": track.get("FrameCount"), + "ElementCount": track.get("ElementCount"), + "StreamSize": track.get("StreamSize"), + "Title": track.get("Title"), + "Language": track.get("Language"), + "Default": track.get("Default"), + "Forced": track.get("Forced"), + }) + elif track["@type"] == "Menu": + filtered["media"]["track"].append({ + "@type": track["@type"], + "extra": track.get("extra"), + }) + + return filtered + + if not os.path.exists(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt") and export_text: console.print("[bold yellow]Exporting MediaInfo...") - #MediaInfo to text - if isdir == False: + if not isdir: os.chdir(os.path.dirname(video)) - media_info = MediaInfo.parse(video, output="STRING", full=False, mediainfo_options={'inform_version' : '1'}) + media_info = MediaInfo.parse(video, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) with open(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt", 'w', newline="", encoding='utf-8') as export: export.write(media_info) - export.close() with open(f"{base_dir}/tmp/{folder_id}/MEDIAINFO_CLEANPATH.txt", 'w', newline="", encoding='utf-8') as export_cleanpath: export_cleanpath.write(media_info.replace(video, os.path.basename(video))) - export_cleanpath.close() console.print("[bold green]MediaInfo Exported.") - if 
os.path.exists(f"{base_dir}/tmp/{folder_id}/MediaInfo.json.txt") == False: - #MediaInfo to JSON - media_info = MediaInfo.parse(video, output="JSON", mediainfo_options={'inform_version' : '1'}) - export = open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", 'w', encoding='utf-8') - export.write(media_info) - export.close() + if not os.path.exists(f"{base_dir}/tmp/{folder_id}/MediaInfo.json.txt"): + media_info_json = MediaInfo.parse(video, output="JSON", mediainfo_options={'inform_version': '1'}) + media_info_dict = json.loads(media_info_json) + filtered_info = filter_mediainfo(media_info_dict) + with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", 'w', encoding='utf-8') as export: + json.dump(filtered_info, export, indent=4) + with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", 'r', encoding='utf-8') as f: mi = json.load(f) From f32ab7038e57e32d08eaf1b7b4a21389e72896d4 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 19 Aug 2024 18:33:21 +1000 Subject: [PATCH 069/741] Allow keep folder --- src/args.py | 1 + src/clients.py | 4 ++-- src/prep.py | 20 ++++++++++++-------- upload.py | 9 ++++++++- 4 files changed, 23 insertions(+), 11 deletions(-) diff --git a/src/args.py b/src/args.py index 413b63b25..b1f55cf3b 100644 --- a/src/args.py +++ b/src/args.py @@ -56,6 +56,7 @@ def parse(self, args, meta): parser.add_argument('-th', '--torrenthash', nargs='*', required=False, help="Torrent Hash to re-use from your client's session directory") parser.add_argument('-nfo', '--nfo', action='store_true', required=False, help="Use .nfo in directory for description") parser.add_argument('-k', '--keywords', nargs='*', required=False, help="Add comma seperated keywords e.g. 'keyword, keyword2, etc'") + parser.add_argument('-kf', '--keep-folder', action='store_true', required=False, help="Keep the folder containing the single file. Works only when supplying a directory as input. 
For uploads with poor filenames, like some scene releases.") parser.add_argument('-reg', '--region', nargs='*', required=False, help="Region for discs") parser.add_argument('-a', '--anon', action='store_true', required=False, help="Upload anonymously") parser.add_argument('-st', '--stream', action='store_true', required=False, help="Stream Optimized Upload") diff --git a/src/clients.py b/src/clients.py index c8d5fcba1..14a756d91 100644 --- a/src/clients.py +++ b/src/clients.py @@ -118,8 +118,8 @@ async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client console.log(torrent_path) if os.path.exists(torrent_path): torrent = Torrent.read(torrent_path) - # Reuse if disc and basename matches - if meta.get('is_disc', None) != None: + # Reuse if disc and basename matches or --keep-folder was specified + if meta.get('is_disc', None) != None or (meta['keep_folder'] and meta['isdir']): torrent_filepath = os.path.commonpath(torrent.files) if os.path.basename(meta['path']) in torrent_filepath: valid = True diff --git a/src/prep.py b/src/prep.py index a912ccbad..c53cdaf79 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2060,14 +2060,18 @@ def get_edition(self, video, bdinfo, filelist, manual_edition): def create_torrent(self, meta, path, output_filename, piece_size_max): piece_size_max = int(piece_size_max) if piece_size_max is not None else 0 if meta['isdir'] == True: - os.chdir(path) - globs = glob.glob1(path, "*.mkv") + glob.glob1(path, "*.mp4") + glob.glob1(path, "*.ts") - no_sample_globs = [] - for file in globs: - if not file.lower().endswith('sample.mkv') or "!sample" in file.lower(): - no_sample_globs.append(os.path.abspath(f"{path}{os.sep}{file}")) - if len(no_sample_globs) == 1: - path = meta['filelist'][0] + if meta['keep_folder']: + cli_ui.info('--keep-folder was specified. Using complete folder for torrent creation.') + path = path + else: + os.chdir(path) + globs = glob.glob1(path, "*.mkv") + glob.glob1(path, "*.mp4") + glob.glob1(path, "*.ts") + no_sample_globs = [] + for file in globs: + if not file.lower().endswith('sample.mkv') or "!sample" in file.lower(): + no_sample_globs.append(os.path.abspath(f"{path}{os.sep}{file}")) + if len(no_sample_globs) == 1: + path = meta['filelist'][0] if meta['is_disc']: include, exclude = "", "" else: diff --git a/upload.py b/upload.py index 4b1845a7d..cd2018809 100644 --- a/upload.py +++ b/upload.py @@ -467,7 +467,14 @@ def get_confirmation(meta): if meta.get('unattended', False) == False: get_missing(meta) ring_the_bell = "\a" if config['DEFAULT'].get("sfx_on_prompt", True) == True else "" # \a rings the bell - cli_ui.info_section(cli_ui.yellow, f"Is this correct?{ring_the_bell}") + cli_ui.info(ring_the_bell) + if meta['isdir'] and meta['keep_folder']: + cli_ui.info_section(cli_ui.yellow, f"Uploading with --keep-folder") + kf_confirm = cli_ui.ask_yes_no("You specified --keep-folder. Uploading in folders might not be allowed.
Are you sure you want to proceed?", default=False) + if not kf_confirm: + cli_ui.info('Aborting...') + exit() + cli_ui.info_section(cli_ui.yellow, f"Is this correct?") cli_ui.info(f"Name: {meta['name']}") confirm = cli_ui.ask_yes_no("Correct?", default=False) else: From f4d0c48dc27ccaea9cabdb4d7892f50a7eaf2892 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 19 Aug 2024 18:59:47 +1000 Subject: [PATCH 070/741] Return error response from ANT --- src/trackers/ANT.py | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index 1297f45ab..000222e73 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -110,19 +110,23 @@ async def upload(self, meta): headers = { 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers) - if response.status_code in [200, 201]: - response = response.json() - try: - console.print(response) - except Exception: - console.print("It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - open_torrent.close() + + try: + if not meta['debug']: + response = requests.post(url=self.upload_url, files=files, data=data, headers=headers) + if response.status_code in [200, 201]: + response_data = response.json() + else: + response_data = { + "error": f"Unexpected status code: {response.status_code}", + "response_content": response.text # or use response.json() if JSON is expected + } + console.print(response_data) + else: + console.print("[cyan]Request Data:") + console.print(data) + finally: + open_torrent.close() async def edit_desc(self, meta): return From 6339846cf6668cd54e54e126bfe6cc3661796990 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 19 Aug 2024 20:16:24 +1000 Subject: [PATCH 071/741] Extra mediainfo keys --- src/prep.py | 333 ++++++++++++++++++++++++++++------------------------ 1 file changed, 178 insertions(+), 155 deletions(-) diff --git a/src/prep.py b/src/prep.py index c53cdaf79..6f22365d8 100644 --- a/src/prep.py +++ b/src/prep.py @@ -469,166 +469,189 @@ def get_video(self, videoloc, mode): - - - """ Get and parse mediainfo """ - def exportInfo(self, video, isdir, folder_id, base_dir, export_text): - def filter_mediainfo(data): - filtered = { - "creatingLibrary": data.get("creatingLibrary"), - "media": { - "@ref": data["media"]["@ref"], - "track": [] - } +def exportInfo(self, video, isdir, folder_id, base_dir, export_text): + def filter_mediainfo(data): + filtered = { + "creatingLibrary": data.get("creatingLibrary"), + "media": { + "@ref": data["media"]["@ref"], + "track": [] } - - for track in data["media"]["track"]: - if track["@type"] == "General": - filtered["media"]["track"].append({ - "@type": track["@type"], - "VideoCount": track.get("VideoCount"), - "AudioCount": track.get("AudioCount"), - "MenuCount": track.get("MenuCount"), - "FileExtension": track.get("FileExtension"), - "Format": track.get("Format"), - "Format_Version": track.get("Format_Version"), - "FileSize": track.get("FileSize"), - "Duration": track.get("Duration"), - "OverallBitRate": track.get("OverallBitRate"), - "FrameRate": track.get("FrameRate"), - "FrameCount": track.get("FrameCount"), - "StreamSize": track.get("StreamSize"), - "IsStreamable": track.get("IsStreamable"), - "File_Created_Date": track.get("File_Created_Date"), - "File_Created_Date_Local": track.get("File_Created_Date_Local"), - 
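# Annotation: filter_mediainfo keeps only this explicit whitelist of keys per track
# type, so any field MediaInfo emits that is not listed here (for example the General
# track's CompleteName file path) is dropped from the exported MediaInfo.json.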
"File_Modified_Date": track.get("File_Modified_Date"), - "File_Modified_Date_Local": track.get("File_Modified_Date_Local"), - "Encoded_Application": track.get("Encoded_Application"), - "Encoded_Library": track.get("Encoded_Library"), - }) - elif track["@type"] == "Video": - filtered["media"]["track"].append({ - "@type": track["@type"], - "StreamOrder": track.get("StreamOrder"), - "ID": track.get("ID"), - "UniqueID": track.get("UniqueID"), - "Format": track.get("Format"), - "Format_Profile": track.get("Format_Profile"), - "Format_Level": track.get("Format_Level"), - "Format_Settings_CABAC": track.get("Format_Settings_CABAC"), - "Format_Settings_RefFrames": track.get("Format_Settings_RefFrames"), - "CodecID": track.get("CodecID"), - "Duration": track.get("Duration"), - "BitRate": track.get("BitRate"), - "Width": track.get("Width"), - "Height": track.get("Height"), - "Sampled_Width": track.get("Sampled_Width"), - "Sampled_Height": track.get("Sampled_Height"), - "PixelAspectRatio": track.get("PixelAspectRatio"), - "DisplayAspectRatio": track.get("DisplayAspectRatio"), - "FrameRate_Mode": track.get("FrameRate_Mode"), - "FrameRate": track.get("FrameRate"), - "FrameCount": track.get("FrameCount"), - "ColorSpace": track.get("ColorSpace"), - "ChromaSubsampling": track.get("ChromaSubsampling"), - "BitDepth": track.get("BitDepth"), - "ScanType": track.get("ScanType"), - "Delay": track.get("Delay"), - "Delay_Source": track.get("Delay_Source"), - "StreamSize": track.get("StreamSize"), - "Encoded_Library": track.get("Encoded_Library"), - "Encoded_Library_Name": track.get("Encoded_Library_Name"), - "Encoded_Library_Version": track.get("Encoded_Library_Version"), - "Encoded_Library_Settings": track.get("Encoded_Library_Settings"), - "Language": track.get("Language"), - "Default": track.get("Default"), - "Forced": track.get("Forced"), - }) - elif track["@type"] == "Audio": - filtered["media"]["track"].append({ - "@type": track["@type"], - "StreamOrder": track.get("StreamOrder"), - "ID": track.get("ID"), - "UniqueID": track.get("UniqueID"), - "Format": track.get("Format"), - "Format_Settings_Mode": track.get("Format_Settings_Mode"), - "Format_Settings_Endianness": track.get("Format_Settings_Endianness"), - "CodecID": track.get("CodecID"), - "Duration": track.get("Duration"), - "BitRate_Mode": track.get("BitRate_Mode"), - "BitRate": track.get("BitRate"), - "Channels": track.get("Channels"), - "ChannelPositions": track.get("ChannelPositions"), - "ChannelLayout": track.get("ChannelLayout"), - "SamplesPerFrame": track.get("SamplesPerFrame"), - "SamplingRate": track.get("SamplingRate"), - "SamplingCount": track.get("SamplingCount"), - "FrameRate": track.get("FrameRate"), - "BitDepth": track.get("BitDepth"), - "Compression_Mode": track.get("Compression_Mode"), - "Delay": track.get("Delay"), - "Delay_Source": track.get("Delay_Source"), - "Video_Delay": track.get("Video_Delay"), - "StreamSize": track.get("StreamSize"), - "Language": track.get("Language"), - "Default": track.get("Default"), - "Forced": track.get("Forced"), - }) - elif track["@type"] == "Text": - filtered["media"]["track"].append({ - "@type": track["@type"], - "@typeorder": track.get("@typeorder"), - "StreamOrder": track.get("StreamOrder"), - "ID": track.get("ID"), - "UniqueID": track.get("UniqueID"), - "Format": track.get("Format"), - "CodecID": track.get("CodecID"), - "Duration": track.get("Duration"), - "BitRate": track.get("BitRate"), - "FrameRate": track.get("FrameRate"), - "FrameCount": track.get("FrameCount"), - "ElementCount": 
track.get("ElementCount"), - "StreamSize": track.get("StreamSize"), - "Title": track.get("Title"), - "Language": track.get("Language"), - "Default": track.get("Default"), - "Forced": track.get("Forced"), - }) - elif track["@type"] == "Menu": - filtered["media"]["track"].append({ - "@type": track["@type"], - "extra": track.get("extra"), - }) - - return filtered - - if not os.path.exists(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt") and export_text: - console.print("[bold yellow]Exporting MediaInfo...") - if not isdir: - os.chdir(os.path.dirname(video)) - media_info = MediaInfo.parse(video, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) - with open(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt", 'w', newline="", encoding='utf-8') as export: - export.write(media_info) - with open(f"{base_dir}/tmp/{folder_id}/MEDIAINFO_CLEANPATH.txt", 'w', newline="", encoding='utf-8') as export_cleanpath: - export_cleanpath.write(media_info.replace(video, os.path.basename(video))) - console.print("[bold green]MediaInfo Exported.") - - if not os.path.exists(f"{base_dir}/tmp/{folder_id}/MediaInfo.json.txt"): - media_info_json = MediaInfo.parse(video, output="JSON", mediainfo_options={'inform_version': '1'}) - media_info_dict = json.loads(media_info_json) - filtered_info = filter_mediainfo(media_info_dict) - with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", 'w', encoding='utf-8') as export: - json.dump(filtered_info, export, indent=4) - - with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", 'r', encoding='utf-8') as f: - mi = json.load(f) + } - return mi - - + for track in data["media"]["track"]: + if track["@type"] == "General": + filtered["media"]["track"].append({ + "@type": track["@type"], + "UniqueID": track.get("UniqueID"), + "VideoCount": track.get("VideoCount"), + "AudioCount": track.get("AudioCount"), + "TextCount": track.get("TextCount"), + "MenuCount": track.get("MenuCount"), + "FileExtension": track.get("FileExtension"), + "Format": track.get("Format"), + "Format_Version": track.get("Format_Version"), + "FileSize": track.get("FileSize"), + "Duration": track.get("Duration"), + "OverallBitRate": track.get("OverallBitRate"), + "FrameRate": track.get("FrameRate"), + "FrameCount": track.get("FrameCount"), + "StreamSize": track.get("StreamSize"), + "IsStreamable": track.get("IsStreamable"), + "File_Created_Date": track.get("File_Created_Date"), + "File_Created_Date_Local": track.get("File_Created_Date_Local"), + "File_Modified_Date": track.get("File_Modified_Date"), + "File_Modified_Date_Local": track.get("File_Modified_Date_Local"), + "Encoded_Application": track.get("Encoded_Application"), + "Encoded_Library": track.get("Encoded_Library"), + }) + elif track["@type"] == "Video": + filtered["media"]["track"].append({ + "@type": track["@type"], + "StreamOrder": track.get("StreamOrder"), + "ID": track.get("ID"), + "UniqueID": track.get("UniqueID"), + "Format": track.get("Format"), + "Format_Profile": track.get("Format_Profile"), + "Format_Level": track.get("Format_Level"), + "Format_Tier": track.get("Format_Tier"), + "HDR_Format": track.get("HDR_Format"), + "HDR_Format_Version": track.get("HDR_Format_Version"), + "HDR_Format_Profile": track.get("HDR_Format_Profile"), + "HDR_Format_Level": track.get("HDR_Format_Level"), + "HDR_Format_Settings": track.get("HDR_Format_Settings"), + "HDR_Format_Compression": track.get("HDR_Format_Compression"), + "HDR_Format_Compatibility": track.get("HDR_Format_Compatibility"), + "CodecID": track.get("CodecID"), + "Duration": track.get("Duration"), 
+ "BitRate": track.get("BitRate"), + "Width": track.get("Width"), + "Height": track.get("Height"), + "Stored_Height": track.get("Stored_Height"), + "Sampled_Width": track.get("Sampled_Width"), + "Sampled_Height": track.get("Sampled_Height"), + "PixelAspectRatio": track.get("PixelAspectRatio"), + "DisplayAspectRatio": track.get("DisplayAspectRatio"), + "FrameRate_Mode": track.get("FrameRate_Mode"), + "FrameRate": track.get("FrameRate"), + "FrameRate_Num": track.get("FrameRate_Num"), + "FrameRate_Den": track.get("FrameRate_Den"), + "FrameCount": track.get("FrameCount"), + "ColorSpace": track.get("ColorSpace"), + "ChromaSubsampling": track.get("ChromaSubsampling"), + "ChromaSubsampling_Position": track.get("ChromaSubsampling_Position"), + "BitDepth": track.get("BitDepth"), + "Delay": track.get("Delay"), + "Delay_Source": track.get("Delay_Source"), + "StreamSize": track.get("StreamSize"), + "Language": track.get("Language"), + "Default": track.get("Default"), + "Forced": track.get("Forced"), + "colour_description_present": track.get("colour_description_present"), + "colour_description_present_Source": track.get("colour_description_present_Source"), + "colour_range": track.get("colour_range"), + "colour_range_Source": track.get("colour_range_Source"), + "colour_primaries": track.get("colour_primaries"), + "colour_primaries_Source": track.get("colour_primaries_Source"), + "transfer_characteristics": track.get("transfer_characteristics"), + "transfer_characteristics_Source": track.get("transfer_characteristics_Source"), + "matrix_coefficients": track.get("matrix_coefficients"), + "matrix_coefficients_Source": track.get("matrix_coefficients_Source"), + "MasteringDisplay_ColorPrimaries": track.get("MasteringDisplay_ColorPrimaries"), + "MasteringDisplay_ColorPrimaries_Source": track.get("MasteringDisplay_ColorPrimaries_Source"), + "MasteringDisplay_Luminance": track.get("MasteringDisplay_Luminance"), + "MasteringDisplay_Luminance_Source": track.get("MasteringDisplay_Luminance_Source"), + "MaxCLL": track.get("MaxCLL"), + "MaxCLL_Source": track.get("MaxCLL_Source"), + "MaxFALL": track.get("MaxFALL"), + "MaxFALL_Source": track.get("MaxFALL_Source"), + }) + elif track["@type"] == "Audio": + filtered["media"]["track"].append({ + "@type": track["@type"], + "StreamOrder": track.get("StreamOrder"), + "ID": track.get("ID"), + "UniqueID": track.get("UniqueID"), + "Format": track.get("Format"), + "Format_Commercial_IfAny": track.get("Format_Commercial_IfAny"), + "Format_Settings_Endianness": track.get("Format_Settings_Endianness"), + "Format_AdditionalFeatures": track.get("Format_AdditionalFeatures"), + "CodecID": track.get("CodecID"), + "Duration": track.get("Duration"), + "BitRate_Mode": track.get("BitRate_Mode"), + "BitRate": track.get("BitRate"), + "Channels": track.get("Channels"), + "ChannelPositions": track.get("ChannelPositions"), + "ChannelLayout": track.get("ChannelLayout"), + "SamplesPerFrame": track.get("SamplesPerFrame"), + "SamplingRate": track.get("SamplingRate"), + "SamplingCount": track.get("SamplingCount"), + "FrameRate": track.get("FrameRate"), + "FrameCount": track.get("FrameCount"), + "Compression_Mode": track.get("Compression_Mode"), + "Delay": track.get("Delay"), + "Delay_Source": track.get("Delay_Source"), + "Video_Delay": track.get("Video_Delay"), + "StreamSize": track.get("StreamSize"), + "Language": track.get("Language"), + "ServiceKind": track.get("ServiceKind"), + "Default": track.get("Default"), + "Forced": track.get("Forced"), + "extra": track.get("extra"), + }) + elif 
track["@type"] == "Text": + filtered["media"]["track"].append({ + "@type": track["@type"], + "@typeorder": track.get("@typeorder"), + "StreamOrder": track.get("StreamOrder"), + "ID": track.get("ID"), + "UniqueID": track.get("UniqueID"), + "Format": track.get("Format"), + "CodecID": track.get("CodecID"), + "Duration": track.get("Duration"), + "BitRate": track.get("BitRate"), + "FrameRate": track.get("FrameRate"), + "FrameCount": track.get("FrameCount"), + "ElementCount": track.get("ElementCount"), + "StreamSize": track.get("StreamSize"), + "Title": track.get("Title"), + "Language": track.get("Language"), + "Default": track.get("Default"), + "Forced": track.get("Forced"), + }) + elif track["@type"] == "Menu": + filtered["media"]["track"].append({ + "@type": track["@type"], + "extra": track.get("extra"), + }) + + return filtered + + if not os.path.exists(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt") and export_text: + console.print("[bold yellow]Exporting MediaInfo...") + if not isdir: + os.chdir(os.path.dirname(video)) + media_info = MediaInfo.parse(video, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) + with open(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt", 'w', newline="", encoding='utf-8') as export: + export.write(media_info) + with open(f"{base_dir}/tmp/{folder_id}/MEDIAINFO_CLEANPATH.txt", 'w', newline="", encoding='utf-8') as export_cleanpath: + export_cleanpath.write(media_info.replace(video, os.path.basename(video))) + console.print("[bold green]MediaInfo Exported.") + + if not os.path.exists(f"{base_dir}/tmp/{folder_id}/MediaInfo.json.txt"): + media_info_json = MediaInfo.parse(video, output="JSON", mediainfo_options={'inform_version': '1'}) + media_info_dict = json.loads(media_info_json) + filtered_info = filter_mediainfo(media_info_dict) + with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", 'w', encoding='utf-8') as export: + json.dump(filtered_info, export, indent=4) + + with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", 'r', encoding='utf-8') as f: + mi = json.load(f) + + return mi """ From 816b20d1ebba3e90de24c8fe938ee9b4547c5502 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 19 Aug 2024 20:41:27 +1000 Subject: [PATCH 072/741] FIX - incorrect tab --- src/prep.py | 312 ++++++++++++++++++++++++++-------------------------- 1 file changed, 156 insertions(+), 156 deletions(-) diff --git a/src/prep.py b/src/prep.py index 6f22365d8..55ea27224 100644 --- a/src/prep.py +++ b/src/prep.py @@ -472,163 +472,163 @@ def get_video(self, videoloc, mode): """ Get and parse mediainfo """ -def exportInfo(self, video, isdir, folder_id, base_dir, export_text): - def filter_mediainfo(data): - filtered = { - "creatingLibrary": data.get("creatingLibrary"), - "media": { - "@ref": data["media"]["@ref"], - "track": [] + def exportInfo(self, video, isdir, folder_id, base_dir, export_text): + def filter_mediainfo(data): + filtered = { + "creatingLibrary": data.get("creatingLibrary"), + "media": { + "@ref": data["media"]["@ref"], + "track": [] + } } - } - - for track in data["media"]["track"]: - if track["@type"] == "General": - filtered["media"]["track"].append({ - "@type": track["@type"], - "UniqueID": track.get("UniqueID"), - "VideoCount": track.get("VideoCount"), - "AudioCount": track.get("AudioCount"), - "TextCount": track.get("TextCount"), - "MenuCount": track.get("MenuCount"), - "FileExtension": track.get("FileExtension"), - "Format": track.get("Format"), - "Format_Version": track.get("Format_Version"), - "FileSize": track.get("FileSize"), - "Duration": 
track.get("Duration"), - "OverallBitRate": track.get("OverallBitRate"), - "FrameRate": track.get("FrameRate"), - "FrameCount": track.get("FrameCount"), - "StreamSize": track.get("StreamSize"), - "IsStreamable": track.get("IsStreamable"), - "File_Created_Date": track.get("File_Created_Date"), - "File_Created_Date_Local": track.get("File_Created_Date_Local"), - "File_Modified_Date": track.get("File_Modified_Date"), - "File_Modified_Date_Local": track.get("File_Modified_Date_Local"), - "Encoded_Application": track.get("Encoded_Application"), - "Encoded_Library": track.get("Encoded_Library"), - }) - elif track["@type"] == "Video": - filtered["media"]["track"].append({ - "@type": track["@type"], - "StreamOrder": track.get("StreamOrder"), - "ID": track.get("ID"), - "UniqueID": track.get("UniqueID"), - "Format": track.get("Format"), - "Format_Profile": track.get("Format_Profile"), - "Format_Level": track.get("Format_Level"), - "Format_Tier": track.get("Format_Tier"), - "HDR_Format": track.get("HDR_Format"), - "HDR_Format_Version": track.get("HDR_Format_Version"), - "HDR_Format_Profile": track.get("HDR_Format_Profile"), - "HDR_Format_Level": track.get("HDR_Format_Level"), - "HDR_Format_Settings": track.get("HDR_Format_Settings"), - "HDR_Format_Compression": track.get("HDR_Format_Compression"), - "HDR_Format_Compatibility": track.get("HDR_Format_Compatibility"), - "CodecID": track.get("CodecID"), - "Duration": track.get("Duration"), - "BitRate": track.get("BitRate"), - "Width": track.get("Width"), - "Height": track.get("Height"), - "Stored_Height": track.get("Stored_Height"), - "Sampled_Width": track.get("Sampled_Width"), - "Sampled_Height": track.get("Sampled_Height"), - "PixelAspectRatio": track.get("PixelAspectRatio"), - "DisplayAspectRatio": track.get("DisplayAspectRatio"), - "FrameRate_Mode": track.get("FrameRate_Mode"), - "FrameRate": track.get("FrameRate"), - "FrameRate_Num": track.get("FrameRate_Num"), - "FrameRate_Den": track.get("FrameRate_Den"), - "FrameCount": track.get("FrameCount"), - "ColorSpace": track.get("ColorSpace"), - "ChromaSubsampling": track.get("ChromaSubsampling"), - "ChromaSubsampling_Position": track.get("ChromaSubsampling_Position"), - "BitDepth": track.get("BitDepth"), - "Delay": track.get("Delay"), - "Delay_Source": track.get("Delay_Source"), - "StreamSize": track.get("StreamSize"), - "Language": track.get("Language"), - "Default": track.get("Default"), - "Forced": track.get("Forced"), - "colour_description_present": track.get("colour_description_present"), - "colour_description_present_Source": track.get("colour_description_present_Source"), - "colour_range": track.get("colour_range"), - "colour_range_Source": track.get("colour_range_Source"), - "colour_primaries": track.get("colour_primaries"), - "colour_primaries_Source": track.get("colour_primaries_Source"), - "transfer_characteristics": track.get("transfer_characteristics"), - "transfer_characteristics_Source": track.get("transfer_characteristics_Source"), - "matrix_coefficients": track.get("matrix_coefficients"), - "matrix_coefficients_Source": track.get("matrix_coefficients_Source"), - "MasteringDisplay_ColorPrimaries": track.get("MasteringDisplay_ColorPrimaries"), - "MasteringDisplay_ColorPrimaries_Source": track.get("MasteringDisplay_ColorPrimaries_Source"), - "MasteringDisplay_Luminance": track.get("MasteringDisplay_Luminance"), - "MasteringDisplay_Luminance_Source": track.get("MasteringDisplay_Luminance_Source"), - "MaxCLL": track.get("MaxCLL"), - "MaxCLL_Source": track.get("MaxCLL_Source"), - "MaxFALL": 
track.get("MaxFALL"), - "MaxFALL_Source": track.get("MaxFALL_Source"), - }) - elif track["@type"] == "Audio": - filtered["media"]["track"].append({ - "@type": track["@type"], - "StreamOrder": track.get("StreamOrder"), - "ID": track.get("ID"), - "UniqueID": track.get("UniqueID"), - "Format": track.get("Format"), - "Format_Commercial_IfAny": track.get("Format_Commercial_IfAny"), - "Format_Settings_Endianness": track.get("Format_Settings_Endianness"), - "Format_AdditionalFeatures": track.get("Format_AdditionalFeatures"), - "CodecID": track.get("CodecID"), - "Duration": track.get("Duration"), - "BitRate_Mode": track.get("BitRate_Mode"), - "BitRate": track.get("BitRate"), - "Channels": track.get("Channels"), - "ChannelPositions": track.get("ChannelPositions"), - "ChannelLayout": track.get("ChannelLayout"), - "SamplesPerFrame": track.get("SamplesPerFrame"), - "SamplingRate": track.get("SamplingRate"), - "SamplingCount": track.get("SamplingCount"), - "FrameRate": track.get("FrameRate"), - "FrameCount": track.get("FrameCount"), - "Compression_Mode": track.get("Compression_Mode"), - "Delay": track.get("Delay"), - "Delay_Source": track.get("Delay_Source"), - "Video_Delay": track.get("Video_Delay"), - "StreamSize": track.get("StreamSize"), - "Language": track.get("Language"), - "ServiceKind": track.get("ServiceKind"), - "Default": track.get("Default"), - "Forced": track.get("Forced"), - "extra": track.get("extra"), - }) - elif track["@type"] == "Text": - filtered["media"]["track"].append({ - "@type": track["@type"], - "@typeorder": track.get("@typeorder"), - "StreamOrder": track.get("StreamOrder"), - "ID": track.get("ID"), - "UniqueID": track.get("UniqueID"), - "Format": track.get("Format"), - "CodecID": track.get("CodecID"), - "Duration": track.get("Duration"), - "BitRate": track.get("BitRate"), - "FrameRate": track.get("FrameRate"), - "FrameCount": track.get("FrameCount"), - "ElementCount": track.get("ElementCount"), - "StreamSize": track.get("StreamSize"), - "Title": track.get("Title"), - "Language": track.get("Language"), - "Default": track.get("Default"), - "Forced": track.get("Forced"), - }) - elif track["@type"] == "Menu": - filtered["media"]["track"].append({ - "@type": track["@type"], - "extra": track.get("extra"), - }) - - return filtered + + for track in data["media"]["track"]: + if track["@type"] == "General": + filtered["media"]["track"].append({ + "@type": track["@type"], + "UniqueID": track.get("UniqueID"), + "VideoCount": track.get("VideoCount"), + "AudioCount": track.get("AudioCount"), + "TextCount": track.get("TextCount"), + "MenuCount": track.get("MenuCount"), + "FileExtension": track.get("FileExtension"), + "Format": track.get("Format"), + "Format_Version": track.get("Format_Version"), + "FileSize": track.get("FileSize"), + "Duration": track.get("Duration"), + "OverallBitRate": track.get("OverallBitRate"), + "FrameRate": track.get("FrameRate"), + "FrameCount": track.get("FrameCount"), + "StreamSize": track.get("StreamSize"), + "IsStreamable": track.get("IsStreamable"), + "File_Created_Date": track.get("File_Created_Date"), + "File_Created_Date_Local": track.get("File_Created_Date_Local"), + "File_Modified_Date": track.get("File_Modified_Date"), + "File_Modified_Date_Local": track.get("File_Modified_Date_Local"), + "Encoded_Application": track.get("Encoded_Application"), + "Encoded_Library": track.get("Encoded_Library"), + }) + elif track["@type"] == "Video": + filtered["media"]["track"].append({ + "@type": track["@type"], + "StreamOrder": track.get("StreamOrder"), + "ID": 
track.get("ID"), + "UniqueID": track.get("UniqueID"), + "Format": track.get("Format"), + "Format_Profile": track.get("Format_Profile"), + "Format_Level": track.get("Format_Level"), + "Format_Tier": track.get("Format_Tier"), + "HDR_Format": track.get("HDR_Format"), + "HDR_Format_Version": track.get("HDR_Format_Version"), + "HDR_Format_Profile": track.get("HDR_Format_Profile"), + "HDR_Format_Level": track.get("HDR_Format_Level"), + "HDR_Format_Settings": track.get("HDR_Format_Settings"), + "HDR_Format_Compression": track.get("HDR_Format_Compression"), + "HDR_Format_Compatibility": track.get("HDR_Format_Compatibility"), + "CodecID": track.get("CodecID"), + "Duration": track.get("Duration"), + "BitRate": track.get("BitRate"), + "Width": track.get("Width"), + "Height": track.get("Height"), + "Stored_Height": track.get("Stored_Height"), + "Sampled_Width": track.get("Sampled_Width"), + "Sampled_Height": track.get("Sampled_Height"), + "PixelAspectRatio": track.get("PixelAspectRatio"), + "DisplayAspectRatio": track.get("DisplayAspectRatio"), + "FrameRate_Mode": track.get("FrameRate_Mode"), + "FrameRate": track.get("FrameRate"), + "FrameRate_Num": track.get("FrameRate_Num"), + "FrameRate_Den": track.get("FrameRate_Den"), + "FrameCount": track.get("FrameCount"), + "ColorSpace": track.get("ColorSpace"), + "ChromaSubsampling": track.get("ChromaSubsampling"), + "ChromaSubsampling_Position": track.get("ChromaSubsampling_Position"), + "BitDepth": track.get("BitDepth"), + "Delay": track.get("Delay"), + "Delay_Source": track.get("Delay_Source"), + "StreamSize": track.get("StreamSize"), + "Language": track.get("Language"), + "Default": track.get("Default"), + "Forced": track.get("Forced"), + "colour_description_present": track.get("colour_description_present"), + "colour_description_present_Source": track.get("colour_description_present_Source"), + "colour_range": track.get("colour_range"), + "colour_range_Source": track.get("colour_range_Source"), + "colour_primaries": track.get("colour_primaries"), + "colour_primaries_Source": track.get("colour_primaries_Source"), + "transfer_characteristics": track.get("transfer_characteristics"), + "transfer_characteristics_Source": track.get("transfer_characteristics_Source"), + "matrix_coefficients": track.get("matrix_coefficients"), + "matrix_coefficients_Source": track.get("matrix_coefficients_Source"), + "MasteringDisplay_ColorPrimaries": track.get("MasteringDisplay_ColorPrimaries"), + "MasteringDisplay_ColorPrimaries_Source": track.get("MasteringDisplay_ColorPrimaries_Source"), + "MasteringDisplay_Luminance": track.get("MasteringDisplay_Luminance"), + "MasteringDisplay_Luminance_Source": track.get("MasteringDisplay_Luminance_Source"), + "MaxCLL": track.get("MaxCLL"), + "MaxCLL_Source": track.get("MaxCLL_Source"), + "MaxFALL": track.get("MaxFALL"), + "MaxFALL_Source": track.get("MaxFALL_Source"), + }) + elif track["@type"] == "Audio": + filtered["media"]["track"].append({ + "@type": track["@type"], + "StreamOrder": track.get("StreamOrder"), + "ID": track.get("ID"), + "UniqueID": track.get("UniqueID"), + "Format": track.get("Format"), + "Format_Commercial_IfAny": track.get("Format_Commercial_IfAny"), + "Format_Settings_Endianness": track.get("Format_Settings_Endianness"), + "Format_AdditionalFeatures": track.get("Format_AdditionalFeatures"), + "CodecID": track.get("CodecID"), + "Duration": track.get("Duration"), + "BitRate_Mode": track.get("BitRate_Mode"), + "BitRate": track.get("BitRate"), + "Channels": track.get("Channels"), + "ChannelPositions": 
track.get("ChannelPositions"), + "ChannelLayout": track.get("ChannelLayout"), + "SamplesPerFrame": track.get("SamplesPerFrame"), + "SamplingRate": track.get("SamplingRate"), + "SamplingCount": track.get("SamplingCount"), + "FrameRate": track.get("FrameRate"), + "FrameCount": track.get("FrameCount"), + "Compression_Mode": track.get("Compression_Mode"), + "Delay": track.get("Delay"), + "Delay_Source": track.get("Delay_Source"), + "Video_Delay": track.get("Video_Delay"), + "StreamSize": track.get("StreamSize"), + "Language": track.get("Language"), + "ServiceKind": track.get("ServiceKind"), + "Default": track.get("Default"), + "Forced": track.get("Forced"), + "extra": track.get("extra"), + }) + elif track["@type"] == "Text": + filtered["media"]["track"].append({ + "@type": track["@type"], + "@typeorder": track.get("@typeorder"), + "StreamOrder": track.get("StreamOrder"), + "ID": track.get("ID"), + "UniqueID": track.get("UniqueID"), + "Format": track.get("Format"), + "CodecID": track.get("CodecID"), + "Duration": track.get("Duration"), + "BitRate": track.get("BitRate"), + "FrameRate": track.get("FrameRate"), + "FrameCount": track.get("FrameCount"), + "ElementCount": track.get("ElementCount"), + "StreamSize": track.get("StreamSize"), + "Title": track.get("Title"), + "Language": track.get("Language"), + "Default": track.get("Default"), + "Forced": track.get("Forced"), + }) + elif track["@type"] == "Menu": + filtered["media"]["track"].append({ + "@type": track["@type"], + "extra": track.get("extra"), + }) + + return filtered if not os.path.exists(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt") and export_text: console.print("[bold yellow]Exporting MediaInfo...") From a4ad5735f94e32b03619c4086f254cb59b064d77 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 19 Aug 2024 20:43:40 +1000 Subject: [PATCH 073/741] Python tabbing --- src/prep.py | 44 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/src/prep.py b/src/prep.py index 55ea27224..e68bca259 100644 --- a/src/prep.py +++ b/src/prep.py @@ -630,28 +630,28 @@ def filter_mediainfo(data): return filtered - if not os.path.exists(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt") and export_text: - console.print("[bold yellow]Exporting MediaInfo...") - if not isdir: - os.chdir(os.path.dirname(video)) - media_info = MediaInfo.parse(video, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) - with open(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt", 'w', newline="", encoding='utf-8') as export: - export.write(media_info) - with open(f"{base_dir}/tmp/{folder_id}/MEDIAINFO_CLEANPATH.txt", 'w', newline="", encoding='utf-8') as export_cleanpath: - export_cleanpath.write(media_info.replace(video, os.path.basename(video))) - console.print("[bold green]MediaInfo Exported.") - - if not os.path.exists(f"{base_dir}/tmp/{folder_id}/MediaInfo.json.txt"): - media_info_json = MediaInfo.parse(video, output="JSON", mediainfo_options={'inform_version': '1'}) - media_info_dict = json.loads(media_info_json) - filtered_info = filter_mediainfo(media_info_dict) - with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", 'w', encoding='utf-8') as export: - json.dump(filtered_info, export, indent=4) - - with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", 'r', encoding='utf-8') as f: - mi = json.load(f) - - return mi + if not os.path.exists(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt") and export_text: + console.print("[bold yellow]Exporting MediaInfo...") + if not isdir: + os.chdir(os.path.dirname(video)) + 
media_info = MediaInfo.parse(video, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) + with open(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt", 'w', newline="", encoding='utf-8') as export: + export.write(media_info) + with open(f"{base_dir}/tmp/{folder_id}/MEDIAINFO_CLEANPATH.txt", 'w', newline="", encoding='utf-8') as export_cleanpath: + export_cleanpath.write(media_info.replace(video, os.path.basename(video))) + console.print("[bold green]MediaInfo Exported.") + + if not os.path.exists(f"{base_dir}/tmp/{folder_id}/MediaInfo.json.txt"): + media_info_json = MediaInfo.parse(video, output="JSON", mediainfo_options={'inform_version': '1'}) + media_info_dict = json.loads(media_info_json) + filtered_info = filter_mediainfo(media_info_dict) + with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", 'w', encoding='utf-8') as export: + json.dump(filtered_info, export, indent=4) + + with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", 'r', encoding='utf-8') as f: + mi = json.load(f) + + return mi """ From 0a90a2c87b8651aa8705c7092d4d640facabd490 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 20 Aug 2024 15:19:42 +1000 Subject: [PATCH 074/741] ANT - recreate torrent if pieces > 1000 --- src/trackers/ANT.py | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index 000222e73..1bc0abdc6 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -6,6 +6,9 @@ import platform from str2bool import str2bool from pymediainfo import MediaInfo +import math +from torf import Torrent +from pathlib import Path from src.trackers.COMMON import COMMON from src.console import console @@ -70,7 +73,19 @@ async def get_flags(self, meta): async def upload(self, meta): common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) + torrent_filename = "BASE" + torrent = Torrent.read(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") + total_size = sum(file.size for file in torrent.files) + piece_size = math.ceil(total_size / 999) + piece_size = max(131072, min(piece_size, 16777216)) + if torrent.pieces > 1000: + console.print("[red]Torrent has more than 1000 pieces. 
Generating a new .torrent") + from src.prep import Prep + prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) + prep.create_torrent(meta, Path(meta['path']), "ANT", piece_size_max=piece_size) + torrent_filename = "ANT" + + await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename) flags = await self.get_flags(meta) if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 From cba8e9f87d62c66980c31f44a0cacc39e546c97b Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 20 Aug 2024 15:31:02 +1000 Subject: [PATCH 075/741] Update dupe feedback --- cogs/commands.py | 2 +- upload.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cogs/commands.py b/cogs/commands.py index a67fe93ca..7a92faa64 100644 --- a/cogs/commands.py +++ b/cogs/commands.py @@ -601,7 +601,7 @@ async def dupe_embed(self, dupes, meta, emojis, channel): else: dupe_text = "\n\n•".join(dupes) dupe_text = f"```•{dupe_text}```" - embed = discord.Embed(title="Are these dupes?", description=dupe_text, color=0xff0000) + embed = discord.Embed(title="Check if these are actually dupes!", description=dupe_text, color=0xff0000) embed.set_footer(text=f"{emojis['CANCEL']} to abort upload | {emojis['UPLOAD']} to upload anyways") message = await channel.send(embed=embed) await message.add_reaction(emojis['CANCEL']) diff --git a/upload.py b/upload.py index cd2018809..feb7ee52e 100644 --- a/upload.py +++ b/upload.py @@ -491,7 +491,7 @@ def dupe_check(dupes, meta): console.print() dupe_text = "\n".join(dupes) console.print() - cli_ui.info_section(cli_ui.bold, "Are these dupes?") + cli_ui.info_section(cli_ui.bold, "Check if these are actually dupes!") cli_ui.info(dupe_text) if meta['unattended']: if meta.get('dupe', False) == False: From 71c195eaa899e0056703ecd7e9f43a762b2a80df Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 20 Aug 2024 18:33:04 +1000 Subject: [PATCH 076/741] Retry different image host if not allowed - MTV for now --- src/prep.py | 285 +++++++++++++++++++------------------------- src/trackers/MTV.py | 201 +++++++++++++++---------------- 2 files changed, 222 insertions(+), 264 deletions(-) diff --git a/src/prep.py b/src/prep.py index e68bca259..a94cadd75 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2196,21 +2196,16 @@ def create_base_from_existing_torrent(self, torrentpath, base_dir, uuid): """ Upload Screenshots """ - def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_img_list, return_dict): - # if int(total_screens) != 0 or len(meta.get('image_list', [])) > total_screens: - # if custom_img_list == []: - # console.print('[yellow]Uploading Screens') + def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=False): os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") - img_host = self.config['DEFAULT'][f'img_host_{img_host_num}'] - if img_host != self.img_host and meta.get('imghost', None) == None: - img_host = self.img_host - i -= 1 - elif img_host_num == 1 and meta.get('imghost') != img_host: - img_host = meta.get('imghost') - img_host_num = 0 + initial_img_host = self.config['DEFAULT'][f'img_host_{img_host_num}'] + img_host = initial_img_host + console.print(f"[cyan]Starting image upload with host: {img_host}") + image_list = [] newhost_list = [] - if custom_img_list != []: + + if custom_img_list: image_glob = custom_img_list existing_images = [] else: @@ -2218,157 +2213,127 @@ def 
upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i if 'POSTER.png' in image_glob: image_glob.remove('POSTER.png') existing_images = meta.get('image_list', []) - if len(existing_images) < total_screens: - if img_host == 'imgbox': - nest_asyncio.apply() - console.print("[green]Uploading Screens to Imgbox...") - image_list = asyncio.run(self.imgbox_upload(f"{meta['base_dir']}/tmp/{meta['uuid']}", image_glob)) - if image_list == []: - if img_host_num == 0: - img_host_num = 1 - console.print("[yellow]Imgbox failed, trying next image host") - image_list, i = self.upload_screens(meta, screens - i , img_host_num + 1, i, total_screens, [], return_dict) - else: - with Progress( - TextColumn("[bold green]Uploading Screens..."), - BarColumn(), - "[cyan]{task.completed}/{task.total}", - TimeRemainingColumn() - ) as progress: - upload_task = progress.add_task(f"[green]Uploading Screens to {img_host}...", total = len(image_glob[-screens:])) - timeout=60 - for image in image_glob[-screens:]: - if img_host == "imgbb": - url = "https://api.imgbb.com/1/upload" - data = { - 'key': self.config['DEFAULT']['imgbb_api'], - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - try: - response = requests.post(url, data = data,timeout=timeout) - response = response.json() - if response.get('success') != True: - progress.console.print(response) - img_url = response['data'].get('medium', response['data']['image'])['url'] - web_url = response['data']['url_viewer'] - raw_url = response['data']['image']['url'] - except Exception: - progress.console.print("[yellow]imgbb failed, trying next image host") - progress.stop() - newhost_list, i = self.upload_screens(meta, screens - i , img_host_num + 1, i, total_screens, [], return_dict) - elif img_host == "freeimage.host": - progress.console.print("[red]Support for freeimage.host has been removed. Please remove from your config") - progress.console.print("continuing in 15 seconds") - time.sleep(15) - progress.stop() - newhost_list, i = self.upload_screens(meta, screens - i, img_host_num + 1, i, total_screens, [], return_dict) - elif img_host == "pixhost": - url = "https://api.pixhost.to/images" - data = { - 'content_type': '0', - 'max_th_size': 350, - } - files = { - 'img': ('file-upload[0]', open(image, 'rb')), - } - try: - response = requests.post(url, data=data, files=files,timeout=timeout) - if response.status_code != 200: - progress.console.print(response) - response = response.json() - raw_url = response['th_url'].replace('https://t', 'https://img').replace('/thumbs/', '/images/') - img_url = response['th_url'] - web_url = response['show_url'] - except Exception: - progress.console.print("[yellow]pixhost failed, trying next image host") - progress.stop() - newhost_list, i = self.upload_screens(meta, screens - i , img_host_num + 1, i, total_screens, [], return_dict) - elif img_host == "ptpimg": - payload = { - 'format' : 'json', - 'api_key' : self.config['DEFAULT']['ptpimg_api'] # API key is obtained from inspecting element on the upload page. 
- } - files = [('file-upload[0]', open(image, 'rb'))] - headers = { 'referer': 'https://ptpimg.me/index.php'} - url = "https://ptpimg.me/upload.php" - # tasks.append(asyncio.ensure_future(self.upload_image(session, url, data, headers, files=None))) - try: - response = requests.post("https://ptpimg.me/upload.php", headers=headers, data=payload, files=files) - response = response.json() - ptpimg_code = response[0]['code'] - ptpimg_ext = response[0]['ext'] - img_url = f"https://ptpimg.me/{ptpimg_code}.{ptpimg_ext}" - web_url = f"https://ptpimg.me/{ptpimg_code}.{ptpimg_ext}" - raw_url = f"https://ptpimg.me/{ptpimg_code}.{ptpimg_ext}" - except: - progress.console.print("[yellow]ptpimg failed, trying next image host") - progress.stop() - newhost_list, i = self.upload_screens(meta, screens - i, img_host_num + 1, i, total_screens, [], return_dict) - elif img_host == "lensdump": - url = "https://lensdump.com/api/1/upload" - data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - headers = { - 'X-API-Key': self.config['DEFAULT']['lensdump_api'], - } - try: - response = requests.post(url, data=data, headers=headers, timeout=timeout) - response = response.json() - if response.get('status_code') != 200: - progress.console.print(response) - img_url = response['data'].get('medium', response['data']['image'])['url'] - web_url = response['data']['url_viewer'] - raw_url = response['data']['image']['url'] - except Exception: - progress.console.print("[yellow]lensdump failed, trying next image host") - progress.stop() - newhost_list, i = self.upload_screens(meta, screens - i , img_host_num + 1, i, total_screens, [], return_dict) - elif img_host == "ptscreens": - url = "https://ptscreens.com/api/1/upload" - data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - headers = { - 'X-API-Key': self.config['DEFAULT']['ptscreens_api'], - } - try: - response = requests.post(url, data=data, headers=headers, timeout=timeout) - response = response.json() - if response.get('status_code') != 200: - progress.console.print(response) - img_url = response['data'].get('medium', response['data']['image'])['url'] - web_url = response['data']['url_viewer'] - raw_url = response['data']['image']['url'] - except Exception: - progress.console.print("[yellow]PT Screens failed, trying next image host") - progress.stop() - newhost_list, i = self.upload_screens(meta, screens - i , img_host_num + 1, i, total_screens, [], return_dict) - else: - console.print("[bold red]Please choose a supported image host in your config") - exit() + # If the images are already uploaded, skip re-uploading unless in retry mode + if len(existing_images) >= total_screens and not retry_mode: + console.print(f"[yellow]Skipping upload because images are already uploaded to {img_host}. 
Existing images: {len(existing_images)}, Required: {total_screens}") + return existing_images, total_screens + with Progress( + TextColumn("[bold green]Uploading Screens..."), + BarColumn(), + "[cyan]{task.completed}/{task.total}", + TimeRemainingColumn() + ) as progress: + upload_task = progress.add_task(f"[green]Uploading Screens to {img_host}...", total=len(image_glob[-screens:])) - - if len(newhost_list) >=1: - image_list.extend(newhost_list) - else: - image_dict = {} - image_dict['web_url'] = web_url - image_dict['img_url'] = img_url - image_dict['raw_url'] = raw_url - image_list.append(image_dict) - # cli_ui.info_count(i, total_screens, "Uploaded") - progress.advance(upload_task) - i += 1 - time.sleep(0.5) - if i >= total_screens: - break - return_dict['image_list'] = image_list - return image_list, i - else: - return meta.get('image_list', []), total_screens + for image in image_glob[-screens:]: + try: + timeout = 60 + if img_host == "ptpimg": + payload = { + 'format': 'json', + 'api_key': self.config['DEFAULT']['ptpimg_api'] + } + files = [('file-upload[0]', open(image, 'rb'))] + headers = {'referer': 'https://ptpimg.me/index.php'} + response = requests.post("https://ptpimg.me/upload.php", headers=headers, data=payload, files=files) + response = response.json() + ptpimg_code = response[0]['code'] + ptpimg_ext = response[0]['ext'] + img_url = f"https://ptpimg.me/{ptpimg_code}.{ptpimg_ext}" + raw_url = img_url + web_url = img_url + elif img_host == "imgbb": + url = "https://api.imgbb.com/1/upload" + data = { + 'key': self.config['DEFAULT']['imgbb_api'], + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + response = requests.post(url, data=data, timeout=timeout) + response = response.json() + img_url = response['data']['image']['url'] + raw_url = img_url + web_url = img_url + elif img_host == "ptscreens": + url = "https://ptscreens.com/api/1/upload" + data = { + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + headers = { + 'X-API-Key': self.config['DEFAULT']['ptscreens_api'], + } + response = requests.post(url, data=data, headers=headers, timeout=timeout) + response = response.json() + if response.get('status_code') != 200: + console.print("[yellow]PT Screens failed, trying next image host") + img_host_num += 1 + return self.upload_screens(meta, screens - i, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode) + img_url = response['data']['image']['url'] + raw_url = img_url + web_url = img_url + elif img_host == "pixhost": + url = "https://api.pixhost.to/images" + data = { + 'content_type': '0', + 'max_th_size': 350, + } + files = { + 'img': ('file-upload[0]', open(image, 'rb')), + } + response = requests.post(url, data=data, files=files, timeout=timeout) + if response.status_code != 200: + console.print("[yellow]Pixhost failed, trying next image host") + img_host_num += 1 + return self.upload_screens(meta, screens - i, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode) + response = response.json() + raw_url = response['th_url'].replace('https://t', 'https://img').replace('/thumbs/', '/images/') + img_url = response['th_url'] + web_url = response['show_url'] + elif img_host == "lensdump": + url = "https://lensdump.com/api/1/upload" + data = { + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + headers = { + 'X-API-Key': self.config['DEFAULT']['lensdump_api'], + } + response = requests.post(url, data=data, headers=headers, timeout=timeout) + response = response.json() + if 
response.get('status_code') != 200: + console.print("[yellow]Lensdump failed, trying next image host") + img_host_num += 1 + return self.upload_screens(meta, screens - i, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode) + img_url = response['data']['image']['url'] + raw_url = img_url + web_url = response['data']['url_viewer'] + else: + console.print(f"[red]Unsupported image host: {img_host}") + img_host_num += 1 + return self.upload_screens(meta, screens - i, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode) + + # Update progress bar and print the result on the same line + progress.console.print(f"[cyan]Uploaded image {i+1}/{total_screens}: {raw_url}", end='\r') + + # If the upload was successful, add the image details + image_dict = {'img_url': img_url, 'raw_url': raw_url, 'web_url': web_url} + image_list.append(image_dict) + progress.advance(upload_task) + i += 1 + + except Exception as e: + console.print(f"[yellow]Failed to upload {image} to {img_host}. Exception: {str(e)}") + img_host_num += 1 + return self.upload_screens(meta, screens - i, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode) + + time.sleep(0.5) + + if i >= total_screens: + break + + return_dict['image_list'] = image_list + console.print(f"\n[cyan]Completed uploading images. Total uploaded: {len(image_list)}") + return image_list, i async def imgbox_upload(self, chdir, image_glob): os.chdir(chdir) diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index cee1de771..2a7d3132f 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -39,125 +39,118 @@ async def upload(self, meta): common = COMMON(config=self.config) cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/MTV.pkl") - torrent_filename = "BASE" - if not Torrent.read(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent").piece_size <= 8388608: - console.print("[red]Piece size is OVER 8M and does not work on MTV. 
Generating a new .torrent") + # Initiate the upload with retry logic + await self.upload_with_retry(meta, cookiefile, common) + + async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): + approved_image_hosts = ['ptpimg', 'imgbox'] + + while img_host_index <= len(approved_image_hosts): + current_img_host_key = f'img_host_{img_host_index}' + current_img_host = self.config.get('DEFAULT', {}).get(current_img_host_key) + + if not current_img_host or current_img_host not in approved_image_hosts: + console.print("[red]Your preferred image host is not supported at MTV, re-uploading to an allowed image host.") + img_host_index += 1 + continue + + meta['imghost'] = current_img_host + + torrent_filename = "BASE" + torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent" + torrent = Torrent.read(torrent_path) + from src.prep import Prep prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) - prep.create_torrent(meta, Path(meta['path']), "MTV", piece_size_max=8) - torrent_filename = "MTV" - # Hash to f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename) - - # getting category HD Episode, HD Movies, SD Season, HD Season, SD Episode, SD Movies - cat_id = await self.get_cat_id(meta) - # res 480 720 1080 1440 2160 4k 6k Other - resolution_id = await self.get_res_id(meta['resolution']) - # getting source HDTV SDTV TV Rip DVD DVD Rip VHS BluRay BDRip WebDL WebRip Mixed Unknown - source_id = await self.get_source_id(meta) - # get Origin Internal Scene P2P User Mixed Other. P2P will be selected if not scene - origin_id = await self.get_origin_id(meta) - # getting tags - des_tags = await self.get_tags(meta) - # check for approved imghosts - approved_imghosts = ['ptpimg', 'imgbox', 'empornium', 'ibb'] - if not all(any(x in image['raw_url'] for x in approved_imghosts) for image in meta['image_list']): - console.print("[red]Unsupported image host detected, please use one of the approved imagehosts") - return - # getting description - await self.edit_desc(meta) - # getting groups des so things like imdb link, tmdb link etc.. - group_desc = await self.edit_group_desc(meta) - #poster is optional so no longer getting it as its a pain with having to use supported image provider - # poster = await self.get_poster(meta) - - #edit name to match MTV standards - mtv_name = await self.edit_name(meta) - # anon - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: - anon = 0 - else: - anon = 1 + if torrent.piece_size > 8388608: + console.print("[red]Piece size is OVER 8M and does not work on MTV. 
Generating a new .torrent") + prep.create_torrent(meta, Path(meta['path']), "MTV", piece_size_max=8) + torrent_filename = "MTV" - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename) - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') as f: - tfile = f.read() - f.close() + cat_id = await self.get_cat_id(meta) + resolution_id = await self.get_res_id(meta['resolution']) + source_id = await self.get_source_id(meta) + origin_id = await self.get_origin_id(meta) + des_tags = await self.get_tags(meta) - ## todo need to check the torrent and make sure its not more than 8MB + # Screenshot and upload process + prep.screenshots(Path(meta['path']), meta['name'], meta['uuid'], meta['base_dir'], meta) + return_dict = {} + prep.upload_screens(meta, screens=meta['screens'], img_host_num=img_host_index, i=0, total_screens=meta['screens'], custom_img_list=[], return_dict=return_dict, retry_mode=True) + + # Update meta['image_list'] with uploaded images + meta['image_list'] = return_dict.get('image_list', []) - # need to pass the name of the file along with the torrent - files = { - 'file_input': (f"{meta['name']}.torrent", tfile) - } + # Ensure images are from approved hosts + if not all(any(x in image['raw_url'] for x in approved_image_hosts) for image in meta['image_list']): + console.print("[red]Unsupported image host detected, please use one of the approved imagehosts") + img_host_index += 1 + continue - data = { - # 'image': poster, - 'image': '', - 'title': mtv_name, - 'category': cat_id, - 'Resolution': resolution_id, - 'source': source_id, - 'origin': origin_id, - 'taglist': des_tags, - 'desc': desc, - 'groupDesc': group_desc, - 'ignoredupes': '1', - 'genre_tags': '---', - 'autocomplete_toggle': 'on', - 'fontfont': '-1', - 'fontsize': '-1', - 'auth': await self.get_auth(cookiefile), - 'anonymous': anon, - 'submit': 'true', - } + # Edit description and other details + await self.edit_desc(meta) + group_desc = await self.edit_group_desc(meta) + mtv_name = await self.edit_name(meta) - # cookie = {'sid': self.config['TRACKERS'][self.tracker].get('sid'), 'cid': self.config['TRACKERS'][self.tracker].get('cid')} + anon = 1 if meta['anon'] != 0 or bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) else 0 - param = { - } + desc_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" + desc = open(desc_path, 'r').read() - if meta['imdb_id'] not in ("0", "", None): - param['imdbID'] = "tt" + meta['imdb_id'] - if meta['tmdb'] != 0: - param['tmdbID'] = meta['tmdb'] - if meta['tvdb_id'] != 0: - param['thetvdbID'] = meta['tvdb_id'] - if meta['tvmaze_id'] != 0: - param['tvmazeID'] = meta['tvmaze_id'] - # if meta['mal_id'] != 0: - # param['malid'] = meta['mal_id'] + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + with open(torrent_file_path, 'rb') as f: + tfile = f.read() + files = { + 'file_input': (f"{meta['name']}.torrent", tfile) + } - if meta['debug'] == False: - with requests.Session() as session: - with open(cookiefile, 'rb') as cf: - session.cookies.update(pickle.load(cf)) - response = session.post(url=self.upload_url, data=data, files=files) - try: - if "torrents.php" in response.url: - console.print(response.url) - else: - if "authkey.php" in response.url: - console.print(f"[red]No DL link in 
response, So unable to download torrent but It may have uploaded, go check") - print(response.content) - console.print(f"[red]Got response code = {response.status_code}") - print(data) + data = { + 'image': '', + 'title': mtv_name, + 'category': cat_id, + 'Resolution': resolution_id, + 'source': source_id, + 'origin': origin_id, + 'taglist': des_tags, + 'desc': desc, + 'groupDesc': group_desc, + 'ignoredupes': '1', + 'genre_tags': '---', + 'autocomplete_toggle': 'on', + 'fontfont': '-1', + 'fontsize': '-1', + 'auth': await self.get_auth(cookiefile), + 'anonymous': anon, + 'submit': 'true', + } + + if not meta['debug']: + with requests.Session() as session: + with open(cookiefile, 'rb') as cf: + session.cookies.update(pickle.load(cf)) + response = session.post(url=self.upload_url, data=data, files=files) + try: + if "torrents.php" in response.url: + console.print(response.url) else: - console.print(f"[red]Upload Failed, Doesnt look like you are logged in") - print(response.content) - print(data) - except: - console.print(f"[red]It may have uploaded, go check") - console.print(data) - print(traceback.print_exc()) - else: - console.print(f"[cyan]Request Data:") - console.print(data) - return + if "authkey.php" in response.url: + console.print(f"[red]No DL link in response, It may have uploaded, check manually.") + else: + console.print(f"[red]Upload Failed. It doesn't look like you are logged in.") + except: + console.print(f"[red]It may have uploaded, check manually.") + print(traceback.print_exc()) + else: + console.print(f"[cyan]Request Data:") + console.print(data) + return + console.print("[red]All image hosts failed. Please check your configuration.") + return async def edit_desc(self, meta): base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() From 26c5db778cf19b476047aafdba4c172db624579a Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 20 Aug 2024 18:35:15 +1000 Subject: [PATCH 077/741] ADD traceback import --- src/trackers/MTV.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 2a7d3132f..4d9d81423 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -8,6 +8,7 @@ import cli_ui import pickle import re +import traceback from pathlib import Path from str2bool import str2bool from src.trackers.COMMON import COMMON From 600ad42016d061646eccf3e91a7acc03394ab6bb Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 20 Aug 2024 18:42:18 +1000 Subject: [PATCH 078/741] FIX - LFE channel layout --- src/prep.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index a94cadd75..dd5855710 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1606,7 +1606,9 @@ def get_audio_v2(self, mi, meta, bdinfo): channel_layout = mi['media']['track'][track_num]['ChannelLayout_Original'] except: channel_layout = "" - if "LFE" in channel_layout: + + # Ensure channel_layout is not None or an empty string before iterating + if channel_layout and "LFE" in channel_layout: chan = f"{int(channels) - 1}.1" elif channel_layout == "": if int(channels) <= 2: From b3de856094971c088b8218de38d95ba48b81c3ff Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 20 Aug 2024 21:05:02 +1000 Subject: [PATCH 079/741] FIX - MTV only reupload if original host not allowed --- src/trackers/MTV.py | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 4d9d81423..de14e5dc9 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -53,9 
+53,11 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): if not current_img_host or current_img_host not in approved_image_hosts: console.print("[red]Your preferred image host is not supported at MTV, re-uploading to an allowed image host.") img_host_index += 1 + retry_mode = True # Set retry_mode to True if switching to an approved host continue meta['imghost'] = current_img_host + retry_mode = False # No retry unless switching to another host torrent_filename = "BASE" torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent" torrent = Torrent.read(torrent_path) @@ -80,14 +82,25 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): # Screenshot and upload process prep.screenshots(Path(meta['path']), meta['name'], meta['uuid'], meta['base_dir'], meta) return_dict = {} - prep.upload_screens(meta, screens=meta['screens'], img_host_num=img_host_index, i=0, total_screens=meta['screens'], custom_img_list=[], return_dict=return_dict, retry_mode=True) - + + # Only enable retry_mode if switching to an approved image host + prep.upload_screens( + meta, + screens=meta['screens'], + img_host_num=img_host_index, + i=0, + total_screens=meta['screens'], + custom_img_list=[], + return_dict=return_dict, + retry_mode=retry_mode + ) + + + # Update meta['image_list'] with uploaded images + meta['image_list'] = return_dict.get('image_list', []) # Ensure images are from approved hosts if not all(any(x in image['raw_url'] for x in approved_image_hosts) for image in meta['image_list']): - console.print("[red]Unsupported image host detected, please use one of the approved imagehosts") + console.print("[red]Unsupported image host detected, please use one of the approved image hosts") img_host_index += 1 continue From daf030667fd29e2f0c94e6215662c8fb471c59cb Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 20 Aug 2024 21:59:41 +1000 Subject: [PATCH 080/741] ANT - better .torrent constraints --- src/trackers/ANT.py | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index 1bc0abdc6..670d63595 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -76,10 +76,28 @@ async def upload(self, meta): torrent_filename = "BASE" torrent = Torrent.read(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") total_size = sum(file.size for file in torrent.files) - piece_size = math.ceil(total_size / 999) - piece_size = max(131072, min(piece_size, 16777216)) - if torrent.pieces > 1000: - console.print("[red]Torrent has more than 1000 pieces.
Generating a new .torrent") + def calculate_pieces_and_file_size(total_size, piece_size): + num_pieces = math.ceil(total_size / piece_size) + torrent_file_size = 20 + (num_pieces * 20) # Approximate size: 20 bytes header + 20 bytes per piece + return num_pieces, torrent_file_size + + # Start with 4 MiB piece size and adjust if necessary + piece_size = 4194304 # 4 MiB + num_pieces, torrent_file_size = calculate_pieces_and_file_size(total_size, piece_size) + while not (1000 <= num_pieces <= 2000 and torrent_file_size <= 81920): # 80 KiB = 81920 bytes + if num_pieces < 1000: + piece_size //= 2 + if piece_size < 16384: # 16 KiB is the smallest allowed by the BitTorrent spec + piece_size = 16384 + break + elif num_pieces > 2000 or torrent_file_size > 81920: + piece_size *= 2 + num_pieces, torrent_file_size = calculate_pieces_and_file_size(total_size, piece_size) + + if not (1000 <= num_pieces <= 2000): + console.print("[red]Unable to generate a .torrent with the required number of pieces and file size constraints") + else: + console.print("[yellow]Regenerating torrent to fit within 1000-2000 pieces and 80 KiB size limit.") from src.prep import Prep prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) prep.create_torrent(meta, Path(meta['path']), "ANT", piece_size_max=piece_size) From 87d0bc2f524fe68ef9ddc6399084e0f6ab689e13 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 21 Aug 2024 00:22:02 +1000 Subject: [PATCH 081/741] Update VapourSynth screenshots Add feedback on indexing Allow without source/encode Using existing screens.txt from tmp directory if present and valid Switch libplacebo to true --- src/vs.py | 108 ++++++++++++++++++++++++++++++++---------------------- 1 file changed, 64 insertions(+), 44 deletions(-) diff --git a/src/vs.py b/src/vs.py index 616b1844e..baef7810c 100644 --- a/src/vs.py +++ b/src/vs.py @@ -8,56 +8,75 @@ import os, sys from functools import partial -# Modified version of https://git.concertos.live/AHD/ahd_utils/src/branch/master/screengn.py -def vs_screengn(source, encode, filter_b_frames, num, dir): - # prefer ffms2, fallback to lsmash for m2ts +def CustomFrameInfo(clip, text): + def FrameProps(n, f, clip): + # Modify the frame properties extraction here to avoid the decode issue + info = f"Frame {n} of {clip.num_frames}\nPicture type: {f.props['_PictType']}" + # Adding the frame information as text to the clip + return core.text.Text(clip, info) + + # Apply FrameProps to each frame + return core.std.FrameEval(clip, partial(FrameProps, clip=clip), prop_src=clip) + +def vs_screengn(source, encode=None, filter_b_frames=False, num=5, dir="."): + screens_file = os.path.join(dir, "screens.txt") + + # Check if screens.txt already exists and use it if valid + if os.path.exists(screens_file): + with open(screens_file, "r") as txt: + frames = [int(line.strip()) for line in txt.readlines()] + if len(frames) == num and all(isinstance(f, int) and 0 <= f for f in frames): + print(f"Using existing frame numbers from {screens_file}") + else: + frames = [] + else: + frames = [] + + # Indexing the source using ffms2 or lsmash for m2ts files if str(source).endswith(".m2ts"): + print(f"Indexing {source} with LSMASHSource... This may take a while.") src = core.lsmas.LWLibavSource(source) else: - src = core.ffms2.Source(source, cachefile=f"{os.path.abspath(dir)}{os.sep}ffms2.ffms2") + cachefile = f"{os.path.abspath(dir)}{os.sep}ffms2.ffms2" + if not os.path.exists(cachefile): + print(f"Indexing {source} with ffms2... 
This may take a while.") + try: + src = core.ffms2.Source(source, cachefile=cachefile) + except vs.Error as e: + print(f"Error during indexing: {str(e)}") + raise + if os.path.exists(cachefile): + print(f"Indexing completed and cached at: {cachefile}") + else: + print("Indexing did not complete as expected.") - # we don't allow encodes in non-mkv containers anyway + # Check if encode is provided if encode: - enc = core.ffms2.Source(encode) + if not os.path.exists(encode): + print(f"Encode file {encode} not found. Skipping encode processing.") + encode = None + else: + enc = core.ffms2.Source(encode) - # since encodes are optional we use source length + # Use source length if encode is not provided num_frames = len(src) - # these values don't really matter, they're just to cut off intros/credits start, end = 1000, num_frames - 10000 - # filter b frames function for frameeval - def filter_ftype(n, f, clip, frame, frames, ftype="B"): - if f.props["_PictType"].decode() == ftype: - frames.append(frame) - return clip - - # generate random frame numbers, sort, and format for ScreenGen - # if filter option is on filter out non-b frames in encode - frames = [] - if filter_b_frames: - with open(os.devnull, "wb") as f: - i = 0 - while len(frames) < num: - frame = random.randint(start, end) - enc_f = enc[frame] - enc_f = enc_f.std.FrameEval(partial(filter_ftype, clip=enc_f, frame=frame, frames=frames), enc_f) - enc_f.output(f) - i += 1 - if i > num * 10: - raise ValueError("screengn: Encode doesn't seem to contain desired picture type frames.") - else: + # Generate random frame numbers for screenshots if not using existing ones + if not frames: for _ in range(num): frames.append(random.randint(start, end)) - frames = sorted(frames) - frames = [f"{x}\n" for x in frames] + frames = sorted(frames) + frames = [f"{x}\n" for x in frames] - # write to file, we might want to re-use these later - with open("screens.txt", "w") as txt: - txt.writelines(frames) + # Write the frame numbers to a file for reuse + with open(screens_file, "w") as txt: + txt.writelines(frames) + print(f"Generated and saved new frame numbers to {screens_file}") - # if an encode exists we have to crop and resize + # If an encode exists and is provided, crop and resize if encode: - if src.width != enc.width and src.height != enc.height: + if src.width != enc.width or src.height != enc.height: ref = zresize(enc, preset=src.height) crop = [(src.width - ref.width) / 2, (src.height - ref.height) / 2] src = src.std.Crop(left=crop[0], right=crop[0], top=crop[1], bottom=crop[1]) @@ -69,19 +88,20 @@ def filter_ftype(n, f, clip, frame, frames, ftype="B"): height = enc.height src = zresize(src, width=width, height=height) - # tonemap HDR + # Apply tonemapping if the source is HDR tonemapped = False if src.get_frame(0).props["_Primaries"] == 9: tonemapped = True - src = DynamicTonemap(src, src_fmt=False, libplacebo=False, adjust_gamma=True) + src = DynamicTonemap(src, src_fmt=False, libplacebo=True, adjust_gamma=True) if encode: - enc = DynamicTonemap(enc, src_fmt=False, libplacebo=False, adjust_gamma=True) + enc = DynamicTonemap(enc, src_fmt=False, libplacebo=True, adjust_gamma=True) + + # Use the custom FrameInfo function + if tonemapped: + src = CustomFrameInfo(src, "Tonemapped") - # add FrameInfo - if tonemapped == True: - src = FrameInfo(src, "Tonemapped") + # Generate screenshots ScreenGen(src, dir, "a") if encode: - if tonemapped == True: - enc = FrameInfo(enc, "Encode (Tonemapped)") + enc = CustomFrameInfo(enc, "Encode (Tonemapped)") 
ScreenGen(enc, dir, "b") \ No newline at end of file From 8aee52850134ea9b54e0c4451b7d37b7f19e28cd Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 21 Aug 2024 00:52:57 +1000 Subject: [PATCH 082/741] VapourSynth optimize images --- src/vs.py | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/src/vs.py b/src/vs.py index baef7810c..6684ca6ea 100644 --- a/src/vs.py +++ b/src/vs.py @@ -6,6 +6,8 @@ from typing import Union, List from pathlib import Path import os, sys +import platform +import multiprocessing from functools import partial def CustomFrameInfo(clip, text): @@ -18,7 +20,26 @@ def FrameProps(n, f, clip): # Apply FrameProps to each frame return core.std.FrameEval(clip, partial(FrameProps, clip=clip), prop_src=clip) -def vs_screengn(source, encode=None, filter_b_frames=False, num=5, dir="."): +def optimize_images(image, config): + import platform # Ensure platform is imported here + if config.get('optimize_images', True): + if os.path.exists(image): + try: + pyver = platform.python_version_tuple() + if int(pyver[0]) == 3 and int(pyver[1]) >= 7: + import oxipng + if os.path.getsize(image) >= 16000000: + oxipng.optimize(image, level=6) + else: + oxipng.optimize(image, level=3) + except Exception as e: + print(f"Image optimization failed: {e}") + return + +def vs_screengn(source, encode=None, filter_b_frames=False, num=5, dir=".", config=None): + if config is None: + config = {'optimize_images': True} # Default configuration + screens_file = os.path.join(dir, "screens.txt") # Check if screens.txt already exists and use it if valid @@ -104,4 +125,9 @@ def vs_screengn(source, encode=None, filter_b_frames=False, num=5, dir="."): ScreenGen(src, dir, "a") if encode: enc = CustomFrameInfo(enc, "Encode (Tonemapped)") - ScreenGen(enc, dir, "b") \ No newline at end of file + ScreenGen(enc, dir, "b") + + # Optimize images + for i in range(1, num + 1): + image_path = os.path.join(dir, f"{str(i).zfill(2)}a.png") + optimize_images(image_path, config) \ No newline at end of file From 3d17a23fcab2c089abe96a38283f004e2a61eec3 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 21 Aug 2024 07:38:28 +1000 Subject: [PATCH 083/741] FIX - Audio parameters missing from mediainfo --- src/prep.py | 160 +++++++++++++++++++++++++++------------------------- 1 file changed, 82 insertions(+), 78 deletions(-) diff --git a/src/prep.py b/src/prep.py index dd5855710..1ebed8b11 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1557,43 +1557,38 @@ def get_romaji(self, tmdb_name, mal): def get_audio_v2(self, mi, meta, bdinfo): extra = dual = "" has_commentary = False - #Get formats - if bdinfo != None: #Disks + + # Get formats + if bdinfo is not None: # Disks format_settings = "" - format = bdinfo['audio'][0]['codec'] + format = bdinfo.get('audio', [{}])[0].get('codec', '') commercial = format - try: - additional = bdinfo['audio'][0]['atmos_why_you_be_like_this'] - except: - additional = "" - #Channels - chan = bdinfo['audio'][0]['channels'] - + additional = bdinfo.get('audio', [{}])[0].get('atmos_why_you_be_like_this', '') - else: + # Channels + chan = bdinfo.get('audio', [{}])[0].get('channels', '') + else: track_num = 2 - for i in range(len(mi['media']['track'])): - t = mi['media']['track'][i] - if t['@type'] != "Audio": - pass - else: - if t.get('Language', "") == meta['original_language'] and "commentary" not in t.get('Title', '').lower(): - track_num = i - break - format = mi['media']['track'][track_num]['Format'] - commercial = 
mi['media']['track'][track_num].get('Format_Commercial', '') - if mi['media']['track'][track_num].get('Language', '') == "zxx": + tracks = mi.get('media', {}).get('track', []) + + for i, t in enumerate(tracks): + if t.get('@type') != "Audio": + continue + if t.get('Language', '') == meta.get('original_language', '') and "commentary" not in t.get('Title', '').lower(): + track_num = i + break + + track = tracks[track_num] if len(tracks) > track_num else {} + format = track.get('Format', '') + commercial = track.get('Format_Commercial', '') + + if track.get('Language', '') == "zxx": meta['silent'] = True - try: - additional = mi['media']['track'][track_num]['Format_AdditionalFeatures'] - # format = f"{format} {additional}" - except: - additional = "" - try: - format_settings = mi['media']['track'][track_num]['Format_Settings'] - if format_settings in ['Explicit']: - format_settings = "" - except: + + additional = track.get('Format_AdditionalFeatures', '') + + format_settings = track.get('Format_Settings', '') + if format_settings in ['Explicit']: format_settings = "" #Channels channels = mi['media']['track'][track_num].get('Channels_Original', mi['media']['track'][track_num]['Channels']) @@ -1618,46 +1613,49 @@ def get_audio_v2(self, mi, meta, bdinfo): else: chan = f"{channels}.0" - if meta['original_language'] != 'en': + if meta.get('original_language', '') != 'en': eng, orig = False, False try: - for t in mi['media']['track']: - if t['@type'] != "Audio": - pass - else: - audio_language = t.get('Language', '') - # Check for English Language Track - if audio_language == "en" and "commentary" not in t.get('Title', '').lower(): - eng = True - # Check for original Language Track - if audio_language == meta['original_language'] and "commentary" not in t.get('Title', '').lower(): - orig = True - # Catch Chinese / Norwegian / Spanish variants - variants = ['zh', 'cn', 'cmn', 'no', 'nb', 'es-419', 'es-ES', 'es'] - if audio_language in variants and meta['original_language'] in variants: - orig = True - # Check for additional, bloated Tracks - if audio_language != meta['original_language'] and audio_language != "en": - if meta['original_language'] not in variants and audio_language not in variants: - audio_language = "und" if audio_language == "" else audio_language - console.print(f"[bold red]This release has a(n) {audio_language} audio track, and may be considered bloated") - time.sleep(5) - if eng and orig == True: + for t in mi.get('media', {}).get('track', []): + if t.get('@type') != "Audio": + continue + + audio_language = t.get('Language', '') + + # Check for English Language Track + if audio_language == "en" and "commentary" not in t.get('Title', '').lower(): + eng = True + + # Check for original Language Track + if audio_language == meta['original_language'] and "commentary" not in t.get('Title', '').lower(): + orig = True + + # Catch Chinese / Norwegian / Spanish variants + variants = ['zh', 'cn', 'cmn', 'no', 'nb', 'es-419', 'es-ES', 'es'] + if audio_language in variants and meta['original_language'] in variants: + orig = True + + # Check for additional, bloated Tracks + if audio_language != meta['original_language'] and audio_language != "en": + if meta['original_language'] not in variants and audio_language not in variants: + audio_language = "und" if audio_language == "" else audio_language + console.print(f"[bold red]This release has a(n) {audio_language} audio track, and may be considered bloated") + time.sleep(5) + + if eng and orig: dual = "Dual-Audio" - elif eng == True and orig == 
False and meta['original_language'] not in ['zxx', 'xx', None] and meta.get('no_dub', False) == False: + elif eng and not orig and meta['original_language'] not in ['zxx', 'xx', None] and not meta.get('no_dub', False): dual = "Dubbed" - except Exception: - console.print(traceback.print_exc()) - pass - - - for t in mi['media']['track']: - if t['@type'] != "Audio": + except Exception as e: + console.print(traceback.format_exc()) pass - else: - if "commentary" in t.get('Title', '').lower(): - has_commentary = True - + + for t in mi.get('media', {}).get('track', []): + if t.get('@type') != "Audio": + continue + + if "commentary" in t.get('Title', '').lower(): + has_commentary = True #Convert commercial name to naming conventions audio = { @@ -1710,34 +1708,40 @@ def get_audio_v2(self, mi, meta, bdinfo): search_format = True - for key, value in commercial_names.items(): - if key in commercial: - codec = value - search_format = False - if "Atmos" in commercial or format_extra.get(additional, "") == " Atmos": - extra = " Atmos" + # Ensure commercial and additional are not None before iterating + if commercial: + for key, value in commercial_names.items(): + if key in commercial: + codec = value + search_format = False + if "Atmos" in commercial or format_extra.get(additional, "") == " Atmos": + extra = " Atmos" + if search_format: codec = audio.get(format, "") + audio_extra.get(additional, "") extra = format_extra.get(additional, "") + + # Ensure format_settings is not None before looking it up format_settings = format_settings_extra.get(format_settings, "") if format_settings == "EX" and chan == "5.1": format_settings = "EX" else: format_settings = "" + # Ensure codec is not left empty if codec == "": codec = format - + + # Ensure additional and channels are not None before using them if format.startswith("DTS"): - if additional.endswith("X"): + if additional and additional.endswith("X"): codec = "DTS:X" chan = f"{int(channels) - 1}.1" if format == "MPEG Audio": codec = mi['media']['track'][2].get('CodecID_Hint', '') - - - audio = f"{dual} {codec} {format_settings} {chan}{extra}" + # Ensure audio is constructed properly even with potential None values + audio = f"{dual} {codec or ''} {format_settings or ''} {chan or ''}{extra or ''}" audio = ' '.join(audio.split()) return audio, chan, has_commentary From 50f6cdd7010135bf14d16070d01fac5117c06c29 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 23 Aug 2024 05:20:49 +1000 Subject: [PATCH 084/741] Allow piece size > 16 MiB ANT has realistic piece count + .torrent size requirements for uploaded .torrents, this code change respects those requirements. Existing torrent hashes are not affected by this change, but all newly created torrents and torrents needed for ANT will use the following constraints: Piece count between 1000 & 2000 pieces with a .torrent size < 100 KiB. Perhaps in the future I might add an argument allowing override. 
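For reference, the constraint arithmetic described above can be checked with a few lines of standalone Python. This is only an illustrative sketch mirroring the doubling/halving loop in the diff below; the function name and the 40 GiB input are made up for the example:

    import math

    def pick_piece_size(total_size, min_size=16384, max_size=67108864):
        # Start at 4 MiB and grow/shrink the piece size until the torrent
        # has 1000-2000 pieces and the .torrent stays under 100 KiB
        # (approximated as 20 bytes of header plus 20 bytes of SHA-1 per piece).
        piece_size = 4 * 1024 * 1024
        while True:
            num_pieces = math.ceil(total_size / piece_size)
            torrent_file_size = 20 + (num_pieces * 20)
            if 1000 <= num_pieces <= 2000 and torrent_file_size <= 102400:
                return piece_size
            if num_pieces < 1000:
                piece_size //= 2   # too few pieces: shrink them
                if piece_size <= min_size:
                    return min_size
            else:
                piece_size *= 2    # too many pieces or oversized .torrent: grow them
                if piece_size >= max_size:
                    return max_size

    print(pick_piece_size(40 * 1024**3))   # -> 33554432 (32 MiB pieces)

A 40 GiB payload lands on 32 MiB pieces: 1280 pieces and roughly a 25 KiB .torrent, satisfying both bounds, whereas the old 16 MiB cap would have produced 2560 pieces.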
--- src/prep.py | 152 ++++++++++++++++++++++++-------------------- src/trackers/ANT.py | 33 ++++------ src/trackers/MTV.py | 82 ++++++++---------------- upload.py | 7 +- 4 files changed, 126 insertions(+), 148 deletions(-) diff --git a/src/prep.py b/src/prep.py index 1ebed8b11..e991ba21e 100644 --- a/src/prep.py +++ b/src/prep.py @@ -34,6 +34,7 @@ import tmdbsimple as tmdb from datetime import datetime, date from difflib import SequenceMatcher + import torf from torf import Torrent import base64 import time @@ -2079,19 +2080,72 @@ def get_edition(self, video, bdinfo, filelist, manual_edition): return edition, repack + """ + Create Torrent + """ + class CustomTorrent(torf.Torrent): + # Ensure the piece size is within the desired limits + torf.Torrent.piece_size_min = 16384 # 16 KiB + torf.Torrent.piece_size_max = 67108864 # 64 MiB + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + # Calculate and set the piece size + total_size = self._calculate_total_size() + piece_size = self.calculate_piece_size(total_size, self.piece_size_min, self.piece_size_max) + self.piece_size = piece_size + + @property + def piece_size(self): + return self._piece_size + + @piece_size.setter + def piece_size(self, value): + if value is None: + total_size = self._calculate_total_size() + value = self.calculate_piece_size(total_size, self.piece_size_min, self.piece_size_max) + self._piece_size = value + self.metainfo['info']['piece length'] = value # Ensure 'piece length' is set + + @classmethod + def calculate_piece_size(cls, total_size, min_size, max_size): + our_min_size = 16384 + our_max_size = 67108864 + # Start with a piece size of 4 MiB + piece_size = 4194304 # 4 MiB in bytes + num_pieces = math.ceil(total_size / piece_size) + torrent_file_size = 20 + (num_pieces * 20) # Approximate .torrent size: 20 bytes header + 20 bytes per piece + + # Adjust the piece size to fit within the constraints + while not (1000 <= num_pieces <= 2000 and torrent_file_size <= 102400): # 100 KiB .torrent size limit + if num_pieces < 1000: + piece_size //= 2 + if piece_size < our_min_size: + piece_size = our_min_size + break + elif num_pieces > 2000 or torrent_file_size > 102400: + piece_size *= 2 + if piece_size > our_max_size: + piece_size = our_max_size + break + num_pieces = math.ceil(total_size / piece_size) + torrent_file_size = 20 + (num_pieces * 20) + return piece_size + def _calculate_total_size(self): + return sum(file.size for file in self.files) + def validate_piece_size(self): + if not hasattr(self, '_piece_size') or self._piece_size is None: + self.piece_size = self.calculate_piece_size(self._calculate_total_size(), self.piece_size_min, self.piece_size_max) + self.metainfo['info']['piece length'] = self.piece_size # Ensure 'piece length' is set - """ - Create Torrent - """ - def create_torrent(self, meta, path, output_filename, piece_size_max): - piece_size_max = int(piece_size_max) if piece_size_max is not None else 0 - if meta['isdir'] == True: + def create_torrent(self, meta, path, output_filename): + # Handle directories and file inclusion logic + if meta['isdir']: if meta['keep_folder']: cli_ui.info('--keep-folder was specified. 
Using complete folder for torrent creation.') - path = path else: os.chdir(path) globs = glob.glob1(path, "*.mkv") + glob.glob1(path, "*.mp4") + glob.glob1(path, "*.ts") @@ -2106,66 +2160,30 @@ def create_torrent(self, meta, path, output_filename, piece_size_max): else: exclude = ["*.*", "*sample.mkv", "!sample*.*"] include = ["*.mkv", "*.mp4", "*.ts"] - torrent = Torrent(path, - trackers = ["https://fake.tracker"], - source = "L4G", - private = True, - exclude_globs = exclude or [], - include_globs = include or [], - creation_date = datetime.now(), - comment = "Created by L4G's Upload Assistant", - created_by = "L4G's Upload Assistant") - file_size = torrent.size - if file_size < 268435456: # 256 MiB File / 256 KiB Piece Size - piece_size = 18 - piece_size_text = "256KiB" - elif file_size < 1073741824: # 1 GiB File/512 KiB Piece Size - piece_size = 19 - piece_size_text = "512KiB" - elif file_size < 2147483648 or piece_size_max == 1: # 2 GiB File/1 MiB Piece Size - piece_size = 20 - piece_size_text = "1MiB" - elif file_size < 4294967296 or piece_size_max == 2: # 4 GiB File/2 MiB Piece Size - piece_size = 21 - piece_size_text = "2MiB" - elif file_size < 8589934592 or piece_size_max == 4: # 8 GiB File/4 MiB Piece Size - piece_size = 22 - piece_size_text = "4MiB" - elif file_size < 17179869184 or piece_size_max == 8: # 16 GiB File/8 MiB Piece Size - piece_size = 23 - piece_size_text = "8MiB" - else: # 16MiB Piece Size - piece_size = 24 - piece_size_text = "16MiB" - console.print(f"[bold yellow]Creating .torrent with a piece size of {piece_size_text}... (No valid --torrenthash was provided to reuse)") - if meta.get('torrent_creation') != None: - torrent_creation = meta['torrent_creation'] - else: - torrent_creation = self.config['DEFAULT'].get('torrent_creation', 'torf') - if torrent_creation == 'torrenttools': - args = ['torrenttools', 'create', '-a', 'https://fake.tracker', '--private', 'on', '--piece-size', str(2**piece_size), '--created-by', "L4G's Upload Assistant", '--no-cross-seed','-o', f"{meta['base_dir']}/tmp/{meta['uuid']}/{output_filename}.torrent"] - if not meta['is_disc']: - args.extend(['--include', '^.*\.(mkv|mp4|ts)$']) - args.append(path) - err = subprocess.call(args) - if err != 0: - args[3] = "OMITTED" - console.print(f"[bold red]Process execution {args} returned with error code {err}.") - elif torrent_creation == 'mktorrent': - args = ['mktorrent', '-a', 'https://fake.tracker', '-p', f'-l {piece_size}', '-o', f"{meta['base_dir']}/tmp/{meta['uuid']}/{output_filename}.torrent", path] - err = subprocess.call(args) - if err != 0: - args[2] = "OMITTED" - console.print(f"[bold red]Process execution {args} returned with error code {err}.") - else: - torrent.piece_size = 2**piece_size - torrent.piece_size_max = 16777216 - torrent.generate(callback=self.torf_cb, interval=5) - torrent.write(f"{meta['base_dir']}/tmp/{meta['uuid']}/{output_filename}.torrent", overwrite=True) - torrent.verify_filesize(path) + + # Create and write the new torrent using the CustomTorrent class + torrent = self.CustomTorrent( + path=path, + trackers=["https://fake.tracker"], + source="L4G", + private=True, + exclude_globs=exclude or [], + include_globs=include or [], + creation_date=datetime.now(), + comment="Created by L4G's Upload Assistant", + created_by="L4G's Upload Assistant" + ) + + # Ensure piece size is validated before writing + torrent.validate_piece_size() + + # Generate and write the new torrent + torrent.generate(callback=self.torf_cb, interval=5) + 
torrent.write(f"{meta['base_dir']}/tmp/{meta['uuid']}/{output_filename}.torrent", overwrite=True) + torrent.verify_filesize(path) + console.print("[bold green].torrent created", end="\r") return torrent - def torf_cb(self, torrent, filepath, pieces_done, pieces_total): # print(f'{pieces_done/pieces_total*100:3.0f} % done') @@ -2197,8 +2215,6 @@ def create_base_from_existing_torrent(self, torrentpath, base_dir, uuid): Torrent.copy(base_torrent).write(f"{base_dir}/tmp/{uuid}/BASE.torrent", overwrite=True) - - """ Upload Screenshots """ @@ -3193,4 +3209,4 @@ async def search_tvmaze(self, filename, year, imdbID, tvdbID): if int(tvdbID) == 0: if show.get('externals', {}).get('tvdb', '0') != None: tvdbID = show.get('externals', {}).get('tvdb', '0') - return tvmazeID, imdbID, tvdbID + return tvmazeID, imdbID, tvdbID \ No newline at end of file diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index 670d63595..cd1cd7690 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -9,11 +9,9 @@ import math from torf import Torrent from pathlib import Path - from src.trackers.COMMON import COMMON from src.console import console - class ANT(): """ Edit for Tracker: @@ -76,32 +74,27 @@ async def upload(self, meta): torrent_filename = "BASE" torrent = Torrent.read(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") total_size = sum(file.size for file in torrent.files) + + # Calculate the number of pieces and the torrent file size based on the current piece size def calculate_pieces_and_file_size(total_size, piece_size): num_pieces = math.ceil(total_size / piece_size) torrent_file_size = 20 + (num_pieces * 20) # Approximate size: 20 bytes header + 20 bytes per piece return num_pieces, torrent_file_size - # Start with 4 MiB piece size and adjust if necessary - piece_size = 4194304 # 4 MiB - num_pieces, torrent_file_size = calculate_pieces_and_file_size(total_size, piece_size) - while not (1000 <= num_pieces <= 2000 and torrent_file_size <= 81920): # 80 KiB = 81920 bytes - if num_pieces < 1000: - piece_size //= 2 - if piece_size < 16384: # 16 KiB is the smallest allowed by the BitTorrent spec - piece_size = 16384 - break - elif num_pieces > 2000 or torrent_file_size > 81920: - piece_size *= 2 - num_pieces, torrent_file_size = calculate_pieces_and_file_size(total_size, piece_size) + # Check if the existing torrent fits within the constraints + num_pieces, torrent_file_size = calculate_pieces_and_file_size(total_size, torrent.piece_size) - if not (1000 <= num_pieces <= 2000): - console.print("[red]Unable to generate a .torrent with the required number of pieces and file size constraints") - else: - console.print("[yellow]Regenerating torrent to fit within 1000-2000 pieces and 80 KiB size limit.") + # If the torrent doesn't meet the constraints, regenerate it + if not (1000 <= num_pieces <= 2000) or torrent_file_size > 102400: + console.print("[yellow]Regenerating torrent to fit within 1000-2000 pieces and 100 KiB .torrent size limit needed for ANT.") from src.prep import Prep prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) - prep.create_torrent(meta, Path(meta['path']), "ANT", piece_size_max=piece_size) + + # Call create_torrent with the default piece size calculation + prep.create_torrent(meta, Path(meta['path']), "ANT") torrent_filename = "ANT" + else: + console.print("[green]Existing torrent meets the constraints.") await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename) flags = await self.get_flags(meta) @@ -190,4 
+183,4 @@ async def search_existing(self, meta): console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes + return dupes \ No newline at end of file diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index de14e5dc9..f5d3069a2 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -12,6 +12,7 @@ from pathlib import Path from str2bool import str2bool from src.trackers.COMMON import COMMON +from datetime import datetime, date class MTV(): """ @@ -66,9 +67,32 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): from src.prep import Prep prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) - if torrent.piece_size > 8388608: + # Check if the piece size exceeds 8 MiB and regenerate the torrent if needed + if torrent.piece_size > 8388608: # 8 MiB in bytes console.print("[red]Piece size is OVER 8M and does not work on MTV. Generating a new .torrent") - prep.create_torrent(meta, Path(meta['path']), "MTV", piece_size_max=8) + + # Create a new torrent with piece size explicitly set to 8 MiB + new_torrent = prep.CustomTorrent( + path=Path(meta['path']), + trackers=["https://fake.tracker"], + source="L4G", + private=True, + exclude_globs=["*.*", "*sample.mkv", "!sample*.*"], + include_globs=["*.mkv", "*.mp4", "*.ts"], + creation_date=datetime.now(), + comment="Created by L4G's Upload Assistant", + created_by="L4G's Upload Assistant" + ) + + # Explicitly set the piece size and update metainfo + new_torrent.piece_size = 8388608 # 8 MiB in bytes + new_torrent.metainfo['info']['piece length'] = 8388608 # Ensure 'piece length' is set + + # Validate and write the new torrent + new_torrent.validate_piece_size() + new_torrent.generate(callback=prep.torf_cb, interval=5) + new_torrent.write(f"{meta['base_dir']}/tmp/{meta['uuid']}/MTV.torrent", overwrite=True) + torrent_filename = "MTV" await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename) @@ -207,7 +231,6 @@ async def edit_group_desc(self, meta): return description - async def edit_name(self, meta): mtv_name = meta['uuid'] # Try to use original filename if possible @@ -232,47 +255,6 @@ async def edit_name(self, meta): mtv_name = mtv_name.replace(' ', '.').replace('..', '.') return mtv_name - - # Not needed as its optional - # async def get_poster(self, meta): - # if 'poster_image' in meta: - # return meta['poster_image'] - # else: - # if meta['poster'] is not None: - # poster = meta['poster'] - # else: - # if 'cover' in meta['imdb_info'] and meta['imdb_info']['cover'] is not None: - # poster = meta['imdb_info']['cover'] - # else: - # console.print(f'[red]No poster can be found for this EXITING!!') - # return - # with requests.get(url=poster, stream=True) as r: - # with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['clean_name']}-poster.jpg", - # 'wb') as f: - # shutil.copyfileobj(r.raw, f) - # - # url = "https://api.imgbb.com/1/upload" - # data = { - # 'key': self.config['DEFAULT']['imgbb_api'], - # 'image': base64.b64encode(open(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['clean_name']}-poster.jpg", "rb").read()).decode('utf8') - # } - # try: - # console.print("[yellow]uploading poster to imgbb") - # response = requests.post(url, data=data) - # response = response.json() - # if response.get('success') != True: - # console.print(response, 'red') - # img_url = response['data'].get('medium', response['data']['image'])['url'] - # th_url = 
response['data']['thumb']['url'] - # web_url = response['data']['url_viewer'] - # raw_url = response['data']['image']['url'] - # meta['poster_image'] = raw_url - # console.print(f'[green]{raw_url} ') - # except Exception: - # console.print("[yellow]imgbb failed to upload cover") - # - # return raw_url - async def get_res_id(self, resolution): resolution_id = { '8640p':'0', @@ -307,7 +289,6 @@ async def get_cat_id(self, meta): else: return 3 - async def get_source_id(self, meta): if meta['is_disc'] == 'DVD': return '1' @@ -331,7 +312,6 @@ async def get_source_id(self, meta): }.get(meta['type'], '0') return type_id - async def get_origin_id(self, meta): if meta['personalrelease']: return '4' @@ -340,8 +320,6 @@ async def get_origin_id(self, meta): # returning P2P else: return '3' - - async def get_tags(self, meta): tags = [] # Genres @@ -361,8 +339,6 @@ async def get_tags(self, meta): for each in ['remux', 'WEB.DL', 'WEBRip', 'HDTV', 'BluRay', 'DVD', 'HDDVD']: if (each.lower().replace('.', '') in meta['type'].lower()) or (each.lower().replace('-', '') in meta['source']): tags.append(each) - - # series tags if meta['category'] == "TV": if meta.get('tv_pack', 0) == 0: @@ -385,8 +361,6 @@ async def get_tags(self, meta): else: tags.append('hd.movie') - - # Audio tags audio_tag = "" for each in ['dd', 'ddp', 'aac', 'truehd', 'mp3', 'mp2', 'dts', 'dts.hd', 'dts.x']: @@ -422,8 +396,6 @@ async def get_tags(self, meta): tags = ' '.join(tags) return tags - - async def validate_credentials(self, meta): cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/MTV.pkl") if not os.path.exists(cookiefile): @@ -569,4 +541,4 @@ async def search_existing(self, meta): print(traceback.print_exc()) await asyncio.sleep(5) - return dupes + return dupes \ No newline at end of file diff --git a/upload.py b/upload.py index feb7ee52e..c2384a842 100644 --- a/upload.py +++ b/upload.py @@ -158,7 +158,6 @@ async def do_the_thing(base_dir): console.print(f"[red]There was an issue with your input. 
If you think this was not an issue, please make a report that includes the full command used.") exit() - base_meta = {k: v for k, v in meta.items()} for path in queue: meta = {k: v for k, v in base_meta.items()} @@ -207,11 +206,11 @@ async def do_the_thing(base_dir): if reuse_torrent != None: prep.create_base_from_existing_torrent(reuse_torrent, meta['base_dir'], meta['uuid']) if meta['nohash'] == False and reuse_torrent == None: - prep.create_torrent(meta, Path(meta['path']), "BASE", meta.get('piece_size_max', 0)) + prep.create_torrent(meta, Path(meta['path']), "BASE") if meta['nohash']: meta['client'] = "none" elif os.path.exists(os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent")) and meta.get('rehash', False) == True and meta['nohash'] == False: - prep.create_torrent(meta, Path(meta['path']), "BASE", meta.get('piece_size_max', 0)) + prep.create_torrent(meta, Path(meta['path']), "BASE") if int(meta.get('randomized', 0)) >= 1: prep.create_random_torrents(meta['base_dir'], meta['uuid'], meta['randomized'], meta['path']) @@ -244,8 +243,6 @@ async def do_the_thing(base_dir): if meta.get('manual', False): trackers.insert(0, "MANUAL") - - #################################### ####### Upload to Trackers ####### #################################### From 666cc0770f56bb430188382615769a8a1dd4dbe1 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 23 Aug 2024 11:01:33 +1000 Subject: [PATCH 085/741] FIX - image reuploading Remove unnecessary console messages --- src/prep.py | 22 ++-- src/trackers/MTV.py | 280 ++++++++++++++++++++++++-------------------- 2 files changed, 161 insertions(+), 141 deletions(-) diff --git a/src/prep.py b/src/prep.py index e991ba21e..eed05710f 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2221,9 +2221,8 @@ def create_base_from_existing_torrent(self, torrentpath, base_dir, uuid): def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=False): os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") initial_img_host = self.config['DEFAULT'][f'img_host_{img_host_num}'] - img_host = initial_img_host - console.print(f"[cyan]Starting image upload with host: {img_host}") - + img_host = meta['imghost'] # Use the correctly updated image host from meta + image_list = [] newhost_list = [] @@ -2236,11 +2235,12 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i image_glob.remove('POSTER.png') existing_images = meta.get('image_list', []) - # If the images are already uploaded, skip re-uploading unless in retry mode - if len(existing_images) >= total_screens and not retry_mode: + # Only skip uploading if retry_mode is False and the hosts match + if len(existing_images) >= total_screens and not retry_mode and img_host == initial_img_host: console.print(f"[yellow]Skipping upload because images are already uploaded to {img_host}. 
Existing images: {len(existing_images)}, Required: {total_screens}") return existing_images, total_screens + # Proceed with uploading images with Progress( TextColumn("[bold green]Uploading Screens..."), BarColumn(), @@ -2290,7 +2290,7 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i if response.get('status_code') != 200: console.print("[yellow]PT Screens failed, trying next image host") img_host_num += 1 - return self.upload_screens(meta, screens - i, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode) + return self.upload_screens(meta, screens - i, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=True) img_url = response['data']['image']['url'] raw_url = img_url web_url = img_url @@ -2307,7 +2307,7 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i if response.status_code != 200: console.print("[yellow]Pixhost failed, trying next image host") img_host_num += 1 - return self.upload_screens(meta, screens - i, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode) + return self.upload_screens(meta, screens - i, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=True) response = response.json() raw_url = response['th_url'].replace('https://t', 'https://img').replace('/thumbs/', '/images/') img_url = response['th_url'] @@ -2325,19 +2325,19 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i if response.get('status_code') != 200: console.print("[yellow]Lensdump failed, trying next image host") img_host_num += 1 - return self.upload_screens(meta, screens - i, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode) + return self.upload_screens(meta, screens - i, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=True) img_url = response['data']['image']['url'] raw_url = img_url web_url = response['data']['url_viewer'] else: console.print(f"[red]Unsupported image host: {img_host}") img_host_num += 1 - return self.upload_screens(meta, screens - i, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode) + return self.upload_screens(meta, screens - i, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=True) # Update progress bar and print the result on the same line progress.console.print(f"[cyan]Uploaded image {i+1}/{total_screens}: {raw_url}", end='\r') - # If the upload was successful, add the image details + # Add the image details to the list image_dict = {'img_url': img_url, 'raw_url': raw_url, 'web_url': web_url} image_list.append(image_dict) progress.advance(upload_task) @@ -2346,7 +2346,7 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i except Exception as e: console.print(f"[yellow]Failed to upload {image} to {img_host}. 
Exception: {str(e)}") img_host_num += 1 - return self.upload_screens(meta, screens - i, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode) + return self.upload_screens(meta, screens - i, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=True) time.sleep(0.5) diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index f5d3069a2..27c62f762 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -43,152 +43,172 @@ async def upload(self, meta): # Initiate the upload with retry logic await self.upload_with_retry(meta, cookiefile, common) - + async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): approved_image_hosts = ['ptpimg', 'imgbox'] - + while img_host_index <= len(approved_image_hosts): - current_img_host_key = f'img_host_{img_host_index}' - current_img_host = self.config.get('DEFAULT', {}).get(current_img_host_key) + # Call handle_image_upload and pass the updated meta with the current image host index + image_list, retry_mode = await self.handle_image_upload(meta, img_host_index, approved_image_hosts) - if not current_img_host or current_img_host not in approved_image_hosts: - console.print("[red]Your preferred image host is not supported at MTV, re-uploading to an allowed image host.") + # If retry_mode is True, switch to the next host + if retry_mode: + console.print(f"[yellow]Switching to the next image host. Current index: {img_host_index}") img_host_index += 1 - retry_mode = True # Set retry_mode to True if switching to an approved host continue - meta['imghost'] = current_img_host - retry_mode = False # No retry unless switching to another host + # If we successfully uploaded images, break out of the loop + if image_list is not None: + break - torrent_filename = "BASE" - torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent" - torrent = Torrent.read(torrent_path) + if image_list is None: + console.print("[red]All image hosts failed. Please check your configuration.") + return + + # Proceed with the rest of the upload process + torrent_filename = "BASE" + torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent" + torrent = Torrent.read(torrent_path) + if torrent.piece_size > 8388608: # 8 MiB in bytes + console.print("[red]Piece size is OVER 8M and does not work on MTV. Generating a new .torrent") + + # Create a new torrent with piece size explicitly set to 8 MiB from src.prep import Prep prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) - - # Check if the piece size exceeds 8 MiB and regenerate the torrent if needed - if torrent.piece_size > 8388608: # 8 MiB in bytes - console.print("[red]Piece size is OVER 8M and does not work on MTV. 
Generating a new .torrent") - - # Create a new torrent with piece size explicitly set to 8 MiB - new_torrent = prep.CustomTorrent( - path=Path(meta['path']), - trackers=["https://fake.tracker"], - source="L4G", - private=True, - exclude_globs=["*.*", "*sample.mkv", "!sample*.*"], - include_globs=["*.mkv", "*.mp4", "*.ts"], - creation_date=datetime.now(), - comment="Created by L4G's Upload Assistant", - created_by="L4G's Upload Assistant" - ) - - # Explicitly set the piece size and update metainfo - new_torrent.piece_size = 8388608 # 8 MiB in bytes - new_torrent.metainfo['info']['piece length'] = 8388608 # Ensure 'piece length' is set - - # Validate and write the new torrent - new_torrent.validate_piece_size() - new_torrent.generate(callback=prep.torf_cb, interval=5) - new_torrent.write(f"{meta['base_dir']}/tmp/{meta['uuid']}/MTV.torrent", overwrite=True) - - torrent_filename = "MTV" - - await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename) - - cat_id = await self.get_cat_id(meta) - resolution_id = await self.get_res_id(meta['resolution']) - source_id = await self.get_source_id(meta) - origin_id = await self.get_origin_id(meta) - des_tags = await self.get_tags(meta) - - # Screenshot and upload process - prep.screenshots(Path(meta['path']), meta['name'], meta['uuid'], meta['base_dir'], meta) - return_dict = {} - - # Only enable retry_mode if switching to an approved image host - prep.upload_screens( - meta, - screens=meta['screens'], - img_host_num=img_host_index, - i=0, - total_screens=meta['screens'], - custom_img_list=[], - return_dict=return_dict, - retry_mode=retry_mode + new_torrent = prep.CustomTorrent( + path=Path(meta['path']), + trackers=["https://fake.tracker"], + source="L4G", + private=True, + exclude_globs=["*.*", "*sample.mkv", "!sample*.*"], + include_globs=["*.mkv", "*.mp4", "*.ts"], + creation_date=datetime.now(), + comment="Created by L4G's Upload Assistant", + created_by="L4G's Upload Assistant" ) + + # Explicitly set the piece size and update metainfo + new_torrent.piece_size = 8388608 # 8 MiB in bytes + new_torrent.metainfo['info']['piece length'] = 8388608 # Ensure 'piece length' is set + + # Validate and write the new torrent + new_torrent.validate_piece_size() + new_torrent.generate(callback=prep.torf_cb, interval=5) + new_torrent.write(f"{meta['base_dir']}/tmp/{meta['uuid']}/MTV.torrent", overwrite=True) + + torrent_filename = "MTV" + + await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename) + + cat_id = await self.get_cat_id(meta) + resolution_id = await self.get_res_id(meta['resolution']) + source_id = await self.get_source_id(meta) + origin_id = await self.get_origin_id(meta) + des_tags = await self.get_tags(meta) + + # Edit description and other details + await self.edit_desc(meta) + group_desc = await self.edit_group_desc(meta) + mtv_name = await self.edit_name(meta) + + anon = 1 if meta['anon'] != 0 or bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) else 0 + + desc_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" + desc = open(desc_path, 'r').read() + + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + with open(torrent_file_path, 'rb') as f: + tfile = f.read() + + files = { + 'file_input': (f"{meta['name']}.torrent", tfile) + } - # Update meta['image_list'] with uploaded images - meta['image_list'] = return_dict.get('image_list', []) - - # Ensure images 
are from approved hosts - if not all(any(x in image['raw_url'] for x in approved_image_hosts) for image in meta['image_list']): - console.print("[red]Unsupported image host detected, please use one of the approved image hosts") - img_host_index += 1 - continue - - # Edit description and other details - await self.edit_desc(meta) - group_desc = await self.edit_group_desc(meta) - mtv_name = await self.edit_name(meta) - - anon = 1 if meta['anon'] != 0 or bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) else 0 - - desc_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" - desc = open(desc_path, 'r').read() - - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - with open(torrent_file_path, 'rb') as f: - tfile = f.read() - - files = { - 'file_input': (f"{meta['name']}.torrent", tfile) - } - - data = { - 'image': '', - 'title': mtv_name, - 'category': cat_id, - 'Resolution': resolution_id, - 'source': source_id, - 'origin': origin_id, - 'taglist': des_tags, - 'desc': desc, - 'groupDesc': group_desc, - 'ignoredupes': '1', - 'genre_tags': '---', - 'autocomplete_toggle': 'on', - 'fontfont': '-1', - 'fontsize': '-1', - 'auth': await self.get_auth(cookiefile), - 'anonymous': anon, - 'submit': 'true', - } + data = { + 'image': '', + 'title': mtv_name, + 'category': cat_id, + 'Resolution': resolution_id, + 'source': source_id, + 'origin': origin_id, + 'taglist': des_tags, + 'desc': desc, + 'groupDesc': group_desc, + 'ignoredupes': '1', + 'genre_tags': '---', + 'autocomplete_toggle': 'on', + 'fontfont': '-1', + 'fontsize': '-1', + 'auth': await self.get_auth(cookiefile), + 'anonymous': anon, + 'submit': 'true', + } - if not meta['debug']: - with requests.Session() as session: - with open(cookiefile, 'rb') as cf: - session.cookies.update(pickle.load(cf)) - response = session.post(url=self.upload_url, data=data, files=files) - try: - if "torrents.php" in response.url: - console.print(response.url) + if not meta['debug']: + with requests.Session() as session: + with open(cookiefile, 'rb') as cf: + session.cookies.update(pickle.load(cf)) + response = session.post(url=self.upload_url, data=data, files=files) + try: + if "torrents.php" in response.url: + console.print(response.url) + else: + if "authkey.php" in response.url: + console.print(f"[red]No DL link in response, It may have uploaded, check manually.") else: - if "authkey.php" in response.url: - console.print(f"[red]No DL link in response, It may have uploaded, check manually.") - else: - console.print(f"[red]Upload Failed. It doesn't look like you are logged in.") - except: - console.print(f"[red]It may have uploaded, check manually.") - print(traceback.print_exc()) - else: - console.print(f"[cyan]Request Data:") - console.print(data) - return - - console.print("[red]All image hosts failed. Please check your configuration.") + console.print(f"[red]Upload Failed. 
It doesn't look like you are logged in.") + except: + console.print(f"[red]It may have uploaded, check manually.") + print(traceback.print_exc()) + else: + console.print(f"[cyan]Request Data:") + console.print(data) return + + async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts=None): + if approved_image_hosts is None: + approved_image_hosts = ['ptpimg', 'imgbox'] + + current_img_host_key = f'img_host_{img_host_index}' + current_img_host = self.config.get('DEFAULT', {}).get(current_img_host_key) + + if not current_img_host or current_img_host not in approved_image_hosts: + console.print("[red]Your preferred image host is not supported at MTV, re-uploading to an allowed image host.") + retry_mode = True # Ensure retry_mode is set to True when switching hosts + meta['imghost'] = approved_image_hosts[0] # Switch to the first approved host + else: + meta['imghost'] = current_img_host + retry_mode = False # Start with retry_mode False unless we know we need to switch + + from src.prep import Prep + prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) + + # Screenshot and upload process + prep.screenshots(Path(meta['path']), meta['name'], meta['uuid'], meta['base_dir'], meta) + return_dict = {} + + # Call upload_screens with the appropriate retry_mode + prep.upload_screens( + meta, + screens=meta['screens'], + img_host_num=img_host_index, + i=0, + total_screens=meta['screens'], + custom_img_list=[], # This remains to handle any custom logic in the original function + return_dict=return_dict, + retry_mode=retry_mode # Honor the retry_mode flag passed in + ) + + # Update meta['image_list'] with uploaded images + meta['image_list'] = return_dict.get('image_list', []) + + # Ensure images are from approved hosts + if not all(any(x in image['raw_url'] for x in approved_image_hosts) for image in meta['image_list']): + console.print("[red]Unsupported image host detected, please use one of the approved image hosts") + return meta['image_list'], True # Trigger retry_mode if switching hosts + + return meta['image_list'], False # No need to retry, successful upload async def edit_desc(self, meta): base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() From 0d16638f60fb4a9fa00f8fb274aa17dc3e051713 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 23 Aug 2024 12:27:09 +1000 Subject: [PATCH 086/741] PTP - piece size --- src/trackers/PTP.py | 67 +++++++++++++++++++++++++++------------------ 1 file changed, 40 insertions(+), 27 deletions(-) diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 91944df34..98b2761ec 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -13,15 +13,11 @@ import platform import pickle from pymediainfo import MediaInfo - - from src.trackers.COMMON import COMMON from src.bbcode import BBCODE from src.exceptions import * from src.console import console - - class PTP(): def __init__(self, config): @@ -83,9 +79,6 @@ def __init__(self, config): ("Vietnamese", "vie", "vi") : 25, } - - - async def get_ptp_id_imdb(self, search_term, search_file_folder): imdb_id = ptp_torrent_id = None filename = str(os.path.basename(search_term)) @@ -189,8 +182,6 @@ async def get_ptp_description(self, ptp_torrent_id, is_disc): console.print(f"[bold green]Successfully grabbed description from PTP") return desc - - async def get_group_by_imdb(self, imdb): params = { 'imdb' : imdb, @@ -217,7 +208,6 @@ async def get_group_by_imdb(self, imdb): console.print("[red]Please check that the site is online and your 
ApiUser/ApiKey values are correct") return None - async def get_torrent_info(self, imdb, meta): params = { 'imdb' : imdb, @@ -305,7 +295,6 @@ async def search_existing(self, groupID, meta): console.print("[red]An error has occured trying to find existing releases") return existing - async def ptpimg_url_rehost(self, image_url): payload = { 'format' : 'json', @@ -327,7 +316,6 @@ async def ptpimg_url_rehost(self, image_url): # img_url = ptpimg_upload(image_url, ptpimg_api) return img_url - def get_type(self, imdb_info, meta): ptpType = None if imdb_info['type'] is not None: @@ -419,7 +407,6 @@ def get_container(self, meta): container = containermap.get(ext, 'Other') return container - def get_source(self, source): sources = { "Blu-ray" : "Blu-ray", @@ -435,7 +422,6 @@ def get_source(self, source): source_id = sources.get(source, "OtherR") return source_id - def get_subtitles(self, meta): sub_lang_map = self.sub_lang_map @@ -546,7 +532,6 @@ def get_remaster_title(self, meta): if meta.get('has_commentary', False) == True: remaster_title.append('With Commentary') - # HDR10, HDR10+, Dolby Vision, 10-bit, # if "Hi10P" in meta.get('video_encode', ''): # remaster_title.append('10-bit') @@ -667,8 +652,6 @@ async def edit_desc(self, meta): desc.write(f"[img]{raw_url}[/img]\n") desc.write("\n") - - async def get_AntiCsrfToken(self, meta): if not os.path.exists(f"{meta['base_dir']}/data/cookies"): Path(f"{meta['base_dir']}/data/cookies").mkdir(parents=True, exist_ok=True) @@ -766,7 +749,6 @@ async def fill_upload_form(self, groupID, meta): else: data["imdb"] = meta["imdb_id"] - if groupID == None: # If need to make new group url = "https://passthepopcorn.me/upload.php" if data["imdb"] == "0": @@ -808,14 +790,49 @@ async def fill_upload_form(self, groupID, meta): return url, data async def upload(self, meta, url, data): - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') as torrentFile: + torrent_filename = f"[{self.tracker}]{meta['clean_name']}.torrent" + torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/{torrent_filename}" + torrent = Torrent.read(torrent_path) + + # Check if the piece size exceeds 16 MiB and regenerate the torrent if needed + if torrent.piece_size > 16777216: # 16 MiB in bytes + console.print("[red]Piece size is OVER 16M and does not work on PTP. 
Generating a new .torrent") + + # Import Prep and regenerate the torrent with 16 MiB piece size limit + from src.prep import Prep + prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) + + # Create a new torrent with the piece size explicitly set to 16 MiB + new_torrent = prep.CustomTorrent( + path=Path(meta['path']), + trackers=[self.announce_url], + source="L4G", + private=True, + exclude_globs=["*.*", "*sample.mkv", "!sample*.*"], + include_globs=["*.mkv", "*.mp4", "*.ts"], + creation_date=datetime.datetime.now(), + comment="Created by L4G's Upload Assistant", + created_by="L4G's Upload Assistant" + ) + + # Explicitly set the piece size and update metainfo + new_torrent.piece_size = 16777216 # 16 MiB in bytes + new_torrent.metainfo['info']['piece length'] = 16777216 # Ensure 'piece length' is set + + # Validate and write the new torrent + new_torrent.validate_piece_size() + new_torrent.generate(callback=prep.torf_cb, interval=5) + new_torrent.write(torrent_path, overwrite=True) + + # Proceed with the upload process + with open(torrent_path, 'rb') as torrentFile: files = { - "file_input" : ("placeholder.torrent", torrentFile, "application/x-bittorent") + "file_input": ("placeholder.torrent", torrentFile, "application/x-bittorent") } headers = { # 'ApiUser' : self.api_user, # 'ApiKey' : self.api_key, - "User-Agent": self.user_agent + "User-Agent": self.user_agent } if meta['debug']: console.log(url) @@ -828,10 +845,9 @@ async def upload(self, meta, url, data): response = session.post(url=url, data=data, headers=headers, files=files) console.print(f"[cyan]{response.url}") responsetext = response.text - # If the repsonse contains our announce url then we are on the upload page and the upload wasn't successful. + # If the response contains our announce URL, then we are on the upload page and the upload wasn't successful. if responsetext.find(self.announce_url) != -1: # Get the error message. - #
No torrent file uploaded, or file is empty.
errorMessage = ""
match = re.search(r"""
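For readers following this hunk: PTP answers an upload with an HTML page, so a failed upload is detected by finding the announce URL in the response text and then scraping the page's alert markup for a human-readable error. A minimal sketch of that kind of scrape — the class name and pattern below are illustrative assumptions, not PTP's actual markup:

    import re

    def extract_upload_error(responsetext):
        # Hypothetical alert markup; the real class names on the site may differ
        match = re.search(r'<div class="alert[^"]*">(.*?)</div>', responsetext, re.DOTALL)
        return match.group(1).strip() if match else ""
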
Date: Fri, 23 Aug 2024 12:35:58 +1000 Subject: [PATCH 087/741] PTP - imports --- src/trackers/PTP.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 98b2761ec..5d41c2cc0 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -17,6 +17,8 @@ from src.bbcode import BBCODE from src.exceptions import * from src.console import console +from torf import Torrent +import datetime class PTP(): From 86946e7f55d5c17226cb5afaf29d3ee5efa17f8f Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 23 Aug 2024 13:18:40 +1000 Subject: [PATCH 088/741] FIX - keep-folder in disc mode keep-folder option doesn't make sense in disc mode, need to specifically set it to false when in 'is disc', and ensure we still throw a confirm otherwise. --- upload.py | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/upload.py b/upload.py index c2384a842..c92f8d578 100644 --- a/upload.py +++ b/upload.py @@ -465,18 +465,31 @@ def get_confirmation(meta): get_missing(meta) ring_the_bell = "\a" if config['DEFAULT'].get("sfx_on_prompt", True) == True else "" # \a rings the bell cli_ui.info(ring_the_bell) - if meta['isdir'] and meta['keep_folder']: - cli_ui.info_section(cli_ui.yellow, f"Uploading with --keep-folder") - kf_confirm = cli_ui.ask_yes_no("You specified --keep-folder. Uploading in folders might not be allowed. Are you sure you want to proceed?", default=False) - if not kf_confirm: - cli_ui.info('Aborting...') - exit() + + # Handle the 'keep_folder' logic based on 'is disc' and 'isdir' + if meta.get('is disc', False): + meta['keep_folder'] = False # Ensure 'keep_folder' is False if 'is disc' is True + + if meta['isdir']: + if 'keep_folder' in meta: # Check if 'keep_folder' was explicitly set + if meta['keep_folder']: # Proceed only if 'keep_folder' is True + cli_ui.info_section(cli_ui.yellow, f"Uploading with --keep-folder") + kf_confirm = cli_ui.ask_yes_no("You specified --keep-folder. Uploading in folders might not be allowed. Are you sure you want to proceed?", default=False) + if not kf_confirm: + cli_ui.info('Aborting...') + exit() + else: + # Handle the scenario where 'keep_folder' was not set but 'is disc' is False + cli_ui.warning("Warning: 'keep_folder' is not set for this upload. Proceeding without folder preservation.") + # Optionally, add logic here to handle this case + cli_ui.info_section(cli_ui.yellow, f"Is this correct?") cli_ui.info(f"Name: {meta['name']}") confirm = cli_ui.ask_yes_no("Correct?", default=False) else: cli_ui.info(f"Name: {meta['name']}") confirm = True + return confirm def dupe_check(dupes, meta): From 035e8787a78845d662d6a672a4f7e485aee8f569 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 23 Aug 2024 13:35:15 +1000 Subject: [PATCH 089/741] Add warnings when piece constraints cannot be met. 
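Several of the patches above keep tuning the same constraint loop, so a condensed, standalone sketch of it may be easier to follow than the diffs. The bounds (1000-2000 pieces, a .torrent under roughly 100 KiB, piece sizes clamped to 16 KiB-64 MiB, an 8 MiB starting point) are the ones quoted in prep.py, but the function itself is a reading aid under those assumptions, not the shipped implementation:

    import math

    def pick_piece_size(total_size):
        piece_size = 8388608                  # 8 MiB starting point
        min_size, max_size = 16384, 67108864  # 16 KiB / 64 MiB clamps
        while True:
            num_pieces = math.ceil(total_size / piece_size)
            torrent_file_size = 20 + num_pieces * 20  # rough .torrent size estimate
            if 1000 <= num_pieces <= 2000 and torrent_file_size <= 102400:
                break
            if num_pieces < 1000:
                if piece_size <= min_size:
                    break  # cannot shrink further
                piece_size //= 2
            else:  # too many pieces, or the .torrent itself is too large
                if piece_size >= max_size:
                    break  # cannot grow further; this is where the warnings fire
                piece_size *= 2
        return piece_size

Doubling the piece size halves the piece count, so the loop always terminates either inside the target window or at one of the clamps, which is exactly the case these warnings cover.
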
--- src/prep.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/prep.py b/src/prep.py index eed05710f..d3919fc79 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2111,8 +2111,8 @@ def piece_size(self, value): def calculate_piece_size(cls, total_size, min_size, max_size): our_min_size = 16384 our_max_size = 67108864 - # Start with a piece size of 4 MiB - piece_size = 4194304 # 4 MiB in bytes + # Start with a piece size of 8 MiB + piece_size = 8388608 num_pieces = math.ceil(total_size / piece_size) torrent_file_size = 20 + (num_pieces * 20) # Approximate .torrent size: 20 bytes header + 20 bytes per piece @@ -2123,7 +2123,14 @@ def calculate_piece_size(cls, total_size, min_size, max_size): if piece_size < our_min_size: piece_size = our_min_size break - elif num_pieces > 2000 or torrent_file_size > 102400: + elif num_pieces > 2000: + console.warning('Warning: Piece size exceeded 2000 pieces!') + piece_size *= 2 + if piece_size > our_max_size: + piece_size = our_max_size + break + elif torrent_file_size > 102400: + cli_ui.error('WARNING: .torrent size will exceed 100 KiB!') piece_size *= 2 if piece_size > our_max_size: piece_size = our_max_size From cff9252cd4cf78f47abb1a1984748574692cf7c4 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 23 Aug 2024 13:46:06 +1000 Subject: [PATCH 090/741] Fix ui warning --- src/prep.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index d3919fc79..81c64ed4c 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2124,7 +2124,7 @@ def calculate_piece_size(cls, total_size, min_size, max_size): piece_size = our_min_size break elif num_pieces > 2000: - console.warning('Warning: Piece size exceeded 2000 pieces!') + cli_ui.warning('Warning: Piece size exceeded 2000 pieces!') piece_size *= 2 if piece_size > our_max_size: piece_size = our_max_size From 0e69dd878d29607c19d4286df36535a33edaf6ef Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 24 Aug 2024 12:27:29 +1000 Subject: [PATCH 091/741] Remove pointless keep folder warning --- upload.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/upload.py b/upload.py index c92f8d578..2ef0f1d55 100644 --- a/upload.py +++ b/upload.py @@ -471,17 +471,13 @@ def get_confirmation(meta): meta['keep_folder'] = False # Ensure 'keep_folder' is False if 'is disc' is True if meta['isdir']: - if 'keep_folder' in meta: # Check if 'keep_folder' was explicitly set - if meta['keep_folder']: # Proceed only if 'keep_folder' is True + if 'keep_folder' in meta: + if meta['keep_folder']: cli_ui.info_section(cli_ui.yellow, f"Uploading with --keep-folder") kf_confirm = cli_ui.ask_yes_no("You specified --keep-folder. Uploading in folders might not be allowed. Are you sure you want to proceed?", default=False) if not kf_confirm: cli_ui.info('Aborting...') exit() - else: - # Handle the scenario where 'keep_folder' was not set but 'is disc' is False - cli_ui.warning("Warning: 'keep_folder' is not set for this upload. 
Proceeding without folder preservation.") - # Optionally, add logic here to handle this case cli_ui.info_section(cli_ui.yellow, f"Is this correct?") cli_ui.info(f"Name: {meta['name']}") From 0e79eb24d565d29b02c5e04ef843c5a3cf834ce3 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 24 Aug 2024 22:22:29 +1000 Subject: [PATCH 092/741] Fix MTV exclude options for disc based torrent creation --- src/prep.py | 2 +- src/trackers/MTV.py | 12 ++++++++++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/src/prep.py b/src/prep.py index 81c64ed4c..adf2b8900 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2124,7 +2124,7 @@ def calculate_piece_size(cls, total_size, min_size, max_size): piece_size = our_min_size break elif num_pieces > 2000: - cli_ui.warning('Warning: Piece size exceeded 2000 pieces!') + cli_ui.warning(f"Warning: Piece size exceeded 2000 pieces! Using ({num_pieces}) pieces.") piece_size *= 2 if piece_size > our_max_size: piece_size = our_max_size diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 27c62f762..4aa0ceac4 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -73,6 +73,14 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): if torrent.piece_size > 8388608: # 8 MiB in bytes console.print("[red]Piece size is OVER 8M and does not work on MTV. Generating a new .torrent") + # Determine include and exclude patterns based on whether it's a disc or not + if meta['is_disc']: + include = [] # Adjust as needed for disc-specific inclusions, make sure it's a list + exclude = [] # Adjust as needed for disc-specific exclusions, make sure it's a list + else: + include = ["*.mkv", "*.mp4", "*.ts"] + exclude = ["*.*", "*sample.mkv", "!sample*.*"] + # Create a new torrent with piece size explicitly set to 8 MiB from src.prep import Prep prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) @@ -81,8 +89,8 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): trackers=["https://fake.tracker"], source="L4G", private=True, - exclude_globs=["*.*", "*sample.mkv", "!sample*.*"], - include_globs=["*.mkv", "*.mp4", "*.ts"], + exclude_globs=exclude, # Ensure this is always a list + include_globs=include, # Ensure this is always a list creation_date=datetime.now(), comment="Created by L4G's Upload Assistant", created_by="L4G's Upload Assistant" From bc940fd8451e15a73ded06b40839cea7b1e23453 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 25 Aug 2024 11:20:45 +1000 Subject: [PATCH 093/741] Add more debugging for qbit torrent storage directory Should help with https://github.com/Audionut/Upload-Assistant/issues/13 --- src/clients.py | 41 ++++++++++------------------------------- 1 file changed, 10 insertions(+), 31 deletions(-) diff --git a/src/clients.py b/src/clients.py index 14a756d91..3ff727dab 100644 --- a/src/clients.py +++ b/src/clients.py @@ -12,12 +12,8 @@ import ssl import shutil import time - - from src.console import console - - class Clients(): """ Add to torrent client @@ -26,7 +22,6 @@ def __init__(self, config): self.config = config pass - async def add_to_client(self, meta, tracker): torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]{meta['clean_name']}.torrent" if meta.get('no_seed', False) == True: @@ -63,8 +58,6 @@ async def add_to_client(self, meta, tracker): shutil.copy(torrent_path, client['watch_folder']) return - - async def find_existing_torrent(self, meta): if meta.get('client', None) == None: default_torrent_client = 
self.config['DEFAULT']['default_torrent_client'] @@ -103,7 +96,6 @@ async def find_existing_torrent(self, meta): return None - async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client, print_err=False): valid = False wrong_file = False @@ -165,10 +157,14 @@ async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client console.print(err_print) return valid, torrent_path - async def search_qbit_for_torrent(self, meta, client): console.print("[green]Searching qbittorrent for an existing .torrent") torrent_storage_dir = client.get('torrent_storage_dir', None) + if meta['debug']: + if torrent_storage_dir: + print(f"Torrent storage directory found: {torrent_storage_dir}") + else: + print("No torrent storage directory found.") if torrent_storage_dir == None and client.get("torrent_client", None) != "watch": console.print(f"[bold red]Missing torrent_storage_dir for {self.config['DEFAULT']['default_torrent_client']}") return None @@ -188,6 +184,10 @@ async def search_qbit_for_torrent(self, meta, client): local_path, remote_path = await self.remote_path_map(meta) if local_path.lower() in meta['path'].lower() and local_path.lower() != remote_path.lower(): remote_path_map = True + if meta['debug']: + print(f"Remote path mapping found!") + print(f"Local path: {local_path}") + print(f"Remote path: {remote_path}") torrents = qbt_client.torrents.info() for torrent in torrents: @@ -215,17 +215,6 @@ async def search_qbit_for_torrent(self, meta, client): return torrent.hash return None - - - - - - - - - - - def rtorrent(self, path, torrent_path, torrent, meta, local_path, remote_path, client): rtorrent = xmlrpc.client.Server(client['rtorrent_url'], context=ssl._create_stdlib_context()) metainfo = bencode.bread(torrent_path) @@ -234,7 +223,6 @@ def rtorrent(self, path, torrent_path, torrent, meta, local_path, remote_path, c except EnvironmentError as exc: console.print("[red]Error making fast-resume data (%s)" % (exc,)) raise - new_meta = bencode.bencode(fast_resume) if new_meta != metainfo: @@ -242,7 +230,6 @@ def rtorrent(self, path, torrent_path, torrent, meta, local_path, remote_path, c console.print("Creating fast resume") bencode.bwrite(fast_resume, fr_file) - isdir = os.path.isdir(path) # if meta['type'] == "DISC": # path = os.path.dirname(path) @@ -258,7 +245,6 @@ def rtorrent(self, path, torrent_path, torrent, meta, local_path, remote_path, c if isdir == False: path = os.path.dirname(path) - console.print("[bold yellow]Adding and starting torrent") rtorrent.load.start_verbose('', fr_file, f"d.directory_base.set={path}") time.sleep(1) @@ -275,7 +261,6 @@ def rtorrent(self, path, torrent_path, torrent, meta, local_path, remote_path, c console.print(f"[cyan]Path: {path}") return - async def qbittorrent(self, path, torrent, local_path, remote_path, client, is_disc, filelist, meta): # infohash = torrent.infohash #Remote path mount @@ -323,8 +308,6 @@ async def qbittorrent(self, path, torrent, local_path, remote_path, client, is_d qbt_client.torrents_add_tags(tags=meta.get('qbit_tag'), torrent_hashes=torrent.infohash) console.print(f"Added to: {path}") - - def deluge(self, path, torrent_path, torrent, local_path, remote_path, client, meta): client = DelugeRPCClient(client['deluge_url'], int(client['deluge_port']), client['deluge_user'], client['deluge_pass']) # client = LocalDelugeRPCClient() @@ -345,9 +328,6 @@ def deluge(self, path, torrent_path, torrent, local_path, remote_path, client, m else: console.print("[bold red]Unable to connect to deluge") - - - def 
add_fast_resume(self, metainfo, datapath, torrent): """ Add fast resume data to a metafile dict. """ @@ -392,7 +372,6 @@ def add_fast_resume(self, metainfo, datapath, torrent): return metainfo - async def remote_path_map(self, meta): if meta.get('client', None) == None: torrent_client = self.config['DEFAULT']['default_torrent_client'] @@ -405,7 +384,7 @@ async def remote_path_map(self, meta): if os.path.normpath(local_path[i]).lower() in meta['path'].lower(): list_local_path = local_path[i] list_remote_path = remote_path[i] - + local_path = os.path.normpath(list_local_path) remote_path = os.path.normpath(list_remote_path) if local_path.endswith(os.sep): From 25e523a459a32cbad1fd3faf138c27d8710b0f42 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 25 Aug 2024 11:22:14 +1000 Subject: [PATCH 094/741] Print to console --- src/clients.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/clients.py b/src/clients.py index 3ff727dab..dd1dd797b 100644 --- a/src/clients.py +++ b/src/clients.py @@ -162,9 +162,9 @@ async def search_qbit_for_torrent(self, meta, client): torrent_storage_dir = client.get('torrent_storage_dir', None) if meta['debug']: if torrent_storage_dir: - print(f"Torrent storage directory found: {torrent_storage_dir}") + console.print(f"Torrent storage directory found: {torrent_storage_dir}") else: - print("No torrent storage directory found.") + console.print("No torrent storage directory found.") if torrent_storage_dir == None and client.get("torrent_client", None) != "watch": console.print(f"[bold red]Missing torrent_storage_dir for {self.config['DEFAULT']['default_torrent_client']}") return None @@ -185,9 +185,9 @@ async def search_qbit_for_torrent(self, meta, client): if local_path.lower() in meta['path'].lower() and local_path.lower() != remote_path.lower(): remote_path_map = True if meta['debug']: - print(f"Remote path mapping found!") - print(f"Local path: {local_path}") - print(f"Remote path: {remote_path}") + console.print(f"Remote path mapping found!") + console.print(f"Local path: {local_path}") + console.print(f"Remote path: {remote_path}") torrents = qbt_client.torrents.info() for torrent in torrents: From 8758f0ffa0266c784df5144addbad144314bb886 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 25 Aug 2024 16:35:46 +1000 Subject: [PATCH 095/741] More notes for example_config --- data/example-config.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/data/example-config.py b/data/example-config.py index 1a26a307b..465b92ced 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -37,6 +37,7 @@ "TRACKERS" : { # Which trackers do you want to upload to? 
# Available tracker: BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP + # Remove the ones not used to save being asked everytime "default_trackers" : "BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP", "BLU" : { @@ -219,15 +220,20 @@ }, }, - + # enable_search to true will automatically try and find a suitable hash to save having to rehash when creating torrents + # Should use the qbit API, but will also use the torrent_storage_dir to find suitable hashes "TORRENT_CLIENTS" : { - # Name your torrent clients here, for example, this example is named "Client1" + # Name your torrent clients here, for example, this example is named "Client1" and is set as default_torrent_client above + # All options relate to the webui, make sure you have the webui secured if it has WAN access + # See https://github.com/Audionut/Upload-Assistant/wiki "Client1" : { "torrent_client" : "qbit", + # "enable_search" : True, "qbit_url" : "http://127.0.0.1", "qbit_port" : "8080", "qbit_user" : "username", "qbit_pass" : "password", + # "torrent_storage_dir" : "path/to/BT_backup folder" # Remote path mapping (docker/etc.) CASE SENSITIVE # "local_path" : "/LocalPath", @@ -251,9 +257,6 @@ # If using remote path mapping, use remote path # For using multiple paths, use a list ["path1", "path2"] # "automatic_management_paths" : "" - - - # Remote path mapping (docker/etc.) CASE SENSITIVE # "local_path" : "E:\\downloads\\tv", # "remote_path" : "/remote/downloads/tv" @@ -292,12 +295,6 @@ }, - - - - - - "DISCORD" :{ "discord_bot_token" : "discord bot token", "discord_bot_description" : "L4G's Upload Assistant", From c8218f0f8dad8cf9755ecf479380c2bd9a8ccb13 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 25 Aug 2024 16:39:45 +1000 Subject: [PATCH 096/741] Add note about recent debug additions --- data/example-config.py | 1 + 1 file changed, 1 insertion(+) diff --git a/data/example-config.py b/data/example-config.py index 465b92ced..7b8a0d082 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -222,6 +222,7 @@ # enable_search to true will automatically try and find a suitable hash to save having to rehash when creating torrents # Should use the qbit API, but will also use the torrent_storage_dir to find suitable hashes + # If you find issue, use the "--debug" command option to print out some related details "TORRENT_CLIENTS" : { # Name your torrent clients here, for example, this example is named "Client1" and is set as default_torrent_client above # All options relate to the webui, make sure you have the webui secured if it has WAN access From 935154856127b64e32e984b7111ef764624fa026 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 25 Aug 2024 19:39:13 +1000 Subject: [PATCH 097/741] immediate break in reuploading if using approved host --- src/trackers/MTV.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 4aa0ceac4..46f43a994 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -51,17 +51,15 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): # Call handle_image_upload and pass the updated meta with the current image host index image_list, retry_mode = await self.handle_image_upload(meta, img_host_index, approved_image_hosts) - # If retry_mode is True, switch to the next host - if 
retry_mode: - console.print(f"[yellow]Switching to the next image host. Current index: {img_host_index}") - img_host_index += 1 - continue - - # If we successfully uploaded images, break out of the loop - if image_list is not None: + # If we successfully uploaded images or are already using an approved host, break out of the loop + if not retry_mode: break - if image_list is None: + # If retry_mode is True, switch to the next host + console.print(f"[yellow]Switching to the next image host. Current index: {img_host_index}") + img_host_index += 1 + + if not image_list: console.print("[red]All image hosts failed. Please check your configuration.") return From bd5f528a8650ca279b4ec0d0036066f7118f142f Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 25 Aug 2024 21:18:29 +1000 Subject: [PATCH 098/741] MTV images, sixth time is a charm --- src/trackers/MTV.py | 37 +++++++++++++++++++++++-------------- 1 file changed, 23 insertions(+), 14 deletions(-) diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 46f43a994..56c071f11 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -47,21 +47,30 @@ async def upload(self, meta): async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): approved_image_hosts = ['ptpimg', 'imgbox'] - while img_host_index <= len(approved_image_hosts): - # Call handle_image_upload and pass the updated meta with the current image host index - image_list, retry_mode = await self.handle_image_upload(meta, img_host_index, approved_image_hosts) + # Check if the images are already hosted on an approved image host + if all(any(host in image['raw_url'] for host in approved_image_hosts) for image in meta['image_list']): + console.print("[green]Images are already hosted on an approved image host. Skipping re-upload.") + image_list = meta['image_list'] # Use the existing images - # If we successfully uploaded images or are already using an approved host, break out of the loop - if not retry_mode: - break - - # If retry_mode is True, switch to the next host - console.print(f"[yellow]Switching to the next image host. Current index: {img_host_index}") - img_host_index += 1 - - if not image_list: - console.print("[red]All image hosts failed. Please check your configuration.") - return + else: + # Proceed with the retry logic if images are not hosted on an approved image host + while img_host_index <= len(approved_image_hosts): + # Call handle_image_upload and pass the updated meta with the current image host index + image_list, retry_mode = await self.handle_image_upload(meta, img_host_index, approved_image_hosts) + + # If retry_mode is True, switch to the next host + if retry_mode: + console.print(f"[yellow]Switching to the next image host. Current index: {img_host_index}") + img_host_index += 1 + continue + + # If we successfully uploaded images, break out of the loop + if image_list is not None: + break + + if image_list is None: + console.print("[red]All image hosts failed. 
Please check your configuration.") + return # Proceed with the rest of the upload process torrent_filename = "BASE" From 5a6c214ab0df750169def84983892a7d526fa557 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 26 Aug 2024 09:03:43 +1000 Subject: [PATCH 099/741] Increase accuracy of the .torrent size calculation Move the piece number warning to when it actually applies --- src/prep.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/src/prep.py b/src/prep.py index adf2b8900..b1a15673d 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2092,7 +2092,7 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Calculate and set the piece size total_size = self._calculate_total_size() - piece_size = self.calculate_piece_size(total_size, self.piece_size_min, self.piece_size_max) + piece_size = self.calculate_piece_size(total_size, self.piece_size_min, self.piece_size_max, self.files) self.piece_size = piece_size @property @@ -2103,18 +2103,18 @@ def piece_size(self): def piece_size(self, value): if value is None: total_size = self._calculate_total_size() - value = self.calculate_piece_size(total_size, self.piece_size_min, self.piece_size_max) + value = self.calculate_piece_size(total_size, self.piece_size_min, self.piece_size_max, self.files) self._piece_size = value self.metainfo['info']['piece length'] = value # Ensure 'piece length' is set @classmethod - def calculate_piece_size(cls, total_size, min_size, max_size): + def calculate_piece_size(cls, total_size, min_size, max_size, files): our_min_size = 16384 our_max_size = 67108864 # Start with a piece size of 8 MiB piece_size = 8388608 num_pieces = math.ceil(total_size / piece_size) - torrent_file_size = 20 + (num_pieces * 20) # Approximate .torrent size: 20 bytes header + 20 bytes per piece + torrent_file_size = 20 + (num_pieces * 20) + cls._calculate_pathname_bytes(files) # Approximate .torrent size # Adjust the piece size to fit within the constraints while not (1000 <= num_pieces <= 2000 and torrent_file_size <= 102400): # 100 KiB .torrent size limit @@ -2124,9 +2124,9 @@ def calculate_piece_size(cls, total_size, min_size, max_size): piece_size = our_min_size break elif num_pieces > 2000: - cli_ui.warning(f"Warning: Piece size exceeded 2000 pieces! Using ({num_pieces}) pieces.") piece_size *= 2 if piece_size > our_max_size: + cli_ui.warning(f"Warning: Piece size exceeded 2000 pieces! 
Using ({num_pieces}) pieces.") piece_size = our_max_size break elif torrent_file_size > 102400: @@ -2136,16 +2136,22 @@ def calculate_piece_size(cls, total_size, min_size, max_size): piece_size = our_max_size break num_pieces = math.ceil(total_size / piece_size) - torrent_file_size = 20 + (num_pieces * 20) + torrent_file_size = 20 + (num_pieces * 20) + cls._calculate_pathname_bytes(files) return piece_size def _calculate_total_size(self): return sum(file.size for file in self.files) + @classmethod + def _calculate_pathname_bytes(cls, files): + # Calculate the total bytes consumed by all the pathnames in the torrent + total_pathname_bytes = sum(len(str(file).encode('utf-8')) for file in files) + return total_pathname_bytes + def validate_piece_size(self): if not hasattr(self, '_piece_size') or self._piece_size is None: - self.piece_size = self.calculate_piece_size(self._calculate_total_size(), self.piece_size_min, self.piece_size_max) + self.piece_size = self.calculate_piece_size(self._calculate_total_size(), self.piece_size_min, self.piece_size_max, self.files) self.metainfo['info']['piece length'] = self.piece_size # Ensure 'piece length' is set def create_torrent(self, meta, path, output_filename): From ae3cccc2c5eeda7bbfad23c992c9cd660ce62ae3 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 26 Aug 2024 16:04:04 +1000 Subject: [PATCH 100/741] Squashed commit of the following: commit d000333de42a016c027d5ae7195edb3b9da6ce6d Merge: 7dc1403 5a6c214 Author: b-igu <179387180+b-igu@users.noreply.github.com> Date: Sun Aug 25 23:50:19 2024 -0300 Merge branch 'Audionut:master' into master commit 7dc1403885037d70a4f71277fef55964ffc2827e Author: b-igu <179387180+b-igu@users.noreply.github.com> Date: Sun Aug 25 19:48:00 2024 -0300 Added support to AL --- README.md | 2 +- data/example-config.py | 9 +- src/trackers/AL.py | 182 +++++++++++++++++++++++++++++++++++++++++ upload.py | 5 +- 4 files changed, 193 insertions(+), 5 deletions(-) create mode 100644 src/trackers/AL.py diff --git a/README.md b/README.md index 076b66d09..95143e891 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ A simple tool to take the work out of uploading. - Can re-use existing torrents instead of hashing new - Generates proper name for your upload using Mediainfo/BDInfo and TMDb/IMDb conforming to site rules - Checks for existing releases already on site - - Uploads to PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN/RTF/OTW/FNP/CBR/UTP + - Uploads to PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN/RTF/OTW/FNP/CBR/UTP/AL - Adds to your client with fast resume, seeding instantly (rtorrent/qbittorrent/deluge/watch folder) - ALL WITH MINIMAL INPUT! - Currently works with .mkv/.mp4/Blu-ray/DVD/HD-DVDs diff --git a/data/example-config.py b/data/example-config.py index 7b8a0d082..5013ab63d 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -36,9 +36,9 @@ "TRACKERS" : { # Which trackers do you want to upload to? 
- # Available tracker: BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP + # Available tracker: BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL # Remove the ones not used to save being asked everytime - "default_trackers" : "BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP", + "default_trackers" : "BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL", "BLU" : { "useAPI" : False, # Set to True if using BLU @@ -214,6 +214,11 @@ "announce_url" : "https://UTP/announce/customannounceurl", # "anon" : False }, + "AL" : { + "api_key" : "AL api key", + "announce_url" : "https://animelovers.club/announce/customannounceurl", + # "anon" : False + }, "MANUAL" : { # Uncomment and replace link with filebrowser (https://github.com/filebrowser/filebrowser) link to the Upload-Assistant directory, this will link to your filebrowser instead of uploading to uguu.se # "filebrowser" : "https://domain.tld/filebrowser/files/Upload-Assistant/" diff --git a/src/trackers/AL.py b/src/trackers/AL.py new file mode 100644 index 000000000..4aecdd66f --- /dev/null +++ b/src/trackers/AL.py @@ -0,0 +1,182 @@ +# -*- coding: utf-8 -*- +# import discord +import asyncio +import requests +import os +import platform +from str2bool import str2bool + +from src.trackers.COMMON import COMMON +from src.console import console + + +class AL(): + """ + Edit for Tracker: + Edit BASE.torrent with announce and source + Check for duplicates + Set type/category IDs + Upload + """ + + def __init__(self, config): + self.config = config + self.tracker = 'AL' + self.source_flag = 'AnimeLovers' + self.upload_url = 'https://animelovers.club/api/torrents/upload' + self.search_url = 'https://animelovers.club/api/torrents/filter' + self.signature = None + self.banned_groups = [""] + pass + + async def get_cat_id(self, category_name): + category_id = { + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '1') + return category_id + + async def get_type_id(self, type): + type_id = { + 'BDMV': '1', + 'DISC': '1', + 'REMUX': '2', + 'ENCODE': '3', + 'WEBDL': '4', + 'WEBRIP': '5', + 'HDTV': '6', + 'DVDISO': '7', + 'DVDRIP': '8', + 'RAW': '9', + 'BDRIP': '10', + 'COLOR': '11', + 'MONO': '12' + }.get(type, '1') + return type_id + + async def get_res_id(self, resolution): + resolution_id = { + '8640p':'10', + '4320p': '1', + '2160p': '2', + '1440p' : '3', + '1080p': '3', + '1080i':'4', + '720p': '5', + '576p': '6', + '576i': '7', + '480p': '8', + '480i': '9' + }.get(resolution, '10') + return resolution_id + + async def upload(self, meta): + common = COMMON(config=self.config) + await common.edit_torrent(meta, self.tracker, self.source_flag) + await common.unit3d_edit_desc(meta, self.tracker, self.signature) + cat_id = await self.get_cat_id(meta['category']) + type_id = await self.get_type_id(meta['type']) + resolution_id = await self.get_res_id(meta['resolution']) + region_id = await common.unit3d_region_ids(meta.get('region')) + distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + name = await self.edit_name(meta) + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', 
"False")))) == False: + anon = 0 + else: + anon = 1 + + if meta['bdinfo'] != None: + mi_dump = None + bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + else: + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + bd_dump = None + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') + files = {'torrent': open_torrent} + data = { + 'name' : name, + 'description' : desc, + 'mediainfo' : mi_dump, + 'bdinfo' : bd_dump, + 'category_id' : cat_id, + 'type_id' : type_id, + 'resolution_id' : resolution_id, + 'tmdb' : meta['tmdb'], + 'imdb' : meta['imdb_id'].replace('tt', ''), + 'tvdb' : meta['tvdb_id'], + 'mal' : meta['mal_id'], + 'igdb' : 0, + 'anonymous' : anon, + 'stream' : meta['stream'], + 'sd' : meta['sd'], + 'keywords' : meta['keywords'], + 'personal_release' : int(meta.get('personalrelease', False)), + 'internal' : 0, + 'featured' : 0, + 'free' : 0, + 'doubleup' : 0, + 'sticky' : 0, + } + # Internal + if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + data['internal'] = 1 + + if region_id != 0: + data['region_id'] = region_id + if distributor_id != 0: + data['distributor_id'] = distributor_id + if meta.get('category') == "TV": + data['season_number'] = meta.get('season_int', '0') + data['episode_number'] = meta.get('episode_int', '0') + headers = { + 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + } + params = { + 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + } + + if meta['debug'] == False: + response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + try: + console.print(response.json()) + except: + console.print("It may have uploaded, go check") + return + else: + console.print(f"[cyan]Request Data:") + console.print(data) + open_torrent.close() + + async def search_existing(self, meta): + dupes = [] + console.print("[yellow]Searching for existing torrents on site...") + params = { + 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId' : meta['tmdb'], + 'categories[]' : await self.get_cat_id(meta['category']), + 'types[]' : await self.get_type_id(meta['type']), + 'resolutions[]' : await self.get_res_id(meta['resolution']), + 'name' : "" + } + if meta.get('edition', "") != "": + params['name'] = params['name'] + f" {meta['edition']}" + try: + response = requests.get(url=self.search_url, params=params) + response = response.json() + for each in response['data']: + result = [each][0]['attributes']['name'] + # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() + # if difference >= 0.05: + dupes.append(result) + except: + console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') + await asyncio.sleep(5) + + return dupes + + # Got this from CBR and changed the encoding rename + async def edit_name(self, meta): + name = meta['uuid'].replace('.mkv','').replace('.mp4','').replace(".", " ").replace("DDP2 0","DDP2.0").replace("DDP5 1","DDP5.1").replace("H 264","x264").replace("H 265","x265").replace("DD+7 1","DDP7.1").replace("AAC2 0","AAC2.0").replace('DD5 1','DD5.1').replace('DD2 0','DD2.0').replace('TrueHD 7 1','TrueHD 7.1').replace('DTS-HD MA 7 1','DTS-HD MA 7.1').replace('DTS-HD MA 5 1','DTS-HD MA 5.1').replace("TrueHD 5 1","TrueHD 5.1").replace("DTS-X 7 1","DTS-X 7.1").replace("DTS-X 5 1","DTS-X 5.1").replace("FLAC 2 0","FLAC 2.0").replace("FLAC 2 0","FLAC 2.0").replace("FLAC 5 1","FLAC 5.1").replace("DD1 0","DD1.0").replace("DTS ES 5 1","DTS ES 5.1").replace("DTS5 1","DTS 5.1").replace("AAC1 0","AAC1.0").replace("DD+5 1","DDP5.1").replace("DD+2 0","DDP2.0").replace("DD+1 0","DDP1.0") + return name \ No newline at end of file diff --git a/upload.py b/upload.py index 2ef0f1d55..fc33d083b 100644 --- a/upload.py +++ b/upload.py @@ -37,6 +37,7 @@ from src.trackers.FNP import FNP from src.trackers.CBR import CBR from src.trackers.UTP import UTP +from src.trackers.AL import AL import json from pathlib import Path import asyncio @@ -247,12 +248,12 @@ async def do_the_thing(base_dir): ####### Upload to Trackers ####### #################################### common = COMMON(config=config) - api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM','LCD','LST','HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'OTW', 'FNP', 'CBR', 'UTP'] + api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM','LCD','LST','HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'OTW', 'FNP', 'CBR', 'UTP', 'AL'] http_trackers = ['HDB', 'TTG', 'FL', 'PTER', 'HDT', 'MTV'] tracker_class_map = { 'BLU' : BLU, 'BHD': BHD, 'AITHER' : AITHER, 'STC' : STC, 'R4E' : R4E, 'THR' : THR, 'STT' : STT, 'HP' : HP, 'PTP' : PTP, 'RF' : RF, 'SN' : SN, 'ACM' : ACM, 'HDB' : HDB, 'LCD': LCD, 'TTG' : TTG, 'LST' : LST, 'HUNO': HUNO, 'FL' : FL, 'LT' : LT, 'NBL' : NBL, 'ANT' : ANT, 'PTER': PTER, 'JPTV' : JPTV, - 'TL' : TL, 'TDC' : TDC, 'HDT' : HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF':RTF, 'OTW': OTW, 'FNP': FNP, 'CBR': CBR, 'UTP': UTP} + 'TL' : TL, 'TDC' : TDC, 'HDT' : HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF':RTF, 'OTW': OTW, 'FNP': FNP, 'CBR': CBR, 'UTP': UTP, 'AL':AL} for tracker in trackers: if meta['name'].endswith('DUPE?'): From 14c81f20f2238828974cebdc7f8df1d1d837473f Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 27 Aug 2024 07:29:46 +1000 Subject: [PATCH 101/741] Update PTP exclusions --- src/trackers/PTP.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 5d41c2cc0..7c8ce9f77 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -804,15 +804,24 @@ async def upload(self, meta, url, data): from src.prep import Prep prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) - # Create a new torrent with the piece size explicitly set to 16 MiB + if meta['is_disc']: + include = [] + exclude = [] + else: + include = ["*.mkv", "*.mp4", "*.ts"] + exclude = ["*.*", "*sample.mkv", "!sample*.*"] + + # Create a new torrent with piece size explicitly set to 8 MiB + from src.prep import Prep + prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) 
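        # CustomTorrent picks its own piece size via calculate_piece_size in
        # src/prep.py (starting from 8 MiB and aiming for 1000-2000 pieces)
        # unless one is pinned explicitly afterwards; the empty glob lists
        # above keep every file for disc content, while file content keeps
        # only video files and drops samples.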
new_torrent = prep.CustomTorrent( path=Path(meta['path']), - trackers=[self.announce_url], + trackers=["https://fake.tracker"], source="L4G", private=True, - exclude_globs=["*.*", "*sample.mkv", "!sample*.*"], - include_globs=["*.mkv", "*.mp4", "*.ts"], - creation_date=datetime.datetime.now(), + exclude_globs=exclude, # Ensure this is always a list + include_globs=include, # Ensure this is always a list + creation_date=datetime.now(), comment="Created by L4G's Upload Assistant", created_by="L4G's Upload Assistant" ) From 858b8e66618fdb86f91dbadddfa74c0f96fe5fb5 Mon Sep 17 00:00:00 2001 From: tiberio87 Date: Tue, 27 Aug 2024 08:54:21 +0200 Subject: [PATCH 102/741] add SHRI tracker --- data/example-config.py | 5 ++ src/trackers/SHRI.py | 179 +++++++++++++++++++++++++++++++++++++++++ upload.py | 5 +- 3 files changed, 187 insertions(+), 2 deletions(-) create mode 100644 src/trackers/SHRI.py diff --git a/data/example-config.py b/data/example-config.py index 7b8a0d082..6b6f2bbec 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -214,6 +214,11 @@ "announce_url" : "https://UTP/announce/customannounceurl", # "anon" : False }, + "SHRI" :{ + "api_key" : "SHRI api key", + "announce_url" : "https://shareisland.org/announce/customannounceurl", + # "anon" : "False" + }, "MANUAL" : { # Uncomment and replace link with filebrowser (https://github.com/filebrowser/filebrowser) link to the Upload-Assistant directory, this will link to your filebrowser instead of uploading to uguu.se # "filebrowser" : "https://domain.tld/filebrowser/files/Upload-Assistant/" diff --git a/src/trackers/SHRI.py b/src/trackers/SHRI.py new file mode 100644 index 000000000..47fe6d614 --- /dev/null +++ b/src/trackers/SHRI.py @@ -0,0 +1,179 @@ +# -*- coding: utf-8 -*- +# import discord +import asyncio +import requests +from str2bool import str2bool +import os +import platform + +from src.trackers.COMMON import COMMON +from src.console import console + + +class SHRI(): + """ + Edit for Tracker: + Edit BASE.torrent with announce and source + Check for duplicates + Set type/category IDs + Upload + """ + + ############################################################### + ######## EDIT ME ######## + ############################################################### + + # ALSO EDIT CLASS NAME ABOVE + + def __init__(self, config): + self.config = config + self.tracker = 'SHRI' + self.source_flag = 'Shareisland' + self.upload_url = 'https://shareisland.org/api/torrents/upload' + self.search_url = 'https://shareisland.org/api/torrents/filter' + self.signature = f"\n[center][url=https://shareisland.org]Created by SHRI Upload Assistant[/url][/center]" + self.banned_groups = [""] + pass + + async def get_cat_id(self, category_name): + category_id = { + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '0') + return category_id + + async def get_type_id(self, type): + type_id = { + 'DISC': '26', + 'REMUX': '7', + 'WEBDL': '27', + 'WEBRIP': '27', + 'HDTV': '6', + 'ENCODE': '15' + }.get(type, '0') + return type_id + + async def get_res_id(self, resolution): + resolution_id = { + '8640p':'10', + '4320p': '1', + '2160p': '2', + '1440p' : '3', + '1080p': '3', + '1080i':'4', + '720p': '5', + '576p': '6', + '576i': '7', + '480p': '8', + '480i': '9' + }.get(resolution, '10') + return resolution_id + + ############################################################### + ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### + ############################################################### + + async def upload(self, meta): + common = 
COMMON(config=self.config) + await common.edit_torrent(meta, self.tracker, self.source_flag) + cat_id = await self.get_cat_id(meta['category']) + type_id = await self.get_type_id(meta['type']) + resolution_id = await self.get_res_id(meta['resolution']) + await common.unit3d_edit_desc(meta, self.tracker, self.signature) + region_id = await common.unit3d_region_ids(meta.get('region')) + distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + anon = 0 + else: + anon = 1 + + if meta['bdinfo'] != None: + mi_dump = None + bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + else: + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + bd_dump = None + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') + files = {'torrent': open_torrent} + data = { + 'name' : meta['name'], + 'description' : desc, + 'mediainfo' : mi_dump, + 'bdinfo' : bd_dump, + 'category_id' : cat_id, + 'type_id' : type_id, + 'resolution_id' : resolution_id, + 'tmdb' : meta['tmdb'], + 'imdb' : meta['imdb_id'].replace('tt', ''), + 'tvdb' : meta['tvdb_id'], + 'mal' : meta['mal_id'], + 'igdb' : 0, + 'anonymous' : anon, + 'stream' : meta['stream'], + 'sd' : meta['sd'], + 'keywords' : meta['keywords'], + 'personal_release' : int(meta.get('personalrelease', False)), + 'internal' : 0, + 'featured' : 0, + 'free' : 0, + 'doubleup' : 0, + 'sticky' : 0, + } + # Internal + if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + data['internal'] = 1 + + if region_id != 0: + data['region_id'] = region_id + if distributor_id != 0: + data['distributor_id'] = distributor_id + if meta.get('category') == "TV": + data['season_number'] = meta.get('season_int', '0') + data['episode_number'] = meta.get('episode_int', '0') + headers = { + 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + } + params = { + 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + } + + if meta['debug'] == False: + response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + try: + console.print(response.json()) + except: + console.print("It may have uploaded, go check") + return + else: + console.print(f"[cyan]Request Data:") + console.print(data) + open_torrent.close() + + async def search_existing(self, meta): + dupes = [] + console.print("[yellow]Searching for existing torrents on site...") + params = { + 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId' : meta['tmdb'], + 'categories[]' : await self.get_cat_id(meta['category']), + 'types[]' : await self.get_type_id(meta['type']), + 'resolutions[]' : await self.get_res_id(meta['resolution']), + 'name' : "" + } + if meta.get('edition', "") != "": + params['name'] = params['name'] + f" {meta['edition']}" + try: + response = requests.get(url=self.search_url, params=params) + response = response.json() + for each in response['data']: + result = [each][0]['attributes']['name'] + # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() + # if difference >= 
0.05: + dupes.append(result) + except: + console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + await asyncio.sleep(5) + + return dupes \ No newline at end of file diff --git a/upload.py b/upload.py index 2ef0f1d55..9aebc13b4 100644 --- a/upload.py +++ b/upload.py @@ -37,6 +37,7 @@ from src.trackers.FNP import FNP from src.trackers.CBR import CBR from src.trackers.UTP import UTP +from src.trackers.SHRI import SHRI import json from pathlib import Path import asyncio @@ -247,12 +248,12 @@ async def do_the_thing(base_dir): ####### Upload to Trackers ####### #################################### common = COMMON(config=config) - api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM','LCD','LST','HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'OTW', 'FNP', 'CBR', 'UTP'] + api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM','LCD','LST','HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'OTW', 'FNP', 'CBR', 'UTP','SHRI'] http_trackers = ['HDB', 'TTG', 'FL', 'PTER', 'HDT', 'MTV'] tracker_class_map = { 'BLU' : BLU, 'BHD': BHD, 'AITHER' : AITHER, 'STC' : STC, 'R4E' : R4E, 'THR' : THR, 'STT' : STT, 'HP' : HP, 'PTP' : PTP, 'RF' : RF, 'SN' : SN, 'ACM' : ACM, 'HDB' : HDB, 'LCD': LCD, 'TTG' : TTG, 'LST' : LST, 'HUNO': HUNO, 'FL' : FL, 'LT' : LT, 'NBL' : NBL, 'ANT' : ANT, 'PTER': PTER, 'JPTV' : JPTV, - 'TL' : TL, 'TDC' : TDC, 'HDT' : HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF':RTF, 'OTW': OTW, 'FNP': FNP, 'CBR': CBR, 'UTP': UTP} + 'TL' : TL, 'TDC' : TDC, 'HDT' : HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF':RTF, 'OTW': OTW, 'FNP': FNP, 'CBR': CBR, 'UTP': UTP, 'SHRI': SHRI} for tracker in trackers: if meta['name'].endswith('DUPE?'): From fe21811c0a74d094ec51be19117b03c19605fc4e Mon Sep 17 00:00:00 2001 From: tiberio87 Date: Tue, 27 Aug 2024 11:00:05 +0200 Subject: [PATCH 103/741] add SHRI tracker --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 076b66d09..4f32372b0 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ A simple tool to take the work out of uploading. - Can re-use existing torrents instead of hashing new - Generates proper name for your upload using Mediainfo/BDInfo and TMDb/IMDb conforming to site rules - Checks for existing releases already on site - - Uploads to PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN/RTF/OTW/FNP/CBR/UTP + - Uploads to PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN/RTF/OTW/FNP/CBR/UTP/SHRI - Adds to your client with fast resume, seeding instantly (rtorrent/qbittorrent/deluge/watch folder) - ALL WITH MINIMAL INPUT! 
- Currently works with .mkv/.mp4/Blu-ray/DVD/HD-DVDs From 70b0bd861520adaa78b377d0c43cd7d5a8dc2a0a Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 27 Aug 2024 19:20:22 +1000 Subject: [PATCH 104/741] Attempt fix UNIT3D anime TV integer issue --- src/prep.py | 90 ++++++++++++++++++++--------------------------------- 1 file changed, 34 insertions(+), 56 deletions(-) diff --git a/src/prep.py b/src/prep.py index b1a15673d..dd422656a 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2581,23 +2581,16 @@ async def get_season_episode(self, video, meta): else: #If Anime parsed = anitopy.parse(Path(video).name) - # romaji, mal_id, eng_title, seasonYear, anilist_episodes = self.get_romaji(guessit(parsed['anime_title'], {"excludes" : ["country", "language"]})['title']) romaji, mal_id, eng_title, seasonYear, anilist_episodes = self.get_romaji(parsed['anime_title'], meta.get('mal', None)) if mal_id: meta['mal_id'] = mal_id - if meta.get('tmdb_manual', None) == None: + if meta.get('tmdb_manual', None) is None: year = parsed.get('anime_year', str(seasonYear)) - meta = await self.get_tmdb_id(guessit(parsed['anime_title'], {"excludes" : ["country", "language"]})['title'], year, meta, meta['category']) + meta = await self.get_tmdb_id(guessit(parsed['anime_title'], {"excludes": ["country", "language"]})['title'], year, meta, meta['category']) meta = await self.tmdb_other_meta(meta) if meta['category'] != "TV": return meta - # meta['title'] = eng_title - # difference = SequenceMatcher(None, eng_title, romaji.lower()).ratio() - # if difference >= 0.8: - # meta['aka'] = "" - # else: - # meta['aka'] = f" AKA {romaji}" tag = parsed.get('release_group', "") if tag != "": meta['tag'] = f"-{tag}" @@ -2606,40 +2599,36 @@ async def get_season_episode(self, video, meta): episodes = parsed.get('episode_number', guessit(video).get('episode', '1')) if not isinstance(episodes, list) and not episodes.isnumeric(): episodes = guessit(video)['episode'] - if type(episodes) == list: - episode = "" - for item in episodes: - ep = (str(item).zfill(2)) - episode += f"E{ep}" - episode_int = episodes[0] + if isinstance(episodes, list): + episode_int = int(episodes[0]) # Always convert to integer + episode = "".join([f"E{str(int(item)).zfill(2)}" for item in episodes]) else: - episode_int = str(int(episodes)) - episode = f"E{str(int(episodes)).zfill(2)}" + episode_int = int(episodes) # Convert to integer + episode = f"E{str(episode_int).zfill(2)}" except Exception: episode = "E01" - episode_int = "1" + episode_int = 1 # Ensure it's an integer console.print('[bold yellow]There was an error guessing the episode number. Guessing E01. 
Use [bold green]--episode #[/bold green] to correct if needed') await asyncio.sleep(1.5) else: episode = "" - episode_int = "0" + episode_int = 0 # Ensure it's an integer meta['tv_pack'] = 1 - + try: if meta.get('season_int'): - season = meta.get('season_int') + season_int = int(meta.get('season_int')) # Convert to integer else: - season = parsed.get('anime_season', guessit(video)['season']) - season_int = season - season = f"S{season.zfill(2)}" + season = parsed.get('anime_season', guessit(video).get('season', '1')) + season_int = int(season) # Convert to integer + season = f"S{str(season_int).zfill(2)}" except Exception: try: - if int(episode_int) >= anilist_episodes: + if episode_int >= anilist_episodes: params = { - 'id' : str(meta['tvdb_id']), - 'origin' : 'tvdb', - 'absolute' : str(episode_int), - # 'destination' : 'tvdb' + 'id': str(meta['tvdb_id']), + 'origin': 'tvdb', + 'absolute': str(episode_int), } url = "https://thexem.info/map/single" response = requests.post(url, params=params).json() @@ -2647,15 +2636,14 @@ async def get_season_episode(self, video, meta): raise XEMNotFound if meta['debug']: console.log(f"[cyan]TheXEM Absolute -> Standard[/cyan]\n{response}") - season_int = str(response['data']['scene']['season']) - season = f"S{str(response['data']['scene']['season']).zfill(2)}" + season_int = int(response['data']['scene']['season']) # Convert to integer + season = f"S{str(season_int).zfill(2)}" if len(filelist) == 1: - episode_int = str(response['data']['scene']['episode']) - episode = f"E{str(response['data']['scene']['episode']).zfill(2)}" + episode_int = int(response['data']['scene']['episode']) # Convert to integer + episode = f"E{str(episode_int).zfill(2)}" else: - #Get season from xem name map + season_int = 1 # Default to 1 if error occurs season = "S01" - season_int = "1" names_url = f"https://thexem.info/map/names?origin=tvdb&id={str(meta['tvdb_id'])}" names_response = requests.get(names_url).json() if meta['debug']: @@ -2669,39 +2657,29 @@ async def get_season_episode(self, video, meta): romaji_check = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", romaji.lower().replace(' ', '')) name_check = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", name.lower().replace(' ', '')) diff = SequenceMatcher(None, romaji_check, name_check).ratio() - if romaji_check in name_check: - if diff >= difference: - if season_num != "all": - season_int = season_num - season = f"S{season_num.zfill(2)}" - else: - season_int = "1" - season = "S01" - difference = diff + if romaji_check in name_check and diff >= difference: + season_int = int(season_num) if season_num != "all" else 1 # Convert to integer + season = f"S{str(season_int).zfill(2)}" + difference = diff if lang == "us": for name in names: eng_check = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", eng_title.lower().replace(' ', '')) name_check = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", name.lower().replace(' ', '')) diff = SequenceMatcher(None, eng_check, name_check).ratio() - if eng_check in name_check: - if diff >= difference: - if season_num != "all": - season_int = season_num - season = f"S{season_num.zfill(2)}" - else: - season_int = "1" - season = "S01" - difference = diff + if eng_check in name_check and diff >= difference: + season_int = int(season_num) if season_num != "all" else 1 # Convert to integer + season = f"S{str(season_int).zfill(2)}" + difference = diff else: raise XEMNotFound except Exception: if meta['debug']: console.print_exception() try: - season = guessit(video)['season'] - season_int = season + season = guessit(video).get('season', '1') + season_int = 
int(season) # Convert to integer except Exception: - season_int = "1" + season_int = 1 # Default to 1 if error occurs season = "S01" console.print(f"[bold yellow]{meta['title']} does not exist on thexem, guessing {season}") console.print(f"[bold yellow]If [green]{season}[/green] is incorrect, use --season to correct") From a6e649ed02f3b542771e02506294104591beb88b Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 27 Aug 2024 19:22:16 +1000 Subject: [PATCH 105/741] Docker-image for anime branch --- .github/workflows/docker-image.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index 006845314..d96e23a6c 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -5,7 +5,7 @@ on: branches: - master - develop - - qbit-torrent-check + - anime-integers env: REGISTRY: ghcr.io From 2740cea3f098cc1eefed924ba22949506eb215f3 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 29 Aug 2024 15:35:05 +1000 Subject: [PATCH 106/741] HDB 16MiB piece size limit --- src/trackers/HDB.py | 58 ++++++++++++++++++++++++++++++++++++++------- 1 file changed, 49 insertions(+), 9 deletions(-) diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py index b8eafcb7c..37f9cd799 100644 --- a/src/trackers/HDB.py +++ b/src/trackers/HDB.py @@ -239,23 +239,63 @@ async def upload(self, meta): # Download new .torrent from site hdb_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + torrent = Torrent.read(torrent_path) + + # Check if the piece size exceeds 16 MiB and regenerate the torrent if needed + if torrent.piece_size > 16777216: # 16 MiB in bytes + console.print("[red]Piece size is OVER 16M and does not work on HDB. 
Generating a new .torrent") + + # Import Prep and regenerate the torrent with 16 MiB piece size limit + from src.prep import Prep + prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) + + if meta['filelist'] == 1: + include = [] + exclude = [] + else: + include = ["*.mkv", "*.mp4", "*.ts"] + exclude = ["*.*", "*sample.mkv", "!sample*.*"] + + # Create a new torrent with piece size explicitly set to 16 MiB + new_torrent = prep.CustomTorrent( + path=Path(meta['path']), + trackers=["https://fake.tracker"], + source="L4G", + private=True, + exclude_globs=exclude, # Ensure this is always a list + include_globs=include, # Ensure this is always a list + creation_date=datetime.now(), + comment="Created by L4G's Upload Assistant", + created_by="L4G's Upload Assistant" + ) + + # Explicitly set the piece size and update metainfo + new_torrent.piece_size = 16777216 # 16 MiB in bytes + new_torrent.metainfo['info']['piece length'] = 16777216 # Ensure 'piece length' is set + + # Validate and write the new torrent + new_torrent.validate_piece_size() + new_torrent.generate(callback=prep.torf_cb, interval=5) + new_torrent.write(torrent_path, overwrite=True) + + # Proceed with the upload process with open(torrent_path, 'rb') as torrentFile: if len(meta['filelist']) == 1: torrentFileName = unidecode(os.path.basename(meta['video']).replace(' ', '.')) else: torrentFileName = unidecode(os.path.basename(meta['path']).replace(' ', '.')) files = { - 'file' : (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent") + 'file': (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent") } data = { - 'name' : hdb_name, - 'category' : cat_id, - 'codec' : codec_id, - 'medium' : medium_id, - 'origin' : 0, - 'descr' : hdb_desc.rstrip(), - 'techinfo' : '', - 'tags[]' : hdb_tags, + 'name': hdb_name, + 'category': cat_id, + 'codec': codec_id, + 'medium': medium_id, + 'origin': 0, + 'descr': hdb_desc.rstrip(), + 'techinfo': '', + 'tags[]': hdb_tags, } # If internal, set 1 From e647884e4c3ac0bf95119a1fd6364dc209355fe8 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 29 Aug 2024 15:45:28 +1000 Subject: [PATCH 107/741] Import datetime --- src/trackers/ANT.py | 1 + src/trackers/HDB.py | 1 + src/trackers/PTP.py | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index cd1cd7690..4aeb241e6 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -11,6 +11,7 @@ from pathlib import Path from src.trackers.COMMON import COMMON from src.console import console +from datetime import datetime, date class ANT(): """ diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py index 37f9cd799..06cebce3c 100644 --- a/src/trackers/HDB.py +++ b/src/trackers/HDB.py @@ -12,6 +12,7 @@ from src.bbcode import BBCODE from src.exceptions import * from src.console import console +from datetime import datetime, date class HDB(): diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 7c8ce9f77..b3ca3b75e 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -18,7 +18,7 @@ from src.exceptions import * from src.console import console from torf import Torrent -import datetime +from datetime import datetime, date class PTP(): From 8625f914fcce056b97abda81aeb7db28fc9efb6d Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 29 Aug 2024 15:59:04 +1000 Subject: [PATCH 108/741] fix console error on image host retry --- src/prep.py | 213 ++++++++++++++++++++++++++-------------------------- 1 file changed, 105 insertions(+), 108 deletions(-) 
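The rewrite below stops upload_screens() from recursing into itself whenever a host fails: failover now lives in a single loop, and already-uploaded images are kept. Stripped of the host-specific payloads, the pattern reduces to this sketch (the hosts list and the upload_one helper are hypothetical stand-ins):

    def upload_all(images, hosts):
        # Try every remaining image on the current host; on the first
        # failure fall through to the next host instead of recursing.
        uploaded = []
        host_num = 0
        while host_num < len(hosts):
            failed = False
            for image in images[len(uploaded):]:
                try:
                    uploaded.append(upload_one(hosts[host_num], image))  # hypothetical helper
                except Exception:
                    failed = True
                    break
            if not failed:
                return uploaded
            host_num += 1  # switch hosts, keep what already succeeded
        raise RuntimeError("All image hosts failed")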
diff --git a/src/prep.py b/src/prep.py index dd422656a..26566927f 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2253,122 +2253,124 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i console.print(f"[yellow]Skipping upload because images are already uploaded to {img_host}. Existing images: {len(existing_images)}, Required: {total_screens}") return existing_images, total_screens - # Proceed with uploading images + # Initialize the progress bar outside of the retry loop with Progress( TextColumn("[bold green]Uploading Screens..."), BarColumn(), "[cyan]{task.completed}/{task.total}", TimeRemainingColumn() ) as progress: - upload_task = progress.add_task(f"[green]Uploading Screens to {img_host}...", total=len(image_glob[-screens:])) + while True: + upload_task = progress.add_task(f"[green]Uploading Screens to {img_host}...", total=len(image_glob[-screens:])) - for image in image_glob[-screens:]: - try: - timeout = 60 - if img_host == "ptpimg": - payload = { - 'format': 'json', - 'api_key': self.config['DEFAULT']['ptpimg_api'] - } - files = [('file-upload[0]', open(image, 'rb'))] - headers = {'referer': 'https://ptpimg.me/index.php'} - response = requests.post("https://ptpimg.me/upload.php", headers=headers, data=payload, files=files) - response = response.json() - ptpimg_code = response[0]['code'] - ptpimg_ext = response[0]['ext'] - img_url = f"https://ptpimg.me/{ptpimg_code}.{ptpimg_ext}" - raw_url = img_url - web_url = img_url - elif img_host == "imgbb": - url = "https://api.imgbb.com/1/upload" - data = { - 'key': self.config['DEFAULT']['imgbb_api'], - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - response = requests.post(url, data=data, timeout=timeout) - response = response.json() - img_url = response['data']['image']['url'] - raw_url = img_url - web_url = img_url - elif img_host == "ptscreens": - url = "https://ptscreens.com/api/1/upload" - data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - headers = { - 'X-API-Key': self.config['DEFAULT']['ptscreens_api'], - } - response = requests.post(url, data=data, headers=headers, timeout=timeout) - response = response.json() - if response.get('status_code') != 200: - console.print("[yellow]PT Screens failed, trying next image host") - img_host_num += 1 - return self.upload_screens(meta, screens - i, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=True) - img_url = response['data']['image']['url'] - raw_url = img_url - web_url = img_url - elif img_host == "pixhost": - url = "https://api.pixhost.to/images" - data = { - 'content_type': '0', - 'max_th_size': 350, - } - files = { - 'img': ('file-upload[0]', open(image, 'rb')), - } - response = requests.post(url, data=data, files=files, timeout=timeout) - if response.status_code != 200: - console.print("[yellow]Pixhost failed, trying next image host") - img_host_num += 1 - return self.upload_screens(meta, screens - i, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=True) - response = response.json() - raw_url = response['th_url'].replace('https://t', 'https://img').replace('/thumbs/', '/images/') - img_url = response['th_url'] - web_url = response['show_url'] - elif img_host == "lensdump": - url = "https://lensdump.com/api/1/upload" - data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - headers = { - 'X-API-Key': self.config['DEFAULT']['lensdump_api'], - } - response = requests.post(url, data=data, headers=headers, timeout=timeout) - 
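+                        # (illustrative) a successful ptscreens reply is JSON shaped
+                        # like {"status_code": 200, "data": {"image": {"url": ...}}};
+                        # a non-200 status_code breaks out so the next host is tried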
response = response.json() - if response.get('status_code') != 200: - console.print("[yellow]Lensdump failed, trying next image host") - img_host_num += 1 - return self.upload_screens(meta, screens - i, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=True) - img_url = response['data']['image']['url'] - raw_url = img_url - web_url = response['data']['url_viewer'] - else: - console.print(f"[red]Unsupported image host: {img_host}") - img_host_num += 1 - return self.upload_screens(meta, screens - i, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=True) + for image in image_glob[-screens:]: + try: + timeout = 60 + if img_host == "ptpimg": + payload = { + 'format': 'json', + 'api_key': self.config['DEFAULT']['ptpimg_api'] + } + files = [('file-upload[0]', open(image, 'rb'))] + headers = {'referer': 'https://ptpimg.me/index.php'} + response = requests.post("https://ptpimg.me/upload.php", headers=headers, data=payload, files=files) + response = response.json() + ptpimg_code = response[0]['code'] + ptpimg_ext = response[0]['ext'] + img_url = f"https://ptpimg.me/{ptpimg_code}.{ptpimg_ext}" + raw_url = img_url + web_url = img_url + elif img_host == "imgbb": + url = "https://api.imgbb.com/1/upload" + data = { + 'key': self.config['DEFAULT']['imgbb_api'], + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + response = requests.post(url, data=data, timeout=timeout) + response = response.json() + img_url = response['data']['image']['url'] + raw_url = img_url + web_url = img_url + elif img_host == "ptscreens": + url = "https://ptscreens.com/api/1/upload" + data = { + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + headers = { + 'X-API-Key': self.config['DEFAULT']['ptscreens_api'], + } + response = requests.post(url, data=data, headers=headers, timeout=timeout) + response = response.json() + if response.get('status_code') != 200: + console.print("[yellow]PT Screens failed, trying next image host") + break + img_url = response['data']['image']['url'] + raw_url = img_url + web_url = img_url + elif img_host == "pixhost": + url = "https://api.pixhost.to/images" + data = { + 'content_type': '0', + 'max_th_size': 350, + } + files = { + 'img': ('file-upload[0]', open(image, 'rb')), + } + response = requests.post(url, data=data, files=files, timeout=timeout) + if response.status_code != 200: + console.print("[yellow]Pixhost failed, trying next image host") + break + response = response.json() + raw_url = response['th_url'].replace('https://t', 'https://img').replace('/thumbs/', '/images/') + img_url = response['th_url'] + web_url = response['show_url'] + elif img_host == "lensdump": + url = "https://lensdump.com/api/1/upload" + data = { + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + headers = { + 'X-API-Key': self.config['DEFAULT']['lensdump_api'], + } + response = requests.post(url, data=data, headers=headers, timeout=timeout) + response = response.json() + if response.get('status_code') != 200: + console.print("[yellow]Lensdump failed, trying next image host") + break + img_url = response['data']['image']['url'] + raw_url = img_url + web_url = response['data']['url_viewer'] + else: + console.print(f"[red]Unsupported image host: {img_host}") + break - # Update progress bar and print the result on the same line - progress.console.print(f"[cyan]Uploaded image {i+1}/{total_screens}: {raw_url}", end='\r') + # Update progress bar and print the result on the same line + 
progress.console.print(f"[cyan]Uploaded image {i+1}/{total_screens}: {raw_url}", end='\r') - # Add the image details to the list - image_dict = {'img_url': img_url, 'raw_url': raw_url, 'web_url': web_url} - image_list.append(image_dict) - progress.advance(upload_task) - i += 1 + # Add the image details to the list + image_dict = {'img_url': img_url, 'raw_url': raw_url, 'web_url': web_url} + image_list.append(image_dict) + progress.advance(upload_task) + i += 1 - except Exception as e: - console.print(f"[yellow]Failed to upload {image} to {img_host}. Exception: {str(e)}") - img_host_num += 1 - return self.upload_screens(meta, screens - i, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=True) + except Exception as e: + console.print(f"[yellow]Failed to upload {image} to {img_host}. Exception: {str(e)}") + break - time.sleep(0.5) + time.sleep(0.5) - if i >= total_screens: - break + if i >= total_screens: + return_dict['image_list'] = image_list + console.print(f"\n[cyan]Completed uploading images. Total uploaded: {len(image_list)}") + return image_list, i + + # If we broke out of the loop due to a failure, switch to the next host and retry + img_host_num += 1 + if img_host_num > len(self.config['DEFAULT']) - 1: + console.print(f"[red]All image hosts failed. Unable to complete uploads.") + return image_list, i # Or you could raise an exception if preferred - return_dict['image_list'] = image_list - console.print(f"\n[cyan]Completed uploading images. Total uploaded: {len(image_list)}") - return image_list, i + img_host = self.config['DEFAULT'][f'img_host_{img_host_num}'] async def imgbox_upload(self, chdir, image_glob): os.chdir(chdir) @@ -2387,11 +2389,6 @@ async def imgbox_upload(self, chdir, image_glob): image_list.append(image_dict) return image_list - - - - - async def get_name(self, meta): type = meta.get('type', "") title = meta.get('title',"") From afc05bf7d342463209822aaf63e7be0ce7e37b53 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 29 Aug 2024 17:54:17 +1000 Subject: [PATCH 109/741] Use announce url --- src/trackers/HDB.py | 28 ---------------------------- src/trackers/PTP.py | 2 +- 2 files changed, 1 insertion(+), 29 deletions(-) diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py index 06cebce3c..c8530261f 100644 --- a/src/trackers/HDB.py +++ b/src/trackers/HDB.py @@ -198,11 +198,6 @@ async def edit_name(self, meta): return hdb_name - - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### - async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) @@ -220,22 +215,6 @@ async def upload(self, meta): if "Dual-Audio" in meta['audio'] and meta['is_disc'] not in ("BDMV", "HDDVD", "DVD"): console.print("[bold red]Dual-Audio Encodes are not allowed") return - # FORM - # file : .torent file (needs renaming) - # name : name - # type_category : get_type_category_id - # type_codec : get_type_codec_id - # type_medium : get_type_medium_id - # type_origin : 0 unless internal (1) - # descr : description - # techinfo : mediainfo only, no bdinfo - # tags[] : get_tags - # imdb : imdb link - # tvdb_id : tvdb id - # season : season number - # episode : episode number - # anidb_id - # POST > upload/upload # Download new .torrent from site hdb_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() @@ -316,7 +295,6 @@ async 
def upload(self, meta):
         data['tvdb_episode'] = int(meta.get('episode_int', 1))

         # aniDB
-
         url = "https://hdbits.org/upload/upload"
         # Submit
         if meta['debug']:
@@ -341,7 +319,6 @@ async def upload(self, meta):
             raise UploadException(f"Upload to HDB Failed: result URL {up.url} ({up.status_code}) was not expected", 'red')
         return

-
     async def search_existing(self, meta):
         dupes = []
         console.print("[yellow]Searching for existing torrents on site...")
@@ -370,9 +347,6 @@ async def search_existing(self, meta):

         return dupes

-
-
-
     async def validate_credentials(self, meta):
         vapi = await self.validate_api()
         vcookie = await self.validate_cookies(meta)
@@ -514,8 +488,6 @@ async def hdbimg_upload(self, meta):
         image_bbcode = r.text
         return image_bbcode

-
-
     async def get_info_from_torrent_id(self, hdb_id):
         hdb_imdb = hdb_name = hdb_torrenthash = None
         url = "https://hdbits.org/api/torrents"
diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py
index b3ca3b75e..c35a082e0 100644
--- a/src/trackers/PTP.py
+++ b/src/trackers/PTP.py
@@ -816,7 +816,7 @@ async def upload(self, meta, url, data):
         prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config)
         new_torrent = prep.CustomTorrent(
             path=Path(meta['path']),
-            trackers=["https://fake.tracker"],
+            trackers=[self.announce_url],
             source="L4G",
             private=True,
             exclude_globs=exclude,  # Ensure this is always a list
             include_globs=include,  # Ensure this is always a list

From ed3a4f27e4c008f45ceacaea96219f0d2162c437 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Thu, 29 Aug 2024 21:28:08 +1000
Subject: [PATCH 110/741] Add more debugging in valid torrent check

---
 src/clients.py | 42 +++++++++++++++++++++++++++++++++++-------
 1 file changed, 35 insertions(+), 7 deletions(-)

diff --git a/src/clients.py b/src/clients.py
index dd1dd797b..518babd80 100644
--- a/src/clients.py
+++ b/src/clients.py
@@ -100,61 +100,89 @@ async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client
         valid = False
         wrong_file = False
         err_print = ""
+
+        # Normalize the torrent hash based on the client
         if torrent_client in ('qbit', 'deluge'):
             torrenthash = torrenthash.lower().strip()
             torrent_path = torrent_path.replace(torrenthash.upper(), torrenthash)
         elif torrent_client == 'rtorrent':
             torrenthash = torrenthash.upper().strip()
             torrent_path = torrent_path.replace(torrenthash.upper(), torrenthash)
+
         if meta['debug']:
-            console.log(torrent_path)
+            console.log(f"[DEBUG] Torrent path after normalization: {torrent_path}")
+
+        # Check if torrent file exists
         if os.path.exists(torrent_path):
             torrent = Torrent.read(torrent_path)
+            # Reuse if disc and basename matches or --keep-folder was specified
             if meta.get('is_disc', None) != None or (meta['keep_folder'] and meta['isdir']):
                 torrent_filepath = os.path.commonpath(torrent.files)
                 if os.path.basename(meta['path']) in torrent_filepath:
                     valid = True
+                if meta['debug']:
+                    console.log(f"[DEBUG] Torrent is valid based on disc/basename or keep-folder: {valid}")
+            # If one file, check for folder
             if len(torrent.files) == len(meta['filelist']) == 1:
                 if os.path.basename(torrent.files[0]) == os.path.basename(meta['filelist'][0]):
                     if str(torrent.files[0]) == os.path.basename(torrent.files[0]):
                         valid = True
-                else:
-                    wrong_file = True
+                    else:
+                        wrong_file = True
+                if meta['debug']:
+                    console.log(f"[DEBUG] Single file match status: valid={valid}, wrong_file={wrong_file}")
+            # Check if number of files matches number of videos
             elif len(torrent.files) == len(meta['filelist']):
                 torrent_filepath = os.path.commonpath(torrent.files)
                 actual_filepath = os.path.commonpath(meta['filelist'])
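                # (illustrative) remote_path_map resolves the configured local/remote
                # pair, e.g. a local "/mnt/media" that a dockerised client sees as
                # "/downloads"; the replace() below compares paths from the client's
                # point of view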
                local_path, remote_path = await self.remote_path_map(meta)
+
                if local_path.lower() in meta['path'].lower() and local_path.lower() != remote_path.lower():
                    actual_filepath = torrent_path.replace(local_path, remote_path)
                    actual_filepath = torrent_path.replace(os.sep, '/')
+
                if meta['debug']:
-                    console.log(f"torrent_filepath: {torrent_filepath}")
-                    console.log(f"actual_filepath: {actual_filepath}")
+                    console.log(f"[DEBUG] torrent_filepath: {torrent_filepath}")
+                    console.log(f"[DEBUG] actual_filepath: {actual_filepath}")
+
                if torrent_filepath in actual_filepath:
                    valid = True
+                if meta['debug']:
+                    console.log(f"[DEBUG] Multiple file match status: valid={valid}")
+
        else:
            console.print(f'[bold yellow]{torrent_path} was not found')
+
+        # Additional checks if the torrent is valid so far
        if valid:
            if os.path.exists(torrent_path):
                reuse_torrent = Torrent.read(torrent_path)
-                if (reuse_torrent.pieces >= 7000 and reuse_torrent.piece_size < 8388608) or (reuse_torrent.pieces >= 4000 and reuse_torrent.piece_size < 4194304):  # Allow up to 7k pieces at 8MiB or 4k pieces at 4MiB or less
+                if meta['debug']:
+                    console.log(f"[DEBUG] Checking piece size and count: pieces={reuse_torrent.pieces}, piece_size={reuse_torrent.piece_size}")
+
+                if (reuse_torrent.pieces >= 7000 and reuse_torrent.piece_size < 8388608) or (reuse_torrent.pieces >= 4000 and reuse_torrent.piece_size < 4194304):
                    err_print = "[bold yellow]Too many pieces exist in current hash. REHASHING"
                    valid = False
                elif reuse_torrent.piece_size < 32768:
                    err_print = "[bold yellow]Piece size too small to reuse"
                    valid = False
-                elif wrong_file == True:
+                elif wrong_file:
                    err_print = "[bold red] Provided .torrent has files that were not expected"
                    valid = False
                else:
                    err_print = f'[bold green]REUSING .torrent with infohash: [bold yellow]{torrenthash}'
+                if meta['debug']:
+                    console.log(f"[DEBUG] Final validity after piece checks: valid={valid}")
        else:
            err_print = '[bold yellow]Unwanted Files/Folders Identified'
+
+        # Print the error message if needed
        if print_err:
            console.print(err_print)
+
        return valid, torrent_path

    async def search_qbit_for_torrent(self, meta, client):

From 1e1e1102fb393914ed18af302703db50894f6256 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Fri, 30 Aug 2024 10:51:58 +1000
Subject: [PATCH 111/741] Add torf import for HDB

---
 src/trackers/HDB.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py
index c8530261f..cd7bf52a4 100644
--- a/src/trackers/HDB.py
+++ b/src/trackers/HDB.py
@@ -13,6 +13,7 @@
 from src.bbcode import BBCODE
 from src.exceptions import *
 from src.console import console
 from datetime import datetime, date
+from torf import Torrent

 class HDB():

From 5478c8c1229b60a526b0ca5f1ce986b846c1dea8 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Fri, 30 Aug 2024 10:53:04 +1000
Subject: [PATCH 112/741] Capitalization

---
 src/trackers/HDB.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py
index cd7bf52a4..253d83bb7 100644
--- a/src/trackers/HDB.py
+++ b/src/trackers/HDB.py
@@ -220,7 +220,7 @@ async def upload(self, meta):
         # Download new .torrent from site
         hdb_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read()
         torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent"
-        torrent = Torrent.read(torrent_path)
+        torrent = torrent.read(torrent_path)

         # Check if the piece size exceeds 16 MiB and regenerate the torrent if needed
         if torrent.piece_size > 16777216:  # 16 MiB in bytes
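The lowercased call above is a bug: inside upload(), "torrent = torrent.read(torrent_path)" makes torrent a local name that is read before it is assigned, so it raises UnboundLocalError instead of reaching torf's Torrent class, hence the immediate revert that follows.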
From dc4186d9efbf33e7f4e697933423abdfd677ac74 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Fri, 30 Aug 2024 11:01:02 +1000
Subject: [PATCH 113/741] Revert "Capitalization"

This reverts commit 5478c8c1229b60a526b0ca5f1ce986b846c1dea8.

---
 src/trackers/HDB.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py
index 253d83bb7..cd7bf52a4 100644
--- a/src/trackers/HDB.py
+++ b/src/trackers/HDB.py
@@ -220,7 +220,7 @@ async def upload(self, meta):
         # Download new .torrent from site
         hdb_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read()
         torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent"
-        torrent = torrent.read(torrent_path)
+        torrent = Torrent.read(torrent_path)

         # Check if the piece size exceeds 16 MiB and regenerate the torrent if needed
         if torrent.piece_size > 16777216:  # 16 MiB in bytes

From 63c05e8ea24d182e212a2b2acfa7d17ba2e5f367 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Fri, 30 Aug 2024 20:26:10 +1000
Subject: [PATCH 114/741] Add linter to master

---
 .flake8                    |  2 ++
 .github/workflows/.flake8  |  2 ++
 .github/workflows/lint.yml | 32 ++++++++++++++++++++++++++++++++
 3 files changed, 36 insertions(+)
 create mode 100644 .flake8
 create mode 100644 .github/workflows/.flake8
 create mode 100644 .github/workflows/lint.yml

diff --git a/.flake8 b/.flake8
new file mode 100644
index 000000000..0cb611f43
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,2 @@
+[flake8]
+max-line-length = 220
\ No newline at end of file
diff --git a/.github/workflows/.flake8 b/.github/workflows/.flake8
new file mode 100644
index 000000000..0cb611f43
--- /dev/null
+++ b/.github/workflows/.flake8
@@ -0,0 +1,2 @@
+[flake8]
+max-line-length = 220
\ No newline at end of file
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
new file mode 100644
index 000000000..5105bd60c
--- /dev/null
+++ b/.github/workflows/lint.yml
@@ -0,0 +1,32 @@
+name: Lint
+
+on:
+  push:
+    branches:
+      - master
+      - develop
+  pull_request:
+    branches:
+      - master
+  workflow_dispatch:
+
+jobs:
+  lint:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v3
+
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.x'
+
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install flake8
+
+      - name: Run linter
+        run: flake8 .
\ No newline at end of file
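With the config above in place, the workflow's check is reproducible locally: running "pip install flake8" and then "flake8 ." from the repository root picks up the committed .flake8 and its 220-character max-line-length.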
From ccdca89163f082d2d7238fff8f0c2c45bede8648 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Sat, 31 Aug 2024 11:41:41 +1000
Subject: [PATCH 115/741] Wrong meta for HDB torrent creation

---
 src/trackers/HDB.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py
index cd7bf52a4..f1682b71f 100644
--- a/src/trackers/HDB.py
+++ b/src/trackers/HDB.py
@@ -230,7 +230,7 @@ async def upload(self, meta):
             from src.prep import Prep
             prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config)

-            if meta['filelist'] == 1:
+            if meta['is_disc'] == 1:
                 include = []
                 exclude = []
             else:
                 include = ["*.mkv", "*.mp4", "*.ts"]
                 exclude = ["*.*", "*sample.mkv", "!sample*.*"]

From 04d46058e4fc3e65629a89497685b5566caab5d2 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Sat, 31 Aug 2024 11:53:19 +1000
Subject: [PATCH 116/741] Remove linter on master

---
 .github/workflows/lint.yml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 5105bd60c..5527ec6b5 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -3,7 +3,6 @@ name: Lint
 on:
   push:
     branches:
-      - master
       - develop
   pull_request:
     branches:
       - master
   workflow_dispatch:

From 009f3eecef1dc06ac62c3a45149364c4c1c6142d Mon Sep 17 00:00:00 2001
From: Audionut
Date: Sat, 31 Aug 2024 15:36:24 +1000
Subject: [PATCH 117/741] Update bdinfocli

---
 bin/BDInfo/BDInfo.exe                      | Bin 676864 -> 685568 bytes
 bin/BDInfo/System.Resources.Extensions.dll | Bin 0 -> 137376 bytes
 2 files changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 bin/BDInfo/System.Resources.Extensions.dll

diff --git a/bin/BDInfo/BDInfo.exe b/bin/BDInfo/BDInfo.exe
index e2462867eb63eba301d627c835e8b0e11c7eb69d..82e0a6f8615daa2a9f2e64672173e730673caf2a 100644
GIT binary patch
delta 155954
[base85-encoded binary delta omitted]
zfipDB5&i)Q!%C@!7ZScGaFK>f3BM(9iH3^_9~8KBvYvm=BK@#X?4`R^4?os|rBZ=w zG^{7g3S6gQh;W&}4I2KV5)eO=frxI@@b`q%1!57_n}6;lZ4$agTgBb+SSl5`L&I+n z{u#kxrBuVu5$+XupN4l6en;Q~8g3{2g20Cf)%^21(vJ##R9hV(yjkEb4c8Ek2z*Av zvkA`^xJSd|2u~EaSHq(TX9(OcQ0AXW(uC00wABW}u)qTvh6(=)ft6AX{}};%S)jk& zE4$YSzb7!F;m-)aB(PRN&Od)ZdXLa%ZS^t2EdnQN_z>Z{1!Ae+ttIymt`;~$!w(Ri zB5=+XWP!dnGy*6{5Lz`VdM8vcRsVu3p}+(&qZz&kX2mT<1X`!sYO zC!Hen0Zs9<(pV}L_^^ie68;MUU8#onF>0)o3f!gPX2Kr|d`8173BMt5kA@dJq@NPH zSJQI|cL?0C;R%Gaj`3c@*@VLa4`|p)c(%Z|HEbdr5a{plN;y)&`DaRKMALtT0b>Gd zHT)xCnZRZZe@^%s1m5p8e2(zP0@E5kLHJFG+zhbJ+zBF=b)f(4%v~&+t3+-R`2msG zON1%*9wA>6`C5rQDC+POn$8e;rpVPIH;ep$NbJbM&KE>JCGrJNIxu)uG*wAj!rn0? z_L?C#h{R?#)DMV!ROHhlUljSeNOP*V6**nxfXItQ=0t82dArE_wRAdjUlPqrBKM2@ zhe#X_M!@(53dk8Ej}y66NhBz+(FS)r5H@F z$YVvW5_z}CFNoYD5=UiV^G%V#G}VnFJ4+ecOXR(xen{juL_RHYugF(LzA4h|rvKHBJZ6X-D3MD=pHTk@t%Hg2-JWpA-3l$X7)k5P48!xQD*gmdFlKD>fmkslO!zsQG0J}z>%$d^U_QKY{Q^N-G4JswE0$c)GnL@pJ1 zsmPqjjV1C{QSTIaugC{QJ}vT1k&zkn`B;(5M6MS(D)LTBC--U5>=o&sNe5|>b48vi za+SzyM1D}@eImag@@bJTi2Sw410o}{=v%$WPLVT29xL)xk zFNl0uj!MLr_(X_3Dc`Ig9F zhQYLoOo}{Ki-(i+%57^kILu70o z9b`nFByv#Xu*j=KUMKPcBJURYfXJ_i+$Hh_k^c}Go=-m;DV^M8(aaLLMC1yQIgz)P z$h$?oPo#f=cpSM$=kuyXNh+HCanaE2;-mIh}kK0AwC-Om&kBEFq{-hT=d@K>oI+6E_{EEmYMZPHVHIZ+M zv=-4-jmU{2r;8jAd8Wu=kynYlS>z6pcZ+;b3+!Y~!{_&ddr2z_i) zhK@fkrq^nJX^P*Wx(AQW-OA5&dx_ua`s6?HIY}B{OiE0}`DWTS^GsG}znT9io^6jC z_ylMf{kRiaoH{dyKg`QiWI-9zZTggjwFK#An5Z`VhsDvx!Z&FsH%=CwD{-^CME%1O^&YD0hLfup!5@i%t9hZs z){m)fdo|>r`ei`(=hfv0k<|}Vz%7Fkf+oPwJxlytQCp&ce8AW_dKV(o z$)C+VXd1>cRBwN6;bSnC-;cbMjq6l@9+kxbNnQj`(hJJv27+~ND?*DA3;1men7Z`u ze;rt^TF`LV^5;JV4|@WBt3ZWvKJ3rmPg?2e3)Gg&Z>#yFNmPasZ8H~y5))>&gzD!w zXp}~X-wvYm4A43Fb{%VaqOs?M<*B|Kgu6Mko%uAO+&n#VAe5Lq=Wcq@wh3}buCb27 z9Cg%~f+w=X_&xq`-I)9-cXN(|HiMdGIH9(CU_1x)9)8?QWehkZp9gkdkipFqhEG21 zEB9|?1}0H*R%Y5^_}jRHBvXGF_R}^B?+{N=zpc#g5SlaI^Xq6GZ4K7@&>whVA{AEV z(ZetrxRqg_Ki>8e#M_RDQ`d~Q8g>vMey?d$r9chOqU0`aC1Yv)5cOB2hku;t(@;ww-*%Vh+>_p5$hy`N_Xlf48{$fB)a6gUZe_gbj%{WoJ9 zP=KK^vr4DdGvkA)TZbQyTpXJr&PCw794tC?r` z*>y0DWWI&i;bP6pp{aD6B)ThqsN|=R@sk}jU=yizCs0R+?);a9%-JBzcP?RIcS znm@Z99^h7$nKglAmP&lTpGR-$PfxO`MDqOlnm@F8+Z`d*&q2ZwGAG>iO~f^DFm|$z zDq@B&V#EDCxE=^*=E2yB+N`sTRl#U1{_@!#C*d=U6^G}oPx2;QerV^y>kBW2$0hwu z1mE}LyKbE|QGe$9C@_>b^G2!&nw|UW&{pO)nprAW0=5&734^JABpP#vaQ~IC5YRc= z-H3~2@;i`qi!huU)Z7NmvFKOvVQ_o?4rr|WCm{z5RFiS)VM)0T7O2KYtm4D0iJoc* z1YMPXmQ!s9G%OII+Mgy}`0`K0#E z{w!(;HLQHR*1uf}(~B$%)($!Cc~n~+TdMH`;8IhMrVG?ie{itC2foD(9bO$mV#V$A z3mAN1PugAE^$5PN%MMA`;r^r`y;OEwS^)?0GlD~f3yaa+6CJ5FQ*d{(YN%tiMl~fj zY2Sk19+2eW+R7s&5A3kisdhN~TKF(B2hELNGmlj>2bhCc;cXVh5o%ijogWNTE*Adb zYS@Mg|8zAUqvjxlna7xg8=^L#0PsOGDwagEBu{bILxFDjWF6{sjANv6sNYt&AC;^c z58(y`HQ|`!PB?I1 zfJ``0PX2V%)i~72JR*{s=`v)Mnl(glg`0Xuz->8t!V$rZ7&7IMnFZ9GOLbKR8{%;* zHu7%7qyiW23I)r)hk!pP>w`6O5>g}o3K$5HVGesw_j(5DWAV*;Smk5(8Yp=Qsa2_N zRp_wP#RdHJoy@mzeg?Pm^0xE4pbX`oK&<;;hu@+6H=&Hp|031xqKOafmN~65;+?09 zF%9LfaYhBG+e1D$4Qdt$-LME&Yj!ZRt_DKTv0L}F#4+B2^>qIs)l z{94_SoGZVb%(F06K^{NwE!vD1n4C}>CS*q8TU4q^d8nbV3%bmmw3MeZHXm1+GG?^J zbc*eEF{74tlub#t4<1&cHlgP>%JG+3T8Ak=E@4G_TP}>R++BE*4n*H9`tN9cljxt) zdR)5Gy88LTcL6e0Dt%Oo+f|CIHXF*5t0A%>G>)+`JB7c&I28!hib4&RuA4l}@oT*@ zekwFF8#N-k5gb0T%D6T;wie!kN1;4rd;UI1{ESM0Pmg`vWqcyf{w`W)us}s-Ni8zc zO?d-y;%QnEedC4He~vBcF;%0n415VyF4SCj9ws68h4zPBkH}yoL>`dG=40N%5U8P-VmXxm zA+)W@0!mr@TQ6^$$x)U%jg;fA6GF;nf$yrWy6qiAS>B$LnB9hzg~Wv9CTQByWe8sx zUJHGRhwb@2@FR&Cc(_1SNSat|-c-yZkw0vpnHH+0(EGp1W|+qW*grdh@Ni{PXegBb z8T35}l26>Hg!`(2qe11D9h^=T#vqu*qLI{zq5O-`An`vX!_6wVGK@;Ql8J7|bxWb` zcfgq5#fP3R@Oemy+7Z~oMbUP!_w%^1WhjbVptb%mBN!_%hN1k+B7Xrn@CGskU1?S7 
zcon)`h1*^{C2xH3)VLExn!KmSRVcU(WYL)jHT7q^$%m})}j&~=_hv7QciLltYqn3MiKT2ZbtQ^Yzw zi1id;ohn)vFGZ)j!6sBQc7X?0L1g{}p`wGkUf8WM&oYQ6<4(2II&^E2U_@1mU^i=o`Jvm3I4+O``MpB9=X-? z&#>H!>dEK7tDa&M%Kr*ax2wG#O#)FWL2Y_jC%YMjrAFBs0HX5SUfrhh z(7MgIux|5->|x?qf_#qs0A26@cy*iB`WZaZj`Sgp2SolAa^O{zP%K)Lc|XbnKYU)Z zJ&MbB@I?iyd`OH#qG6A;7?Co1@z;z$wx7(6a48kIehz-Lhvgt1W~mnLwkVcV#u*}R zmm25QdMRlkV@*mdchN4cZhwO=+o;J?8C!^DQO}c-szNiwmk(u8;u}I7--o{|@pqn4 zyl7vIvkT7omwAl-NHhv>GK|lS=~XSF?Ef%ntiYnmY#p<~8kWI;>kS1LnZg+XTfm>#QBu>MPtGz;ZP zMpkh;b`$Ds>QY5wJu49k%@|0jE4t0zi~%={m)G~gNqZiPG`Jm2ImGRP(YO#hE>XBc z@vdq&p*;aN^=`&+3Z|O;x8QSZ-Z^1J-t7TTqFUxivwsz$Qs!DOiPCu**886oHD^)L zdSx7Ku<>6W9l0O(ow1`HEKmdXARe<3Wz}&4JOt3&>MCMAwNQN?sH?(7=uM})oh&2Q z+w4vsdZKoNBUaPsCTa8^`&~pRSoj=l+4brs+ZwxGUe~)qP)J|HpgHwpgzaF~MtP*_ zT-Gj2xcKcqx!M)x4S9Ixpk#Q#RztfaLqY#gfoVd~F;Xtu?RC7c`iU`&NvP7b%SwoA z8T6C%A^$8Ss*}o*^aK>i|Hs~&$H`SydEiym)vvm%*Voml>Mf}vNr$4VJ4q)bbP`B9 z>EAao}{RtSW^3$Mdsisk_#;}UkI5m6x^u7jwE%E%&a1EPY1h^PS(6*s_T7-cm4 zzTb22eM@x*aAtnLzkc1HdhgtG&pr3t?c8(MM~vN`9c30V{fuehk{VlUOC?q`>=^rF2(Hq)syr`JYMmR#W-dSrKr2iP)v{#pj(LPW*|h#mn4u0 zyK&Wvlf3!D6ke=K`Nnnm$s5lb#rP;W&?P?IWjeIKo^8l(Ja4_C`6~<_N)e{P6clD8 z8hx6lCZKtPG>B=vr}%ts0vh3y+pMFKX2@vN^4=GfQ2GTkC0TqgGj&y zwt3$X#*koK-TIu4k_D>$$JMQ?C!ndUTW3!|Q(3o;oPg$^ty?Ue1=N2*X0y%={MMj? z6c(=m?I?@c8+cs2yQR=8lPPq?YYp+hZ4=O%=aWV~OycyfH-T(R*Oo1hhK& zAzH-=yfn)P5%iyI0Xk&R0=&1^1pbd&0Hcq#0594E`VKdmw6qWjWvj^c&wfiCDujJC zCakufW^d9KUTIB5_a9hWUUCjT&!_^x&Ord0G^Gw>+i(sB*Ds?<5pP#>iiA*hc1klh zA9u8?mkj*|caJ8@oW16?>N1Uut8>+@zIoK5?o4&|ko`fWCbaM!FHh!^GPo3)1H)kR zNTzFmimateEoM%(C##hgT(ZoGOnb80M#4GI-r*>Yvzww{@iAo4Sh)E<9LD$O%HN?A z`1$n;V(EzZi91o55QvMV3paD|uC1kuTTw3iZI^c0>o*&I zZ}poB8vc6lOL?*r0Jj4-r)2ei?i9K8H(kogExhScqg;=hF5y#U=y-4+Emlxi;%d>V zZ-1)_x7}i~Cn;CmTGgW);8G-(6t(4ChkQ57M%M%G!?vRuvKYAmW@6gG zQ9v-g&kJkkX3q;35cT#K?3MudegqrkDrjN^>!0b-b^&i9*dgE;L9PcUMs6lJSwIYQ z)1y-ayoJNGQPgixe2C}*LD_+WRiAH!w>_~1J7@@-IPsq*c3jvR>ESzn`8zGWLA;P=C0ln=lkAk0TijlebHJM4|# zOkARP48o7zr2OSorpM6e7*T4~dh`au<&Wa2cssX3jD&1XsBu0FxJFjD6Xi}w+>gup z#W_ls0$KA}rEAKJg28XBdL=(_GU7%$kwO+myqSxlABsJ-pc*~y_R`Rm0)ZP-8jI7tf)mk-@ z=o|n3>f{@TUTw3-g_Q_z2Jga+Uw*aifc3s4kbK`vo%uKZ?bQkqCsjMpnAVL?yfWMS zfy#YfO73Jl4xZsH8j2*ekr9E81x+N-@#6xKBos(B(-k^55@oiFZm&Px){OjsNCT>MaI*hSVKoik;23R%K*o<8W~gB3%)=nQ zD=gduEnNfe_pckciQ~-bOnu;s`O;)3;$fPxj)M=`X~&G#Op|*rQern2m!UW^%~f`@ z1G02)nh`zE{3MDxXN4&{mdhzph`t8v?Y zZXKq!s;{46SL3q-EEk<$nT(yu&Pv-E>{NZjTSuqaCiKg}wVBos3%BPqKX>1)K3*(mw0^RHXwADCzIcHW+n z8(#7=eu^D{?rhQtI;5)!s=BUCtXW)EMJ~66;C2mr^ymnAbcW!r5Ii}6!5OPN*q}J0 zv_i#q5}VWh_Wr%trcV3J-d`E|7`Uf3yN~+$&MTM9#|~jbh4v3>u@dbW z5Oan-!)}}Nscfbg;eCL8fY1AYx5Zmg*#{`rMX)Z~MZ2A>dqS)|D^s#dKJU`o;;pFc zlH%P1-aU4Y-9g^HA>LM|&+hYC_f5#!tIQKcX7?%Hz2Mzz_u8H0J+rhrl-;xJSw8Ps z6Y{QPt*Go-inZt(+nb3PX4*6Dt~qyRGqWQx^xOS@4E=8_21RA}>lnIFOYGT*V75Km zo;>GH3}74Zs~;Y2!}l+$?Yi8MC*R(vS+e1E)M$LH#i&23@=Q9LnKOWyQ!c*Yd*;{Y z22qd1fhA2@z^LXDeP%BGv7C7xb~6#|B-p%hh;SF+DGHnna0q&6u5$;q^nuin9vJ?O z%5^uR~8N3qP7+;f6Y(G!r+rY%j@K62WoTL-3(^NAZ*zCOiU z4S{@%RT->pv3Gt*ZR+FqrM`kO^k&H3nyyMFmvaMWZs;Vt=K8wi^7<$S-}J&N|FCRU zLm*)>fcJ^`mEMu4l1GR=vA6nt84URQnMhy;|7#J+Z-E*7Jswrbyi zS^`+X#rM+zq3z+DKy_+%z_a9Z0Cv@mpdx5ToqQ2UJJRZdKpzG_W+{!jZhma2UKlo1 zwI{U!rluJz**c3a=V19UsT~c$w|z9GQGK81m@3Z%Z&I@pmTbhgo5Y`D_ZCa|Fqw*X zh96n8p2OHNkF)E|LXDl{cJp=Y(KVja}0))8xC)HN;vdP+X3G2QzCoFl+ z5Aic8IHgvGseQB25KDQL6g%nMjTmZ03)VWH1&nVB^232J={h-UFy}{xRt3jucV!49 z1#>9rg(Yf0*)KoW-!E@rCvRB(VGzpUGi-Z(mf29P<`3~9N)Df}y&qLbZLeH-KUKDE zISwinCm=%vc1rr4M4592ebcm&5jwiohYwukM$yDLv4CY{>}^-`lPE*Z57{ZH+0H$X zr*twrWi=F@QgZm42k&{c)qeE-exp2Dc}RIYZ&j^aWKCJNMsQ+$w?nGj(5mT5{( 
z)hRT_PHB~8_?Dqhjm30M&*6nU5oskCYJyw{G4f}5Jsd$8I33#B6ce_oN-5L^#k62C z{G(`!z*QaDORu-o58|`pF~A#{NojfYH`@3}mVl!Q4cBJ)kMdz-vpe$N>RoI$_0O}@ zS{=!&yo02CRNyZlo{s@^r7`ef*)Et*xK_dF27wn{1fxXl^5;H7`TDP2Cof1>=V_FD zEgA4T8CjjY=&doMg07cu1;ZEfr%CNgP<^zlzKp$>49l1HB{(YR`tp^^m#t1UJr9SdSwubxgP-vda!r;ENrKPFZO0YRm?gBYFi7&yK&DfJSeuAl$z5&uj8SVo%0Pc%z<~V_)UUs zns=MxohyC|Em$$n?ai+7nfENkUKkpIr{nii{1&1?$|NXY;y1zl2fSCn5yo54m*)d_ z5vF^>_#eW(1N6(RDE*_71gQhN$O95 z(4VBnsSvE{BvW|98cOO@SR8c7(I_>E-CI`gip8}0Qa3tLlwZ0V!J)2_e50WS$G+tl z6NuQO^!-3}vxoW_s<}_r+-nfBx0w#~;IalI2T&lZbkxgLhlFF=K}W4h&N&b48z4=U z9H@H>X{w6bjWku^19Uy{B8zc#Y|gunQMsFl)yE!q)WE-p&6%X;ym7d_5wh|SW3d{| zx3^i1!-^?5wjIc?QkOY|>8&kRCB}xWc>h6Yo5`v&pZ$qRJqk)_cMjz1#GlXpL=OIh zfF1jq70<-CjIFGRPxCGp8a@>dLh94+43m(4t9UAEd|Dmr6}Zkykve4wjdKRP-S>Cu ztH63G%Qzf(2~PQ_nN}DI2b?MK^I7~haxfzGEJ>A-`nFVj!22IW^)Hy%?8S+vedLBt zIkUG?7F1ZSabb5`Yzr%V)!eaDYT}c<^_2GiK2P4Rp!nqZJ|s<{Y4R_Dqu0w4IC{N* z;s~gN*xmTugCDkq#lDK)gZROF(Ia>dJS7f$5y##M4A3wyM+UM=J0QpcruSm=HYRWe z`!YaYA~ZZZ5{NnYq3eqc;x`{ZF4-K0-wAKycN}PVY6;WNvC?sg!tHp^oSu*)+B2sl zWQ;a5U7E|Iy=^`1JsmxrV?VErcQxhfd-4q$neEv<)|!s*SH!@(W9X!wRU8ma*{H^F zLqN6x3C0|z*W+D1c5XV}?!h?2%BH%u0Lla_QC!wO00d`=eiMJam-Pp-BQ~87rzOv< z^Y5s!R?k&MXt}hzixaOrtO^63b#2$fSlHtI6lvaLK@C;_hWA)tc?)F88Tia{)hHo+ zCVCFSjj~wPP=`KFU#OIrrRvZb7U~ejW^iAIr&`@V4bUe1zK9?Cjo2uD z*jXBT89&%LZ~0@bZ@>=*4KCouNDbU?N7zMI;bE!6{@)eww|LH+Ht7_bdr;lkbD(`7U_%TJD45^Y`GX z8EWsD$1tY#d}cU+Y(Pks^)rI(Ty&Sb3ybcg=i%`5!e#vd`i9DS1KiN1 zv0eC$;CC*5)GpMqw0_>|_ZVoNz)z~(j{vBmp1EF^^NolfI9^-JRUA;qxLOGA3GfHCEdqRec;sRD&j&eQsO zG}|c%7WP>y+>70iR7+?m2YgD+bKBZok3dDoL&S;l2qJ5ni!`1@c~ly4M33n9xk&6r zADxh?uHF{dSe))sNxzPnqFPLT`rN)mnKNFzX^ih3r=z0Q31O=Io&<9DKE$7{w`x>G zD*K$rnaj;q)^cFUrrU>|jbv(}ltckj`E5fnsl|YV#o?X>A=oC=ih!zbvIYP=29L>6 zJ^-^NNZka?{2$$Zu<~GLOTB;x^IIw%V5VEGET~k<%pUMe&gcbW-ykKe?}#q6?gkbbNpxM{T%9U78mT(l4^iyuQn{JwRQt@< zRM*TYrK07!ei4*9EO)0DI8i=V_^yP^&vnc_R-2fYLr9k3dqWh|Y=tY}F|$Kcaqev< zRtdz1RvUxd;0Hk|u&Mk9n#y@haB(M$JPRd$88~J8_ty!$1pySUz}7deQYzh<-P%8t z-HHo-B(irhGE6dYZm$=YUhpj{woWs>SC~@YriS(AlEG^S*Lrfl+_}3RqKzNa`-Dtt zVu?hDeo8`LfzXvLBq8&hy@v2*A-r-zLMhd`$EmEXuYoaMjT($u1+}5IS-+>?&62WK z_-zO!RR~h(ZfwHg0|kg2LG)6c*&OGiCE;b29U znPWS|IjLS_d}Lah?K(+nM6gXo~$gVF<<9U||cG zY=t*q@|FkyUqkpBu1UecI7a!9Y_(zuG?OeMk-Op0NXR@p8%06RI5ez!hDT65%1>(p z`Uctn;TU;#HrfPZ&My8}W`;uc&sSyaxz7q0Qfe*)bF))YanQdcR#x;c!mIxb72d>= z6739f3*>GK9)SAZ!U;W^ zto-1*u1(qgWUir<$Tpa2s?uqeFSk}XO%Qs?r?7uYjr5O4;`yg{l;40tOBBDyt>K9x zMuwFL~9R`sL(>Eu&rt5DR@FvlTrC(P9-}(BnM0!hEPuf#h#iQ zNFImc$Lu#YVPp}`k47TS_8%NVsW>DCRI1xVMA&6Y#%w>L5ZsS|$~w>BY54t^M3^f< zC<-C>$obMZHT12;qke$L+%Zj^hNGd@IR*K)paZBx`5s306QFx9$z`zOj4Gw}5@TNw zuAr+^x0<2lv0!9^J<|ZIy+Pp{aS2GHGXQpd*o~InLyu-K6RW);Jdq7f;=wEE>cSp& zdI3r;Hu6&hqe`!p?bgoEn10{22acBNT5jW2dyb!I*|c=c*5Y%(tB^R8;@*ski3O|Y zfs|?s7&XL)UodcSJ*usPCTBaEba%>V-5%y@hcctB&bAAOk(7(nD8v~@yN)3Gph*<+6Ov?EU zP%6MUw1nT|jUc)VSoghW&slcU1?`L!`+9p*Sng{9{->cNIE*3NKbR;}1LPc}5sHIw z8>I&RGxOjRH#J`>`>r5TE05V90BC8iC^<{a(VOjQ1wA?@2`B zkCU~N@y1vTHX$qpp4Dl=p)O{EC9R4FMWsupZpeVgg`ll+{3fhgS6VKNFw0k-6uvoF zAvHuFvXIJZuJfBw)9w5YX>%Vz3sA^FZ;LT4L0hU)vJGV#B_7v;(DqadstQ_=hZU!w z2~`y*YC<@Tf$?0jvI$kI4HR=yWt_Fb0b8`7Y6XYut-%E~wX`BtzQu&_5QL7(AjE}& zVE1yU^6M~xzt#{bIr9KXvbLq*Fk7@BE5t-mc%g=hCWI>3ovqp3jk_aucemNqySv+M zYqxGvDOHx(d7@gAUe%kYE*^}m>giHfm9|B>!j-WnwzNy?numqVSPVT?3_Sfx$eEf$ z;l4KI#@RtEPui&xP)Oo?Y3Ji7{wP25tG-lX=am)E5u)s)r`{kZV zc2c!B54WMB!Bv|ECrvsUm!N{U&@%;?iQ-D_E3S&DKE?jVi^e6mATIPu0cN7OYTi~{ zuocwG=s~gkYSp3>vE8x!*J6_}v|%|k#PhvzY3frFN>8x8)AM2}0!i_c%x@YM?1haA zI6-GesOwHCb>Mskq_eOuf*q?urDRi@Y&M7HVA61dIK4%6BzOwmN)%!GK%YsJe*%jc zG&ZWZ+cWl#u<|)kN7D!z?2$8jRTV>=ml|vDj_2DL$wYCCt>}(-VPH?wN%A<7I)w_- 
zl!ic*Q>9583zqX|M6DAtQ5;l1{E2WkR+o~RVDAai(deh{r^d+1>r#nh)d;xLaMS*IY8Y_Zb4_J=4g^JY^3-n z1l4)$_m#QfS?5D?&(BjJeBpf<1|bYi*UYc#=>VX2I8=fC`{ z89552r#;0Qw#00+oWBYQ&2!$$h3fh@Ah77zR(y><%E^h_BM=e)bXE52SGK=BQT~|J z0*}=oQT}YGN{ElD5z52V4%G!e28I(=+H61eBYo26!|O%qGK{2#`?-%I?Z+|OPx(Ut zZLv?)#T3+gVf%>)Te=imaG9?|gc3Jx91~QaS(w-OxX-3XIkJwVUyo$Yl*~f6=e#bN zay`h|(O7dqE)E!9R&FlRNtZHvN+n|u(FiKxl4$xV79krcS7{?*hMmf0vemP*ibJIL zBUCS%3*XWeV3GL0$1Xo0KFRw#q#5N1Eiv*Bf^4xG920Oau{R6&2Eh*o1T5IZh!ET& zAXII7l>J#^1gF5$qqhbihJpqQ^V@(QMHK}s;iwUHxZ4A8cL3fIfOiTg0<)?mMntr` z0xU~ICxa!bLDm@^n<&naxJNNREohM#N*xpe{%ZiIKb$JKog^OGjh!|T zxAJ0sX)0z?I27Do(g_6<0u_vDQNfA|!7|D>Kp`?dan$DXq;H2a$~Ff#M50V zmf+G_ED6xD3Lm=BauH$VA`G$ciipKh&o}Ap5=VW0|8V9I=N1Dg=*~K}$WB-lev7!J zUSwGR!qrFlN#SFNp}h)Fx3L16O)Q+tWLSs;>9qQk`AIr?!d)?AD|HmJ5LT*MCLvq# zLuj_95OJq7$rm=84JVah+!aAm=~i-5q_CXyiO7|8vOZJKy_7su)u5#Lw9a2TjD9Cv z=w!LSh&mY-gFfzUff&M~qB;sLr!z(Z-0#Qf>E21zM|z^%p=2ZF&b;!=osO=2CM*O| z?qh)vbem!)94>aT_El-n%Tvu?mDFvd4g3nzS?u3oibL&}q4Jw`nTx};W?@b1y5njN z3c9f+l?oWc9enym0Bj_s^2M+xQT`|a$HV?{)G`J&WlcEewE6g~(;mQ)IzY;-gLx>; z!TnQ^2lSH!-; zq81f=cYceE98|j9g@GsYA?NHTgoM5PGEK>hagPot={A&v^BXlGGtNCLAf($663&lm zLKbJvnJRHf(MHRLo!oAZg~L7$w3;-hU< zFuI%wyH`iHswSZ1p-B+n~N$CCd|D%$RN5+1`*D$ z(iwzwd&zL=Fe3~l;rwx$k{J^&9J&o5;rvCKkQo;)9J&o5VZU&w=%8yn)t}O3lnvnm z4bgGRhCc4M{Jaxewz88UO^41mp?{Y2-K2iNj9ngl+>L7%r zIWrzh%Vs=Q>|&)*$idVi!n%fd+IS$!eIZC1-AbKv$QG^`^vSpC6bdJ;&V@zuNq%Z6 zewS9}giZbu&YzOsDH_zoPoz7CftB$hL=}3RxXC(Vp+7nrnaNuEA!}hHMYK3G&dB3i z`Bo*HnXTN zTFj!pXfGS}rC2JJrC_D92xEw(`W8AWI#uPbIv2w3T|v_6Ryu1#xM~vJsZ+0lE1gBy z=q$oWIgAoXxR-X6(qd}DN{eozI0*Yni>VFkEV>OLVP9u4H^MrLZbL{|be7m(%&xG~ zqT5gs_LUX~ANg8cW$4H`un|y&72F+yXZUadIyaVY4jU{acfZH{^i*>rqVNG$IXj%8 zMCXlpA%T)7`|X=a;(jSH^y^NO&r?`wrr_miNevedVX(=$Rbw0Fzui;BBN1SP9wyq76qpf9C}13$T))F&`BZO!ISZ!$)ePEb%UvG#YO1exEBjE(WzV1 zHucrT6WQk_msdG+!3q23tFUjLGo7m;H2)G@*gtyv=;x4Hfwzx-fgl%;5kHy;rqI4f z%w7w|fHiU;_&D?M@X=_P*2sLK4;mX@7@y{0SqCfa1DAn)(iwn<<1B@% z$I4+@U->(S0V*qq{_Y7I2K-y+^uq-@!g#9t#VsV z(IFQ;DDFeWJ@);B<6TF)-zdN8Fn|(+; z=Y>6Q3p~NObf)kr!fNm%`*M`xaG}T+(TV#c-;0v$CnUKD$*Td0hlor{nuo$E={d5> z{UsK&-439gVU!H!FS+1(H>PDTp{jAGz(+M&Rdb~hO7X4%xm#pFsf{6TDPr+HlHOD3 zKe84N^{f-Pe&keO8b(eX+q5`-iq{4q^@fFk8Z6ZYdlnky#C`SPpU_C`#`0~b4hw!2 zz;ZHg!4E4CNe9FF^nwS+p?3hCY074r2iWLv&r`E=8ss>kyZm=B2LJctHH`O7cn@Xp zF}HsJ$k^rA@d{tEhL1K_wmpRFHSrY+=XCJERV^*J&ZHJcT2CLlW=XuO2NzJ;wFf5c z4DMFX*y;I%()xQWb#`V5=ca2it)(q?>)6V{_@q^nEPImH!kFk;2c9*Rh*TxQuF)Z0 zT{48LLs~$`p?b7)CJa%lX2gNXMWI>`d zj4MIyn!|sC8b9{S;yVYsJb=9jR5oY&L+H^&3NRTGw=-2dG0n4TE#O1!tM~IfW=`cLxHe-aIflPZ(IVtS4$V$bhZU2m(#4(x?fKA6)C4Q$gt~=6y5Uh9DL3 zd6f~8Cg+?RFh36<)QVmsk9`wWsk15;dmcZtPlGDwoFuloi^`$#^0rk@G3l_loIvMX zLY*^S<%}J6czk(C&7=qR!0K#CtF2M^2B20m{|+o1qGrAtz)H=)1Tmh1&}s&wy$ZCb znfh#|VPGr$>g@(mGak>{m-j-IEI5vM1<$~n>ZQQ1)@(iX!fm)YuTklvI<1fDls;;J z!m5QDX@Wv&va6w5n$TLCY)fz!FAm~&Bkd+=E_}7)@RyL;HnbO1jLl*lJ9#i}uWGUE zmY|STi>^SVDk*H!*-_0vRjYqDc3z+~(B-EWX4Ni?bY$mQ5~urz?Tl*mt(LFpd`)Ro zpGKrADeO#0XsG7@ztx*|LrX2F5EHfN|6!{)jh^P#A5q~yj6OHq=y?UfrKqCVe)@S@ zDIp^wuraPP4H};jhHXgo;EmG{LHobUke7P&KtskHA=UxyGlq7=K^COtStrl>s$g?v zre@$+2;WkpbOkS<*$pN&}EI<%lCfR(anuRv&Jkr;;-Wr2G$>IR-+d*!D>X_Lrh zDxxcR)(v-&5Jr=!FTak>EFo>tSOfL49U=Pa0bz?LZg;??)VAPV!1G~87`$a@(a8tj z4VEB4WSY9s4KoaSWEe+5ixG;H4!o!jt-B(03W@;Xp$+6GOzg%X3CFAJOy;2}fT2FnQ$k6_4_Nshx*k6b{pEag#~buNU{ueF&JKI?@m;O&-MaPOxD zbF0O<2n47z-LTMFQg%ipf)C$bdhPAB*J?ES_7Xp+2gRtUZnskggeK+Z!T61?+H209 z^U=oSg2^bsP*rtbL@jEylf{b>pYskrcJfiiBj;R#XPEdeLX3N4C!o$&QmgKJR2W?v zWfb)6wT?G|SG#jrfMp*X62ci`2x-&Z&gGSU#>=?^&`_nFkahPW1;uw(gyJ|bC*wdl z${UXCN-}&G9--u6C0x2~w(@M)TJSq_g;nMIyfcWr-?p}R70A!dv}QAtM*an028XVs 
z-n^TRwo<;Ot@s{-?XYs&@#POG@{wyXv~=DJJiOnBSKl*xyvCNcz1a=^-5++=*##fG zu4edL7AEF732^Cft^uM97RkVM9bK@*iNA)X8OttZK(PMAL#{AVy~C zbCS*n_$uCnr!$6!<=l)X;$Tm)@FB2FE%*wbR!aiAGCv5<7A$mRvwA>;!&Ymw#K#Y= zkyBS^7jFSoqWmRraE~6wb^!NMq_9RMhsP$|_ru+BHBLrhMHOEVe;($DY|h238k|25 zoNaKv5IEc6{1BKc;^M6U-N(VJR(L1M?1N<1LTzW~98(X@`%@A*ZX>f!dIQhBKym{Q z-~I+3L!IiI9s?JN-#XjOa|@?NU>hip}TNE6(*Up;Yntq&FZ`- zFF%B^SL(Q+o5J};4lQ??%`w|>a z+k056hs$IoHryhtqQg!S+yShv2glh~iBIEhLxeo>(_4?Zp(Q3xT+W1>L%+y;xlfgX zWValJmL0+`B3`pYxJ-vy81A_x>D)&nv2;Z6>`8R~BtJ;Ct z+_>j_6%O}IRt-58)0)aT4*->NFJlvV5MZe{mDRZW4KC$8#Bi^|?Izd8wvRjvr}H&D zLRnD69#DL}1t)7LlggDxK$3F4j%RLeN6PWO0cfHcy=Qj4T6<{(cdRm8g}g6*6U4=D z;o%+&Y&PGV&9=4{UqVUD*c0pFy~6C_Klbt1>~5rusXh{zcd_XOBJi**+svNXSlxmC zOS=+9>g_}Ut`bRotag%$WX{N+z{~kIgu=#lP938S;EjXWh?G@d9hmtg{sWS|}-eX&GgaI`gJE7&gkfbOPvtyHI;_**0$Ms5(=&gBs0dt-bN0o?S zJZh_~KNFdZS@W+T5nSFGCD0OcF9uo&ijT59x{)%~PM}GltgMkM`FJ-T-OhWNIgf#? zXDamP<8V=VK>8#^Z42hEVY(u_ zh68V1D8Gf&C@3^>ZB-;V}P$nxT2(|C1Oji6;flO9-Zzt4q zs7+2dD+CeEiUUr)_L2WAJ|1L+AIN0I1c7e(p!Y%iePx1BCMzZkwXZ-XD<%j;=}x`& z1rw7x%KSW!1id?|9%(jNVgMwHkE0FH=ESK8f_+mp{=3944CuZ`H!NqXG+R@A9~d^7 zCn_in{z)Cevx+d*uC_kwE`S$(Aq5L1x{(SrX;-Eit z3ywd*P~l&~y_6d4$514^g@9TuQAUrT`{hI#R+w9`h9>jFLf!fB z_d+lghTa_JiB)2Diai&2lo`NzO41(mBB-s%3#qdZ-<+gou;5sx(#1SFF1E`m*fY#n zahya4GL{bK#~6`%iIE>7j#1{c%I}=>v_wiw&iRoKL%E|Dw?>`;#`$p={cWP3CHf~} zG*%I;k)IO%Gap^$>;bF}fZ^-P|HolMc9I&!aVdT3IYLwzBArb4h$j3n?abQQI zrikZW>zhQ65RJ3U0s0$6Uqm$a5o@%A3-PREtMfc!*^^GgbQXwTwDVMdj!}^VTNT~= z=T+DD7-~qBT5M5y{YjdMZ-TbGCr=3RP1ca8(@Xd)=oKE8FP!J^c_QhJ} z1>Ol`@V(5TB8nW_D}Igy^sZJCUcV08Bm}dAax3daWF2Q4soJefP*uAP{36;!V4JcH z7$3F)1G5bnnDyJhY!3!zdoVEDgMrxw49qHRVAg5Ch+kJ|
o;Ku-GofnYmL>V?i zQf|gjrfnJ3E9rg^nV{W|R=B0UW1y2%Rw&L#G5T(?LRAia8i6tY5DJamRlFxAI_$#?T3GSNoAVaF%vYkWOvb6t$*h+|^lFzL5!EB5UKS+Kj zKu+0>DDsn~Y)SMjZR|@FdwF+pUL-{$3t@mFKPA{ov_B{W%l#~3aSn!$_E@3uU#q|$ zqwznfz#pseRU4vwPSE&F1sEf60=+R!V8j{X0Q0m$KdiTd0Jak0#E~VYlMh&}X80d*5980ez zj~5YS2c1gZMELpHci3o_&9IA7K12iqplH@JL(kpNWv+Aubh$b?_UY>-IFvaR19lx+j*Znw?A~ zW~JRpn{+yW`PWop&L@k%1$jyj&-!Dv3u6sRh^p9asDH8dF)Q_a2ohE09Et>|OKV_0 zz*~KKQukTmdrWXItjYr^zmrjZDTh-6&hHRiU)R2T>p%j|mw|KNhdBw3gYI?yB)AIY zTnM7RI(HG0GO`tvJM~bkCAQ@qYMNN?M8KT!uBh;;ba0?Ln;!W+b4gb(is$?Rh^vbz z!(eKIm$uX(vYM>(GJI=rLps*vt&vyYDAwXmsv3SZ%Nke&+Xag?)-C*ejJghGDp1)> zO*T_I@Eme6g-s@}B9yZY>@w+rJAuPu^0{fnKi&CD%MJbJA`<)qYt*=*@n}Mkx zTMCSqE}jpj3s(X=B?tg4TGvl7n9|6lfp*%bT{V2Pd zcXxNlrsdt;ow6l)w@h41qs%po{2AOcN$!=p71@nNe37L)TujbiLb-kts7$V78Bpc= z5)5-OaZ?NTe}c9Od;O&9!p$VvcG*U@_a<_B-Ci@zbI;m8G$MS%N;5Zs} z{|gp;Y;(s8*&RQgG+R=nUMB1QCR8N2_<<=znb<8T5ad7;asWkQx1v1kNhl0E8!8FZ zjs|3_)S=t_oe`lrr1wwh>T?Q6tlqS{WV2R9UR5ua!d0dx?KbDXSY*t5_wOLF+d`L* zP#fp)fhpedX?17Xva`P(Rtn8-^BJ0Hw)j`D!Mf6BG-))fr|{z}9cFg@r6l69bnWr} z!`S{69%xhY(Jth1B)~L;+5+5rVg8vlsZOj(y#jr6HTBKg8Ue`NN-}hK67?^*=|N3O z!Cu28$A93N=C^e?>H0UMChNS;tod&|#2ybc6~02d?@3rHyoRt|Ne<+Y2rLMpT=DT2 z&#qP<4Z^3ph8pC06Mh@u!^rF6NAQr_Ub<_g804au42BK}*^QZ4m@n$`A~@9N%Ag_B z&NXi=*6~Ui?Wc-W%xu+_{|q6q;9b4%TV=Wx6S-Awa9p|6(d!43!I2;b=9BZHe@N+*Ht7U*6ySrv$mae;^o z1VO*)l_Sg>SIpFf7#Wg)Pp7_O#!j&*TWrd;Y{h8V z9d@0$Xr>Z`_Y9RY&{5vT-V-v@I0JU_R67gKF+8M2#L1)4>h zvkJ7B(&r9fuQ9mQ-}yV5Orp$jo=jR`zlo|44e}X~g_D$i$9`7~Hnfdk)|<={{vFVq zJy0;1i1IZgK)LA(^YO7~p(z8+nT&a`Dq`1&>J*JAlPAA~^bG?r;9|AGtUT9uCe(!f z0ufC)v=#74_pS<=cfis7^Z6Gydooi_tsP#uJ!po5nutU}uol+Ha+_Csx^Y ztzo4dT>4jGCfC zhs$7(vPV(b5D6?wC#+{|9hYYA34ziwtSov@C%!}GYZ19WZOKtE6LE{*zzdRuphl(T zk{Fo1_>N^4>)AiR<>323rQFzey%!j%lQ50n0Q|9sp|%IvBiEEYhS4+`?qJ1!Go@d; z0I>=B4NJGHvvJ2@2mB(9F7QJf%!)Rt5vqd`Ezl4sC28=Q&TCgyr#{r?(l8A1DYQ_# zQCX1UIU7i=KB{TQKJwXEE1VdoR>c;f(!=71z3?Dp?!_5+zSZm~>WDaGb~I7uw2pfg zG!uu;Jfj8c$DevpaIA{WmM~7^$Hf8KqMwZgr=Oz2U|W@Gy@VYl-j8_6F)A6e5Wb4Fm57O{kSmSZ_0W&QS)L zXvEUOPY<{7q_ptRAgLOkI69(Q895dPAm!Vj9Gq$j+2A{2-18fL(Aw&YOZX-k%tWo( zYGwIv;fAsMop^_*CSzGJs(8#~EHP#>R($tbn0$*&#%lCaM2BWwiMI5ttGYDom$7_O zeKjc&yCgp6ZUaw?-{n0T*OjL__6Cek%u?!23*GK+6tj$lN6{%JWfOxGR@!|6J~Cd? zb}|Q`4Fd=pgNY~O&!pRSCOSQRdI=xjO3N%%9WcRwv$}N9Sp5a@hN(dw=2JRLWpY*K zrzBU$7F`gZ>h*wU3Kx>2!q8|(WHLV3tj3u5l2kB0XssW)9&^H(l;xxlJG9au0E|amxxe)RpwWWiYxKeo(eIu$UA=mn|{q8&HR|pBE}zL8Z#r;Fs*h zM;B!BkTZl}!IDO{(#x5Ih)fP0qjE2rLlUx8TIQZY23Km4lijEgO)>#j(~C>{vtTrK z`^E7k-f2k3R`d_XN&*=BE!xDbcwbe4{~K^Pdl%bXf#+BBz6!rb@HTwljQtirOp3(z z0MC63ob40-fT0^>ukb3uCOG)Vj%ps>IG%|8j~if%;6t@6h&oH^R7%x(R;l__P0l$C zg|i3WyR98%)^K)Hb$%Fk$ENkDMI7?%BXj(PJ3U;PPkrd*sv4=Gl`D&DsmFf*j`)-# zt799G${WIhuz(;4<>_t2MTi8-Rp5aS_HMI8Eo@s`Z_b}#TWE1;Y{kxaYjZo~&D6Bi zSmhfLq}VaGV<%J=PW_~})vVo$!4JVme zV$Lm9oDA%qDsI6+e)GHo|5Ert!^BR*4`m)}M%37CQG>3U-Yggz9eU}LTi~!-CkDlV zTl_^GZEdI(pvn~uN3wSe1tBP+R%HEnp5&gVXf=%JKK0QC* zj`?}4GOU6)+5GHK(45TGYmdRe<4xAtAf>3*HeMC(pIqa*E{#d83Axn&C znqp%QUK(#({_~i#BCa=eRmT=0w~*sViL8XrL_J84U=(KYv)~-KA-$}gt+LdEg$}ip zU5`1r=f?^jI#udewl1hq`HL(WU#J#}ohit2XB|qyy$V%Mjni86BJv6_keynkqsLL- z>Qnd18q>IS=Vh4So?ug6LTat5>pfGd#7v`(;8rsnbc}s~mAs6tq{-eL>h0tqV&4|+ zD8SK+D`iQ6YZ5So;?m82z$S`Q@oZa*ZJqRVmMQ0OKs8OMrbnohYglcS{v6o#%vUoD ziQ-R?+1!ctlQDc4rF6_OaH_*`^W}@2bSGpge!hI!6Gjrg9?GZFjVSc4^{>Ir?G=3c zyVl44cp0vp{0CadD91mEk-roCx_~nfb`)C{fE9vo23Tx+09FXT9e^BRX)M)YVq|}E zpqkX!?+7T&s4+C(?|K4D%+R!knCezjU^rnwm_G*OO1{4E5+;}dF~$IhIq&o+o3p+? 
z60O64m{~C(8V5nHbC04$7&Md;KwxqgPN8XATtz{XM2rkzUDzXM@iS zHtrWup~E*&vOfB^ZHmLrw}piu&iw|Nsxt+27nV@g$ZeZMn+tRZBdX6MDo@3Q!w|?Y zR(Nxf58-^{lyFQ$xz7hN(Otm0M-110rkN{sk`SWJg>4e#uR)60j{J)i>F^3;5LK>F z3P(jRxA!bRQ4(%jG-n0Zpl?}{0LvKwZ3juA+vF@^B=<`dIm?Ee$F)nANKKbXJ7ISt zm`%lSZ)vtYJJN&vaY~X&^epU!B~bV;JisY!A;eXN`UOy%q-|1}iNw5w^pz0*u`TnRxg4@s2B1wpfT|>S^)Tl-xF*f%l5j_j-Gx7aC|YwRbY9|t<;9-Cq>gaDF@I3F zS`p`75ik{+)zQopT$h$y0(^o2R9j$wp!uiH=ihyYM@E~rwv(btoA-i?nz_#%Gw9(MWZ_UpMS3Tm~ z^#MbnskrHnICIaW&|gM+!~?xTx5;e6$ZHtQ;r`7l)2cD~OSr%jbqdqRJIAO_t=48joOD_RWC-1JO~%7{U@P>({6ib1sdR3HZ3m0}QQ+M&?z4aA_^ zh(Q=)+zhgm94%7rxnNlK{!NoCe{&J2 z)P-mXa%3U}_$vXhpPr)mH!l4U|C8R3Mjp4dVGIKNPv#fvS;%R!A(qyv2mw?jkU;N3Wx-I8G%N9gL zrZ`+Ybep^+>|X@R90?Z>+#G-GCtT?1-(Jit3tt3Ew;?3#7mw%>7D;f`_w(?VBH=&} zlaz__3j&}`{epnLO`0J)9dD9EPKPN8!lo@LSe1mLQSD0LNJ%JPRT6~VDa;^ce$uTJ z$4wQA<2esr80IN$p?IjaMc9-EQ{*lVGMsL|u`reH^1zes!WebtAEjb3@$RXCKiwuP zf5Gy1&kH=6L4`Tc!aDGaaQmZBI!q=JE<8fZGLj2KyH}B)O1XqTG-`bkXU1o8&cMBm z0aTAoPt!*chVFp16X~LU4YV&49fe!Q$w9PxcOZv^nHrUYxHe<|OMveMa?ow$APhOa z7?mUE^i!=*eY5lESSyOjSXdfh=TH+>+T$yGg#Mo6D2y^=<-(~eNkn1~=~ zLFZh^+8Uu}|L(LM^BBe;9$XpyC2wE6K$T>&BEMj=E zg$yALiQs}Ls11=3KAoWDeUh-q|yQ|GGjcMv$Uu*XhBeQxOJd& zc75zW_m9P({cw0}^bZKpnwI69KHS67t3T2^eH#y2$yu9j=s#@ZZp7ZM;!DV-;u6G^ zmc`<_sxr^C^ku8cEBQ{j|I&ny=GME1A~?RCdpusa2n$;Y^g*KiQhEaqJlyRPhj`A1 zfU;GvE==%lAvhZAok_VSI6`q7Sv)Qj%fVkZJdLMXO{y30s0sPW{)q-9b8lU3aDJz% zd@NWIRb@`lRFzjSe&W7nj_p*H)ybS%HxrQIB#lrV5TK6NsN)Q()>(=S%=VYUseGLx z{t>5fYD1@SA{ip<1Co;TxoEMUMMVpxCgpYzdm=bTa<2fal+wOv-I+l+_t+4;GX(z% zu&0^3h4cJ~!jZZchVY*c!9C7I^wsLnkEsiHfk>f(V|XgU>N4xI^9znYvWq#1nR4+J zYn@i@JBZroEX5;JT{;OHCJrTNDNpPlJdDni&hM=V%tcofQ6!I%d+rtVPC17oi0mU( zQnXZi#Up^~yMwzh_GY*Gdn=OaxJb>w_UW=2;#}0?Os(WOwwtL>+h+7F*w=t}RXM1s z-K4E=X~0ImW6&f3sq_S9zWQPjkcVyT7hC z@JYgAvEe~=X-748FMGv-|Nm;I?C7SM*Xu4a*r8}d9)G0VVnu69y z3RX;`uw`P{$H)tP%TF}8dd?YH+ zEKj@aL!7&jqFyKJc8&s(b2J{A!%GWW4lgbvc!WJ@A0c}>hrsX%ISF7N;^l_sw%JR3 zpC$HU_+f_v?i^l>pvw`o?*UZU!}n%!dcvF*%7yY`Nl=ioIjm)=y|}U(+F2DqS434H zOHmJ3g$iV8v_O_d3uI|!fkP+TPyDd~ghwgd=BB(vy zJa4H9u_k&L+tJwUkHp(NHYa`6*aWo@l${}thgER255ZQ^L!{hT!b`ZpUNz`0^_q+_ zo6HhzfuPy`6SOny9K*so(mwJaDg`Uk6^i7@@YdD|WD*(1nm-ERr;zorV3l3`IO-g@ z6O(n0Bln~1qntCyJg=A^(tt*Jl*EVEPWXtb&rGq8G8^0H*;5p4{rRK~3xfO2)V}t! zgN?LG(+h*k7;pbE2y=x%OAV(fiWB|E?c^0sI*8)xE7{Bxm4OAFCP`&(@j)0z!xecJd9Q19A zxPdv8ZOc@)EemB^!CvMC`Bn(!+p@RHH|Z%dV1V^+X4{#7RJNUJYetf7%6}i(h9Dg2 zR_0rw)NUV*T6wg6bU5plJc-?Yk-pzO6eVN2>g_Bg)di`p`Ws>&L})^GgL{>PZC6C! 
z2pFkcc>4UYy|+W_El2;mySvL>E$_)vqPx3Q$%f8K8$0A_@X6PjlY|>3P)-Kv++N5J zZDA6~Jx}l*?v?WHbZ?e-mwr!nKOycZ?qe=bA{OVN;ptQw;li=JCe%3>3*@qiv02{j zZb9B1`rYX&e3yPtb{C8P6n8V<`-o(0_8rKCkO1y=pf*_^&dyK{Ml7W}C#3(UOk~gG zu2_V|M*crGk>A8pg*CEoQN2Pe!5ZK=HoPj@dKc3s$fWr4#QN$-H~LOkro zO5e1*oSqlbvw6Jd74*D|o_5F+4Mp|mo1ulWndJk&gR+RUEmhOLfq+F1GNj$?1>mtf zHx%ejT-v$Zfq|E{+ch92RHUjdL@34T%JJcICVa;}!{3Qeoo2tTVzejBXzbH>#iz+e z#c&m-oSEhJa-Ga2JoYd_>Lgd2FGRXBp}f%rG9P&NRC>0K_mo_{l%A8|iQ%G3bhuhi zvC~T4yFjKrXhRAm4`(Rf$HZ5UJIDFHEWS}Ax`M&fi}016n!tPySRZ*2i7Cc2=iY}Bn z$nkYbh^X~MmAN-x%xe*NveFj-eQQ#k2RvqA2_$GaMx6&d);`wR0;@q?J$J)1V;mH<5XfIo@YJ&puu;|EdbQNrLV( zq>Al$`*`#E5N0AHkpGY}7@kbcZP$RWhzB%F732t`?_CzPq6q_Ru*3{2Fk3OsN7agRBkJy<+jo=`g86- z+3dLHWt!cI_KD6`mfPjgay#)&%I&03xp_u`*e6bq)nFcJa3}f&cM{9(#Bt?jl-=nQ zkzOWwg);8t;WxOeeA-U^TKctrD1nypP+{Ozu% zJUZORE0{plwXu#gLU(6Q9{4QkNXyCU{NZYQwcM4EImKQrQ!Ls3>Gmn1?oVMWyFX1K zb4#nu!CUBGu?sRoEyLi3qD_vQS6N-g{CgNZg0)PX>tBNaoW-dOpd^V*!wg5K1Bd-t z7%vX>*)klRh8eD$hRLj*fajVh&ow?zVYw#Ca?LoFry>^*g8k=D1Nufl=Jb}+)lDh1 zo{K9uopGA3v*ALuPhlspmiqrxR>vXUg>YK9CZNQc+4mly`rebn=Rs9<^rx#|Livlz zpQt*<$`~~^nELp2xF)KPPYS-neMR1#Zo?&{>vBu-p6o7@_Y`-Fyz{m0yDowKxw|`` zcJENAI`?Y|sCSbb*SPxNI^*8BRLq@{;eV z*bCG*-42SNc2E~=9%v+Tvwd(kMoI^(ucw@0pP{~?v`)UEwB&bC7xo!m7&naA*t~}_ zb3yzMK$$ZE;Fx3dQ*T@B1MMwCnFH1?u^ZO=5UX3>9^-Dmu_*azuZ)574I z`7QRAvD@#9PdoE0`z*VAey2Uxo<6MoW3l;l_F0*3!DVLHkVk*hZ5jlgs^RH&kEU-O zoWIFF!0uU_Iom$lF3n$zfIT5*Gwrj-dcG9z_tv9!4MXN0yQrD;*t0d0>4WnJ?OFEh zwVAEUYnBHZD3;T{ZoA@B=Z&ar2RfKRk+TDnG^Hf-5u_m0Dz zYtI>nJI@}7;4sIRZgK1l5hQT-#t=q#2ixsUA*3956gLcmSmu1~Nyq#Ys@SHrnezj0 z*Pe%)O&6c~MEu zi%N3dI7uq{x$5-W>F5@XJ@D0d$z$gvUD1*Q|N6!o#s8vNzO$-w5@!pNxKBglMtBC3 z;OcCB&TYGty>y#fkaxQtEOzL@VyD|9crTa*Unqzv?vZ?llB656#F94Vj4_Q@OQ1Ga zx!d)mK!+ZkbZUbjFe1ca#(ch-JjmC2$_xobH*zBT=@H$b8&mo?bf3nQ-eK>M&V9iE z`_vJ*a2z-3oP=--yTd4ZhjTKJcwt7}%3J_ScHOkRFHrUI5pN~xjzU+O-Xy;bN+C3dPEW#?Ab|)gjlu4xfH-eI0D`H;-Ok@|oLoJk*?Ta0c%YEt& zUcKhGYZ-FnVzn%Gu?*d0IyO@d*8{WUVqf6v^UaV`XTq+BF+UN8g@do9FH@9UW!vT5 zW7b<~j9=tnhXYQtci3g#F}M?ku>FqR-IMK$E@7HO_Wf#4_}kOa7-Q0mtv`t&u-&BiSsG;l|PZs%i=x>Y-7@k?7?0tatJf9 zdhzkOQ;;=1s7Fhu$75&>u}AUPfHzLB`S(B&|3Gytb`<-nq!sme7Sl_sP=kAMD%@Ng z!2qwpjcr7+UuUBD&*4@Yu4mjjDT+Tci;F3MZ^}jSKZ5%a@c$qd1HEtrt8q;F%kXH# z?y)fbG3uvz+xKyN z_CzW6s7vbls`te9zeklzX0{c>U5T*|N?QK_(%u6@Y3|)$N5%~3MdnH3#Q`z?OkwGe zD=?W+UL>bP7|FVRWF2-t`k_CDyp1F2J_or;pkd^8w5aY{DHQoiA0Ax8w{pGX;hww70UYj+^~ei zWu47Po@zkmc$_dgnv%!{bx&HCITJ_E!mVh>h3lB$G@7J7Q%oOox{GkuIm2fS*|C?; z8aWdfXN#a&RE2MVM|5{Q(e7qH^70=spENGtNMz2lNq|d&e52d!N+*n{QKur=#;ReC z=@BlZAx7jFHL>n(N(u}Z6J;Xui=*fC1UE~cOOOx`24b+zwK-==QVHjrvwc{Uihsj$ z*J_%AId_e`vCJdycJ~>1ceq~5DJ1B059Qn0>a!8CqX*3yllyd#dvq7>MXSkA3Ktd8 z=6C^HgvkuT`3oXB@fYU5i!a8SdMq@hXif;OPv-<3Zp&qUMP@rQIYHRu1mXMzQTd5; zr!uTM8$q|1-xQTzF;T}s*m8~ZgbP&ikpvLy4hAgg&UZ)U7hFlpF9B8u@_SSXCIN&^ z0ticd(KHb4ZVn=(J3k|u1mfCsd{i?g354_Y5&8d$2?Jad81NICo@fFDcR+}tduxyY z>MkQcVIx0beEl$TQvq@AJwaq}d&$ZIAd$JJfH0aEdKuls&SCw83flJ<2+@tl3Qz|V ze1VV+Hgb3Y%?h1x_<#rNZUn*l7D2FnL=Y^|2!h!kK`{R!2qsG-JarnADmX$y9j6fL zMi{BF?hpKYN3I}D_}hq)LkH;860skmw!+ZEM@qhjzAjOOq`0l-@8B$Eq=Nxd8plsy zlqO~HDTqWFS6d{D5Yf_Z+z^J_qWR$#21u0uoAfwOVcZG&<4}Zg!z)?D!FY>T!mNUU ztNR>~BzYpl(BayJWYNQw5&FA`WtgnVBJRR41mKY@A~xej*kth^#!WkyseIa`Qc~vp z1W7G|6H>~ADK|<0ZYE5*Ul%tM<|#z81fT%QjVW6Mpv20Jo(OJAtc*qI2e_FN${!bp zz|EXcZcxI_1SmH$9d0H-x%Y}2Id!;Vh@9Gnstp^y2r2camskk$Nk@4rO9qmQCBwIG z$&9W1E-tb`?CDX8lo%0^1+6U?WSRjHvms#qXj-ORgUl5TGCkT1Vxelw1!*+|%pgsG z4;J}ordzXMo@9Ql_A~EhzQ3ARRJv)u~AqF{vY1n z1HP(aYagDOQ*u%u{g6g-PI{vRLJ1*2Oemp;8ft)05(q{JDlKdR7)4M)k$|9rf{KEQ zqJpA=QsgQMie9)NiWe0WMHCB)_&saQ&PjsY`@Y}*E8)kp=UHpb%$nJ2X3ySdmzWlR 
ztD3|!&*8F&*;+Qcu4K#wqwYgz^ybnoit`!ZQzh*vLUmq#!>Z4IrSD#MnX6v+WHl^W5yM}&{Z03b}KZv^y z{LK%D=|Nt-Y~eidi#_coUgrl5={i4jTKl2X+7C*{__|hJ7nmfTc?%V*yM(w4r-2xI zG#2n81^xOTlGi<~&Uv1$cZGPJD>S9JHXy%?K7P`$(WHqm&mAZ|^PBowiN~Zgezg(F zqU#(H-@vQr&1Tu;>1VVLivB6(fs zQF`VJ^%d$ojXRHJbuUZd4Cm@Aq?x>~QItlb-mI(8tdGPqe_2BFkM24WxN2b4xM7^V z?qxBYDe4Z-P9k0x7j(#|tCP|g5{=fZB%9fb^6L7CyY|wVeB%l^Z{6#mIMed_3W?WM zh+;eyQW}k+2-$dZ$qv`Am+pH#Ns0XY#4>G?KVkr=BX4lW7w9Z_8PI zlt@k|JJ(OB&lPyXr7eoSZY4_%Y_y`JE^Z{Cecebv(bh;nG2Tc(W2=#X#t zlP#JxE(0%rxBHy7B|2*#{Y1u^wROIcrTa#fu{W}exsj#oMwW3mvW&lxC7wU~yPxUx zmUL>N`>n-|EL+xF*7qNt$o_kot?MnFo-#8`%aDe>)TZ9DzRZLhm6>=W%aj{grn04f z81{30?Mbz_pOh`leUnTUDRv5rii+BkCuJ++A-d7%Nx;neDZ1+ESB>esI~!^J?rKCI zt)q&|s+fzfVTPLDE{eoe$qJmyqUgZN`^j;;vkI@uvtqN3jy2fgwF|UaLz|GQwb)b^ zp@^ABQSQvo0QPJ42avV$IdXS0shTW>DiQ$IsybB#tA>(lidNyJ<~o;Fy-upbT7{Q8 z+rzj-tqZ&xxtgUr@D&0mOuw*>8Xg@9N^T_9PFRu_5*yrKNQEPfk@+}yw6*CBC z609NzUjgXm+vic6(!K1>xQ&jAT&VChp_vtRC2Ko;oUJ!BO}vxOlR=z!awSSLT14u7 zvkTVV$pujH9@2TdgYj-wDz8~v)4tC9yBnEn+pxCYc*8Z-MV}$zT3STuN=7Zik6+e? zwHUu)x;}!3{u_QFkZ7RmYtuHu^{wh%l?jQ1+o?el04rnk~t?u|g3Ce7|a z7~rEB1}x<5)Z{Si@tHp@!%BcP6|-SK3u`+GQ>;v_sXf$8-{lwqbD-Fqgm2}f_SO>1 za$*(u@!N-ty8g|GB-a^D`>daqxq(|h&@Nv`wf-M<-;K$f7|Q zUqseoG#Ih?1jS;gt^*OsbyZeVP~!erYsY@1R`WU{%>hxBg%sM;iIithjMk}nB;Y{v zbHv29v^ipu+KzQJ!8hwj!F#@05$8(lc!Mn7*-N-As?$-i5h0VPi)@%vOVm7aq0Osl zLme@$w)o-z<*8T%kDVDe!BwWTMybNG)M{W1QkyUa-=G@mn#g9(ip9v5hLHVV*W0xz zhGL^STmP+26=qg>y1y#8L50ZBmV91<$`gWZRn)nGwoYp(d_%a~u_2*K9*9WLk=wtv z&DZ#-8SkHY?VzNnCPSJ=t2+>H^IvOc(>Z6fM;b`n!z1MD3NYWn2hO zoUAw)&ZDiRq9x2LYHC}blQFJ4$=$hX9&NS8S>cA>lI%+hrMqhJNv;_0rDXM%nzyvJ z_MA*M?k0M#s$7O`RTq?L4vu-0wp8Fw6`zXbWP>WN7j<%0u0b0TRMjKo6pkU|sYxv> zCt|a(E6IE;XyH5@Q>+&4OvOD^*i<#|CYH6;9^y@K)R22gH^EYYBLsRY4;Ff9%P%KD zxznlK{Ms4kaiRY{vRz<$8-33n9R8|$AcqRO2N!2#6=oZyC}R4-DQW(B0`A!grZYX-K9tc~)t5H5D(qFAbt*ar?~LkCytipwxdJ|E(TAS=19bj@I)5Q;np&#{ z@zz!~n78&S#DX>TfSJiz#hIK{oXJ`H*>^HRrHa}C-$x{mQ#D> z)J{3IPfqQUQ+wpp4mq_yPVJ6Ud*iH{q(;FRXVql*lsZz)n}TgZ3u|IXnr$BW6mrdS zy_o#FftY+HaSt_b7LqOSS&Pcr)8EQSgUUpAm(s>oRYn`^zNx&mRZZiqy{eqIUR4NC z=p9uz^VYkng10_ZxA4}tY6frps%G-GNmV6pn^sY!s;Pg~t-KAWx{bGiRkw$6BB*LM zZ-cAm@HV7sZtaQ-=>9vXquE8LcG0O_bZQTs+CiuG&#B#WYVVxdIj8o`sa#)?P zJ>kyAPHpVe#!hYP)TX=^xE-@E{h8S+S6Xnjv1%a|S$RK=JVYRp4^VOuCs$Dt!Duy? zj!Wv3=s9w}MsM!ao4aVk?=GuaO3ARQyR~8&HmSorjk}Me)~XfU40uGxhW6NQz_GzH zYaCp}N1|4c5o1e#z16Z2WvOZ&m0)MoALzJEC5Mx<4^XDc2Pyd=C3*TvZK*7XPQLI_ zmtF&{er?@aN|_c9HAt_cbp0&1o@j7u3w_UCU>$&i7>Rt;Jes_H>CVY!ExS5Xv1B2+ z7ti(@D^|lFtafX$@pJGebnrPaM@0zU<>piMFs!i_piIr`a;HT*cujYsQr4i92IV?i zKj@@`?8CKFF3Oag%k%M`rv*ZctHSxfbfgE*npt-9lu9@)p87_JFq}%q4IM1@%#n1v zqloJ{v;Ze6;t2SP+QS!Rl94R&?J$VYE&=C6EY2}-922Nm4950u{D%oIO3@LVhKkQ$ zW?{Pp|J|niR&z`FDUT2S<7|Mw876QysPMyoA!^b{N16-CWk#W0Oiw8T4oUiafWKIV zU;pjm!6ctAwDe5748YfjiJ`SSsG@i7-2)r};t zOe341QcAZ&5q5Eot7*^aeVs{r;U>a8Ju3p>bg!Z^yV#xoWLLY`QbxG1t3=+aWS}7? zCy=Rc3E{wYgfH8E?O+$b$X`47h@Yo^F~TnXkwo%gH?2ysv6IU{_;W4)anBSh7-IU^#(7kPx+dK1oK$!|s(kq3Gbj$~0BvQ3c{xcFrk zV{@qDCBt(%M~kK{9Bm@R=;3ol#ELt!1|jX}LAgHYK;+8Ae{S8N(Sdr--)CRi%``$+w1?2B(E;NkY+MPuO zJ;C^EB&E9+6K0`^MA59x?qLz4Cl~iga|f!KG2GeFN1Pc(*c55ISl>GkA?XuBD7Y_= z_ijEiLTv3d$H4ykf=SyFDs<;aLPrO!vklt50DCJ}at8_*R!ok%<7O45L?KH?M*@qZ`B>N^?K z)%RElr?I3}PfE{eL--6!&N8m$e##y|h2GPL+V_4w4bF$6DUz-sZ0+Jl1e%Z7-{T8Z z_CYto@Hh(9p(DxZE|A#8RVv>Q-MG-Vqp6yAc_^lgqM=yBLvaU>-3lHfxlZc9Z5*(_ z4x!4nL=r}hCfpgzagNw-D%y3X$oIL4$lzLOepL1mf zW95{b&P|2tL`8JUC@LEzpT}4jC2vA(kCHo)wu=K^Ih{wz&ybD~=^bfjWe*{28%Mk5 z_TD+2BSdM}IU^3r{%m87A+2pHVJgS}6TG_@8!(Cxtyvw4eG?&S5opn3tUpERlN@yG z5Ojtd+M6o7SU@({x)aXrMz}SbFgf?9QXlbS3GI%oCOMsBg*$E#Li=lUd;BC5wnOyA#wCP=59T5rErK=q8gT2H<8ZIWUV8Kh2Zp? 
zN(lsu0_!cLD?sdr<%3}Txs)eutw$Q$MW`6UrdP+c#+(!?ikMvvr(&W7T@VQnKjgL^ z8HQI%Vm$%Y4#kAwsE(_sU_T>Yx=6wY;Hbh=V9_EOH~SO&5~Z|;W3RBI2mGey=Ys8K z_LUvXz_rXhOglFrWqYxoGydhT*+JI*JCN8Iaf;a{wj3uuW7d<|c<}|ZFFD^l@h7uV zu5h0Ci&=Xcl~OH~q+T1$`HX5|WqO>es1|l+uduRO_@K7~L}awi=*R5qUc~MYO_`Oj zZkcGs`Kkty?tamRbr-s8mdVVONbE6@#q9Uq#P%AZ6Vsmt5d9qoPpYs57y6gz%xp0; zAK6`k3P%a8AwN4z|Q#(`QPdlq=+2%u1$_>2~=DvyUc|ZoAybY;z8=o$^^`$NG@vIeCWJ zmlKG6E5DZ27)@Qdl<(v@)_p#O@_i@IGg~=~*zfX3X;AC85ycgXU*T(juw`o&iq&v{ z*i#rVm~_n){qptVuzy&Q{i ze8YVh>zdu9S&_0BQ2L^xFwt3!V&yw1CQRh0G0KAKf5K`nS9DRu%Hj^dkAK9vsuH$* z0>dv?W6$8{F zW|KJIP_=}ZAs%NsMb)tK9%l2@a%Q`jEmZd^AN14jE+LxB#(EChwFoD=g zwUS#;_o3yAT6Mn)MU(oV5zXa#wT9Vb?3j9HjKJ0Rdb$PI-s{=jjWRB1r0eA=hW7nv z=yC;BAw|z&4@O9Fcksi&Ie8m_mt02>E*Gb6Mml)HQ>@(@VYlBAwAqLlws4vId0=S= z!bhCXL;Y+lWtg5vIBLiaNM4L3d8^4(#OYBanU}LOB0~)B{~FSB*m6SOH<7+|&_Uq3 z!XpuGaV_^mMB&0qZ8ihL+7f<*bcSd@<%q}-Q!|bT!!63fjtIBd4wT|x7@@K6 zuy0Vrz_9Ou25WU|ei---w1jSPI+1ccKaz5F>+>Tp6KD~ij{X%msE3HOi2NSp=>8@< zBAnv(A=b#kg?FZJM!IK_H8R5xPY-fLW{Bzp5Yej05s5|UWIteX5Mhr%!hZgQ*El_m zF`FeHGv4k;+NF~Nfx$t9Hw6+F_!Iul=_!mISaOuH(vP&pt`0}Ap1T}P*tCTcqS)e{ z5q5Dgh_F55>_AG7Z9>>}I^lLse;Y~Zww=O}YeUay;75bwfRhKe1oj`C1&ke>9~mef z9z`*+xL^NB+(6WCFz|GQGxH$S|;I+}Sfvz!&fc|5a1K%2S1b3~x6G6D3 zqfr7;pN@nl@*hOzucD}k>H(`EaiS*z#qJh_?*~)$Z#AQRxjbPl9NirMDDWZPeLcGD zfV_EM!ff7&Gx9f~^}1J<h(gC;P}NC}c76krZ%YXear)n!KA1_8huacnr_(NWGe!+{ zM&kU(e=><|g`{3GYA>)=^daD+==Xt7P9p8oEvU+YV<|fvViiHq%)?g=@Jjk{t5uPc}w8Z(^ns z`KrleV0jj$yEA^yMQq_B#HXL7}#v;1&dgNh!`75Cwai@3@Kj8l40p}%yx42(^@ z0@M-kC-3!=F-}N|cz@5~{rx;g$=`!l2W-<86dyVmn@3+m$e$ddjHtRu-afENloUUY zZ4RU&Bo7`b^x)C)zXQXsF53U{PeR@=hLhy#NWvwZ30LrBr2|=y5kDR&-;7Ly{hp$B zz>_1p19K;BjzIjsP&5?eSWz+XTu~V?ZbT*UYEe}b9ivu94qKR*e`nONg>eCdtr)Wz z2QqeN3}>9fcn9M;#%CD+#ki8u5=iBw5E=_#W6>4H!;C?!{*u!#aQb1!PEGG@;Lvgu z(YzgDOUA11l=kIxFHTQmyq9qVu)Z-XVL8W}&{*$9oA(`r2|k3q1mR)G>ocr|WJn<4 z$N<7S{RwSI*W0XvWSl zHBFG|2+czb=zcb;n5jzcRs2W^MjHC>|X6*o`m!V@DdAe{U zWthu2dB84ce{t=DTQmx!c(czTNGQUkusPoc295m)_>Sv1@Nxf7f!}l>Jm2RF;KHk8pqs=~Pwhs9uCF6tKCo%IK_Aw94v z6=}sbpl)#_uLQf5_&`FfN8SQ|q&xBaQOxNTjN7_|K(0%>E1uly8cDqp9~ln0@yIX| z$!0{p?Me89lW=$jVN^~uaOYswP9e*{9Kt(`sEBE!Nxq5NA$BktrIgm4Ox^oG_O9_? 
zySLQ{+BHpssk^+Q2%kwH?2$*9&aQ0lLFp%^5%gL7}iCO5~i|E3ne zr6b!wqGwp$AAQOV^7rZ#YR#vUDV-BbZR#7F1B~iK=`)=u{d#jsztf!3GfFAFvy{@h zphaV8CmxzYr~^$0-Nu0=S=EcMrkwC2wtOm;YFwEaPVcXCPb0+@7RCEhI*+q`Iq8?E zKykeDP0a(v<=CFUUnBZ9Z!0g^3xRR=VZaxBDgA(z@Kdi*z_Z?j6YYdQvplcqIHdPC zodits+Z@qW{^pp9bc7t$+-NKBaggaK`B7wBx!F$Yk|q_9eCR`kPWL74Vjt2jX-bki z{7B*@3gP8!*OWWO$;!Turvg;OIi(egmRk2OS0;k2ngvKawWnZo^1& z&nQ|Yq@~j2+MRI+*2K6(-Qi2gn#%^#fOUtSF|KNGl=kcUg7K=L+NX+sLF80M;IqEe#qEk)Ol1; zit`1mC2?M07f1aleU9?2*8i9|c}cX1aX#ZWO{ki1#+SK>p$?Mt^d)T0 zIF#j4jIVGX1T`h?Fh#hEaUr{VoO_E-NZ>k7NbGz%`Gf=uNIo^YhgU&2hmh8V8MAIhlrotX-3N!#GAo5~X|wOq6M8xSTKAuj zzQ8lVpdOU#aOqB3|7R*p0QFRvzvHGaCi(SJl(F1YeLEuwsLU@ zJLdNn{$i_U-hVYq5`kiu$;M1>4|agrS8_}UuI}PWFjeuDjPHq`B*ht%Jvrip3=$Vj zHax#ebg;N)vfI*5$WY-I%lW*G4wLrdL@?Z>3FS#(QOwTB1N{p;>?`?Y|L)OF(bCj) z3KY|+MJ5|OrC5ZEDJC13HVMBS-)gdaqZreyS^XLB zV`)japK!5OdZ%Si1zT>ie%Zyi9KDX&SMuf-rB0XFVzTln{i5CY1&G@u$DqtO@eZ?U zk&pQxUQCJ8?aj|VEfd6YlTFSx`b8&-bxdnSSn1&CWbu^AwoV%j_7bxLVs}QNOcw8H z9d_Dy=)Ts>@vqz|(JA7h$-c}j2m8%rS*}8vD&PVBY8<^>0U}j+o2+u`enX^*K$9+> zietQpFxl7RW=5xrSd+CMb~{)qvn8m0wMZB3J#2n-OOfwki=%Nx0oTK*!uhdzau{Lq|s_d?v7ms7KV2m(WuxmYHf4}Lwv}zM%*6NF}f3J@VC#l zBe7$2wz$l?#o}48&f-sIdROEK8!kqon6RV)34u8xiW&6`=224M?|_Zm_OK?@dJnVx zF=jJ#Mm%R+ExJcPAKg{7G1(tuc13p+*}4kx$C!Q5xndBr1KbyRVuEH4Dl|`&nK~*o zPpo2A!_IUU>l?969)>!_4#Pu_ME4Z?nNcggmA%F1TIZk;>MPDMJJ4-+(6?apXAx9I zq{1a)W(VZ$QI!f0dzfry)QM=^PGPdrsIQ}k2@7Hoalx()7k;U@{;TQt(ZhwIm5z>m ze~T^>cQRY-=-2mquob3mbD@kGA=WV4C*I2T1$&R#5;4IQ7&B5h(x}XR!rr-A%qS6M zvW?jiKj2I>*|qFuF=NH-H2(doTD+H49z9O9PG@Jtn2_k0@uI(n#m7t#MIM$GQzA+{ ztV7Hsaf^rL$CQfQ9yTbZTpaYUu`xG`V;(jwW`=OJ;J0tqpu#yZw~91S^aa?RF}I6s z4_h5GSM>0(%`x-EFb{hv#t@av_Q_YvcE>Cf^G)5N_`P7WTT+?T!YlHFm}+r~*-8=F z?4L1jRP2+B;(v-+E;h9$RxMnqzsKAwHf3lQl_gy(MCUe| zeU~EfP;$?<#8zVbc)M1Ma%MGR&B$QaL!w(d(k&MDya?AiafDg5h|Y_5JtDf%Jy^o1 z7HeZ$xgHmXI}lwd9+=t*4+vi{+53sP^hk0?ty}Ku<9bFMH`&;{*Sww;uj8(M%6IdS zL9XY-N1E}Vd_i1dR^xaerck~hu9z&l2QkTGy~c3ljGP($g7DVVQ802mSdhui=Z$f_ zC?ZUDDQ_xRoXHN2n~gUKr7>IVI6kflEK}0yTZd$GyGYTOsa@~%{U zwOAH%kNXGl8?zembKYXS!J;IWbc@BM@CV$+k772{YVjQCPvV@()`4BdoiMtX5AmDU z&*FAwdh+~5EN6BA18=eG7x7-cwmduV5%;fRd3Vj;9{4QSz#haFi?_n@BbGSJY^8WK z`Yrb#h8WnBl&eKj&O!IpM#r(g#E@R3)5o~K#2#j=#Z%osa{nbnL47_UC-OmHrTDA$ zakoV_JQ`Z%M_R{&(kiV3>hgW&wo0R+sh12WBxQ}GN55~~Ub2_TCV)BQLX&;g_eZz4 z+-b63!F=QoCVQpd6}PXXSNKpVN5TB$IL#2w0iuZ%qe)jSeux&aP33NbX$@MB(*b#W zoK^+|-DdZf7fhBAd~8!WGyOtpxQb;?^NXv^=?(_)*+ zvnG48Lz~zzIlqK-t3}(vS+U{Lffu{s5AO)+Qelsjy(SW?5vOnhfzwp{EfgzVox0}5 zM#)vI)4L*Go@GWmZCGr4qwY?YTPIUaZHc=prx4SY$&D=2B|f@N^=s2~iBGB%!}(rG zY`V-W*X;QLC9y5!ZzkJ2rX;qdTtA(3ROn~!Rx;{VZJF4&G`5YrU^3_U$d8eAY?F-96+UlVyxK?(QkinataL+}%s2-2qDj6_#~8 z?kp1Gs*uiq&-I@&%!Lw_Z1)k!}($@{vxL zGD(K4th4;gJ*kmpscdq8y{@!QN9TXI;ZC|d%Kfp^fD~$NO!CJb2aH`y81HqR+&~?$1cR)CP$depz3GKMVdN#wq4|yEti>W zOD?fBnqemYC3d#_(_|?fuEoxiP4KiSjhG4JaX+hUZn9r-BWjiGY_hMIbu(GePbFshV@z(>}#}-yGw30>9jG~aZBZo9)=%Z02#ftz5i zc&J^hl-5Ud6?yHl<5tRe50l~n+0tas<}7lolKoBg;ZR~Fn(^LVCD(h{h1dt>anq7! 
z*;<+TIMuLPRJvaCGHM$I%^Df-BUg`NddIDipKInghEtG-Wav+Hx8?T2MAJgJO)A`xyTZ~ zM|SR@*}kbw;@^@3IuctVmRdvN-TgSgAOIf!>49{;5=IX}zcvKvg$;1p)zpFSW{;+JVm5xKjd0^IDHg%kj?;ZcX z^fTG7@dLq*Fgw7P9zKw>^2qW4daZT*2aT%uKzikqZnfAGKODMzW;Kphd4=+b%)@zOEu%_kI35!NLOdMoSAm!h=?UBURLjOtTYVQKtvIh$FvqkHMh_>=Oo)`|47 zxXLeE4WOjT#%WW|`{)`MWbz9rb zj{mpZWU{}&zLfDpb-oi|U(0({}T&T(gG_Wc*If zFzE%bi*ljKCWOw8za)>DY#G>3a%+*!c&+_?@juJx5t{9Q+rP?0X8S}il9|MJsf0oNBg2v$=i#j89PA zOm;_)KjV|sLX#c9Zc0(J?$DNn*rRFcxXG4)wNU3w76jHx<=1G-WUvgi!DMfNwKddV zOl!ogA@n$r<4$eb4a#;Zz+}&Wbx>g@qj=6zE|XCtWUEysqj1hqcipW^IiF!m$WgPF zY4(Rd?uu379%413bB87g-ITG0=@PN7UueQjYQM?$_l-%&S3jC8xnD{`52e?0)ndfx zwh6tI>t0=G&t};Pz18z8b)mPSioR;r{lr#^ol{SXe(E=7H5in62?NxQ2ef7S*ibQ0 zbu`xKVmg-EMWHHvShIaNIu)wf8#KFw;Z&&3J*G3Zgx`f~=aX6|LnevA%I_)7ex5i< z3{gcU>p6Oo7^=>0(Yj1HI!sxg(QMTe+`6Nz+sqIeib9J#Eqnki9H~Zc)21_!akRQ( zvNyT}h_N2arCo5Nma;ypEgxcbkI6DHdlaj6Ci@YgH(qUGwpu(sxL-nv`u$lx{^@w0 zq@teF#ZWv?Ruk}opSp>4vRcCIEBQ|Blg`QNqSiTnz=5b#{bsT=Ir~MK!X-q?=a|rI zctV--Hd#@xu?bUEpk{mknyNCvaQs^>5~fasGN*xZrs}I1M)TZ+nQGQ|+OA2yTGTQy~L+(Y!;@Ae zEL9gw_V|?ikh@isMVIpOlvN3LtG*`tx_N%cGBw|1GI~|QGPPAR!_jwae#mll%2ZAn zyDDM1^0VqTI%Cp}R+wo2)!_f7rchy~*xQ{W#=a^`6Q8n3^AQpSo(Yrqfm> z+^5oQy39kV#lb7oXpg}qiKtW+0FHmp29s?WVi_kbh1;E9A<<>#Z>sn~6nS`}=vbFo{Yi!|B%;1?6tsP@cO zI}T3R1(w6C262%j)~Ep{TRhMR1)X5hubU@{wQ82hWOOLlohEyHN|Ja;tu@)pQ$oSE znyl~GB(YAtVX`Th+19C}CM)lig!{t(ZL)>ELcxA9*@iZShJ08#=m7wX*lleB@CdHc zWW(@N>?11KWH*tSM(q-PAihGOH9VYu@YLeKf zYE9O3TBtBKs;5nQD78>Nrgod`Olp95O#Q=TcK;;txH@IBQ2$V{izXXflq5DOd})Il zF})}hEW~6#cexh4StXjx(X~8av&u5rzVsFWPZ+9?Nk2-j40u9~HQ7s(lEjnh7Ly&G z6biQ3WR;1(1wW-8G}+?B60oOCc588x*rHxF*^1&&u)~^hJUy*GW~MLLJgtliOdk+a z+PstJ}ubIrl zRgrq#vlH8cZ8CL>@N4>3^^;*z`l)@Z`hyt_5(yR-Kvn4Kx2Yi}qw2S*{Gj^kx2bW= zboJXBRbQ{8>bI#WM3Zd%t9(}vfMDw<4%DTn4`2MqOtlOsop+o(P@gUhZ3Husl z+^-VMjHKJ&NOwSWGj-?1J?;Z)1T$Uz!A6z`)g;q$7Je;1NM3q=v^}JmA?TbZCE;CD zN#{J)#KRs|8Woawv{A+nJ-XYYViJyd*vzQN#7{ho^8Kq(zLOr^?NOl#pPI~|3eyrl z^C&6fztueUThG>K)w)LPWe>|vJgfG5m|c9Sj(J$O#4pt)59^tDP6>LT$~;p%uYxtB z^S{A~=T)x;%8P2ghmA?RsG^+p8Kq^HW{%9(NkUqVm`t`J_O;2@vF@75N@D}#q$L1P zaOvvjBudL3lkxS>P)kWzo$`UiP|FGr!vpn}-Hlk+@H)%&i4m3}W?#vc`9npNv79hjp;v%#S-vz`8%Ka}TP`uv zGeoRKFUj=`5of`}|7hVENi#&8rB*YZlHx24r=)t_vlET>Fr~LOJc!?|74F#kZ|UvO zD&gOjqBtXrEbk5`?1$etlz2araJ|HeN3iM&Qf5zFa#0CE z5!4f(j3lk@p@tZereJe3eu%<7q0P$R03qjW=~zapv7;V2q+|9a9E@$pS0 zN$*bD_sgzNXO1IzDqNA`N8S;-sr6}#C~t@P&nn1Niec>0guay4rn)od@E*N!_bPMu zX`AER44u}!b+Rj!*0ATUlNjhI64iC)|1#VEL9Xj6HcPAjNer;MajeGzq2i(~X3@H>4e^pI01Fg6@rQD#0d+lkHJs$f zV*&y!=6KO#;fH}#<|u>Xv!O@_V8d{$AJcW=b~=#?)t09dsi0nne~Ym94+yZDa-FN8 zi#G~SgFO8&@rgqJKMha3c-5@_#_?Gfo*$#T>V~BbRSHi8Q3E{7bAZZL>f{YO_CKXx#wGR1?Fh;1cu@T<2JAH#Rt-^m5-g>!hoG@U0gOg=U-7lq=M z>|yl6q7R{Uh{4b{Zor!fWUrfTG;DE0#ic=%?H$}KA;qyV*B7BPQ{O0&fun}x&iwDF z#N!yK+D3`^rsVqLMg!5^7-kmH7rVnMj-k_RH_G6#|DP(moK2;D0j#U?!05VkokULN z_}A?i(26nyS*dz*#U`eApw3(btjnNXTpvX?TYxsv1@?A9udZ&~$-0Lc_U;WO6#xID zQ6rm%_6^f@fw>A(EFQT4D)YFin-?&Fnn#NiJo1k;4#AjhG5=c-EXa^jkGrwIl_{Y# z9u@jOl2FNY<@~*t{a6uU{?`=~(Ox(9{hxer>V#E-60>-%aD_+5-&?7Bu_b0h3kRcB z*mJ1N51UZl?*mDG7Y;cR&z6ydmys0H0#*?Hj<`DmNfzxooRE|TWYP2rWMN=y+qCWyhib5X!gSks3FIIKC8_NI3 zG{yh_X!QT0&9w}2eKOC9)N?rhZ#9|Pr1y<>B?CusiQ3b_7tN0UM z*Eyt}&~4hshnc^-*sBed@WtP9>82XGi2px#L8H2i20_$Np%0x6CHM0j@Edopo)2`I zW(6g5x}kl;bS?Toio+P#N_659ei$1NAjQmJp8p$~diNpIr+BYxY^eR;q>u746#orH4VMzV+R!TRS~M*k|1pwQcFnp31lYvpya2q{>v>4L!~)nj#Lx(G`Y(TK zA5CHw(Wiv$b-VwQDHK2b|M~lf4BL6 zE@^cB`}gAXdBNix4H_Gs-#1)0(=1};wQ}8pmMkqiyJ)$s(fZ%aYp{`C`|7iRhJ`ki z>vgTU_T@=euXi=-ybU9%;dI_`jMS%Zc>epp%SLS_%eu4XhPfIJ)cQ14c6~0qM4#fn zV=Be%ARs2umQ?6{9S9dQKo2jGZ~!DbP^p$Mz^)Q>9r5eSEHr1ui>IK^~`2tC7`cZ0rV5s 
z;6xLV0&FVYM+yFhnBkzxM*31^ZGb_d5{h7v1q>1YghP0Kt0`4{EBe49UgVxwg9VvQ z{LXQp56u*Sb!W=@toc9TkC<5$i|76aMbs~%Tp)ee;JxfSM39VcyJQvgKt35h4c)@EXG{M0>(nd z5rl>qk4-2Fp$sz^=P<4W;x&grx7f?+L%6*I4|EE;$QL3GPs~7??!G*VbbCBX_htWX z;smfjoMHR(zyNUxcOdl#M&mxOOJY7AB$Rl75MRBJPvXfvyoBrK7DUSNsGgL!;SoKk zi}6^v{0>j#Ntur)@uZxLC-6o{_kb$grAUPqTFJQ)Ks*}B>7~H+Vk4)w0P*50#(Ytn znk4eYk(3Oed%!OAAi2KF+OzF`vCFWYEIbatg^tj^?@#KgffWz~D0p6BIX?mJ*2wHy)cykMXIYg9C2?bJP&af*~O8_GA z_NXYi3-!i9YvgCAU^6+JOx*)o0k=-eg8FDqS9zY@+GTxsLT_0ujL7|BuazDK+-1GF zc&NOBYDNK9xZ8`3;+f`Ea-%rw#;KR7*tH5!q@v zBq@;4gIk&6vGRUu6?bx`xR#x$sGo4A0v9XQXx^8jIV6{`)9ay)RGWYs)K=gdY8J~g zd6(_7+NPg{gm%*o%4=Xl@w`;_ZR2f8u_ca9!t38`qXH}$w)||TCChdsMR$0zg}S{J z@Zkxafq^KfnxD;F!jZq*3@i751(sd%{loyVOBTEF6fZCU<-a;;v!%dBqhp5E8n?|t zyYfYgvCEn@{RrNoM{UZI{t<6m2>)R@YF!+0!m^NcUdJ38$bEZG{Pe(-&w7Ex&JCx{~q^2h-u>A|K$~~*F-h{b?cRzCJ%gQv2j^|=LjH;L%TcH8;b_cR;EIUVkl#`A zt=+BOh`a>68!w@T*JDZwWrm!9$u*x_A1}R!&GYJu$DUVu4MCTFXU&I0n0&ay(^)&3 zwG&x;!j{_QHLpzh)wCZ(CT7eiInlJFNolkkp1;Sd93@ApE7+k&y=HMOQB;oMHJ=Mw z!UfIZYL;;3j+|ce8vq%lC15AG^K}M~YCnkL5d9vV$eNIlY_H`GO+jJ-E0_yk{Ux?)N^%MNkEM zyb1r|eTrLuinE>K;*LRr_f1&cQ``F-WBFz7@XIK)Qia>Ia_;aUyxS)i`};m0spuJo zE26OcL8MbUYyd7x*^k@FsGLHTpZ%)O2!;6n5*s2{xyq~jJbj?;`{35T<6&B1mu9C? z&>gakjPBrj$aYtLF0|BVDfVgpy?j&b`!o9cUWHqgYCQL!6?yRn18Zc&D999r-zn#8kxJlo?H znQ$>h{)GFrqo^)JM8S*wKyso?1x|M%+ecj~^Pn6`=P?#eNJaYV9==Vk^8U``Ld(s* zAy-!M?wiPxS&Ty%H?e#^r2KkXL0k^v*jkPaX#BDH+ZvzaU$a;6oCV%q?vyR zOSZCPz9Pwd-j_RgU+&<&xSO?jH72ynS-X$5`&j!PYnQNg32TqB_84n-S&esM1N^V> z5;2O^r&xW;RDX=+#%1onY7~U`=vbzgVP&VD>T{d_b~RJ!{;S&jKI6mK1+URleyk#P&-cE(+ddl@&X994|c{LAdP4@=l7eM{d-MnJg5 zJz$`9spVXka){Q)1c>v+y zL(yFU)##gh12(EJ@Z`!;i>t$f0bA7O9-*Sp9vihjV7uxwE);$6MOVUXPR~P`#&-2p z*`|QCT=aJJefwu%S`a!nU>EO{UFyD^7a*C{br;YTvj>&UO%KKXYlnW?s{#`b2JBTE zL*@oltK)g^2OL5<9|s(@?;ddySR45Tq}^WD2)tz6tDt$P7;kxh zcy?f}*Mg|rzydE{boF@4@!>rKcUi|p^n?6V^!9`LV!q$F$9X<1JNb&jgP5>gRqfaE{mc%urEaxfk*R%f7_Tfz@8_0ed0; ztK&PU^5L8hfNiFJfLw16KZYvFtt+Y@9<#W>HD16}X8Zs`v3-GOv9t(q3dR^{uZ_rw=UplS|s`gs#ekdr{D=~Ou&=#+&!{L^ftLRBc zR=c+a6?mmi-2pt^eFt&{6zvQeZ&^2aZ_qBvYXqIJj!78hYvp};8B)k8 zp2xS)vC(RwF>STbUbI^1NHN|*?I^JP7W5g4+Z}N(Xs_4Hy?zTi%B?K4?9cp0;98;X#EhmM4O0f&(2DJ&p)( z$5#_i$Sszu#mj@e9sdYg8@!zzU235P_;yQ90m9JnRWxoGa?BpJB{;)DXVxLymu|0l#wE90@+8M>bB5J`G-KNtlZJ?=j@Qg_iEUtX3=2 zTENkhxVf`h#TQyb_F71?*FusRmhW)q>I}WcR z&9L!f%qrl8yXlg-c*ci78WwYQXD)<9I|{^(jF*Ea9ptbJfXd)w)`svFAd^2 zD02jl>liZLc4S%?;O|lS!1oiM#`)7QG-F5w2z=rVH8N!445X-AATRTg*z7x1z4I42lbg6>^d8q@3F$2$%l6U0$!O-QrYU|AL zpKqbVXg*h;&lPv(v9OZ+=?q-_GL+hPF?20U&O`D`=y^-eDU-x`%dzeshn%;>;1W(0 zmvF{Hv)&9#YzFP1b)$>LB}@3!1gCMy@?~+F^Mv|w_#y{6aS?^SpP1>a7AdiJ-~f6C z$LXb(R=7PnoCnoX3mr_ud1x)Qw3$F-r)7Sra}M`eHRDFdGZSd75QF2$7C1NExzrLh zX|A)tQRT+m;0POTwz14}KAS4%C+aA>JkK2r%_&b`Zdl-pOW zPGsDNf==TQa)KpGEwg*9hNQgDYUf7Bg9@u-D5h}dBBhu_5(A!eJAS}?k(zPTk+`F*L z-C-%-bZUJF?U*Doyu(UYgl!bROxqY%hKipArjC0q%-cIZ8`rVCCujdISL)99jt_e! 
zEZ4C-cz@U|^kO$$)>|C$5nn^W3xIhonT1o&{YVdLi<>|)v@QtWy?)0--Z3%HJr{5?RWx)a4TEx;Ph_Bd^0Z%y!AZb zQ4SV-h6C^CS`hBQXDb#1)5TrDiQ*n$skk3lE^2}M#QN|cEJYUp&jXeCPHjZmZ6Qep z&?fG(ZUip3J`P-A-3(l1eG<6Fx&^r2`V4S`bsKP#^*P{E*6raoLu|Fa2=cu3CEyP0 zE5M!BUBKPe*MNJhuLJj4-vqv6-3xrr`Zn;0bwBW!^&s%L^$_rs^ zGLtPkvQ1~!cC-Bxmc3Zpm$d^q?+_bRS;X4WwiDFHHtO|K!$uumZaWFQ#r8SyEGy30 zXm?y-?GLQI%-Y{_6>GHt~c$1ZQfMScE*<&cQL*W3=(@8_cMOXIMioBcuV{$ zQIgaWKQ$QT*hGqDz&NoAm?ky=+u{pJEk(BY0*IG-0sG){dM)u;G?I^EED>EGnI=ks zmEzXqd@&C(G!8eFznNSjJ`xY5?6oX|n^TY4LQZDGk4mTci%hgfomC5QZsN?DeAo<-+bbRI^PsoqW4ttRYN z6RIqQB`GXPfuuS$uL--w@&c9@K;A8NVF1ZDFz#XeoKXal+{M_9aa`as1ESBFh#*q$ zaT0zWMkvBL17kPFaf}NY_uSM6eYUXIyRh8AC<-X;V(i8^j&UL52F5*%uHIzX4QL=@ znJi@7!076`SPV}QgSj}yaf}NYH!$MM4Dw<)eL_Wu68!pxA007oN`bq81Wv?6H*6y3iFcHDXYV5KiQY53=XgKn{fzf>-o`QSi{4kg zojyrEoqcZdDex)sncy?oXRgmvKHGd=@OjhcU7vsYeCzYO&p6*bzF+!|^4sK>(PUzi zTbnFt@<5ZHn?yD3()2=8Uw@~6lz*&$mVd5)Pyhb@BmKwuPxdeOukv5%|GU2ka0O%q zbPFg9_$y#=;J*TWf{q7W3+fv@HTZ$xM+XKU3jQSc%ite_V?q)_`h^S+`BzAKXur_8 zp?`)3Iy*ZHofDi7JGVLG!m`6k!}f$73OgQlI_&GP!tmkYq3BM5FH6uv`62!K0)OZ? zu=cZ2D#dV#1k7hi_?LNyQ; zI^NP$fnPN%QS{MfkKyP4c9Wj~rc8dCu$&~%r4csCeioS4`9)yXP4wEwzOL7S4^4gA zz~<_>y}(Yx4gfdBl45)w;W78Szy*`2xLG+=(9j`AfbWkY)HWJ*afN+9hI~sQ-RygT zE9ljO%K5(v`xf{%sw?lAJCbH3Te3BYiB0T8F(d@?8oi}7FKIMN0(RoW;SmT;jIAUh ze&Gj6fUG3jlt&X9T42JDq%~iGHWUhMp`uHmW?Krh;n%Rh0;SmxzLu8I%}3c^)8Ep^ z?)N`;B-u&nezNA=bI-kV?z!ild+uZI$OBgs|1!2JjCJe*!!{`}uh|+F~WY zGmo-?iib}xtQ7omO$g70vq4=i*7Uq6?iMVZ4XU*Pb`pd0jI4*NMpZvlEPfp0rq=MHiB~mYkcXRj(heshJ9>DX0$2qiW{MuG8iuv<}`0dw{CW2gN zQ?;^oC+6b9AC{B;o?LSQ@S|-P1761A!WzPJr!y41zJs8B7Qs8eNbo%lpSWZZ@QZg5 zMOc6pfpB;Y;dgZtd@4Yc=Cxr!?*(4~%umCkOyT5ps{xO;QI+ho2)@39;GJJ2cz6xL zrvg;5yK>x2-s1^RT*3`!7bOb|D0yKG;Wu1GuzF=Hn7+NR9q@F9DCWG)!0*{gxZFkg zjV?04sd!AIRQnQw-{5$6GxqQ*d6qmfPZ3_mxq({Y6+nh3^b(FHyozHBwZcyU8J^x_ z)+783hZkyv;c><_5o(a8vw7*iGv7s ziNk>1;%>kmaW7!6cr)ODxDPiE0^&F*=FRw#Ye1aI5b{ zvsI!ATwo?}k=+lR0pdjn_5g4jAYO!EM}Rv3nIN-o09OE^efA)57a+rby!s~aYCtBq z*~7p+fOxfteT(W;%f5w(mmLMJ0y3eVeFw0CJr2tGfJ~Ulo&aoQPXRWu?}0uGkO{Nd zGr)a-Oqj!pz?%V?Z~^-P@CyN%Fqb_Cd>$afKY@K7_}B8qK%CQIuL2JN!fx2lfJXpfH|#awF+e87+0TJ5 z1!VYVV6OvT28fgG>609hJ8(eM@`s{nEQi@gnaDSH=i1A7m!ot*^y68qnPx3LcZ zZ)d*(yn}rR_yGGY;1PDpeET}KK9?5mw9SQ2-*3AJ@SyEtz^~dC0N!I;1bClq3E&}{ zA3pyua;_G>j-0E7|Am~Zu|in@_y}^Y#yVvQ;G@Xdf`thFca#vY&jlnGdc6?AkMUkF z#Ow#*)9g=dxAgClWUIF=u%&F< zZBN*qxBaW_*S2Z)fMb~>?Pzn{;TVu_laI*XlAn@ak$)q9B7Y`JO0`m_%u+5={;W)M z2A%tz_d9D`8(f#W`dzIbTqy4Sil zy03B9c)sa*-1B|U4?VAVUibXM^DB>5bI7|>?N|4xFRLfiC#Ef){@v-vrvGgEYjyux z_q!QqX8iXIXZ^JLf2luRFEvy(T-dOvA=a>_AtgQ|E?tRcx=@A%w*h)sKVS{Z1J<&G zfL?Y2FsHKGxw0^gwE<3N+W_lW9`HPN0&oU<2e6)9HBW|p9|Am|y#qLty${&PJ_2lF zJ1>%9-+KUOvu6Q)Yyxl&I|10tYUj(aZ(AO|RgmG#a59X4uE!yG8-~Hkr?WxC(PcWg zHTmgmu>Kh(9nzZo+;XXjIJ!uDL+4eS4wws-SX8ZW=QLBT;rcxza;Ebt$}5QZBz zavF{qQBvviMvRcN3qH|YPNkglacu2bgoOWegCO(+A$%+7^Km$ga2@#j7LI>=jtKob zYfAVvZb+kxtcDnCP~)|@F2!{jt`=OaxYprXkE;!%!Umk^+9=dv%}4(xvktSQdf^IO zSJDrhx3kx#|Eax2ByV^upQS9Tsv`P zab3?R$aLK8N4rSK>B6gc8$rPI%cD|wPbs{wlzv|+{p}L{-BS2oDgC`t`bmnD{!f+& zSBe(H&86@f(JJs7(JJuTQv6!Pk7A^HNBA21C0+&p1+K4%hwNVwAH{Xd?q=`HZoK;Y zJ4Z$w5dN!vK$zV?f(k8tJNz2{Jx4Sh)3jhJ7z}7Dqee6k)x$}DI2jAY4Ba1&>FGpV zOD6sCSU45*`x9YJGg1Lf562=gJrV+kjfqxcV`7uBHEe!i2YWdejUZi%#Z#$(uEmq# zWW*nh`xEJ~ZbZXjBkB(%BB8ipL}Gz-I-X2Lf)U-Hh(`5bBpTFozYt9Y;<1=NnbwU| zA`(eO(_ud(2y3B8LXQUg!ALloO6y^N!U)HcNh1$#^UfNGAeD%7_L0dMHFS(qpM~ z%7`R_x*mzeB7smcmM|~e#XiYt!L+Vve%um^rlRpw$Oy$_(Qq=5O6c)eFs(&HM%oC3 z)3JoErSy~*3WsBXBs3V)wV)Oh0>MZi9!exrv1BR`*5a{rFp&yJBB4||5e?|zr|Icv zBp3}u^l&5^OB$(UBAy5b4L~gy4hm?ZkAjLQ;X{nf6xeP zktkH4#}ct{GM-96Ct;XMG?nrj$y7KHip1k7Xes3nY8nc{2Px*!ZgxX18i$b@aSdtF 
zpg-gfq=Eq>hSo}G5j_}#d4@v>3?muV!8D?$V!8L22veE z9UC({vV%jJflSxhq2W!Nf?**P4Mn2yC|V$v48gSU4XsF{J^X=S9O9<}K{Q7g!bc)T zBBlirp#&@srXSEkaeq84EX(wEtq5=33M|V*^JD14+}6Vq(_1^Jv~2H=*33{x7X%1s zG4mTe?BZ1ce>xe_{2?@PQVXIV!c3F8KkA1uN7C`M5sYXtJs5)K(G@`tB;x5%I2hJ+ z16?_4zTCsM=8R}85ZA&%7?}neOv9uT;Z(}-`y+v1G7|Lbi69xT-=8$la5`EsZs@^q z*q=(kW5k5^L~C1%)%!y+A(0NC+rnI7nlU{VhY=z#bOJe(et#_Dk7%%REd(*bk$_Q? z(9;?UgY4;OI+!*Cy=+SkwrM2%x<3FjNJm4_K*|V5^+XuWln6v3fdCpEJ=1_$pc87b zP&AQ>8CudvB!j6`EG{ft5f`+u-ycoI^@N^+2ZAi=5L_5KSR@@w!(OAIU>XJ=g97xp z5i#PaKmtx69!%R)hL%p72Yc}oW+A^3PZ$XREe4guL+J47fS!yJW4I}3E)ocbLK=Ks zC?1T&L!mU>S|AdHY|&_<+*MlJ5<)6F*qO=>WIBiX2KKfZn-W4gs2PD!2ppj;BMP4u zi$3d74J68=C4o;Uti?Ng2;nfCgG9-x*iO|nWa;3m*G%2nt)HzgIXe?1w!zxej^ z7zUaK&IWb}DfB3Yk5I_)N0Wg_SVyD#G5A1?Aeg3-!8mC+6^BT%h#u9nWFjpDQ@S2A z!Uh~|AYw!#(NGwTkbn#r1q^fmbl+ew1P=ozfe{LBH-+w}N8)ga3B7nrKQqOK);42x zU@I!NHF0@r?RtUYA&M8P2iSD6VPmGFt4(hgC|sXvFYC2!b#c}pJ1y329qi6_X4Ar#DSBBXJsd{U6aBXjPDBIDUy~%uX4?EA)9%9wS|K7v0 z;zfP^Ta($L-PyrRqPI&}of&G$_GS#AS9A%5!Cl$@HG#+;qi49gV<_9#Yx?eFSI?|?9hhu? zcgJ46ue)#HioWjQo=kD*PS(S!AcelOqqjHHJy`tZes+mCW9#28zeO!48NrqKU4qY;n_r>=S0cv@hE`RMa2BzbCFnN=d)#2Z}#= zgso(9GP`4K?@*>F9%a8~^i1QHz-Uhpg8Xz*^lt6p9y_Z&A<5`d)l#;wA*if`ZSwvKJqmC zks10&R_7?G@w#X5pQ&pgQ(29~v6v|`Ka=|gIx}5*NAa#7u*H&`Y~Pp}gkcnqu?|sQ z(>37N+PckYFR&|uJ2FFCTRXD7S7v*=`gX6`*q*&919R-@7!q*D=Oz`mzrgxf16TR> zq4u5G?L(yhjoBSLdCYucg3UP(l(jIKY-g6I8hbci{ErD1;BBzBw{xI4>z~;@iW6B| zhr5Te{lzDKivRpvEdGp*ROP(*U)R{D^kI(PfoXFuHXqBB7SM&7a;zP*bxipy@fOV7 z+we+>0jdu?EH3<>x^rnfr`(PSIo@V*x8gnnYlTJFFRc-$L<53YjK9skDp$MLVBd5j z_Ej}uLGjgpV}E41@?Ab5AS}i|(GeOhhqSwJ^$EKHsRkC`$r32<)EHjVoO$kDEvWZi zEFcC@mr2}*H4Oa;-V)7+3>HzSIXjymrSS&My5g}9SZ%QQhw0Ko%zR{q6vAF}rJ{j~ zio_|@&?L3^iy6}YWwU1cge4$eiaJqS3;}NkB)y#!AKI@&=*Rt8@ayGy_d!(bKUev%ckRP&f=>{ZOuZwGoC8=^Ww5OJ{QI#l zRlH!PG=KTRMP-w@k_+3#8)6NbtsU{3z`|nI10jaYwO2_wEV~~6)0OLf{`kb2?_P7o z{_oz&N*eVYRqE9I^UR~xml4hGW1`37@QI>#Bw^mXS?X}Uj%Bu^&YOST{BW}r z|Dq&|3euZAO_JmjJa&P3JRWDQ$dsm8_@7piOZ2cNk6X5@8|ZJApyuyov#>%H)eYW} zy*8hqZjiCnMu~Qx&_pp&9Vw_AY(BBkyya@Cd9_2fDT*XHkWg@XY}lUjAdv%)B9n+< z>O`pKAN1xQMw-XNC2y2bqXVMK!xzn8UoBlC9x!XJk!Ehcn@tmN6sb-fxtnTSE8BQh zk3v<${lu1X$TCw4F}0pJy(OyEJibh=x4A^rqE0Q$M_BEXa0|}?<+xD4S)vnZ3(bSq zNEftdbt+b+9tFyQc4}mam!US+sRpzomtqc-lz*3$O#yVFj@(a?k$XKZ*#WIWOz(6P zM>l58f6yUIYP|<~RTK&(?{trM`uQ>n-7Kp0XhAi^+i2YuY2k90Z1)Zt0Ifz53NO{1WOiIaScm9w~z}gXb;6{uY$e{Yw*qCs-lK^$@*5;iqb5hmip5y!R|sYSi|$S zs0I3RN(-m7sRjCSN?V`oYf|%n#g_t9?^@V90kt3!rFBYu*2k3DD@VYws++98UaT4smD9=mUrY`@5o8m9x5i1-RJVB zDafC4kVDA-)y}0RQbCmAYe4e2e2jd0=j;N=D7 z@289-XLw}f43B&U7lL$;nk%?(l@p&!yfQ2%e~KPUCAOoK|Ir3r4mP7Dv{` z@4=|zvgoj3%S7ppYPdx7C^#s5W!ctbR}?sHNwJXX$^NFeE|h1P=U*o^=W6ke3Uj$! zYN6lbtahT;Sv4s1D}qFx(9>v_B@c6!=|ktq7-3{5x-=YZVW(Q?!k2M{Vq7U~nMp%H zHxTOGlVu{JDAc!|%uXpvwd`^!IhPB#9UWb^gDIq!9S{XBf(#rk^jZ<_jv(}iCKk$K zY$Fd{=(pSL@EK0jLM?29Kn#K?3S37?_@+iD{1C53@c$7mTDN<9D z!kQG6%cCI3qTP8uPqljsw^&|50p^k+iQ+aN>6GT>=GtVj$xcqwg~7*#oEOtT4^7H0 z1-UN%}<2o2DlkG4z=K~YO+H}k`snBA$#)= zdGp`)<{$IspKXGV$mgE)=AQXJoya4MddW#~%l<1LH%IGG5;9q8a-*lY-6{qJ z>M>NL^<`GCB!bnM2m@PhA*@(IRk{BhBjhtC=bG#l!4J~F2^wz|HGilMDqsvEPc4Sg z%n1x#VCY7;7!v?*VL3^0ur#e8A2e~`JI-NSzd867Qb>I zuQ-H>GG#)8LFNy_7D~LS$Vnf!->QhE6%G$slY$DptcTp8mGUgKCe7#WK{KI7q%FnH z6sICnyP?~Z6mpE35T!D7n@PRb_#EQokfJb5%u4NtK?akA5=RWK7DnuOlK^qz2Qx}! 
[base85-encoded binary delta data omitted]
delta 147447
[base85-encoded binary delta data omitted]
z=%f`fmB4!z*eT&OD24^hF*76n2ui`G3p%Wf4Z?5=N`#GMOf#*3!6Oh0?7RETI)R4z z$UsKGWn`In`-X^_K&{2oPtPoY1EQqsl+h0&pmoyXEh1iPc<*7~hciQi{9e^^{Kc>n zNaC1Rhoivh1kt1*MJSW@&qVtu5-Bu=dG=Cbx?y1}?nn}i@l=Y#(cy^eP+YJZZ=WXE zEaJ?m=$#N9DNtkmAO$ z4>|UG%&t3L<))21ll6q zYLnY4Q^D%Pcz-Asadj~ZbteWA={t<{44njcm?;`4q2n~MfYK0dm6PD6QVj198JW^>g#Hk-08-=dgBLiad?FMtae6F4%BHYxvBR z)~Hx`Qh3sOBMFrVk0Jyw2`)EihQja|cxb_sV@2Ug5w51tPf$t!fCzVrTI+)!9npTy znmU3fzoE5U+qwil-?$PVJY4dL;@ahRZQL?+@3y<{SoGpoQK1D^B`1$gI;8%}m10{U78XwqLv-9qep)E`L?%H_o@YdUxth{yG zRtCI5K;OoZjRIIR{J9~XV3!sqG!>@Dgu|%lze8VtsFZ&1@NFZTlADHxZ{IY650%5S zM3wlOMfGpESyNcB{ZZ{}YC5zY)6PGn=EDoedHr>2-F?;#@zd7gE!s{CJ%1h zJT$s#uy5$@J4Uu`AMEK{x%IEN^{!gEcVeRl9u zLz{;-2J>}3i4SjZ>R4QD1m_RTyMi_Hz)kWc!Ou9a1QSWVH1Ig@5BiflAB>uOML6&U z{p0GD8a~ae;QM8YXCq#xDqaTho?vkuzp~^3P3$>{n2Mc1oIrdQF_xP08sa44-ykL$ z@Z*Ude#=a0M2z1kQ@Ri%s=1^HoR-n=;ZM9-gTjxdA|*?{^`a$h3+%tTKhl%&{@hX?Ur+WX3+#@K zK$CcgD|FsSek+rC=T1x6Lz5j{;WNKZ?lGM`pSSRZ!59gV2%o#%+LTbe;KQ4(ud)d* zcy6=RlcLYHlH?!`Q(LT8RlOBoCTR`MZ?#_3nv{;<>222cBHZrn*7Xu3+1-UNnBy1p O6lFzw=)B8+t^PlLDswpi diff --git a/bin/BDInfo/System.Resources.Extensions.dll b/bin/BDInfo/System.Resources.Extensions.dll new file mode 100644 index 0000000000000000000000000000000000000000..939c9f5823eb53615d328e75767ebeebf0f703da GIT binary patch literal 137376 zcmeFac|cRg7e9IvwxD2w8pXOaDk^S>;(}5wQ9^@)q9X1JLJ*WqNW>LUz=AOrt-H2g zZL4*!``Rk*`)=K9snu4bt+-=r&HJ3W3klNx`u)E5{&}ylmzg_r&Y82#nYlA_Z^MQZ za1I>DIpVTfIc_&n`USGTfBz>7)z#kJTaDZ9dbZ(iS?Jk@5%CG>n$)zE(P`01nwaS1 z$Cj8sJYMU{T0YB;qd+B|d3}#FQAQkaz{pOl(SRet{g<-RAb+?7tfLR|EfQ;9m{= ztAT$t@UI5`)xf_R_*Vn}YT#cD{Qp1${PVMnJtlf`T$U)ZSWZulzLk^{Tp*s0%beOH z7s>VKM&hrI>xWzj*N5wk`w;vMMxK7(I6ivAaYRkQrJS$LQYwF+{@_RL(IczvF9 zP>tr&KqrAq^R7YX!J*U8fucc(UnkY z&tQBZC;zWKH;OGk{3a^_ZV%8)!aoDBjc5hy1E7_{jR(D0l%{hUE{3(8jy!*#!NsFC z27fhxHGr>J5aT$oF86nj9DLi41=6*T2PB7d?XNei8jRS8&t|c_pq`T>8EoMfRpM&fi^U*ShAA)=mt_J}e){Oae z-Kga(rh%f{phidMI&;BITh!v(HRYOi>Bw~t=91)d zf}8e?N94cj7wW6=X@qDbl_$zbmUe?`pjDB#LZji%=s)`=)!%r zn&UdguUjqSe73HZb9+}iaKq&@BYh67cH~a2cH%rQt#$^To2y;8yE{3~=iyEn_hhG> zTR6h!_ni)$lYFNm7p|7?bmHP7o5*+6IzDf{ikqFPlP?&`Wp-yaH-~SZ)A7FizKnY=H%)H1vZ-n;*W1a#@VKFBGN*Phg{a)P`tpUrO&c05*to71t0<1sbY!BN*R$?0G!CJH)76Q3Kv zXSiSGt>tqf4UUG3kt(;^oXOFks?VtwI5PTu8+xo<NtsH0)a z5!n_Ou0eu)oV;n&M0u){le#MiEsX4@c3SX4#?6(hIykMm3=a&}IW5=;)g?Iv1)H4I zgD1Ig3sBt}|CGZ}3X4c2@6l;uenAIX4+qy#|_{d%PAm+_^f0fsTh0~neh$|3v zbcsLGWer@-aP3ApUA8|A4|cyg&JLXOD7mjJoR7nET>~67%a4rnPHCfwI?!id;U=1b*;Jyy6e{CVXvTv!(}xXX_v8+L1(YzqHyimZ+! 
zMCs1c$!h4qRctRRXhq~6=c=47oQzZEDCRkE9{ckTd&S7h`N(q4;_|C&Od7~VIl%7`c@N1s}1^KYFk>f)SC zQD^OVoyoE~Ikrr7a6ViZ)o*omfsRm@)%{d^XE-O*QNMDXa=+dzns}=Ft;Wlzv>x9r zJL>FGwXC8Z!_UeqTM}YJ>N)qXutl6zEtHt}uVr%6`jE*O-0fz3d`| z>Mc65ti+t`vYOsbe%K^d6?kZrx;MK=jM#AP%tGtcX|dM*)82fv>F}6ZzmDCId2!!^ zK@k!03(QYs`IiUW-O_x-zJ&by->x=2@8$58EwVa0`VFdnCeVco+!I)L#O8p>N#}q4 zdf@X`i(lS#$X(huDP#V+!B>oPNMBW)xhn0PeVxJ`qa0G@MdWK2EHjGtmRzpC@D3DU zh5x&`aBkfdzAoX;QBJ9jMel&`0obM`?b?W0pYYdU8iic0nLmuqVH-|e#ai=qAE z(nrNFTG4ZR$5y9%Z@m54xw5Tuy{h`XcfNgG^Nh?-2F=fSXZM1yzm*+#G%j18I<(#L zmu^p8&mTRsF1p8W3a+g2SHmM$ZrD6gy=~lv&bsL(?}v^$Jnp+6YV33W&yU98%@@{t zq&?QW%JLC9!_#$5zWjM-l?i~l+(O;D(W*g@_s%hI+7h>8=1i{eeV?ylC%FGoPgA=7 zi2JE=ug_jz{N`~+)Z&2JH?|GC+NSE+R}puaYSU#S3f!xleKkb1TMyi&vIft*!rDIOM>p?=Phk@7n$c zZeB0U?6szA;rCNlKM!8NIL9mSK6mGL?WWHAKR@lbq7i2ud1CqagHP&nYFU|8*Zk$O zIkGa3tbuppe%{>q*QGz~idh^!$n)7_4fkcIVReK0ef5LslSRX_+jVq|dgpw}pwDaU zYd!q(i-px9tGx;=%nw<8&$n&gv>BMRZ(LEId|Tzpsk&G6F5Wl)U#Y7w#uTIlHm@GP zxO=q^BH9Mmo7R47NQ+%H{lfeoJ=+!@-01u-+b)KVn>zmXB=^kgKYn~(bKS~*)7+&Z=buj4`gBEYr{Z4Y9$x-@$n*6E)7X*j z%i1~yhO?g&|I>v#Mg$&-xbWR~R;9|3{*gX~e*FdYaf%!L?Bj+ipm14(0cT2=92qmv zFVmyXq3tKPKfJnok}P0<;_s%W@5mzxtq0w|4&)pIxj>abqhjp&HLZAFkJ(=*?zq3; z&N)wcSr(_rqP8oEQ$_3ht*mo6{cD8bv!3$OW&~InoT?qSq}~V5-fMAx*21j`H}3rK zG~>eEV`YvhRcFPafhIgFQ;q=&Xr^b|h+k0X4WtS6|EG$?YA5v7~>pS&3uDVm& zWq-+?;om*J@#a|d1(&}n8~%Ok15>}>a<=wI6SwWX>fZ76*iK3PzrK0o(&Q!9mDa*5ud(f_x-{sVy)dHUX-oM~*?TN;Ap`B!()QlKxUa;`?WY?Yk zhGlc_yp!}z-2?IM!W-(hzWDXlu9WZhPyIv6NV+25wBci}Y>JD&=do6$@sSQRqw>}? z&RUnlac`R3QcRqgo0+un`^O0{*3Vq~`lsH}rThMqxoY*)uhu3k32>2le-{zaWmVME zNuAbgNN&+0xw?9sV2=uIR5lMQdQ><6M&F0+4ySK7Y`W9*KR=)Ml&^WXem|L&&7XuZ zr_^gxTLsyf=8-fnjN;=!>dyyv&%WZbZ-lceJ66T&XO!1xZM-{c+iT-D9Wr-r+N@PS zENE0+>IX7~Qx-R~^7&Bg>a}i5hNji=qIn0!p}UpGp}Dd>4vN4*y_}Cs*R}b~t<~-6 zN0+EnK?)BC#gjVjoW{RO>4GiG4XrI)@3QCx(MK*GUwh@j)0*wAzBlAy?{eopI&Dnr zv7~ph_vMrBKRl~a^z7$)@tF-0|M+He_qDa%&$rFVsI%vb&x-f=p7kF^$mDFf2_Uy`(L;C-Ry8ULER$UUgrxWwT z+PS@6c;I&H5yzi=laev)d()h=`;OLr9oB7b#K~UU=l%S8WSbl3CjZo=<4NDrp?{|B zno$4r%l+Stn|iHtX6vOHp3TeF_iX#!qQ&Y(1NVG(Wc3=;efP6J>&ASyx3|0hr`xU$ zS+_mzbhoR&?`~7GZm-*iG7fJJ{@}BV7rk>k^o+5bE32l3YIz;>ds_#U#vj)$GQ^*c z<`qphBL1w%n$3lDdv)4v^|Wt2ze&Efsom;#Cz^)^um9os@DE?5)cbt6<<^$oeqD~e zduQzV4w2vdu

yyu~l+V?!)`CfS5hU7**RTinmSXVKQ&@;}c3Vk~N6}8{{n$SLH z;pPS2*X(_#y?Wv8ed~>r4)5!F)#N*A@a#k54)1$VtHghZ?m%3x&*N`)u>9QScr13TNkh{y0Z#DhTfV8^Fr#E~$bp7?6N!O3P zb9+Pb#Z9wA&#$ZPB=vQh7{g*+8k?*0b41rrM~$c6vnVi`EA%dsr{0-yI%&|V8pD!H z=A7Q#WzN}?2cv0S&L&1OyvlVw6tvdVEe-5=IL9L^SXR4G{(Fl>k;V6BUHsJup>nY1BSedr%q4Vu{Zh08XJEd`+C!N z+do({Vff1do5p>)e%Q??r{4A5w&eb-?tXsROL|^Rz(dwQZNo+{sWf*!GC6CHd%Q;O z=3iw`(EK4w55|2mySV{Dt_y+7XQ|LD?>A01K}hFv<<-o@1Z%J8pRXAIBQ z&Abu!eMj)~VdM<1T)UUdbxR8}cJHu5$1MBI;Z40L$YAQh@j(xHzE#Y*_H(5*0wi;M->5@8UB4+ib-Gu*2Mw{of_TO;d zO(=5yz&FTC-?wUP{O9%GsUsn%_*@x#xfb zEw1i%i`=rrn;-jplQySJdJLrS=J6@W%%Gjj!&gMG6VuJNAH)sHj*1J^+x#$%2n-4g3J(l-%;Hpf z&Jq9RaE{^h7o%Y1a?YJz!8dll$*)vx(+bbTEo@ok>>0PX7P>F%{9~Po{%Esqv3W<< zjrc&vE>YdZ(c2<%KXMHmn8IA@!7&}svO(4VU;C$ zYRs_v9Y5H6<8ki^r!U_ryVUFe*FrU;UaN-gll_C5rd_mTw7=K5W%0?#oAVYO{UWZ@ z@ssDKR*zU&LtWf6X5Hm4dUjY}rw5016fDnr_<4BDMS?{YEjA7CFA#G}Cn3*DNLLlc zXjxu=$Dvm=-v(lStqLxz{eJSdgN8&t?e}fct&w#{uAY;z=*rlE`(K*+mF$an>a+Ea z?(+(Gzp5C|TtA_BcG_6f+2qU>QatV5?Wp5S<)3%DfBF5>;m1waCXG4z+n|*(bEb}) z)O8;wYrhWuZD_ZBy&E*kSm-i6~fi0U8z$8$jQemllQ9-Xyt*GG2wE;v zc{xw~M8{#vEhpO7zsFV&uD+_VPm_1PJrURGQ0x$O{7=^Uel7QxojLS#M)rw^?>*YL zAxNh?`>4pz+B;%r^<`JnC)XOYV?e~tXm$RnT2p>X%-4oYk4{jBVJaM~Kw@$TYU3^-v^ahu*srs^;J3iU9Z*P_7!;`lEv32T{%|{a6$ygqD zdPe_^Plg*lUpeF5YHRNG9rE4M8!tz!{=#cq|K>OL-@msMy82(AlgZz0VscLK$~&9g z%5|@yH6EpcQ=+rmt{#zHGg4|`tu?49Rp^kM7zEjo2&b(iOBeODaIYT@wEZB6?XSF^@AJak{!wZ50#3B#}o~7JbU1es=K1$5zmI$mDMQP&Tf0GK!XuG3o=x7=BmnSaF`vj zNJ{(%wk>@6MU^`GtxIn#D_dQ~v?i$ay1pOHa;)qbgELe;wSJI;o9L6_q?fm1+#ttZ zE2=*)bP47-r^Z<=-u-RFxQ8Vjer>t0<=v_uMP@we>)LwXkGl>wzWIEyDR)-Nl&B`# zW<8%08QHXJMOyQ_&l%{9%}MqwycN&rQI4q&MZ&t;^dAuecQ2fEfoE^+X-s}GDluzWZ*Nxb@A#vcs(XHo5_RQzI&4Wh_-9Gc% ztzSoPJ2SG$jbYoSO^NNfamD9rcOU&CqUs&P#@4M`6@J)Wq5IxB|5Tev_3PIB-1*D+ z;Y~U@zdQY-V_((`=pELrTeY^^8kesh*zi_S4w)jt(Jv1=apnR&WdV2I{HM#Y?+hDX zC8%cYcdx!+`HaiEmOjnno;)6xhj>vzx0`dcN3%}0Q@B#>sSu-c z3@e8}ahu^_`>cq)|C}C1w|iJ)o-rs;{-U5=$;!(ulx+XV(V<4$YEd;}>rC+P9UVC# zdH0aB;aqQ3w;H|c40$(5?>#Q)<8FIf90)9|cfZxMYOm{An>s4oR5d;8X}r8!`L_3e zFEB_S8Xhrd81_)2urIv>S86>zy^8z_t3v7}CG{ z-;DH#kiUTRo{%p^>Nr54kJL-Z`yvez^5IBhg?uW~93fwT^a~-s8R-!re*x(|AzzBr zF;bw9)Jw?wA`KGq;YeeJd@9l$Azy&>3n9N5=@B7+0jb+S;6R@$q~G8&B43E~I8hRBJGXqYvjX`4##yJ`6#4waQ%XO0n$sjIE+U{NK0|GLY`7?FrTMXhbs`} z;Yh0u!TAE@RY<4dT8w-a(lxlYBflAG)=-W+LFGsd!_XG;sYu<1b6ib4WvGxA<7$t5 zDbkn`95);JRHR`efs1@N(qC{9d@0g1QMgC`0@Cks)zg4yq;;bqEAkqoZE#V!FVY4$ z91(;(rO~*)XvC*qB43C+rN833jXb5kI6zT`Jf(wiXrg9g=nCnBG^EIvBCV2+GZVd?C^=C!!vCN=tDWoABxUsW{hx z{0gM;nH={Tc}f@KifW2-q@PX$Jmryog^T3fjP!tzFGQ-$g1mrHA^i~7ZsZG)#!rV2 zAfJkK60X2^t`BbFOXF@-;8Tuh`XTf&fe7Xa9|K^Yj z=~FZ8hCHQ>X2bW8_eJ{50(p_Av@teyqFSIF=`vhK7L1NtfQl(yW6e$|#wlaPOad@9oGxE$M| ze<7{D2|k9r2B~}p$9W^KLh7{>{SkRzq*=J?`SIxv(#(DE5#(trg6;Fm<&>OpbnyD1 zhECp~ue@V_mtz}=nl5ZOz+dW-&HDCU0i@eHQnOg^0ji4?&pKW9Tm|E zZizfkvBin|K|V$CgA?cKpi+3aa&;WCoMp~ja|h>@4qRLOb>ikaIL~#Vzgt|nEe;AT zR>&P4o%>Rvh^D`x&RSg4P|gj;Uj>(dBPr~sVP{)i zxzOqycka}q{W7@PTwIB79sCear1UdH#~RXFX_C@oQqmF=MzzvJ8q(-|O4s&2zVxq^ zriUrfXi775O*WW}Y0-(TG~uREi3u^i3=<<##u}2lj_TAYxpwKw?scW(T4 zW2R(dLXtsjv%kS82U87j(Jx#ZME`AVRGhu-P}h97bKT8VK5RU<^Og@6461i!fzc;< zbz-3Ic+E!nr$24}+0`Yx=H0D(df9Idn_E7co3*K|_4dzR>y|C%Ms=OH_|tN^+MTYz&r-6Z;^Mbf_R|CG0o99EMD<&u6SH?0FH0kA9to>o)wQL za$Gl_kcpk`q?6ixa3Yib#(^}Q;70-a%o`0uvxuVo>SIDgfkBNe~g%P-j_J0 zv+EPMjcB0Xf7d`dj>(g;qAOUOoHq{oHK0y~j@0|VC94Ojc&7esRrCYN|5C5%j6+_~ z2;d@L)?&x0ZtmOtc4|45o84qG1m)`xMz<_}EmR?9S zBi91kmn;D6gNQ`7a7+C-!IErsH zT#{h?do2wxcacqsh&O1&c^5y|)5Ii3r>ARTQw-@E`clo<PFq=WZ_xCC%-oS150 zdPr_&)EFitq#J!?@BJk*+Dk|t{ddy%$jmZofzeGjq$NZrCQL#*_y~{@#TE3Z5k;c9 
z^ynl34~j5W3Z5RF08&OxOiHpbIw9E*Yh#CI(0?<{D3g)5U^A${nU5^$FJ-Bq2>QB? zC>T!)9zebktr-s)Vl}BL=?O+W6o{gvGt2tOLjSS_EjZ!5Mw3jk;X6Nuit z6621XiwQ4Ki~dV$5kRn~7nhR86HZ8uHB4as_fMJ#{L3anl6g^6;^NW`Mvb6`WONFq zopd-n6yPJX$o`VVfMjw&sUL|9r)%QUQj#DkluEq=--e2bHl-U#`~;!Zc=i~ETga7! zu7Q>`5or@4^k_q}Aq^#PJdSal<16jmjv+{ zrGjZ&<7;C;|G;4EGinSR6s$84ADPy!$53oEq(PE&i9aS#OkzqpnOYhcTenSVJo7{K*n7 z@9;7biv|p7XcC)BHyFmE1Ng{>F`g>)0nt$6XV*wa83`D1SdSwP3~A_1wjqQs4ME=1qO!T2_0N-~-Q-wt*M~{YjsiVbUkfXj1tHF^l z$|j_f-@(vW^kBXi*1x+c0d5tZmXc~nGfu?v3ZaO^A_%j;pkf3cW17i89+{E`DJZlU z_#l%2PU?tA{g{l@9?vmE5mbpH+c+*s!uKoh9?ZiDA~6M8gTyJx82QOrNE+sR(2Y+u zJk>qgci_FmgygY?v~KQIG@AD?i7+IMN`$MM5)-NCcWw06!jDdR+o_Qz&JfL(k=}-a zK6y-09xN@TO9%+dCeS8DG-55I?S`#eYu;+CQZk63N$6ea<--TMJ$wn~h1U!HMT2Ne zQxKa$W~^bN2Hl0q3=?7usciI1f?L4Un%1pFMkIyu3J9JcijeN}7Juc+@af;L+=m-! z@U|E&xHaNgZKP=$B5!RuwongQvMR^*Bl z>)pJOW>iY-#IB9fjiynJXoQ#aTi$dVYMVE78`09ZeXot+0$7E?uSZ%+dOAj^b{f3F z=HuV4y^mkVj&0kW8PH#!{focelIRtv&o>n5%{OpdBMtAsPWXT`R?(Xe>$8ve4;_Bk zuTY<_E48YpWXL$Z`GnqlIn+}BeJrDNcGX&IP@Ue4iw9;zz4hhR|xDji=sx z)vE48H)i*EylSa8yXwu?(8~JpxXC`E89HL5-U_-`_2$zk&ao@DZC_-T7ziu*57Te3{;Sp6SBD zNNs{J*yzMyjtgqA(e{}$praMIvp6lH_N|(7ZLs?0Qhi3H^&V3Fz)I_1flrd&;?$|q z`X8kF+LhLylCPqmDV4W>K9d7zgDWxsI-2rR6nrN`iW9~ zr%LOirTW^H)`v*-FRA0bC4XC~{#vE=b)@=TmDbCo`bCx27f%rNn^9@~MX7#ZrS*HH z`c7}xQ?&D?U>f9w0O`I!gpKg>wyH-Xo+@_^^YZpPnOmp~LMUF9DC6=5eSn_qJCyZf zK0bsC6qEz+DVxW7`O=?2K87L&Sk;ky9D1LEwZ*L}3lNP!N^d^5myc;*qbaoCgdp-A ztC}+vdP}I6KZ6_uY^5s5%U|h1T%sJj{XrzbwO((I@X{X-^3q_n8zfGL#+(l`AGE5U zfFjS99_)rA{u09w*Bc|njxngb#KwvOig;2?Dv$r>C;oNy=0EffT?;5;n$KI+%|J4@ z(1<{ao9eg;RPGew`4wVE3iIU=jM~$iPg&LGBqDMGQRpBAO0>y533VviL`BT8b~~v# za290NDs;Jpne~Ir7qkcOI%ypa1(`1eD|bcob<_GCvK|TwI5h1?aLtA*?-28KeZXat zE93>?p(75nCOuh`7zkkjHJ-6LV z=Qd4}f9TD8lS^1$Xma*nG->O9I=@Rgzmt$W$b4NFa9yYDa~(P`)$+Tf-AntjxrJKu zA!X1ZunzCj=fg+!xtEM^C*-mxDe$2Jze^=GrQ7j?SyrEYNLA80NV&6$a_11)!5p8*bQR|7Sn>d_MpG; zpqQ%w^j(2pU+SDz^%_PWQw()28i>r*ndd%X1mMCqMneJR{S(t8J;^hJb^CMZ)GOj- z943FzTFyKwWZYv~wi}36wOrypi*$xLoG??-+Hyae!T!eD1oy-3L49nwj|M2OZN~jW z+`=T_K8l7Bf&2C%_jMWJ`$T~3ww*zvqnu-wb7ElUtmuiYBP&?8;nX#K;h9W2?G{#O6U74FonVOin1- z2n4J8n8>EVp3Q{}3~YvpY*xl!2IEheL+1zdxmF`4i-?zFNdNHdrtEGu?n2FlurMS* zxVeHB(fI{VS>O?c8;1{2R4SYv752i#fi2z!_tV`kikR$l_My7DMzm%8EcQ8sL5Pu6gzR(LJ~sZbU* zz2O9*=CTm;`4D%vn39bMg8tn8rbo} zjM1r({#>9xMf&VQUsH)bf7RwUcu2>?IInIa?ai6fRaSKllcdb5ZpqYJ8bX2eD*VE# zUd#I0$CBh$_50w~s;&c>%_;!G|64~f|A!yi6nctsB|MzRpay#3hd z)tRvX-RN2Oy#Tv{@*g7^Wu~-FwiS2z3LYvBUn#M@lkx|-mLrbUGq*!b!y zd^H&1yYgQCKFC}MN8*c)^F=&-ciik{BRkebXl$_5cL7m0R)7Y%sS$Cqmj)Fb#R>;_ z={fT?6glv;^f3tGAMpxc`D~Vdp!2(DRUe3kzVdVF@SLUQfrED)FwwQDLs;MXh8?L> zpCNPP=a#~pR&@psU&d^E0?G_E3pKwotL3tq{9FxrhB}&ue!xSc-|#T(K0ntJARw$_ zpcE%?bDg!~!Q&N)tt=o=w8YnD^Hf-Eey)P3kSdfppR@TyFOA+ZU@_$PgZ%t+LP;DJ zSr(%>FVyT*69EqMASPlD1=>78%~-6$RqX;EFzd|CWwz#3kMW1q zrK1kBJTNhiXF}}e>xS~OTNd~-ZV6-r4^mL#g(mo7^*lyqHaWA1x=2J7VUih46ky;h zN6K*{S@{E$vq1*~$Q72SwoVOU?ejrm2Y-Wpi<(rBXD**z$5e>0ytSp2TTMN7?o=5sAiWxsMr zzhl0rWsf?1b2ItotjB@^eu8$1$h3ogFSOay6kKLKJc^hj7739$TLjg1&QMIdM?h00 zpalZZh;kqkfcQp=2aC=n`0I74K=8KXd;t%B2sbv~oS*wOS(C-|fjoKh0xGgZ$Q^l% z2O>QR;(j~~JS($1k2M%2_My9vh)~KySowQ|Zul-=5NK^|CNf+z5E!JqllmX122*~H%2mbzD>gJ`ADC8%T$G}(a?4P7HKefIL-8l zvD}@;Ijg!cY^M)6ZgSrZebEz!0P4PuRXa-6IHY4+-HTP&cL`2%I~wf zlTsZmxfKuP>*}()jZz&>>~Y2K^L3+H-CU^-$I!Uq#(Z53R%epxaNLS3cH!$>SlwW$ z4(FA*;u5mI+1-!Tb(QKcqvMLt^L6jCx`tAnE7fh|>#DLkC#kL~)h*)dj>9=IFMvz) zvb#~;WWKIHt2-;zxlXcO1jISHb>LyBcDypl> z*Ij0O4wvdQRQCvfAbT@br#SM_DQSV=NgQi%RG=8a8~=Gc7uB z-3WNxZn72&)fmwF@fB52vDX9TY+${PW+yOqPr*g`oieMsHzSHDR81e?J{Z+T72v{= zVT0(G05u0IN&@)h3AA>0fHY&wwpvZ^@*~=RP>NP)CQ6TYfLp+qSUw08Js0x|3=J%F 
z_>yq+M9Du3PG?LYbZ}^nf7+$>4)(OGr&tt3dw^K>)thgIm`k%?HB^4s6)tF|$M2lh zV9_y8Z&BZ)(bM6OwTN{mFLDr?Hqa240#z{W6mNv0^yaFNwlR7-llC2C*xJh;@T}a~ z9~vf{!~I2^i4u+#L5|_PV9^fSez}CROT@V*;pils!y?XJ5vQ+RJ24WDhlKM|#Q9Cc zsSg~za=+M6M+xaR`YqCi1Ywr>y-yYq_z^$mlD!Oz$$aPu!)EJ3K8)uAQm@a zg`_Hfk#ZXmU=9GxBHiMCqNe&tbcc&55hBV^Ta^70N<9hXCftK5_agHXDA$|#mztR; z;XGi`g*ba7;yf2|ojmQRIlzX^r>GpZ0ifx4GRWi6Ut>CH+owG+IU;xN;(sNus`yA|!$h(OZ_7mUawhh(=t=0yMM#nI6{%`2s@Oz| z=OGQZ6wCx=R232T(EsBB{4ygvz`JF*1qPO5Kz=Hqc)^3rCuqX}Yn{*uRW0o!R#m_W zseofTlQmhz8Qpm{YVd5(OhQjv^q70CU`E1f)ifB$CV*bxy$a9Yz_F?)ieQ86!Pbgk ziy7Dx5p1PB*b@yTtk^acY6E)K=KMqkp@n=o**w>(%xQp(RXqf3=*@c1P)iVPP10IK22?7JcF7p9+923l z)sIoC?+hRl_Uq2lIAlJCa|sZWl;}%J)Y6U^+cs=Zoq%=-?l3<%L5oI~`ujtf?b(9# zWe~Bd->ZO*=XR(IYjfJ;!SqTAEy*En;E8hxn%5o+t_f!O#*c^5e7GtI9MgHrY*@g$ zIYXRsI1DYZMb+G|DCIW3_K<~340U6nD zu9okRI8VcHrEz#1LjubafFv1S$Vm3n*sXqI7os06?WgH;e^utpfj+Ei84M;U2*FHH z5SBJOgRGz+TI>%sA7u)92zHM3U<%R*Hp2cgV0i^K#`A=@di#jE0%X4$MheQ3fI8X$ zof3hzFrc9lP#qhfRU*)A0Fj8c@;%m@e-AM~2*GkAL>mOrj1$m$qIH-_=}1xrSCH}mqlT%Ox+|Ll@owNM@>U9Ow5pd%K;bq( z_n7UViqin9l>h6rIt~6)Sno1}M`oihDZ^%t(|a*zr%r&EX3>%!suD!PR74O7qgEE0 zpzZ%uZQcm%s`6s-#%qmW@s?N*0M8pQj?Kb&vD%3o*I{BEmVhSO0J)1m&%m5j{iy^r z%m(NIGbv<0&w$#pZiBEG&!8oL-owD)&uaiCxUrNB;2+4?3A1pVFhjJ5*)hMwx>pft zJYQFomFdkHYnj7UyuRA@6rv7JlogrvD(gwCXedAQq;O?XXE(-_sOGFti_>llT_Fw^ ztalN$ut$1`*{_Tdt8N~MU_sONdm$1rbOs7)W5kl;8bp!dCNo@;2mm)k0C5OA%uGpD z=TIdKVRZWyZiOL?+L|CCvZgQUS*hK1 zxsz!gI;7@gB6LX@)|sF*-WNV&;|=c%4Oj!PpgXFq>f_Anu;6M2P4coC`AbF?7W@e$ zF$C#~_~zcFay;qO)|&?uWlgHd22EPA^7(*?M)BSsPu9Z^3ugAVJ@Xe{PUtE;Lm(7Z zlr^!O(EVVE5ROrR5brMH;AK@G0Y@Yk?Jd*60E|M-(HHQX#wv)JC7{7ZDK_Ll-Bxu+ zR9e*q3?x-!Rac|-`2~FQvAy*8XjR{3x^W+hXlzy2XP|-u=*tOSz}Uvm6JkVby3b`8 zCAjD324JC9Wf*NGjCUEv9$SoiI*IA>Hmtl_A>*HjO;+`XwisJ0!#FEp9A+2;Y%#Jb z!&oL^ykZ!QZ83URhLIp)e8eyw<6e@yUS$}aB#b16aljViVNfNKJ4qPX48vlJvAZ&i z%e?16SA!YGP+N?dm0_%uFxoJT=C&9Sm0_ev7~i7jV7`ERN%BUOVRV%+dNGWnwiu;Y z>JS{Y+~!p!jDv`yR`tiW82c;3xB->38M^!T3?teWV|Ha2UrHGD8CPv=F@{uzVG=Mb zbSMCGiu<_dXLpz(OTir(q2uf61aTDN`*nSOm~RC=Ycr-BhQq0vJ9y=P5qJjqQ3%?VwH9`%>}i4XewY;f55tVG4o(VwLC^aXa^Gl ztNIPAVof2$qN(~!4s#eL7V8k)_*uVA0MjFY`O~3g3|L7~dI0LpAqw; z02I!Ha{er=8Hp&=bdduphs3#(E$bp3d!A)we|NnFN7s z9`>@C)*-ZDRf1m=;MWwbYKmuX9lHDtOfbRf=*>OC^(}ko^)cu40SBkJVpyUFR+6A0 zV;rW^Gf__Dr^7KYojl089m-1~fsJIHMajybJ8aO&Yz2h<8j|V7Q@7>%&ZdWEAFFyE zuO~c~Vg7|JPkJmvYllJXcyLW@*kBWfnB&!3PgJkjo{6tTuT3W=%6}EMBPP({h0RbS zIY#aT5NA3!WGR~)3UIB!wHb3r)UHaUrdjl1U6CFN_d^BdG97KHO^(-E&nxrjdm$fK zT6NQ2{1f}nu$Ctb;~5l?0CTOyW0cnXZIC$@2J4FQK_M|X!7mt0w+6c@YeaK-L5WJ0 zZjI7|afK~FVh+!-kPG{FK>^5bz)^obt7nw0yOK;jH2b;C9*yb6~6I?ogOY=!EIg2sbe~>-~3$!(7 zF|X1srY#?l$!TB)OfIg-Y9|Q$FG8Xt<${$dO?fpVbXYu@w8m;+_Ho&U< z=r*(i^;2Yc_MXs_@xT;n9;*i|#c>mbp_mIH4&U-ygTuSA#UEjGrN>^YIsq`)o^`Vx z2hq&0ib>}J)vF>^yQq;Tmh#)O7rTlvx$>`KGT+`dZZ+-={8x;LQ8+iTn)o z0`L{a+`1I+$?yQ;n0FLjjy&Q%7(J6UwU9G0-w!ZV|m}BobB^D-;J0Vo(BlJFqdJ)Xto=B za>7u9h;D+Fek<491E|-t(o3I=Ht4Sn639x=xDA$Yl1!^+jxj+AJnoG zood>3z-bRYcsI!5xb`9@3j~Vs4(|8z`diK5xu3-?MA0E|2b(Aw!`eoS*$U=y&P@dK zvj=-m1T!r$^rVxk&<{J6*bPnC7u>v^4N*+X<5j|vCa2 z0Usro!QogWdI&}0{HWNY#s&}>wI_75X>HAeuU1~igmuDq<5Y9EDpxO z)LS|U+Nb&NkC+$)wS1t<{dL+E!8(1#dRWd63(~VQ0VCA?+0o8&!J;8~%h41_Ij0Z6 zv!iHi1rC7=jaHW!xS${YU zt!CEB&Pf#v1G8*yr!31o?JH`C%ME*#03ygR}*)80177xHiJfL1?{(}*3 z9@q@EKxCd|W1eKz)FO^v>;c<#n8(uFihr|~#NY*$KLBZQ9J76wl+<3SzIk1nZ<3PH ze;H?zg6FljQE&`)BXlagLwGp6Lt*0`ez#Z!Rtr(_vxq_?m`;^FAyKBxRU>|6zp+S*HW1YZTvz7Odm%P7CyO*$-~5AWimwAC`%U1~x_D0k zTFg&{%1@yE5B*pK1Y}7AOJpGmt?H+|Sx}pgT45ptv&8T&&*tb~iLv1gF;RXppbhnD zGR04D5bRel6tIku1rjWgD7Pu6e1XM`5hB5j_5|-RUjV@cAej9|rp&31X)gMBZfm5) 
z_3%e@0I=@Kn*T2x;AfEk3kMj$9$45w$zGDvJwSIum$Pw!2UzcB{DrBqCnm~_9`Md0 z@^+g4K`SLy>1Hf{lQs1Ldk>nrYAVB`XB)%`ta-Ag>>@+BEx1{>L>n8&B+M2nWiyaL66EJWn*URvL_G@QsT4+-p1uuEvZJ!19%`T} zVjuRljk!FDOp(M{z}b_y1y5tzX+$KXpEz+c-@xh&|46aYSLz?MIG}gvPo2GPJKJc= zCcRu+0T72_(6RZGWHdz3Nis6ZG!{u<6n8arU{zlO4LT7;D*zNHz7$(3_GZT~+#?y- zW&zBLmzT-G;NKVF^FKgOKVJL|NjT6y?#@6##of(#2d1HZgST}t{w-C^b zJlak89L|5hG$dM6oO>e*lJ?h&Xit?My#5h5$Z9gBHj}b&ymNg!tiDb zc%AtdA~k}oTZ>q}M|g<^itARjRv^_;c(+m@b+@%B zFHg$bs!m~~JQ$y}*8)-{<3OhwBAHeF3Th=4MC_qmOPm8>aHW1&?_nM_T_k8epC2?w zWj&s4b)52}u-N5G+#*(@*K~&3@mvS4XuZo<=@F#CWrbv5t829Qyp5cvDRzG5jqOV?`2MiG=hM_OGxUOncjy9x&DT zw32j&{X1G3lw zrHDXzJWMi!IcW#ezXD8829sn5^S%fpHr9jzEw%${R1t_{KrQWnoJ1gj>)VJgR`ozT znBo?K_yPhh-V#Y;Nq-TNEcYi170jr0%gZs_lkl*jrX`;h#5hxq@UXe$ixsr>34F#(un5A>Q$Lb;W2zN65K20><@`Z3?`RRj8hs0{ex&3ho0z$0inE4 z9T!gfGUk5p76j+lu%_AY-;Vfa_>Lk|m|SCAVP>FN7n?tiW_|v6aVFv)+S*(WOC6o~ zT_qK@EM+SUkCn3r2o0h7m{d*FIo`e^ilTcV@3464E+lceVILa5W9ttjTr#+!;965p~HBS8M;A4A10trMr6Y5+lk#m zsCxhtna9)SuS{4l{~kkcDx%l7M?c1tht|m+tRL~MZ(`_Z{ZIIURozBpGSeRHLlNvM zz@*U5GSc)`{9=+-Jq~*RC*#o)K8Rrc%Ua{Til4DJU=7mzml~AXcxwtBDUvt~IQt>( z7E=XCG_vE_Zhw+>gPTpnZUEmf0XV={nM+Y#=)-%<>xGgvQc0QleARQx*=9iFsV=jH z0MJJSfcx=Fzh|4;E&Z1G3Xu3nzxZqlm*7{RLSO}2A{xecfNcwyl#JQdBD}dQ+bTD@ zW?S*39S6Yt$%z8VDk4aJkNE7#@tkt@;&Ner^^%08n6-*(D0I{(6IgoT5#xeYt!RcD zDt(tP1+yr90ulW1%$kYSld&TI29X{g;C6~_2TaAz%so)2xsOG~`sqo8C+qQmH&YG> zoO;_MPZW{y+Ly7h-wSiu%kQ&~E3le|%^oJbi-=%77Vu`;Lw-M|mp%r&H0pHn0Jc33 zUW19(7bJbANpGI)Dd0b9EJz=a0rqsn-Z^@;5aiP1r(7)wF{j5L{7J6N<4UYCg$A+iR`&mM*` z59OMGabWXP+yt5Lbk7f3A~1>*Neo9L;^6#pYD?_p zW9O81Py>4CvCc>c!jAZ1Q3clWD6O$Ui$dGj&L9=MLi2B{;Hk(|aU`Z?Sl*OJ(!yaHk5(g7r~z4)`pr+1Z&U0x`|*r?7{ZKFIc1Q3@n3z zonW)@&sh*ci2U6irI$$gCIrLzD^?3I9=`N=5B9&Y#=0dc@+aWS7ljDetc}4UMZG=6 zB_b1fjN&{I%+(&Ox(F7{z{ZMTcX4YYc?r`O^w@=g1&Lt$?7@zRU^N&R>+dw?&bJ5q zL#UEKBDZI*dj6!$asB61UraZ z8wH(WYK4O85R(`WD3N}`*y2y?H{mwRV*DwWnOL|KmJX@^T+@iRw1!rG7-pmm7{7!W z?i(;fksf7!s*j1-ssid45-P2y^2?@+-$I+vGfw8^(BQ~fR93-O_&}8cJMWmGjDyL1LO4s7BWh+CgaoR zIH*F0j&i9;2(k1^EBQCy#(J?0>xt|#9P4d|aL*RCyO+_ChJHJ-NSiVwEVwpU)Nx3N>utE44q6*t+c7FmTTh;9$kUrp;$&Kw9?PrQA6T5VhUX00 znO-i|G3L-)s}ez)290Ke;R$@Q1YnlB%wPa2$rGsdtB84#j7XSKwwOaD%;H{9r-0mA zL=M8S7}_nR|ILU{mk$iF^IO$^JXK={ex~CFWw1ks2#$K_5FClmEn(~faL@{yx&H_` z@Fgy2K+7Jewuw}Mfq6ZtpDo2eK0K#c)yn`MYJRzgj{{R#{R12&!JFn-D@sDM@I(09h=)&&t>Sb ztQMLX%A;$6&m?yco4*S6f}sqtK^Y;UgxRCa5m6Q}lxcjeRXv`;p9snq z9`hQAT>c3-8zG%VE(f4o41Fx6^MvB*aI8#z!$J?fHOk(&X$Ca*{Y2(X=o0h^#W5g} zH;BGTNS{zFkDs(3(3KzmLjx4FilGq0ZeY#}V^v{fdI}c(F5%;MfXS~JKp9n_^cg}O z^%6c<^GD_oy_S%I#Pf5w&$qo7=L=3W&&Gju8MTa;7cEZfNEhN{0Q;vznG85eQVlkD z=#<13W^#CdKYZ3sIk=(kAEIB0CuG&Uv2Y#oa6V~*?Rh?39)Fmx(t~{LH4z=&{}NtXvmj7~n!Emj z{@mM~*iF&f?A0xdUWQtNim=>!%A5j%$TzTaWt*{&wwZl?WEe3&cUj-saeDDu`=phVSgbGeXTpI5z`FKx9Nn!`D-+>g%8w zX8uXLpJFO8lOL8vhwautDQmDE1T-9kQs#Wfre25YFl2hq4sq2=@7aaWiKr*Fp(o)) zCkvL35xJnXV0QBhA33qUgSPRIr#1Jk8D>7hj`R*+oFBFVZ}uXz4Mt9=>Xg94vyZA#sZjWYZ@ zH(Q}kHuYG7#xSmn0eUHerg2K>$Hfc~{WukX|KK1WT!s!nz-4|0rZ_Pz|7b{suI6ns z-QCC^1%%K<0Ya~;lmhy$uzx`(Rf4>{=|!!_x*WU##7}9vF=5K+Y(Du`Np<$JC|}o( zucOyKvtOw)>C>PNn02zTYLgldiz6^H(GE0Qw-mz!zVW9pHP@SEN)@()+{1q3xHx6W zL1g=}?4yHmO8?rz`M(mmUtxv#CCx{t|3VL@0^j1nZ2r5ryTfV5fSfxah&7SME)Y};*<-vK~WKf{=etUy}Nglw4k5o_kW(iojd2unKNh3oH=u5?!DW+6F%S? 
zkCSx(x*rg`n6I+ANT#pyWLlo2uLjF`UYN{DoA7T^L9%u;nJ-1H~B-T&QM%;+Aq;}FK@e%<1Kdx#eZsrANih~8ceFq=#hAxptOt(7c#F&agdQy1 zBgcLfW1azqm=aw|3@RE$49P!Xia`($3f;kh#3A@8D3v+c2Wc#XqCx5;S%`vEbU*Hv zEqul%*{#51pI}NxR4K`*XcWm_;2N*xTlOSnz%&7#?1}EMr&n>a+tbAa(=4)!nn1fG z>X{@eQIHL6vE;m$XC-NH(F7cpUdqatJ7J3{;oJ%9M-lH@a5-8qCncV2clQLsFWhqv z-(iOb0OI#R{kx=XiJS}n1)W~_2gcAa1M_>r937~l0Obv;en}E`OP@&5%)@(8#rWlq z@5pLn(nc){w@~5LC`=ci`Uq=>GlICg0@B*IWY9}SlfejU=(@ClM_4b2D%%ayZOL8J zGvJc8ztb$L2BwlvDzK^e22t_3z~XFPNa?&Efpcl|OU-|Hd*b~NLEz`C$Pvpg%&`|n z=;M~ed)|7G3HH$ML}Xtoi)1jGv%Gv)Ymiw1NIp4!i zn7t6n+5Z><&zI(2_+rj({6pEMPzPmAUXrPeyDa!!>e>ua5N(Z4zyK2>$OvrJ)}UKt zZ$v?6F@pdqC4fx`$7Q_1c#|j*@DA%ER-IuO#LqdTD{P_)PT&Au1_xKOLa~?H;itu8 zVu~osM$~T+odqImp)~LOJZI<~GRgE>xA6W0a)IL&?sAmT3U|P~)7qb2;i#01aLdtx z*=Ps*BQ&yb&oXPObmfsU%&7uRm{SGC?Gx~XlNBN-9KK%$1Jac>K^L<2i2Dq4ad`&Z zXqFZeav||pWlhmM6us@G2DsWP9Pv$9wI?}YP4m&_Kj5)TDPrw?S!duoj$8!WKY8>~ zr}mHL4~{-D^N%~;KpZjjIO%}QEF&V2`zyT1%)Nr4?}4Gw`n+#RC69h7VDkl*AZtH+ z(?#^%&_XN`OAs?1K3ebr?8XeQ9^11#cOiZueb4OmN1vK`IO!hx5Umal_@4|Fiyww^ z2|!E&NICk0OP1&I6tD{<#l?O*hW1` z6KofKI)$ASem@y1;yvIJvlHeS#|11nBFE{F6z6Xi*`!ReRTfzelU-qviA=I4i|j4m z2y;t+Z+Em{wOI}CBXE5C9(x1pQ!mp#SJDS0v`L4jyM4kS^h0Kfoe=#?1(Ace@ZNLe zfc#u?Pn)TdoZY`7O>Qq1IlKRb933CAW^#68vSN48l8=20%THf($sZm%Jr^6ByJ%zc z0yZ`n>Zf?j>9fdI96{p~2|j@Ok=NXQTzGI%&b)!A26w(7!{EE{9w=%ozVL4YwR_s~ zcYSzt0Kd9`^SQQMI=-EQ=CG-HJBM(DXq5<@P2CU9_&w&M1v;y4C14E3(`ZP34oAC@ z2MOYH!8P<|I9>t8_xG@59?$ckY1?tfw^_4bu@;r^I|>%rS(#)PTV$s(*`pR&VCbWEdPe8wsDwbu{`3oSfUQBv$&={F0M9Yu#s7vO zzydo7Uj8>c8_3J^XG=*Qlc)IiDDfT%Lm$ZfPsVwE`ukY=Nrxw$_>}e>j1BLX;W!6; za#7`vU%!v3AdViyFLlj4eEzZv5ALYOFInQ>sF>T5725T|IRlQPkG}0i?lZ_ey=C&z zgEJ3b@XFhhX8s`2csxR#314hJh!1=AAB2+lpFeop#VSCHq5;?GxK`pi4_7Jvxl(!s z@W-@u!b9ah;x`4G+(6)u@SA{p4;jL3w&ABEVoBm6+`~3}5BOXP9*m>kAjBX2q@cxj zINstr9B=Wp<4K;`xV{VB{T5dv+Oq!cLFT`3J%#H7Tw~BLDt;GqZy?i%e#CLnpBLBP zfU~ZnCLg*@!&L#gZMg2W(NlfvI?2S7JV9K=XrmcdCocLUd^xV$06&iFFE)H7@XK&* z#6^o9{Skd7F2d0{&i8OxfAnL7#ki_)b>o_Ye$h&0;R*K*TrI%kmcsu})6X;Qu6p~rjG?NeEZw0M&-AxSe zNBSXKy&2aPp#7mqf3$iI=GdV_*1h@Q2JPsy|8~G3Rlk5#!jCX~Si+8@)9z&WE~Fa( z9bo8Yq=yc*?s;s3a&+20MqS3JLnY_;LV%h>r7*U!Xdg`*B)49tsx|rEAZ%M#W8+{{ z-Xq$?BSNn+2T1sYe~i#*eg#ms$gHad(J9@l>cR1-Q||;Hy+*4!bhBhSCJx^H#rw#7 zs|(eetBbUogA+hmqiVrwy2CquPtuk%?U)SO>S9NV-VvNAEBr{(S22Cb2>R+V&U77T za1vL#Q>&XOs}-qSj}wh$3Bu@EpcD9B^YzhW*C4xH7U+e9e_%A(C2e(ah8(qAf!^AJ zvd3k}o~ExJOm@~X^6wA_4D8%zrdE?Y0H1tmj8z>KUqoF-E7N0ZOaGLG|eVk{s~*Y zKugnf*z(`C<^O5Rw>r}FL$>^Fw*0Sb`3h&6K4!~*-&Z4=hPjU<_J z0|4gkNw^WuK%+x|k-*Yv6nYDNu8Q%^z93c$JptG1-KNH7;+CbmD=z48RaA6zuJO9c zSNYtPgQ(O|e^>_->OW>F_h0kY=BYZDuK8(f7V7Hd-Cl{9`lhN&9O*?7e$+vC4K29@ z8Lu9kie|xYtR{07UmTGdmGp0ELB85Ny|hzbrMhC!syFO)6|M5Q)Yz#2ro4g5G+t%k z3k4XOqggGiF}XXq5ufe~o(9^QahgMBpaaU!CM4@4vwqRY`bfFfYQfmf zH341KtBY27xp#)X+GjY6R|TB9d$Q)L`$c}h;i^+zHIlhwH3Y7KA<f&eQq>me2?RB9sz3v6M63A-BXH#2a)nJI! 
z)R4fq>VunLTQy|gbI~UK$zrYE7e5CRB{2uI+f=Q%xG>Rz?9P2;_2*Nq1ZsG9?j<;g zh1{E%Pw=IZpDLD0b~b*!DSlcEzRp|k3u40QiG|3ciBHG+(4R>({8KNMXxm8wu&N?f zt;}jW38Cf3P6woNx#ozS0YKV@LtxnW?-N-$>WUl#@aIZhj;cA5lq(EE4|kKXh zy_zrWBVSq==DV$aDeU(vaNJ(ZnNejVX2E43NGwO~g=&cZF*qXwY>J+=w}^5~VBkF5qqcgN1gEy{e{g&PWE*q(VOy7kz(xLbvv zA{|n)3plNeUKns0j*8&fEMmZIxPu^& z{R(&zVdCmFyaAu#O?NnuB}bs{ba{_;5?M~|xV)TT7iDv$PV9dA@O3O2XQG)Uer{T)W3vK|wYru_$Zjf(+zUz>v5A-K@Vs8)mjgJdqA*TGgD;7f;H|C0AiS-5wYmL0Z zAbguQP^240EEo`RAnM;l{lbq6vr_fZNu46MT3wm}4|gmH+Gu%4Alt|;Y?@RUD6TEh z;}^nP8O38h6QtEHHh{z^hBk_g;;}~d>M2JIXJAZWprp3I7-N*|50vUgsT}+gP?Z=Z z#+XZ^Q+{LUfwICtIjG8v^8JCax-nK#m4m9>C^yQ8N=jpRSSTY)%4TZTkk+^=8t@zb zl2rk}Zulh&Sx~s4snSTBuwmM;SuGkPBL?U5=;kfl5ZlZdY3MQR)x2nr3XfzNzB-rT zlZGNC!u;KfF{%+ESQy1OwFnv}<9;^oe%$Gg@B@Hv!My~49X|^&?Ul|RhIfO0CvX>H z{$q6bk7AC$0`DV#P>=9IjctWoUj6VAOePM)gIM>yb!t(Ket69=+*sf`hvABW!%WGj zI|exTi40r`aOhwLt`s=Trwm*faF|zioL1ol5_8LrRLg zXR1lUZ2vBd!RLza!_+8=ZKtuQb5$6z9mx3(W0uM~uHa4}ZXr0^QxhDAIh{Bkm~EOX zb^-2L?t*5&#uGUUs6n9I6|+}a>-^_n7qAIKorjuyV&cR-4_5`QI$Q|41mYIahHDKj zxEGEHKacxdev0cYSk-@e{TB5#)SvMiBn}zTvX7~8Xt$8T#fBh10c2^h z&zS(37{}mLLbes#WtNap#dezj8BVIs8UotQ5;C9Ig(g5o7rV#=$cAF*DmO~D6T8?1 z$Yx@fm;l*I>Z(5+!o`^EL6k9;AqU()fqHgm?NRKG3N1SswanWQLQ7 zcHvs4)ed52JNB^F-_lNKm(6gv0Y7@C?p|>T2}hl_2rC>WyFxU_Iy{gW1_ni=G9Y;{SpTLxqs7&lv&7+djO?-8ztfqdALHT}z#!NgK8C5UTueoDax92#wAWmAYYsjVX|CczS(avEk91-(b=;&aHt z79zh*QVi4*g>L9|Ir{eRQ}11dF4ucl6CEbg4*Z4p`G{H2dQ3m)kOQ3^Mk1Yi>>BVJ zq#>=y4Z^96|@}v?b{^^z6EKaTBvUVDj%wazE z+G$W0lmE3bPlK|UJhH7g4a#D2zP92tD2vI3+ltemEKY?1Qt~t?i&FuCEKY;6cta-h zG$@P7;oI6ygR+>G09$byl*Q9Ci_@Sirp3WlI}LL2Ao;ACQ*i1s&=gx(C0AjO@0d=fyDhy(Xbbap{GB94i zP9~xF{86Q?tzLr~q$dnekc4h!p%YjhK{QsCnNpm=oB^MXZW;2}5{H!Pc67>7v^uLG zyD;FWJ=Nu2Rn(QOdV*+7r+7qfb`}Qo+HB+|I}4E0ipV3@JBvUbKm;;b_uxRT*5LsY zm%Hf^!%>f;M3<+b*aIw92A`|2DN99$dFl=CxIGOOo~&AjyJ%IG+Y(@MmI~M;c|#4j z3bE~X<++1Un%5QZ)b6kig4B#^c!FVwxnY%g9D(yicpJU$43qS0s7mJqg;UB5FI2=c zfCiBl7pdgcQ;LFxsAPDD>zB?Kj*KFGFX7+4k5f+v}U}>2yT?@_x zL@q<;7%ofCt~F=7TlcK?#J+(#6okS!@>BIZhGTWMr?BzoZoT*%EeL~R7Iel$&s;%h zK%1=HER{^dljq=}uxSxvcRj>SXpavgBUPry8yet9ZkYxyQyr7S!%I9R}n${Syqxak@=%=+nCm zXBpmCFs{PI0|P5|)=ra*b(i5>jlhG)#%K8I3xfs2I**75ES!dKHBH09#wY(rWIEzA zaKP`8_Di!39>*2C1NR!ki*t)IifQDT1fH;oU*PfqroU47O6WsC(3EX>RbOG23p*l= zFKje5!7yYQrV;Ew?`%4S=}js198;r(JS9ED6{>DW`;MY!AN&&*XFcFJQlxtFoOJ$0 zfY)7zhbBBV4F|7rs>l{!_#(fy!1DhM@?*9eDqNqA4c;)ZrAyFK4kaG}EBEODx|eM= zzr<6}?8XC*S73YsqQud9I(`e@3EH}{U?uff7ILg2J-77ClIgcWm{q$94_5F@rQtNg zSvS*gR?MDk~S{dKr7PSgGaL^E^~I1MqF4ZQyRhi zz~ZXn3EvyPOY3AhdQmh&C#>lPpQ>SSNxii$_ybaUwwpKb$riDb(7i`Wd_kD6 z?n>MZwQb8{z%2oOw*ZCo=k<#0IcS&jMIyTed5Qz*U(zcferYovZVxtV@ahH4YJLsg zMxf#Mxoifo4Q`Xsi-`Y%e_QGWgtmK=9whlBN8eDNp$pv?jsT^14$}E z&*XAHkx&^(Hjd8a`$R%zAb|Sx zT({$T7}tN}`W3EsanT`k43p$Py(&@vWn42hfC;8f+6!8FwCLU2$tskEY;avRM0z%u z)zh|bBGQ&_BGN`{B534zX|bA3+pI~Eo(d-7KB~edYa-GHY9i9+Xd)h=D$*QeUx=yC z75p#4vpt%GgvV33wEdZQ5|iS#!pTKJ3>hAlvCz`x--JSJOR#=9>Q6QtBUaL;W7i?; z#ZkyXaH!#$#KeqSHp%GrBV|AOWXr9ia)BT}HC%hSSn0%Z@lXCA;ewvYY!l zoF%sIgmze{9lxKl=|p69c!vw8f45&h}HBSnVh8buF80r zH>Mrld)b86kCb@nRTW*Ai-(T^b6G*WZ_ZD*J3T+$1tNPMX$9*@zQ*q`+Fd*zy|zJ= z24C<5ur=QWNV^fl{VA7{U?5Q81UAX$bG!wwe{pi z!;6-&g_IShyj;5gWK{QTJbd;I(|~EQUq{1y;9$-WpEX{#a&(!jyM9@QD0DDT7|5z^ zu!+D=S%+zw=Ynvvq`XFCsqVdQ(Cwo zQxuadHDWs{?3bi+3y?w1XFRhp*XS>JHrc9=-kQ`CKOiI33a9uqb^8IZz>?|<;?yK{ z07FRa`s#Z3M8tbosq`8;&*%o9v6*Wj8;&)GB$qTUZNGe!YYI#=2GfqYx{8920le7+ zug^MSnGjXic|-WJyGF*KhB$2*KV0Me1Y%Ws%4TSAF~$6xFGsP(?!m{wpaz9-H^WTZ zn)7K1^^p>0O5(C4!9?OXzFABh^B{s4r9#Pe9D-1jbj7J&Ej2!pl;-*gvGs(3zQ2&^rN@q<4+OU-XXn*5MVu z^Jr+yTF@?5JlnGY97u`3;%ZP31>BCrMu;up2r1& 
z(Yf668Ms|+TF!9VL4LxsVP*)e9=8<|nw&{$>9QZZ*~GzZF>rRj<+96KI!efP2B&WH(8E|ia7$$iun8QF4O z^ldQUjd)#zn;9e3gm)OYC*&~_Pg9?=^rC$TS@9mVk#%$MM>fO{LdN=hJ7!Tf+(t-)H9)7n`6JgNPpBnrz z1fYjzkEfmhs@gy{N~-J2Ch}UR@vyTRB-J5Q*|zgGvSZsh^{s5zjNMLoK3t~uey?sP zY`s#W;bj}no3CY^1Bq~+%yshddkJ8ezw?`P(}K^S5jtNDo=;+1cRI%|hul@{m#Csw zug}V-SIYGS;%S`R#=27dbntt!<9IF=76o^qz^n5Mpi&R^a>s~rJhUt<6^elIs z+a2({f)}#|P%D87xSjQ09dFQV08s0uT$IqBSG*w7wOE9S@GyBw848`Kwi}o%7armr z25(~bDl~@ERTn*ClLr`GpWv8oW z2Rug>m|d^)nHk`kb+hxS-dIE)RG0lSUlH=k{ccn`a0Gvc!a=fH82Fjy15|D2cY(mu*L#NJ@0IxPNqjs-f1kwPPdDez z@5|yt=b7aNJ0GC}ZRbODtN3^)cK6gC&AY|uFa4?8nPxkYaF|}Sx&dd&gsepIU~|4& zxDzKI4wXUXh?n__Bu)|*tzvYMHCM4J@t>WIFE|!@u-*r30Zwdyq-GUE&Z*l9@}G*xBwbvR#Mv7dZMIlA`cpq@XCcRE)u^d zi?ZVDjbhT3^1PYWbgXp$FLci0kbZBp^$a6t8zIv?q_H8E(!>dn1O!ID*HJeP@0B}( z*mvpmMGo^@Gn9FARR>~&w~_OjF_p?TiMobbaZcV-~0A zxCW}@e~(is@DvnO;}03)t2hgEe6(0Vj6F`LstVQV)V(z|&H|@URn>!7Ls5Y%z<H>U5s^A!-{)PsJa;d{UjbqYkP-82WPOIDf=5x1gY4CGPo(0!gTj zv#N^jNMuH~>vRq&DkwY(hW5~!<(TLX3egzGx2w1x+$_XwnBRe&Laf65-64GPhC6-4 zdI0x>Ul8Ij?u~G5Z_%Ch0GQH;aWBUG^2>n3onGHuLokN_a@?2Qgu1vNyiJG~aK8)g zYX+t$9$g<@hWnR}(YrZqr_WL3BM}Zt&YRlM*wEbA+;kFEaEczJyT>D*JRh>+{xcZZ z__ky`+P5x26pt@LKM;|RU)d(g&c?eaz>Qz9vUM)*b$|yK0-n=DujKO|LImZkj+<*e zK7b!8&Gb+X!zaL=zxTwnWm#{)9c%@ROCUxQvv3g|*}sm9;wic`Tv+C~jLO}(&;}%z zmk?q6vIza@!e4mqI9+_e=?g`a?=7eq7Z6_bOBW9nyi}}?k&oSw>QG2^c)Aey%F`U$$|E~L~~a7R!V4|405 zaqB;;ZZ6Zs;RnL=08+$?l`~LSxKDRjOMaD@jIprl)bCui9d0^HlOgDIiC&ugW_-% zY2%l`e3$c&nV87|;=BX=3q?`Y8Kt`TW&vTo#clnPOaD9nT(q^oAPKitQ92R5D-rXn zg6R7bj5&WS^`dz!)qS32Uds93<&*r?jM)`zolqj~ukb*J%0!ZXp&t5#XC?=ZqoPU&cD|}Sim`~~4N-BMVORwVe zf#QZqy7)SDqKhZF#XoQ>OIgDTkI{!bUhN9ioy=(~r(b3I37mhKCHx$x|0*HMUoijo z%Sn?TvL=6oye__gAITakwvEq&uI>QMIPvGI+hCJZs;KlCuJtdbe2FnHBR^J@O`y`h zjC~)e%J%u(1R8}a(IVM>7n?uL@j)COeW*M+K7zc2&PClcgy{&V*DU+>gvW{_B1Z3# zqn-jiYz0)$T>d!zT_qwmiAIK;kei?;rW^}F8mwlB#Me7#> zasvui{dD{v#MPpm%TBAHvM-8N%;m%CpN>C4xs~2l2j%MlD*KkWka=E!vM&R=h|8Lp z=Uog5(SzPb#COD{co#iaobDy)yC$Sp5_GS)jLZJzA?QAFxp0E=LRfu7JRq)M=&Kdj z$tVZKb)s!BR^2(fZLrlx>E$Y=jaih&G)f=MrSv`*rBkyhZFErj0MmcO^c!;sb3UgV zb;=iUUgi86KVgz>SYf6g?IG0ioczmW59Grf!Xe1UoH;{2Dnr>}8O4W?Pn zJfCNL5$9iGEgfbpDV%?u+q#eW-@^QxnSML-S;zT1IDawIuj9JoIPc*69IiW+>wbyb zeSq6KjcF>GW;*w)lH2+W<4<9H3G?q_{=Z|IXP9OZ^DL2lXZ%ja?`Ha^nf|YwpU!Q4 zoq6tNo@cU7ma|U&%K6`OekaS;#(aLjJio#DA2Q7&O!H5sDdc=1<8Nf1+gP3rkf%w^ ztM)q#@oGh6c9ri6C~a3LZRPYyLHVz8I+-yOIDOe5{O36RR|(}O2PoaaJQpyxXD1ORz?io21GAS8 zt}On$(l+?6(TV(%d6d3V_ICw8ku_;6q%EKHIixpF_&idldO6bR6@EwC;BP?RHfVsp zNnFe6oblJAkGFgNu2hMuJ-2{Q1e_HyC;!&jn)oR94%FQ@@jj&6I2|+o0pva79znW& z0_BT2?=pUX{CdXxa2#=Xbo^7mybY2bcOJZc^l9N zDd-Ic-bAOM36y)Q&%ql$3sX=S>-d%wv>xAF-=2cn5na8If;Qvbb;m)A%LRz2rlz2a z5u){^peykP!VMB~AF6nF_E_GSG`w-R*QayhVXpv%c0MRuRTX)QfD3FDV+NXK^AgLs6)IUA@`Tc z3zV>U)r367-wK7rTMS(#-p~Cup!XS~amW>&VhtS~@u+;Lbcv@Jy2@QW{yG$85$C#vu?5Q7sTZXF-^HQ6cMW!8WI!n8@_ABeiJ&ivdQ-Wt>R@QSn8na8F{$)PKyyv#a&Hl!+i@ZcUE`;o ze;kU6^}m$Rb(0QBVG&P{iw@9pYVv$R^$k?G$VNK$MU37@aR}VCYFf+Py%0jUlP|3&iaV z4T(QjP}vJ6<&rV2;sWtO3i=>4D2o3`Jf9TLR-dhXPE1HaABT2{Muvt^=9qJ#c+w*)l>06=eMbd!LwH@RmG|? 
zlwVU#Co6O=W6x`%L>Ud^5coOt6y=`rl)tQkI1nGrpG4^%mU#ECEIlHG#A|qvdmR@xFb2Cgn(s#pii&)&%`CLRPP-r>f{?oxkn1+R|7L zRxP5@1l2trh9nt{CO6k%N+@-EGVA=B307P91!LB>z1MO?j*g`kA7Of#k_My|e9;GodZ^x(e_i!sRm9_k=RXbitJ^Vv%O#WP5jl5SOV?9Sr(CCB|L&QDR(mLTOMKB_q)&*yo> zC5Y+w;nHz=@x8`vxb8q?{}Hag;<`ilaW_y_h;xlX@ej~ZRAeA^;`uP;tB`s{6HQv~v11(p>Q@<6qalQX@PiF(2B81y(+i_>D^`R zNNM*&X_(XXNLN?)b6%Ei=KKYmUd-v0Vyp6`@g;D3qv~s7NNpB71Ho%tt&|9um3l}&1Q zNdZzv=~$!>j9((2X8zx2`bUwTq)Y&2sq&)Qnt!%Zhx~das(e2GBk>y3{EpK)@xCho zN=l>3k&=(_76DOCH8B(^y{De7yo%P}sCrYG#jQLm^oqZ;JWlj65BW<_>J&utA>(tA zKOGJ{s&tnqYE*fn#Hr%BMQJwjZ;cJ86U5&p7N8YM-%~xlSDAx?*6*t-Rp*M|Pi#~d zi4P~uL+U9$9T-}_Rzph)-ekD1YN^`JH0Lr6+AZ0rp39UT<>wQfIQb*`SXz%%>E+zBB)?od1y1qntW4!e7UE59j@y1~@I&?p8X3 ziu!%!uJKZy3hff*?MaigsPZRQ9a4&3lVV!<#&7UA0^r;?^Hk){Z%wIc(ClDn}5d zCowvoM@pl)gU699;Ra3Nu~V22^zZse>_+K>+7;SaWj{daV;F_&n5V*Hbd&aQ<*UGd z6OYDj(r%ylTkSpZSmobu_*`*Qx&pH(4Z$!{aBYv7*puecSf=7P)A^FeM{o-!e=YA0^CYt?alQiNvUEgiCa!To3aHuL(_Wes*F)g8SEv47t8Y?oD*lGv%6;(*hgjqCA!-Q&lX zRHaDE#Fvp)itiwuAf|f#cmmvtv`!pA+9>WtI#tZ``Na(JE2OY=q_f4|EI)qt?k1!Q z#o}zgSR^`;E)$>i`!VwpNLPvM9KSeMj6=Fc6zBS}!>UKRUNjkgJo}rEv|qI6`C->l zq+3LFzz=<&jC4@ki*&d65z>pr*Yo{iuXqsYp;5YE*6 z;;`6;^k?ESq%Vj~<$m$9xCrU1V#Qd$cwMYR`j+^6gn4smGKw_AbmmkHPV-r-y(fgc?aq1%6mxP zQvQbY9p#@$@!Qd}+~R%3iSz@-i}YVg4pO1!BXy`_kh;~eNVC-ONOL*OSA)nGtMy3B z)G0_S)e~ny&+06sK|I!Si#qic=wzFFOajyEU(^SmD2TpT~yHt|%9?-bO z{VK`%kV^WDZc^capk*6ThzCaeodqN z5r2n|Xev2B+ef8qvIsBy z{}i72l#ZES$$3i0%wOZzl&1NWIYe2R<5OlXIG9WMoAM}ql+&B?DX$b#>P9sB&}s9a z;qQqDumbp$G0FsGin3d|QGHpRs�awL7&#+F|V-?Vnnm<3vY`W09lXvDR^=;~vMu zj-NPw<@krA&e`Og=3M7|)_In`L%&kLNp~Qo_Q4BgiE)T*Cm@EcMpTQf0-m6MW)9vO zDCtC6XHeS3=>uhyzb!y%f=gF;D1Ww_(v_1a9XFBEU5t5j4CR+G<~q**p3?%Zd%vL4 zT&Dasr)|vnInEy#N2N98lzz38(hIosSJjmN7t>7UbQ;U>QYGPs^422#4yRu!-+=tg z;y$D&jE^G?de1}Z4HD*$6DgH6`-0n%CrTCC&;)(!j2B@L>${NVAjVg*iujNkC{b~m zpMx|2NX5DmKw5zRRA{dVX%Y4T^!vf3NXLkBq$T*WK?Pievt!sVF-E zX(i@}3W|E99bBB#?^;0^VW}7DW?D6onIsNN;D)*5yR7 zHX30;mV|pFO=5kM=$C9)w)Q2Pr*I20w=R-MxhLat0!rdp%i6$l9F5DDxMr7-m z;hurWvT!uMW{PNw#gmclxl!&~IKCCAW41m;te+zK@qaV7N&-xuY6)QGR`x;Ub$yZU zR#`$sP17wRkiPKJ;Iz2RH+DRw2v@}keGJi_b zl+cWMQ=3nk+A`y$#(9lRQ|5?yOI9xKShBReqpdx(qJ7?67GY5f-o4_Ixl5PK6Kz`) z$w+TQ>r&CSLYznX>F5w0{qaa55|5%s+n}RxGK}A?>|PN`#0KJB5r)r-N0TzwO~tK= z#fh$1yeHaeacqh8n0AwBSPA?pM?LCPzBk=^f+SC~C>uzeA8x5~(?G`Y$ z#o@m2x=451`mlwR^0h~{Bz565_C&id@brS2i4XPn&yDqlqkS#$2z0-)uLl-vmMs8TxF=*9VN1AceZ(wTHLdZa zmPkAaqwL}l>m5jvrCUtRDrBlHvF^y6aKb8T8|Wu(CR%&C!-?Kd60gQ}4nTFX&#-{L zu4sR_XIUiP8%>Z>hLg;VCa5LTR?^dPI9(fAU%a(%Efg})myE=LFk4AiqA5vJ7J4+x z<|Vqq{gIXNsC;S9suA7L*By@ZnBbzwx^UN4Imf`64Av}x@)OB8)EViXw4E9f_O8p@HOj%oQ33 zv?49WDjO!JSz}JL4--5ky@_4b+tUVt8tESBipbtB>!-mp@l2j6KRiG z+Au3hXGQ+e(m&g%v^;5rCS^-39v|q38MVRb!vecvFhHw)sd_md=ZDEItP;$u$d-9q zl99edv@=R;Xo_EVB-GUfKf^YiE=ajR*i1hhm{oyRqn0qHYPg$xy9mW2bK>Eyjge$x zPPixB*A?mJ<)XC@i$b(}De7`xI_C8a^v>(+HjAw_f%w2LBwBi639Dv;-960}=7goW zwHMyqT&b;^(q)?3I38hQA-9pz+_A-55=%m@w7ic_V>QidSiCKasm6pB-t;LQX}1ZE z)S1m7?10ON!?;@$m`Ksgiir*kS|r>H@^s-b`b}1{DAKnsx!zQ_{~6SQbk}X@-w1Wr{CKQ4g-mjTC`A_*h7;?#>U_*=#AmY4 zEBm5|IU-_yJOUoW!JymTHj#W4; z#IevT%oxXlhXoH5Z$%s81vPwpahpe(=21FLYF(M+Xc+_xV zct~Us+hZaNcZ{IODz;A`Gb*x*R*VRXtP)%NlZk?-izGQNvhX7VBda7G8CjT7`wE{) zHi$smA~Qp>RBUA7hxv+haAXmfL!TKPr3yz4kF3&95g%EksbxMRKuVQzWSkKprHbIN zGb16ZWJD-r6(A1cy`JcZM-pa^7Ke7)cVWTDW)908#avi*Of1Duq4>H1igVMeK7~PC zsg=ScgiOSs75=Y3#+x`^Nk>+rK!i!D{@fULtyq*=7GiUZjA=j|31p&ZB{MM{{n5TY z#0w?}k3xmC+*wt?xeWwhI5)Mo2W+jX)N&*~ANzy!;R`JXR`V8JTO>IO(ORsUk{WER z$lBMv#LC_SKw3;S*5})NT zkDsI!@tIVD@m7S#fYq4{$rQSJecnQZf@d$O9PWVIz0#4jW)l1_zXbAU6N85sFBydK7LeBD5LCCog 
z5!g4Q*xVz_+L)Fcp$n!AtwmTU;V#-_*kEgn7USd$9SEmRMpz~Y7`OO#r;BJm8I4C0 zEBoSB_zD*Z-(j85h?NwJ;=qSy=J1lPSYJ1X-QgZ9Kr+2ay8e>L08U1FtN~0c3iquW zK){%$m>cOFShtS&r*YO9Y8qp8uRWTy6E5wce-U3VfQ(wBqbx%hI)$!<9i4e5l|9V9LlSqhSdxz|pQm1MTf> zcvBM%rdd)6cb~sm7IZi&R|GgC>}=(Z*u=EQ%uL(HXupMn>&8%`hjipBnM4z21}w~6 zx9qYBnH6Y#CdZAlx;~4rZJ;wDxpZ^bK=P5YhxvO;GG4OUW3fe8DvnFXdOJQL`S}i< zT+wOy;%EZ%w`=`qJc%wz2ymaWI63AbP!GV`6bJf8(6&cZ1sH-}}VEfU!%;1qGNwixTq`fv}M z0FEc4y~vv@nYnt|4>j;iClZGtQb)}Q4~tz2+eT-UI0xZi!m>l+7VATpNXH627B)40 z4JjX+Xz1?g;b&-~bEg=mrw)e{X{dV9D788O021g4RVw1h}q$OW0zVk$Rb zm81?$Ov;sgz4CF3)P#v$hCnjXEhl6~K_bnx6_1Pr_9z{kk@F2&<4Hg^jk!3`D_uzp zYgD+8^H5Xslts}bRxzA5!Q}DOCpf z(@bJ(xg-6jTT7o<2%0YQ2l`|&b;UZ{0b_iOcxtAiRSxVk)-j=Qi zucJZ=XGUB${IZ^KlGbaxVWn$frYGQpa3~sGivxJ8r)h$b^D)8BrVFe}w7{jY zu#2T@r6Hz~!;hfTC7C`kjklPmG2n?l$cY`r^AT8U|JL>x4`d6TcHl7(#VmN4jU0zq z99|dg!UBTF8$upCn?_^eS}iTm$^jZ|9Az|loV^u>ORfQC9eEa$X26VE<>@%041VM# z`mGGjHj35J*vh`{SbH4yLPzT~%Dk?c4}92oO+qplOHlJnTO??Y%a3s_4D2TD$`(d? zaMCa25S)c4PXwhN-i!HI)-8^vmyD+lS5pYPvZ*?|L)-KI)Z8*9Ni=X^0ZNOv3F8Pk z6%3@#ip?^qJ`>nXPov{-JCstlty!=P-oe6+m=N>;q%W4FNs?N@E!i@?b_3N-(?Dfu z0IwUQfsSw@fw)5oC&!YS?~vgzWvwpKOB7f;1fGcV-8wNKoJEr!m$X>vr|MP>_KhFSCgsxOwn z^VozXq~$YtU5xd^8`5Lk5yhlp+MJCjx6DGodc&!4PmJ`sQq7jPO+RT!K>C5yLyv@* zzs1sMqQMk9jj*l1Z7Uk$smA5zj65IKZQ_{H2Mcnnrz@CY{Z`;hDn+;HS=7?Cc)fu) z)d`VgH1&-l?S!CpE+J%RQbBVaLbfKNQYY=vu8j%7vkY<$^nU6N zn{7%Tl-7fZmUGL7mf?U6Vd~Oa;2P2!x|HCy*FdoT@QxLWAjBjk*w-sl!zmNvK*oG{ zEzE@uKUWK~Vdrsk2l}NN$a5#4{>|&*3=lwcMI^>FN9o!UyxLlM%NXPtZG&Y=>_i(< z(--=LoytQ+%uNa#%m}6rkAU#}2S#Gq6P^^434=Y ziCfECY2KO06&y0abTVFQFyFz#&I(p0w@fB2JcF4dznB>Uq*ez+MWY<_ksm?pLob2xh7j*#^(6(H@-Q6rrA0o=<$m#7Gol@HmnSU0AjT)cyjs!=YN8`HknA5w zip6+Zza+9HX*oQ4J&DzXL8dcyfd_R~_Bbjd9i+~=JETUixlMHme8VNae~lE}sIp@MBUEn&RAK!**YBg{7v6xN%vP|9z_WzG?~BS1te3VS^h`z=l^f7i`!?0N;W4M=?v03xjSE zQ?3JMJ^odU;q^0Vm*CfrFpIcmH)?m{`-sS=taTPzjG}g`);!r(2lGpS^8hHiz;8b2 zi0?|sn*g>S|HV+21g;xz$VQM)fF_1=F>MYwcH@27Ufij7^O=?)7&GvF_(wUz;1>l| zJN}uEe$a>o$FH>nH0zi{6t!{k4PGQY=`TZH%a6~8G-dDET;>_h;E^ezX#Mv%E&)dx z>lW^HoV6wOB=AD`@ia4BwqtYZ5LOw<+Ktw>2+{WMaZlhY92yb4Mwu}pWSMKBqX^2x z(tnrdLe!%^l0}T7fqzq%36@RvuO0e~VVuau*8K;)wQDSGe;K{~1RESaK8e&Q43ll~ zr_q{a`81zNTXQfYVwk^+VbuwqJ4wtvcz_d+&&}d9qTZy;c_+D?h<0es)0ijmX8IJ& z*Wu$&^D)l0A!l?4-h=Oh9pi)rcN&du__a@8OZq69v0?Bv=fcuYu0LGgX+GA>?@0TP z)_tNmWaWAk8u$H2L$dhg3y2B+-2(kRw+=f#x#prJU zpDyTVICq+v`DN5EqvJru4(GlYyh*3DQeel&_15xgLwb;XmQsg+r8%;hTM+fbIoaiGg&ZwR zE#(nS!>JL|L(Az^SGI8`B#|qVIK!G*hD&?~co7$2tc6}^J+^$B$R^%#U})_T9?~bR zLBh2R-?PlckDU(ZW%Z3@r5W8I#Ndne#zz-&I2(d3)1Yt%2eeQTg$AU=PJH}QIV?Ol z1s{br;PX)HB8nC`mmYM0!i@+%ndw7HK@vHk9_FzOFqv}{+@%`Gw=*CR**gU;|Td(SOr`6$?ID`V|-wT0^32X~VT)&i0}5Pi8c_ z;y(?0Qr3UQ|Nn@ZeKHUkfGG(hX@%nkKzGOSj^qxB+vzkgK8v!QyK54o3DL)Izwj z_Kfn^cW=0G-hrj}9bEg*g$bcm3dK>WC~hYbqkyu0XQit6{ghQ1ThHloy&!M!3dK*i zs}(MGIx9uq;6F4MgML50SXBLf{AR7+UtR{l@5c|=YDmjm!a+9)`u#3o^9KB7_da5x5tWemB07EhAD`svEjT zhg=WQNGj>lLStIys8lBT$nOm=*3ER~AOz4K3zP zMdH?ls+8d$4-kq{=5aY;#b$SMoO!M+?x8JSmD)l%AQvx+~orTqa@>4CZ zOJ`~rnh*V&s`}l2BM99)kT-&5F3yyp9jz>H$dfmeO{V0}8+sAr2y1>AMv2ZkaOt>Q zxIDOgxUz6%b8BwD%j3ql1zFYQRTl&PbJGH5MOU!x?FE zC^ZZ}460-6Amv`$>#nS*fgp#1|vNE>uhp`Iq080N%GuExS?>epH0^@n_$?ikj z14o~!e`WdL0eqY;9E1E%C*Ys<68w)Xt!N81;YV2T6*zu_^PzN3Y+=r-x;$Gj52 zQ*DYSx)4QB1X2mEbns9K?0V4CZh;`V?W#n~K{OQ6P^8g-yxf9OAWxf{xLGQtO3N?< zc-a;O{uJjli2OFOTC`y=*NWYKBmdVV1`jOb9RP_Ng(_$#u#6e^0;bSM?ifO>D%vY_ zLL6dhb;%Ny#HT!Ui@MN>fT|LdQaN;5n2xE_-R|YiB|&NN8Q$el#GWDs2d(X0C1O7c z`e`#m8m91EYL^a!NCc^Qi~Yz$-bxm_lJ);-J20v?n<18E4AwBotsV`9)Cz40q=425 zKqZH%>r6dY;+Gp*upeK6-)^YH{)Gn5+CF}AS4I_UsyNeEg4rB z%9A7ZNopIlt!ZNLQfkbKt|>C_Wc|_*e-g{IS&-S#q0MF<(`68s)UcL0_2O`VW@Z~q 
zC-b;|l4M`H!WdX%n>KY zung0`6F)6NK9D>ViAipO3~m{m&nGwc|H2MN(e?~6_@UK-5cEviAfahycCpyf_hoK2 zsT-rw+MPN1RD8X8Ogb7tYK;~Ma$2;sQ2paMDQlsidU9%1LY@El#27^(Cz-QFnh?#D zUWiEQpktx`W8>F^6>RW*I`gM9L9%xHp#m8qnH)`Z`w5qQ&d6?(KcweoG_&NoW@-A< zF3Ggy(C8gMoanSkYB@5J#*$Rx=%7)qp)^hUQZ6qo_b79*2?tw4>Tq{XKc6L!K#- z`f)rEAcskwKZYDRP^xQ9BUzUoSSZT_}E|k)*LDUeqO0rwoCmy^=Sy zzn4L@mi#+?PZ4?-5AN7AE7HGnzZSdyN3}Y!omF~{cJQ*N$>3<`b9YxnO#L$_UPZZ`sK$?;A zp{Err(!wH7v1vHTB^@0q|XlbIEt&xe6xyE5)WQG*>$d?xP5X=g)c}bez75Sf9o-|dmaZ@T?2J^flvj-OGqzSXb-A7|fnsR} z$>*@-#if6ybTl_ej{(WbXtG!l0llk0JSs83G#@Q4o8bw_wbJw=Inq)|Yo#He67m&u zaZm3n>=H^Npd*X)-jqW3O8na?vd_n-Zoo1j<*?pAkgZ!iAH`Qpp&;%%BoYZNUsIqZ zbhOP2IZASR^0g{bn-w;XroQ10*So@JP8f`6ysX7=c!_;)L8AP--3x)gw4+a07N;N-8vJ+#R;&eBWYYST zW}IHZ$!icqF&(-6(Su}Kr=@|e16_KbCl`-O)RY~jpoqLby<$U4kMsueJWJ}GYNR)> z1g)^uCDZx;*gF%rCa$jGPgV$<5dsnxog{3D0t2q#3W^dyir@kW8bB;zli~`BMzmJk z8iq|=Lx4bVgScX=A{G^`txL7yQk7OKZdGjEx_#$nh~U=td4BKnz2En~!tXzKW-@c< z&bjxVbMBp)dktqI-O+g^szr(Ngz_U3;7p|947vIbg#yk`ac9vm=ypSHp+2kE^}Zj} z@eHU%=+@#uJV@>aG5u@mi7LXV=pk#5R420El=gUQUFnBSH*|U+XB_q3NFjZwl%e;k z{)0}8H#|I7T#tk$0ZMQrTpYasA_qlcXev`VPxWQ~^>N;C!-krRIyF!iVCSovsARof zcafu%0arxd6H&?ySscnyr;0st*3nBfRH>kp7SbYA_ZgKoR1T3lgU)+f`rj&YaAi)s z`HBV_lNdcPHst$1Q9$pyiLQkf)qkXKQKjnI^2mVOGrS~%>t7DZ(wkM(nTh)AkU&vs zGMLv0C<|{(^&9?!H#k0v*Zf_6c|c^8enxdI@&EeGcfIyMzx}RfQQ3veAXP#P8pxpY zk?G5T44nk!8SQ)f{k|=$-d)Nl)qqO2!L6R#5j)B*yrV1|S|x`2dTS|!o+=GW5;9jP z#}lD3OuaRn0ErINYodgI>{)b;$)NF2?+1Dd8O4X1ePf{njfAyV{fxi!dEc6NaGKbL z2qPeBKQNU@0;y0JH^Ks3&>!*-h2dsI!{sgH8gj+Y8sRu zr~&;pZDPnZ^mL|crKHmQssWFHvJc6e34LoeoJvaT%-#5+c>LLKU}8(~iW0^8VFkfX6F+XsC6EocVN! z`>lxwm*)j)G@-;(%~Mo(rt9+o*?qLXvm=G-BcgH%6=CXmjlqH=9sQ;mL?z_(k3c9-DZ+)JI-Z@s>3I_osz#4iz zZ;;;GrW6tCOv{4E(T$_7IfDz<)%pEvtAZ*GWnlkKBwdSk7@UPfO?ALc`?pi{H~wW` z3|y%%MnZXqZ`B~rP_qSl+X*8i`l3kA@zfl0QC(7^+Xfhq~V6q3LgyHe^kwwLw{idOnbaK&>8h zCrDFKT8eVXQ4_lJrC7>Id%ZpK56TjFl($|Nh~d@2H#=#NVnfd%{2*zsKD9&6%&V#n zDO*1Pd4x#oQ=OGmc|=DWw8upFrSq#ML%F7^O4sgqbOsUua+==tjO4E}>J4wRFY-2R zs1|2u7X@`;WP3-$g)*U(pgkV1e3J0@CFZ~OVGSy+NNrOFG+Un{suUQ!2b4y45DUt2 zB&ICbdQ(i)(I{Kdogq=8L;bTl95mijy%;d_v#xfhAjOjWttMK!M&qmHMoD2%|l{&i{kDFrDt*WcqlJO zvaiaHDn;n>=!Rd{Je@+Iv{@y2gmw~2Ar$G-P>x=q3kaj~p ztU*+i;zebZ(sy0W#9ul0$b_KSP)x5{K!b5XzyG}?3`ckE>N0d}y(y))md$=peRExq zy^Ms)1|u6mvXBlkI0{q+MNd>wb$r_~omC%IUnKm$YA+ddQvf(1BjM=IuE(y%!k`dd z*KMy0nDXOM8qpb4^8#HR={-ukRQf5)YS`OV6QY{%YIg?m&kZr4j<3#-$lmPSkb<00 zY=#!TRDbW(b^iIe2ARCA!=+D6M66c}s#5J@_{2-yIE@y$$ zj@~{q)Gb5VM)d%>K*)#ftbs^RzpdGfUY|sk0crEDR;g=kM9x2|$%co*R1rZ%9_^(% z{7`6kko$K<8l?oKn|em_dV?PcuCsITUzb(aO#i1vijpLyozOkK<0*VU)Ou=vms|L+ zyvbp3C0~81;IH54c-6Tx2+5#%sO(4Wc|*B0G$0H|pn`(L{&p|lPy|rze)WC5w^}7= zYdLkcj=R3SMm+$c9y21lj|}*mo&H_*`0ELL(~=?Q4S6;w`34*Fc5zVt2W2ha?A$>e z59sz$YYxRj)ej^xl%BVz7O5&ExX$|ZX8H_DnX;sv{?l7U2YvL7$el;k2UQ+a4Um~c z1(!071~ZMmfka07Rq>%MRQ+G?x#>(IY7-#^V7OU>iuezEDg)s4CWFE2wDIBkWT4xm zykjIIlmmZds;F3AKNIOe!&`M!nR)$)*X57e_tdjaRC1^{Dv*Ll0;UW$<(X1d2Kn2_ zBt}BNY6yghkRg6^Hns`u?F?!1EG_gB;V zdKSF33P795!|=5F^@b+0B(FcA_k(;feAkR}1m(+Ll_TVyp~su3yTD+*kaavhZ=sy#z#nDI~zQGbyXvJRD@H_IdCR{Y&@BOzw$DW#!_K)v&W zN);+&Zxtf6k#a}FMGb9ns;HvYF4FOonZN%;Tf8M<=;5Q{2UJt3dTw}~ z_|?MtHVt4fT~yb?zjiyw97^@2;;)j8-ik7)TZ0Q~sO6~gQ9`4>iJ+bYQjJZdI}F`R z$df`xzV+6juU+%+-8wRFs7nZG0VFuoLu(L-;o}38)K0a8+&T22$E$B;!(wXSJ6YAZvu9gs|ko zhCCSkPi~CrfI#hE>V40yJ-5g#%mOp<_wfXDm}#m$P|ckGtpg`4cKCPY_3HbA21ACr zX^~6*^wj+Eg3ejdkOpD=@TKGnn z-F*avqu>w*3n@aFP#L4c7tJuJ0eqD&hb;?$2Hyq4XT$JqF?{yB7(N+>kE=h{e`@N0 zzBh$qVF4ed8pB7au!P|N&==?WB^uBGKA47&kx>W%8qokguGSwz(6_#BKugAKSO^`iy)@){1c;GmHO;9Ltj`g|IGM^~pAJHRK> zFs1?xVt}P_H+@2>&vM~&Wy~@X(oBbsoZ+i%F8a;E7*2OE=5}|3FP=d{a439Djy7QH 
zcZkx$f(}au7z+Srz>;o)F&!-X8PmDl2_&Ua7KYD^sV{lqtK*Jujj3eqhUCE=jBtQh zAj*C$^r5x@ena2wBH7U`z`}sAjX~IMdW<1-fNeVZVHC>^onTDsn4`HT`doKB-3E~Go(n8*T$0dc3{`XO?C%}xG7HXfaWnsymzl^V46uTT7|@uRWsG|UXC zC-1%<{rh?meKEb<^a`tY|NedZC-(J<8|dZTw_o450ic_v4V*U6XF$TVK?D0G_8-tU zv432GS3i9&abF6$aXXI628Q*cVdmk9Fj_4dSvd=acNHYfnw2rr*VFSun1*0>yhp-} zG|y=>W~C?2@`RDR(Y&dU1;=Sv_N&9XX5xn7T=iLt%V&ZpscB#I`7twT#_ZH-AnaLq zd?G#@1nPmq*!Z~NYH=xX9%(Z@dDG|)W8`D&xd39hOBz!td7ze|%<1_VBOyEgzGvQtn)A7V~R4u0IZ)R2! zNCpNg?)IhS7U*h+}Rd)B^|;^j9{NP<%!@6y(G%MSMC8 z{fo!J^^uT4_)uZcb@BKt{ejdKGO65{0q5z*frr`x)dyxWmnY8bgX_n8)*q2Z&6ta) z#bu!k%Y-Q?sOq=a8${GOC4gVY@Xo!YA>M@d0N-~_R)lnvc@W&eOB1PRgKE_o(aSm8 z@wsxiUlK*54e=qL@Hne%Ec_Q7Y)K<6TL0or6HoQH~bIx3Mk%sWbzQIru-rZZ6r^cn*t zA03pKnK>iV1JopFaVSElv4Q@+gue%1ZUnII2h0Yvz*WE!H0l^271#`%1<)7|zQ7Ei z0%!%qV9Q6d{(Umw{4Ky(xF*u#UO*nKP63jD9H0z13fuuK;CIm1SR}9jPytQA9e@kz z830TMRskd7-u_*h;QWsvT{5_)4aAM)pat;P{$C$Z%0D8=UvG}!Jk*|>(+^Z;U!t!U zjO7NF3i=Q*=!=A5?G|EK1B4D}of@MUMyGeo*Uk%i(U! z%Bv%eB>psO*{qbMcEcv^+S~Qxsn-A<`O;#J<2Z06!iMeWTvQWs#l*Y?-yIh%> zg5Eew!Ym_45$0LUb4SA>9JG}Y5e_+YEGn8Uh!RP~?M$*!6os>+nEAU$6ZHd`j3^d) zK0;RCgT=<=W^92S97OVQp9N}+&R6+y?NXQpJx3C%ak$-3R&mdXGF6;X^+=Y{)}3v1 zbRoRQz!D5p=R3s=RvOV^%pL)urqOM>O-xp^J^1#KwbI2HU2yCXmqo{Xqxf99s7Ro5;()&NJgB znL**%>IMN8KZHCh@X7HHRsZZl4kC+9bjtae&CW%R;Vn)j9wZq`$6e@>hEZJ4=F!}S z@wn#%ykVO1AsNf&WJMc^J*OIppYNPW)D&kb)oK#sd**PIWmzd4Wu3B+=HuC1O!LtS z#CY@5d?vY25~YOEaYCcG;i7PFSB+EDWc4k4@Qm<0VPPa$AV!ypstw>e_+c@Q%mO*w ze|V~i zqk8I|GNbYYDW%LPtpFqQor05+` z995>0=FYm$#3qlGa@7OEU6Os(46;dJ7pg9{)fH!{=;V1p$}mi-8qG}}%q+08U^0?f z>Z9Q@ZN3kS4KXAqW@w>Ks^%!mCK?sl<$V;UW!z_AgHwFCI)W^BNgk3pRQDr0YItJ_ zA7U#Jr9@+#+7VpS`T(Lf3^!ddPF5>Z)+%GzMy5u5)3(eoYyhefOghbvyHOBJcZXUb z=5tBH-=)NbA4DE=zTor&R5b>NnSa`kP=?{MFl@DEK?_TeHxMouvp{{;Il7y=Xpp)@ zt}cq!;&tC6!_GhvB zsjs?yvifl<`6ws+CsA)<@p7l$d0D!`ea=C7Lv@9>U9(HYjfGmFY3XEjVT(vw8cG&D z7tYs;h1x8kHjQ-E<<~g%)S9F9!aFYH7N?SxE(&9$Cwe(TvUlsVacr@n!?kZ`Ms`IxyMYZ*gZ*!pIK|VSS zODr%u;B0qQNK{rxle{?YHS+4oAkX4db_G{6A zUu;WbPDEuz?8tAaxKc*^;_z$QT@3`9)H;Hjj?$$OQp|j^<`wwTObIJahIEO

Yc^)AP9lDO(a?2EMDO%*)I_97fCKTi|^o)l|u1KC&?BU z@n-_bWs&%@i)0lpUL}%jbrye)ORfmTSDYokxr$c{CEJ|D+gv4A1>&ntlHXm#YXr&L zMNtP`hkWN8{lKN$W~X25)jvCgf9;%g&B^2^5rcdv*F7=C@)%I}Cop&J8cpO?`4i0= ze`1B&pV$xK3kbJBco)JA5MF_BHH4=i{2juB5I%r#2ZYxk%o9tP1;;gDJ|^H%6J)_D ztm1hRR&lC?rEvpN08a^PPcq;kVI7qN?h@98B%lv4L&Cb%8%US1o=AW+ICrf&T$Y!b zH&LdYVa~dsiqFWaiqB})#AmEf$7k$^@CAfhAiN9V1_-Y}xEjJ!5dIF~K?ol}xC6p# z5JDQ+CC7^sGfMeGj0#qiWf0mKLyZcygmW_LQ%yJ-vAzd-nfLhJ8e6wh9?R8vHyShQA5ZQUhq-JRXKJEU=U&(@tYT95d&eqq}BMNDgxM{Cof_6yR+^Mf1D4k73L zj!S=c4cdec-tHtl=;Zr{K>Dfc;I9QH*MvboIStNp(-jx4A$l8*7i;3rUzCHwO7BC8asx4qv?b}JR%1hNf zGyY-guLp$HPQWK?#}n%gPa@VgOeAU(m9<6LYz;q~y@wOcW=}TaPo8SDm*Z$u6~yOb z+l*tG*pSIt$hz2tql66UrfPRC45Z^e$RL$D1*a$v3xmgE9AslkM-ZxscycewwwSYx zl8yMu9~iMPGcz!U`Y6KtfFm*_RMjphH0}}!#@C-eT87SF#4=U2t>^YNbH3UL-jwOu6h7e3#8%;gV8=iSN3~S2;Ct$VL;g$&`+Z1<_siWLx6VsxjQ` zdViT}5}w@_PMlCivyNoXXPx6ju^8a^gv`O*lLr>r<~7X1ntz@ZkymEcHa8;gaCT4F zxl!c?%dfwM-T6HS^D{0LnTlm!RunOw*k(mxd16@HH;QEjVvDe3E;*pR!ZjqMl^oPc z_GwkywyMoq)r(qnDXqGxt&PF0jl)~d_HI4ftF_g%wRK@@dva_0lvc9ZDJriw9fueD zqNQA&QZPSARVU~b+i=53@&Q>O0gp-$o#z~+#aX7zI@N-HO|=~ReodGuOQ#HCF-|d7 zgI|>eepNBJW8hcKpQ8Ll=#a;bDitMbUB$Q$^2hk|br)z%IWu1)XENk0vbYDCGES@j{R%6TF3odXFRcL+&Zh-;Fl1z_F0{ zaJZzGnsnXeV(vl5Ilj6*EC!#MUpAT;oDIIZy*+p_{WE>kl`g~0{l}`6zC!$|bGnu; z78*@v))mcXGJ3Mw{aCH8?BdWMw#H7vW>3NTga49=gZqMI85x0>UgZ)JOyGLYhYpd~ zjU;loIJQD%Jd8{7MLuG0X=c4}kJB+>{WqeU&c6tYll|+3B}rtyd3dvsrOqGg-G zl0#PDm4V^w@Ny}s&L)SP5LG&ol^)*rM0q>y)eW)2dm`uXlS0X5*Il7!k1Pn^il=|c z7XRja!Il(}XMDVIyNv>tT97F))9*XWvBd3_kR1MrF~q7TG0cLy#ePJyg*#+Q={UT! zYysT4ksO|@~Y(5r%PcY|K4>KY>YL+rg~(9mkT+- zNg)rrWOrIvmLEf8OzOmPR6H8v72dg)I)vadjk9x-+a`6%Je-bgtr!hLVnyY(^ zc8R}sqS1p-`*Ynx`!y+3>gO@#%@%y7yu~vHr0vc+tDeugl>?^vVih>n(TiAYcQ(iU zw*=ykT2lo0<&t zFU5yG7iiqn$|0@A-IQffYIX1U{m%RoP7JcUx)QC6Pl#xApV~3p`5tf|cIyLV_jZnn zFsq$}o5!%r-MaCKl~p9|NKy%HXCz5KW|}3=h{Q?@drs!+W|O3APj}&1^%Gp4Cn$={ zEOid~Oq6}nRk~kP>7Xn3(CrC4tBY3Jt-$!}T=_Mw9O3S<@t0lscU{os_3Prh&NO=O z20yNYc4UNETg7N>9q9&&L@H+1CJ%;{o+Gil74!QwUH7$PUJnU~F#9f!`7jW6V3S$8 zrg21lVAHaa`@tm@A6CNd+mIDWZTR08GCX_=8-JE zlyGMyVe@0<+{O&J1+l66u*fGi-hym(WsqF;H_o?kak?e$1MS`ralfY1#s1uGQ6QX1 zKW<(nq@ZQz=m@ihFx*s+g{x$XAO@7pU8k5;&XTRJ zF;{TOZ^Dp`BFO>gn5!<5HF)ZFf#fSu>H{aqr!Fx&*g=P!B-dPG{$$(b1**wv7kS=f zVV;A!_@XO$TU7K+kes(jm7nMAgAY@V;!5*I5Za?7h;=(h5-UMn-rYNrxUzdBacb8{ z;vnqXp&Lo$aiBT+41OqG2rHNN`VsY>_RNCS?PG{Gb7&SqZ;LPZ%84@VQSivh<7F$$ z;$?TM;$>GV<7KBR;$;V6A9%2`JdT82{0x35T?i|eV5Qy@R#wALZRQEG=8DXWb>*2E zE6Xx7?p9@HT&c{=I0gF-!oD5q%nUf)sN@;^0H+(3T!Ixi-Kb|HT#yUFW!C)e|@vsq7p9ge1HYfWdY@;nb)&{$y%IU@>2cnEfYZM7_ z&Pq}Ade@MXc+6cv$SPsTR#D8CPBHhKLsq+lY;%pdipTseNZlk%Jt&I#4t!%g;g_+B$>a~d1ziQGP2Z@ELDa`1O2s(Pwyc{rv02TSe*3 z?qSVC$^6wqDbBy&S;<)pLt|5pLxy=Siom~?*ruUnU8}2_^n`Hi62oK2`ep@ z%!2(^%z`b2a%RDAkNkm*VpBa1~=MPw*HR4Sh)$V#X3IfOP%%E#+d z^OTI@-yRh+G;J!9p}ABAi_034QG9=AKC|R2O%c1~T7I-V<3y}Po^fd`ES^iHSuj!= zQ=cO-`W*2gq>W`pkRzsqedA%M5=@#o9~vyHihb)bflpRrgHV#DZWoG6or~5eXOwzp3aRr@>lTp>S?Z&~>Z6XrZBAdLwVvgWXXLQ@&Dh4X z!-V%;Tjk`r57gH=!c8teBxfFU$^Oo@T^|3yX~<^0U7mSJ_#`C$C#RsiNL_xlvq_!< zS#VL1o#!bmjg8M+q*j(XaY}>LMa`~ci*wORJiA^(7Mye)Qa_a}-s%F~PtOI#D@4UL zq9AREF8_{GbbYX{_#~U(?4v8*%Hn71j?(yfLg=jGKX-9oXT-lGEUk-F)-BNO>8U&% zs5>fGK62DO8K%tFLXXy9W&S!mMJrJX=@R8gu21tIX2F;Fa^~aR(`2Q`GBehb#aPv| zJtf#N@$&-Otp$Y))!-nhY=sYBDqF3LjLP`#NIo=rEV5*IAt~St_;}jMv<{{cta(9- zuMB$MqPS#Vvch?nfS;p&B!be0mk3jWl!u*C(%_m*A7=hJf0;4@_ladOGO+o*qQJ#& zEO%NM+n&!+J7sD`LzbtC)M$7VQ17xTfp2{jPma`ZbOaLGdi%LWiJC)kVB=|ZE|1W0dlTcXQd00)kBe-`1 z$wHAvB*wE+TcJOS%p#SJjpT5$!cDCXA}cJ;KJiqO7X=J*ca~ZgOxAW&H_amtXOV|P z)J?qXv(*4afWay_ikSFIu9{(_6InSt{Dli$U1#jQQncGAe7$SRXSAJz 
z<3DrR+b8^_GxSjL?+R1Dbc*`bDSCtO3;V`~5V9$jJUT^p)|Nbzl)2GWd_b6eRrGDB z?);+oH6p2yzsZ@YzJa`c)W|;UY-XXfY*gjM-w}yl=sWyQqvOPzgf#jhWgs&@u-;F$ zUgIy@3f!B(yb!g1luQ}OA_MEk5$i9EBerV7iF*@R=c4Y8CrZVWh|<9hEJofmr82NS zPPs=eVavB_;+03I#3|W%vy}OP&6ydO`Wfw&ON@>s15;*aY}_4cR8pF0R8ntcRAOIR zghgE{RAN!<_mEiBRt+rfP2eccN8K&NqDsXjuo%qYD2pcNDQVL6M~i9F6XV3PySobM z#ci#{^b00psq&U>vQ&8?x`-wHeP>dXvamE07WF%eS<<%hSb4Y0Y&=TRmNVtoGNSQ$SdasstL_`_5+x_qLP@atw2?Fv{I=Q=+?)WeDgyWQU@?55 zZ+)h+?E6evjF^*=S67G`x%WE79cwmS-@V;Rqtixnu@$XY4wp<8@$FELL<~82I2o-K zG?$o?3+i1hT15QkB9j%u7EK?r%tNi}MW%fwxU|zjJ^!*`mj}6Lxc@3A`3Bc7OvwfZ z-Ql7B_nhp1bF$bdG~MQ$dR376yL0p=+$9`$_zI8yPPFJ6Zt|1B>PR^HW@sPc#+TG_%yO0Df5k6pk*^@UtxAkj{YG3KADbm zyb6+m(V{^BcG1v7(O>9XgcpgT92&nBGU)ICl?G2vn|~qFXNQkVmAOzfdRijqx8EpDu*lf&&VDGT}*@5%i7J z`VnKndkda`jh%q?tdHXk)6T{I*<<53o+H2C@m>P&CGh{N1e6WbJ1~%b{cEApg-X>4Xb{FVZ;#{d>o*EW5zh_40TR5>d}B*N=OB`(I7Ze}}K}{+jP4@LmG{J0xIriH7w6?15f@ z5O4;>fIC0{{eVHhU|={f5*Pyn1ED}TFcF9brU3E4bRZc>2Qq;Kn115kOzy)}KE#Lt31aQC!Z~@!^3E&QR0sR3VU?|`R1OS0R2=D3#dFm)&rY>GN1y`05!mN zpbpptoB}zY2F?K&fEM5~a0B=OxCi_Mv;)5Y&wv-eZ@?b_?IPqkU<8-|W`G6I9pC{y z06V}DzyTrP1h@hcz#SlfzCeFqAm9TG28IIuKmZU31OXwy2f#RBJTMW61SSKqz*Ha} zm=2h?Kwbe>Ko7tk=m`h`7eE4d00V&$fE1VjOa6Si0K*}D(*QY;0;B;mfRBKgz-(YHkOj;Ka)3p^ zVqgjIG4KhH2do5E1D^u<0D% z^}uJq0pJjD7&rnn0mnLMhVUeC3iuj01DxyNJcNjEffnF0a22=)+yK4@Zg%h^gl)iG z;6CtE2ah0p4E)l;GYEeL{s3qoXC`0-m;mUn0c0!#T`&zu0+NALAOn~M%mWqx3jqa? z3oHYc1FHZfKmvt8F|ZEU1gHTGPz}@o+W{T03)lnf13m)|0gXTt&$n503qN4xC8xwA;2gg1egd+ z1(JY|fO)_oU>UFmXa>Fn&HuvUC#typEP*levRw^meI%dEq!y{!jX5485QR-Cd{ zT(wrLx1MA@&RVh6dZe{topqe`L~BK}^#|5o){4W{{jGhh6?b@a9+Su7neohd7QAje zF0VV!l4r%^@vM0^ydFGT9-n8&v*$VRdh&YlaGroC%2c`iIxo*PfhlknVm{dhjS z!MqVXKi)`Q0B;OW$_wHJ^FnxIdEvZR-c(*3FM*fHo6h@?m&B9vl6fh-R9+e{oi~G* z!TX4p$(zZW&6~rU%Uj4(Ea54Z@)UVI#R{H+ z4R8s#44ktH(~p-jEF4z4E6&q~Wg1pK(*IOXAJ&~vl(=);DM(*Jx0*3w_D$qh#i z8~t)^?8`MpiT=Q5IA!|FwFLdzDcCpdVbp64xkEX08}>IIX2uF8*Y>h zHwCsA+i-_KummUqR@-oALy!lo0G0x|Hr(+Ld<-lDJ^_~7a3gKFX|T2i*l5EYW5W%x z;YQnVXW4M)+HmLDaAV;Ir44t04R_m2iotNDpyYY6>c8Z5~ibq__ z#izL47uRtexE8Q}82AW?1EHWMB1P5?ZdCG6 zfRcs6QyroH(j7Z2LnI*bp2o1eik3NOu~=ZiWk{$9ENt~_C=LrZ3$znO)A74LCJQ%y z$8O7zj{PY4u!?gdaP9Hh%_PJK}xgXi* zHrX$+wqG*Ue#uVzCBND)mD(@W+AqCk|8Xz-k7w9_TyMWD%6?gq{jwYOpY*W*B+mYm zPwhWBY`@&xe)(kkvR~z7zlyM5oZUW2`9@pwfdvRBAdxC^a2x-3i045JyJ`M>L|Aqa(}Fk>Tk0@)5P(S9-tr zFOUE{>#v1p^GelPl}c5us#Wb%HK>lOPOC1euB&dV9;%+HurlK^ZkcTvUglOtlnp8y zQ5IMhRu)+%D@!WND4SQNC|h2pEL&TqDyuH5E!$VtPt(mg9+o{T!_>xV zuG&_OtKHz^pF!#o>OggvI#MlDC#f^k^VACUaeK3r>g(#; z>WAuQYOLJ2oLg>Nj+eWYSC`k8?<;R8KVE*i{9^g_^4sMP%b%5F6~-0Z3fl_2!mWa+ z7*sK$BCsN?BC~0QC(46v9F?`;&{dBii;K3D{fajtaw&| zRT@`vD{U+BO1Da)a!}=n%D~F7%E(GtWm08E<-AHo2p<=RSBWp!n3<-W>>%Hx%% zD=$`Fue@FPu<}_YrZLuVHMSaDr!^Nf*EP2_4>iv;Se0=Vx5~B(uX3v*ss>e!s0yqKtBS0WRV7trRL!eW zR4uPkR;{g4RaIBjR_&{5s5)MCy6R%p^{U%d538P4Vb#Xf+-louyxOgrs2)^3qB^iT ztU9tFSHs*Q;+=KdgRMjcJXwT&=AZ z*Scv5?I7(4ZJ;(x8>y9Ple8Jyd0K^bxmKxNt5s>MwYA!P+6L`$?P={r?RD*K?L+M| zEmmV(!>zHc!E4-Vh?+q)BWeO`!fGOGWHm`O88!226gA6hlr?K>R5jH#wKe-{8fuQ$ zoUXZ8bG_zv&BK~!HP{y8E!-`(TktJzTZk=#wv5;kxFu{$Q#7j`YA(`a7Z2y-?^wk(jA$bezmSQrt*HtEf#F=@F2=`^M)ln5bu zblKC5;4t8M2UAT~V1(Ce=Q8oMk-E+4k7ip+{>YdR8#ka{ySZ;&YtHtkV@jXj4q#uu zlCgAM-r}@vyZ9W>xWolXS$&EgR~PO4#xDEJ?KQu6?<)R&ciU%U<7XUD9Z?_c8BqD8 zVQ@qeeS-8TbPc?cx$a6paxZnEh>in%8D@n8|Y*gZJ9Rd#~m zvRoEk*3fEKJbm_6?b1KlA02YAomRNiyG{H<-pw1&9#!thD*wf*_EK+R{R;D&lP{&t zPrI`6iRBkJ(|UgEk>%q3vA>nmm-O7Rbf_1!@?6?X$dMUDFUUGaXQnM-^Rn`U7}MCs zP__&yKiqOpTr<socc)dKm`yJt z#-r=>VvZ$(38`v~Dqz`2G=Mh@MLu`#T#q!v2=e+7$um;qGcsm+CS<01LWO~v0`&x{ z2&y4d(ZZb==uQmsfZvHY!;R8t%n3vUG0w0}(3b`L?FAAuXU+VZOUxq7P{OTfv_F}2 
z0_$2aKw&U)>F}Pyj=JYH{TA_#-c;0ZmEU@QF?UBhA&EU*<{KMS;-#N2W?9p_^!Fh`?4u6^5DpCogVeA%Tu>I z{c-e)BK7#ZPr@zTQatYF=7*kpI^|5ie*MR~ak5li!`2-il=LjZa#7TP{PfRjy%o5eEppr=N`NqkU#9{6;03WAGY64{vc4fNwD*C zthMfLQ*Fjq->_))Q45|ck4G!rmwf$A@`Tc&v$OmjhgeoUZno*KYy3V>u`FO*_lt^< zw8fV`+~71|(uue+txKDaYvJ zT?>mTSeM1i|0C+#n;=k?WLSwi%}e+U7$$xslyCWTc|zQ*MBH!otfU!e-jtVy2F%Ss zRCD|L5I)Gz4A5_S5oq(j-4N!|g}b*U-k#0b@ojO`37H+MZTg(s-r+8mo_$bOUqUD53;H`B@kyVyC}RJHZrk{x zr6+&h&setl>C}g(!w;=0{N(cn!7S&gwr+dAbafl#yw)dZz}E}v$n6fID(&=palMjn zxJfroUEb;(drlBMEc}bzlZHDn9-hgb?0H`FeMW4zaiiz%e1_fld@DV7>g5LOod@i{ znGpP=Pg%D%k@R!heWMlx{BZb4X7=^WADylp8eQ7-Q=`MA%S%?@8!IKMwye2!Z}R3H zzg?}J-g>xX@%;N2?g#%U6<1lgX{vHQRNPt-KX0nebLqE}HpCp7>*n_8e%cYYPy2j2 zV&DgdZ+tu|?+Z>y(>IkPJ!cg?O@B5IkL+U=D=S(z?1R2bFV!yZ@!h7-pNnfhAFWDV zd-nQ;<*Q5$Ht;^!z&3)*=5%{vxMA0lGy1mr>EP??h;1x5M>F!JSQjVMJ(Qo5|1+%mk^5!iN)Z^cL>RYK`dTE3^!bkPP6IzSC>P^d)kbI znVx9omAK5Tgp8RUNwd<35icX46a9L6OO-h*M~6+gXhtqLYZvVBc!>YX z@YA)wr+h!m$V0sT)=>M(Z+1J*E%a$^o7OyP=)7#vW2+pEa@La7Kc94=kCrs9;C@!2 zWtpwNpY$ThqevwgE=h@$CiLRS)1%iG|FGoehEE@l?)}|wgHIpm|5Lj2_M1Cg@1MT< z*kVViTglq77DG+jjq@(_I^u2H`mo9UtI3;oNqtO?nlv8Ww*98A<+AniaRHHoygzca zTU7V6>$C6r_~P=lx~ROQ^chuqXB`>AVrys;w_&-%t;S9_ZP+>Xmm8lha+qPA6Hqnh z<_Jk*`H|T0_+>{N69$wlyLRdEvv!-Zb*?u~RhFE75S!rl{bZvJpA2KqWuIZ!&F;lJ z7#Fwu;gzHI%!Akbj#;=p_%6}&UdfZPnBt4rg|g8Hq8^u2a)JZ7rHWpsF>zDf`pV$} zb9?qbdRDF8oSiLv5m?k~%dcZZieEN8JCw3FxTN*&?0I(g?hGu=vJL+8!cI}r>{~lt z{I>G0sp5`&@Q&Yz`^=Cv*RIV@OUNI3X5+-r4-P3N3Cren_ZDRT*q{1Q3R&^p0)Y#fwZ*b=J1(T=GkGVymIY&5xI8u(8MGb7yWk| zu6MRFyEJ@Ni?9{c9ZSvzNM z4eRJ=(s9J21<9-zwsAW@UvhBL>@nS~yA6%=DV?qTq$=jKG2cCm*#6*(`@o->eKS7| z-IndXcS+RIXslzrOsA}Vf$9JV}`clOAcn6GwyuzS}1JI6L}y1MZg=a1ih4fyyE z-M%=h-y+MNty(UO`}uar`22gS>koq3{^SPanEv@&v9IHvhl@{`IG$_STyjElap@=P z?l<(Ee(unOk9P7dPC7T-@?z@r?)|oK|M+=x-@xx|ei&Fccza`M_lUV)ev?}rJwg1- zhDTNX13kAc44i!A;ONiFa(jr0+#b#^Gnc`jdFA%7hJD>%mJrnc(t}Rq+WbMMG2ZNd z$)!cHO%0`x3tcd|v_Qz)QD9O>KyB)XdTEpigZ7pVnAsQFgN}VTFtf~dlf&t--8MAO zXAiQ~X|?nbp-Xv-_8HOH(@XZry0_a}LFYw3XjyuD4e0Cb-QS~czkbmKQ$eTwtXh1r zV)1!m@!9`~TkKBsMsBgGl~bo%EX_3OUr<1uk;hK zsdBr|gLZ){vTyptV0e`G#EgW|6n8U;$W@tob`#$6aL8ew5>{-^y%(ccDM z9dcR}G}OuMzQ_Dp@z)W4K|R(yym+vt=M|Uh(#FEr4cl)vRXv|Stbn^DeboI$UKS~j zb~N}|RVxA+jSF{umwvaQu4sev^2`V8uA5w+dhhneDQo*oj`_T*GW9{jljJ*#&ffhl zp+`yKmegT=>6&pr6pX&~Y@X?|)ttMH8`roW=^J1Fl-B;#?tWeF!KvSFN%bndBrmyk zuwL#F_S3JbZdTSyhLk@J$=h6Mx=DKEiHFlTvkA*8XV3onNRLWd_PuNSrDj>*-QUrl zIqi$gqV0E<{l?4U$i8$ERIYve>tJqf(aoV>{=~m3tT}iS>(gs|GJdNx@YETvgI6OM zjnlLK?Eg!}iH4lO@&1IY)_4EUpXc`&(dID2>*$|kdq8 z-?!~WV{QD?PjgJyxu!3*M*6@zZX9onAk5-_v=lNqBilf9K3$C;1k?-TZOddJns+!%oFt zb@b>V$Qb?Y{%)UV?L$1eH#=Gnw5`-uMh zwoGCwacPssJgI(rUK#SCDu0)QVlLP zG!bOP6zF5`=m>-+LfGG4%g{>rXIFc@Ys?1SoHObuGb2n4*JT*%yUQKBq5aUPGE%&z z^z=@A{gMga+~2oagayz0=Kh+#4TYB%p0oY3Y-{qbJ-lk`%OCq?xpCnPle} zIpkD`R|kf&|G1m^^_>mRzB)R)$-oOk-|zVd45E>dd^dR4I8%vbJd)QbTF`0o`9qoJCPL>hG)GGA`Rd3U6X9|6 z6*iA}lrol|-H)HRj&apmDirlzNNe*FzU*l!A&0f>QbVQ@Hc-YmM1D6k^=NsiR1rDi z5d^m^6Q$g$WBcK_;+^4OqnSQ)C(4UOqHslI3RU3jyhNkQ@=W#6N24H8c>PIM8^(m} z2H)j@PuE}bwI;Wb1mmXt(Pm=yeQfWA;AwUT;65Ymj9CW9NO7vKQXu)e>%AfND;D=% zq^$K_$icPo%_oP2tp8{j71WE)OC_ubEaC8N1l{7U(*ZbBz*Hkq0ERgSx{e`Ax96%`chZZJAlUaT%~ zL1Dwrq``GL!Q97Y3~ykw{sMuD&^^FW5X0Mc`A8+V;M9d6%;_-gqU=~*H)g(1b(fYR zO$V#A8-47|;L2n+70;NNUar1F=D4E{;H$L{XujwtLJxDPhzg}c&aZE)mh1^scobrb zkLKQ&a(caR%JJYJhO6z|;(DxkThDZarP16_Sxn|QiI{g()-3tGH=45vA?Fj zAAf#Lyk_(z5nphIIJm}Waogjf>K)zknasymrId4+!&RcEBo zSAHPqd`QW_13Y3HVMvwnybm#(qA=3m?eNPsQpU#bQzUTBb1xL>3C`S`O2i=z2cFnh zf9me@Q#)Z?rEt_R7F?!v@i6y?a5LSpj&h8kjRVN936){rD|rb#rXc9sdrx$}dJV$X z>*U4p65)KqmYf~7|J->PmJ}b~S8Te<9_8=}!K5S@t~W5aw}Y0~wXe>NYIoY~9Aw~N 
z$rDq?tL}9?LcVh`tEX!t1mY@>^AGG%ZeZaTL;gjwH{roZU;Hp48K91I}`)c)NJvuw7Z3BY?Z4ma4| z2jH##pWyuizxT&$WB=5BZb3K2BIt|sqc^f5C=guS_D!nqsV_NRAME%F)s6L>1D-L( z{Ywq5P;j)j+6RGr?d*_h7NgZc%BJqR329tX}$1ep7aTNMwbeTr49|8#N0 zg_>#kU^$^s(n6wLyj+7!ffhm_Jm=ITMm27!lWyVlw}Rzdr!*Cu^l;i7t2vLO$vFX_Rc`vRn??4a_z{T z{#%PgDhWhwt;f=+hQ40h5B*_vichchnXa>04{024IOCDxgw>aQW~?Vy#mC@7ab6X# z@bhSjWi8JKS)DxN-0#;LENrU{inW1~+&I%RB?nPZN%0buQ+GT+??2;)(c#g{`B?P0 zaVpB*B;{4R;iUqrGpx_hAZ8ggNHCJh##N*>CnpadOiuftdTh+7R zX5nQNa1|jVP(X-3D#m+#vY=gZPZT>5@w&utHEDzGz2b_v5=v&2^ToU9#GW@|ALb>r z)DFhK!(weQ9jWRwcrsiesI2*YAVtMnyW{4^#)rqQA7gP0?)P>wH;S1hcw{khSNSku zxStDIC|KvGd%G^A6Tn@-_(yHioDqPf(f_}r@r(m(z-MG;VwBlM58gx1Pp6e3vOmg6 zNVg?D23f6Qr5il0&q)RpcW6i^{%_{>ju|P8j6mT~x)08Qj4&}lu5FD1bQ%gu$~NB* zD118r!@`dJ?ra!9a9Nl%jP2V6HZu`s-~|lk5B#>l)A_a4<(G)fR>Y2T!JAW7zKh3A z0|X|s!B{rWr)LJ%?`&^_BFQ^D7U2EThlT>{mls3mf8JfIh#mA^!zY$aJ5Z#%j|QG% z(hv5D&Ju6HXI{{+oC@kGR(u(q+lE_8d)nweoS?Vs2~&tz$KD2)k5p*EzTTN`jker=1ei>1ySd2FjVdN}7FyxhMz->iV*TA;{7jcrv> zalW&aA9xiz;_%?}-Z#NvI0i?xnHLlxrH$6t48aWGP12OTk!1ijIYF*@q80phgDIzKeJ>nh75dhpVo! z)Tvzd*MY8;h-${3E&0iG+2}-qfqYiqiThDXL(*i2Dp}52tu-fT;hU_uQIdtiEv^1z zSXAUIE1R3;C+pr#&^hxT@S(cgMxnEQW?U|F5vfbclT0|Fb5(z&co<#X|K5+-I~5_7 ztQhGt%JgtaS|cUg4BSChx~rgsvU{2HkfCK`oC0f9Jea3B?w<4j`i398hzmPg+40CZ z=Mknnc7|`HjJI1hwoZ!}#A`6TM4_H*i_G?)8h>r1Jfac)9kq6RjQ5hazQ6n0ndeIr&X^y)dKUBFASi YnHq2RHR*}FCNd*8WY+gj0VBxY0Q=3ipa1{> literal 0 HcmV?d00001 From a356d593186d5a6f13dc7c80001ac1843493bdee Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 31 Aug 2024 15:45:36 +1000 Subject: [PATCH 118/741] Add source link as per GPL --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 95143e891..48efded7d 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ A simple tool to take the work out of uploading. - ALL WITH MINIMAL INPUT! 
- Currently works with .mkv/.mp4/Blu-ray/DVD/HD-DVDs - +Built with updated BDInfoCLI from https://github.com/rokibhasansagar/BDInfoCLI-ng ## Coming Soon: - Features From 04dd171018969c61112bdf31d6131db1fa13d9f9 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 31 Aug 2024 17:05:08 +1000 Subject: [PATCH 119/741] Increase max lines in linter --- .flake8 | 2 +- .github/workflows/.flake8 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.flake8 b/.flake8 index 0cb611f43..ac7343518 100644 --- a/.flake8 +++ b/.flake8 @@ -1,2 +1,2 @@ [flake8] -max-line-length = 220 \ No newline at end of file +max-line-length = 1000 \ No newline at end of file diff --git a/.github/workflows/.flake8 b/.github/workflows/.flake8 index 0cb611f43..ac7343518 100644 --- a/.github/workflows/.flake8 +++ b/.github/workflows/.flake8 @@ -1,2 +1,2 @@ [flake8] -max-line-length = 220 \ No newline at end of file +max-line-length = 1000 \ No newline at end of file From 901ed79098c43a4f85acba3da0ca645f0a882449 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 31 Aug 2024 20:26:27 +1000 Subject: [PATCH 120/741] Add note about click --- requirements.txt | 3 ++- upload.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index b20352a43..c63a8c332 100644 --- a/requirements.txt +++ b/requirements.txt @@ -19,4 +19,5 @@ pyoxipng rich Jinja2 pyotp -str2bool \ No newline at end of file +str2bool +click \ No newline at end of file diff --git a/upload.py b/upload.py index fc33d083b..9a2f8f254 100644 --- a/upload.py +++ b/upload.py @@ -179,6 +179,7 @@ async def do_the_thing(base_dir): f.close() except FileNotFoundError: pass + console.print("[red]Click package will be required in a future update, install with requirements.txt now to be prepared") console.print(f"[green]Gathering info for {os.path.basename(path)}") if meta['imghost'] == None: meta['imghost'] = config['DEFAULT']['img_host_1'] @@ -479,7 +480,7 @@ def get_confirmation(meta): if not kf_confirm: cli_ui.info('Aborting...') exit() - + console.print("[red]Click package will be required in a future update, install with requirements.txt now to be prepared") cli_ui.info_section(cli_ui.yellow, f"Is this correct?") cli_ui.info(f"Name: {meta['name']}") confirm = cli_ui.ask_yes_no("Correct?", default=False) From 7d7d948985dd14aecffae7086f8724bd43a0ec0a Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 1 Sep 2024 02:19:05 +1000 Subject: [PATCH 121/741] Squashed commit of the following: commit c7da5d0963947b1aa0c0960a13e8b9ba397d3a42 Author: Audionut Date: Sun Sep 1 02:14:52 2024 +1000 Fix PTP image return and skip more screenshots if at least 3 images already exist commit 08e369a03f25c88c25579f5c4b1f6747ce3c6bd1 Author: Audionut Date: Sun Sep 1 01:33:16 2024 +1000 Fix remove bot code from blu description Also show the cleaned description, rather than raw commit 1dd50d048e3b970f400027bd38f73c59a19bb982 Author: Audionut Date: Sun Sep 1 01:09:39 2024 +1000 Fix to skip HDB if no data is found commit a7311130c6ef0f7dcbebb80a06afc1e925737bc0 Author: Audionut Date: Sun Sep 1 00:54:45 2024 +1000 Skipping any part of auto ptp skips the original PTP description handling commit 68c20f68e693910352c017e6ebd3212b1ebd8225 Author: Audionut Date: Sun Sep 1 00:30:49 2024 +1000 Fix error created with existing get_desc and remove dupe console todo: stop existing functions being called again when more screens are taken: Auto limit screens to those returned via site description. 
Stop many functions being called again when answering no to "Is this correct?" commit 6ac1fd5dfb5c32e466a93728d88781301a422736 Author: Audionut Date: Sat Aug 31 23:28:22 2024 +1000 Working auto PTP description with image links Need to fix the duplicate calls commit 7da102b7c9d51fd37be533b7ab28b079f173468f Author: Audionut Date: Sat Aug 31 21:02:18 2024 +1000 Keep or discard found image links commit 413121c51429310b490d49df8d968f51a376e168 Author: Audionut Date: Sat Aug 31 20:35:05 2024 +1000 Silence consoles commit e9968209acbb03b8391d8f9343c009fae91b34c9 Author: Audionut Date: Sat Aug 31 20:21:54 2024 +1000 Allow use, edit or discard uni3d description commit 722dbceeb8fc78fa47741732e377ffebffa92bf0 Author: Audionut Date: Sat Aug 31 20:08:02 2024 +1000 Pull updated bdinfo binary commit 74501c2a25fd62121187b455f0da807b43583f11 Author: Audionut Date: Sat Aug 31 20:02:58 2024 +1000 More linting commit b7ed255f0815c154e684aafc978b7e160fe9756d Author: Audionut Date: Sat Aug 31 19:24:48 2024 +1000 More linting commit 7c462350e1f53767a134b253e2be7a6c55b34f59 Author: Audionut Date: Sat Aug 31 17:04:19 2024 +1000 More linter commit 6457df7ab60749e8f339baf25002e6bb5cdbc917 Author: Audionut Date: Sat Aug 31 14:07:31 2024 +1000 More linting commit 9016a4df5af914ad4bf61e27a5fa032b6dcc2651 Author: Audionut Date: Sat Aug 31 13:01:28 2024 +1000 More linting commit 05145b6b8f2664de6d0dd9ee3f3f012dd3996b7b Author: Audionut Date: Sat Aug 31 12:08:18 2024 +1000 Don't fail when sub title is empty commit 1bf6a76688c69ca592d5cb88a7c3ac843e4c8477 Author: Audionut Date: Fri Aug 30 21:26:06 2024 +1000 Only skip site, not function commit fc28d7ca65d6a346c7f71c2998a47bb546703089 Author: Audionut Date: Fri Aug 30 20:22:07 2024 +1000 Rename lint workflow commit cbc7433333e72526ecc59a91eabaa31b27fdc03d Author: Audionut Date: Fri Aug 30 20:18:55 2024 +1000 Add linter dispatch commit 96e7b479f9465b3120d69917311fe091e0609bf5 Author: Audionut Date: Fri Aug 30 20:10:32 2024 +1000 Move flake8 back commit 3b57da3f5f5cb6893d0e44789a2f66c1ec090566 Author: Audionut Date: Fri Aug 30 20:09:17 2024 +1000 Check lint action commit e330ae07ef4f9f036de270e6b7418cd8231ff07e Author: Audionut Date: Fri Aug 30 19:58:49 2024 +1000 More linting commit 5caa9f5b4c90de0b642d7659d7d3bdd4e5c1badf Author: Audionut Date: Fri Aug 30 18:11:07 2024 +1000 Print links and allow skip ID commit 26956c900f57de46bdebb8b2453ea8fe28f3ab9d Author: Audionut Date: Fri Aug 30 17:07:32 2024 +1000 More linter commit ded208f6dc75975121255924e8cbac83acec4091 Author: Audionut Date: Fri Aug 30 12:41:29 2024 +1000 Auto search ID and description from BLU Also auto pulls image links todo: Probably kill the derived description text as I don't believe it applies anywhere else. 
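[Editor's note: the recurring theme in the commits above — keep image links scraped from a tracker description and only take new screenshots for the shortfall — reduces to a guard before screenshot generation. A minimal sketch of the idea follows; `meta`, `image_list` and `take_screenshots` are illustrative stand-ins, not the actual Upload-Assistant API.]

    # Sketch only: prefer image links already recovered from PTP/BLU/HDB and
    # skip local screenshot generation when at least 3 usable images exist
    # (the threshold noted in commit c7da5d0 above).
    def gather_images(meta, take_screenshots, min_existing=3):
        existing = meta.get('image_list', [])  # links kept from the site description
        if len(existing) >= min_existing:
            return existing                    # enough reusable images, take none
        needed = max(meta.get('screens', 6) - len(existing), 0)
        return existing + take_screenshots(meta['path'], needed)
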
commit 1bab9ebafcf6c32d28a8191aff7e9ef10f433297 Author: Audionut Date: Fri Aug 30 10:51:13 2024 +1000 More code cleanup commit 73946c4b39d1774cd27dff2137322d701fb6bf49 Author: Audionut Date: Fri Aug 30 01:32:38 2024 +1000 Stop linter complaining 3 commit 54919c6c236582993697b2ffd6afa9bf68841f66 Author: Audionut Date: Fri Aug 30 01:11:05 2024 +1000 Move the flake8 config file commit d46bbdbbf7db4c5a1381230bacc8a211c0d9a4a3 Author: Audionut Date: Fri Aug 30 01:09:11 2024 +1000 Stop linter complaining 2 commit 7ab744a0d108e824555039651eed690c085de816 Author: Audionut Date: Fri Aug 30 01:03:23 2024 +1000 Stop linter complaining 1 first check commit ff1d09d574109b8fee321f2d1093ccfb5ba6cf6a Author: Audionut Date: Fri Aug 30 00:59:09 2024 +1000 Add linter commit 6e21868bad72e1b48da8d996d0ae1896d0a43b7e Author: Audionut Date: Fri Aug 30 00:51:34 2024 +1000 Update docker build for this branch commit 19e47425dca56290ac64ba157d234ea4847f4479 Author: Audionut Date: Fri Aug 30 00:49:28 2024 +1000 Auto search trackers for ID and description Don't rely only on PTP or require torrent ID, auto search instead. No idea if I broke the manual arguments, will check that later. HDB with is_disc working for ID. BLU searching is working, but needs to be updated to search file list in the API instead. Also waiting for a PR to be approved so that we can search folder names. --- .github/workflows/.flake8 | 2 +- .github/workflows/docker-image.yml | 2 +- README.md | 1 - cogs/commands.py | 232 ++--- data/example-config.py | 346 +++---- discordbot.py | 10 +- src/args.py | 37 +- src/bbcode.py | 174 ++-- src/clients.py | 119 +-- src/console.py | 4 +- src/discparse.py | 92 +- src/exceptions.py | 19 +- src/prep.py | 1487 +++++++++++++++------------- src/search.py | 16 +- src/trackers/ACM.py | 233 ++--- src/trackers/AITHER.py | 119 ++- src/trackers/ANT.py | 34 +- src/trackers/BHD.py | 114 +-- src/trackers/BHDTV.py | 56 +- src/trackers/BLU.py | 126 ++- src/trackers/CBR.py | 118 +-- src/trackers/COMMON.py | 293 +++--- src/trackers/FL.py | 50 +- src/trackers/HDB.py | 254 ++--- src/trackers/LST.py | 132 ++- src/trackers/MTV.py | 91 +- src/trackers/NBL.py | 61 +- src/trackers/PTP.py | 322 +++--- src/trackers/RTF.py | 33 +- src/trackers/SN.py | 24 +- src/trackers/TL.py | 24 +- src/trackers/UNIT3D_TEMPLATE.py | 113 +-- src/vs.py | 19 +- upload.py | 171 ++-- 34 files changed, 2417 insertions(+), 2511 deletions(-) diff --git a/.github/workflows/.flake8 b/.github/workflows/.flake8 index ac7343518..8a386a47b 100644 --- a/.github/workflows/.flake8 +++ b/.github/workflows/.flake8 @@ -1,2 +1,2 @@ [flake8] -max-line-length = 1000 \ No newline at end of file +max-line-length = 1000 diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index d96e23a6c..557a516bb 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -5,7 +5,7 @@ on: branches: - master - develop - - anime-integers + - ID-and-Description env: REGISTRY: ghcr.io diff --git a/README.md b/README.md index 48efded7d..0346df7be 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,6 @@ Built with updated BDInfoCLI from https://github.com/rokibhasansagar/BDInfoCLI-n ## Coming Soon: - Features - ## **Setup:** diff --git a/cogs/commands.py b/cogs/commands.py index 7a92faa64..a02b7362c 100644 --- a/cogs/commands.py +++ b/cogs/commands.py @@ -1,4 +1,3 @@ -from discord.ext.commands.errors import CommandInvokeError from src.prep import Prep from src.args import Args from src.clients import Clients @@ -17,19 +16,16 @@ from 
datetime import datetime import asyncio import json -import shutil import multiprocessing from pathlib import Path from glob import glob import argparse - class Commands(commands.Cog): def __init__(self, bot): self.bot = bot - @commands.Cog.listener() async def on_guild_join(self, guild): """ @@ -46,7 +42,7 @@ async def upload(self, ctx, path, *args, message_id=0, search_args=tuple()): return parser = Args(config) - if path == None: + if path is None: await ctx.send("Missing Path") return elif path.lower() == "-h": @@ -61,17 +57,17 @@ async def upload(self, ctx, path, *args, message_id=0, search_args=tuple()): try: args = (meta['path'],) + args + search_args meta, help, before_args = parser.parse(args, meta) - except SystemExit as error: + except SystemExit: await ctx.send(f"Invalid argument detected, use `{config['DISCORD']['command_prefix']}args` for list of valid args") return - if meta['imghost'] == None: + if meta['imghost'] is None: meta['imghost'] = config['DEFAULT']['img_host_1'] # if not meta['unattended']: # ua = config['DEFAULT'].get('auto_mode', False) # if str(ua).lower() == "true": # meta['unattended'] = True prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config) - preparing_embed = discord.Embed(title=f"Preparing to upload:", description=f"```{path}```", color=0xffff00) + preparing_embed = discord.Embed(title="Preparing to upload:", description=f"```{path}```", color=0xffff00) if message_id == 0: message = await ctx.send(embed=preparing_embed) meta['embed_msg_id'] = message.id @@ -87,7 +83,6 @@ async def upload(self, ctx, path, *args, message_id=0, search_args=tuple()): else: await ctx.send("Invalid Path") - @commands.command() async def args(self, ctx): f""" @@ -103,57 +98,7 @@ async def args(self, ctx): await ctx.send(f"```{help[1991:]}```") else: await ctx.send(help.format_help()) - # await ctx.send(""" - # ```Optional arguments: - - # -s, --screens [SCREENS] - # Number of screenshots - # -c, --category [{movie,tv,fanres}] - # Category - # -t, --type [{disc,remux,encode,webdl,web-dl,webrip,hdtv}] - # Type - # -res, --resolution - # [{2160p,1080p,1080i,720p,576p,576i,480p,480i,8640p,4320p,other}] - # Resolution - # -tmdb, --tmdb [TMDB] - # TMDb ID - # -g, --tag [TAG] - # Group Tag - # -serv, --service [SERVICE] - # Streaming Service - # -edition, --edition [EDITION] - # Edition - # -d, --desc [DESC] - # Custom Description (string) - # -nfo, --nfo - # Use .nfo in directory for description - # -k, --keywords [KEYWORDS] - # Add comma seperated keywords e.g. 'keyword, keyword2, etc' - # -reg, --region [REGION] - # Region for discs - # -a, --anon Upload anonymously - # -st, --stream Stream Optimized Upload - # -debug, --debug Debug Mode```""") - - - # @commands.group(invoke_without_command=True) - # async def foo(self, ctx): - # """ - # check out my subcommands! 
- # """ - # await ctx.send('check out my subcommands!') - - # @foo.command(aliases=['an_alias']) - # async def bar(self, ctx): - # """ - # I have an alias!, I also belong to command 'foo' - # """ - # await ctx.send('foo bar!') - - - - - + @commands.command() async def edit(self, ctx, uuid=None, *args): """ @@ -161,7 +106,7 @@ async def edit(self, ctx, uuid=None, *args): """ if ctx.channel.id != int(config['DISCORD']['discord_channel_id']): return - if uuid == None: + if uuid is None: await ctx.send("Missing ID, please try again using the ID in the footer") parser = Args(config) base_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) @@ -172,7 +117,7 @@ async def edit(self, ctx, uuid=None, *args): except FileNotFoundError: await ctx.send("ID not found, please try again using the ID in the footer") return - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config) + prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config) try: args = (meta['path'],) + args meta, help, before_args = parser.parse(args, meta) @@ -183,15 +128,10 @@ async def edit(self, ctx, uuid=None, *args): new_msg = await msg.channel.send(f"Editing {meta['uuid']}") meta['embed_msg_id'] = new_msg.id meta['edit'] = True - meta = await prep.gather_prep(meta=meta, mode="discord") + meta = await prep.gather_prep(meta=meta, mode="discord") meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await prep.get_name(meta) await self.send_embed_and_upload(ctx, meta) - - - - - @commands.group(invoke_without_command=True) async def search(self, ctx, *, args=None): """ @@ -206,14 +146,14 @@ async def search(self, ctx, *, args=None): args = args.replace(search_terms, '') while args.startswith(" "): args = args[1:] - except SystemExit as error: + except SystemExit: await ctx.send(f"Invalid argument detected, use `{config['DISCORD']['command_prefix']}args` for list of valid args") return if ctx.channel.id != int(config['DISCORD']['discord_channel_id']): return search = Search(config=config) - if search_terms == None: + if search_terms is None: await ctx.send("Missing search term(s)") return files_total = await search.searchFile(search_terms) @@ -234,14 +174,12 @@ async def search(self, ctx, *, args=None): message = await ctx.send(embed=embed) await message.add_reaction(config['DISCORD']['discord_emojis']['UPLOAD']) channel = message.channel - def check(reaction, user): if reaction.message.id == message.id: - if str(user.id) == config['DISCORD']['admin_id']: + if str(user.id) == config['DISCORD']['admin_id']: if str(reaction.emoji) == config['DISCORD']['discord_emojis']['UPLOAD']: return reaction - try: await self.bot.wait_for("reaction_add", timeout=120, check=check) @@ -250,8 +188,6 @@ def check(reaction, user): else: await self.upload(ctx, files_total[0], search_args=tuple(args.split(" ")), message_id=message.id) - - @search.command() async def dir(self, ctx, *, args=None): """ @@ -266,14 +202,14 @@ async def dir(self, ctx, *, args=None): args = args.replace(search_terms, '') while args.startswith(" "): args = args[1:] - except SystemExit as error: + except SystemExit: await ctx.send(f"Invalid argument detected, use `{config['DISCORD']['command_prefix']}args` for list of valid args") return if ctx.channel.id != int(config['DISCORD']['discord_channel_id']): return search = Search(config=config) - if search_terms == None: + if search_terms is None: await ctx.send("Missing search term(s)") return folders_total = await search.searchFolder(search_terms) @@ 
-295,13 +231,11 @@ async def dir(self, ctx, *, args=None): await message.add_reaction(config['DISCORD']['discord_emojis']['UPLOAD']) channel = message.channel - def check(reaction, user): if reaction.message.id == message.id: - if str(user.id) == config['DISCORD']['admin_id']: + if str(user.id) == config['DISCORD']['admin_id']: if str(reaction.emoji) == config['DISCORD']['discord_emojis']['UPLOAD']: return reaction - try: await self.bot.wait_for("reaction_add", timeout=120, check=check) @@ -311,39 +245,31 @@ def check(reaction, user): await self.upload(ctx, path=folders_total[0], search_args=tuple(args.split(" ")), message_id=message.id) # await ctx.send(folders_total) return - - - - - - - - - async def send_embed_and_upload(self,ctx,meta): + + async def send_embed_and_upload(self, ctx, meta): prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config) meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await prep.get_name(meta) - - if meta.get('uploaded_screens', False) == False: + + if meta.get('uploaded_screens', False) is False: if meta.get('embed_msg_id', '0') != '0': message = await ctx.fetch_message(meta['embed_msg_id']) await message.edit(embed=discord.Embed(title="Uploading Screenshots", color=0xffff00)) else: message = await ctx.send(embed=discord.Embed(title="Uploading Screenshots", color=0xffff00)) meta['embed_msg_id'] = message.id - + channel = message.channel.id return_dict = multiprocessing.Manager().dict() - u = multiprocessing.Process(target = prep.upload_screens, args=(meta, meta['screens'], 1, 0, meta['screens'], [], return_dict)) + u = multiprocessing.Process(target=prep.upload_screens, args=(meta, meta['screens'], 1, 0, meta['screens'], [], return_dict)) u.start() - while u.is_alive() == True: + while u.is_alive() is True: await asyncio.sleep(3) meta['image_list'] = return_dict['image_list'] if meta['debug']: print(meta['image_list']) meta['uploaded_screens'] = True - #Create base .torrent - + # Create base .torrent if len(glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent")) == 0: if meta.get('embed_msg_id', '0') != '0': message = await ctx.fetch_message(int(meta['embed_msg_id'])) @@ -352,15 +278,15 @@ async def send_embed_and_upload(self,ctx,meta): message = await ctx.send(embed=discord.Embed(title="Creating .torrent", color=0xffff00)) meta['embed_msg_id'] = message.id channel = message.channel - if meta['nohash'] == False: - if meta.get('torrenthash', None) != None: + if meta['nohash'] is False: + if meta.get('torrenthash', None) is not None: reuse_torrent = await client.find_existing_torrent(meta) - if reuse_torrent != None: + if reuse_torrent is not None: prep.create_base_from_existing_torrent(reuse_torrent, meta['base_dir'], meta['uuid']) - p = multiprocessing.Process(target = prep.create_torrent, args=(meta, Path(meta['path']))) + p = multiprocessing.Process(target=prep.create_torrent, args=(meta, Path(meta['path']))) p.start() - while p.is_alive() == True: + while p.is_alive() is True: await asyncio.sleep(5) if int(meta.get('randomized', 0)) >= 1: @@ -368,8 +294,7 @@ async def send_embed_and_upload(self,ctx,meta): else: meta['client'] = 'none' - - #Format for embed + # Format for embed if meta['tag'] == "": tag = "" else: @@ -388,19 +313,25 @@ async def send_embed_and_upload(self,ctx,meta): res = meta['resolution'] missing = await self.get_missing(meta) - embed=discord.Embed(title=f"Upload: {meta['title']}", url=f"https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}", 
description=meta['overview'], color=0x0080ff, timestamp=datetime.utcnow()) + embed = discord.Embed( + title=f"Upload: {meta['title']}", + url=f"https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}", + description=meta['overview'], + color=0x0080ff, + timestamp=datetime.utcnow() + ) embed.add_field(name="Links", value=f"[TMDB](https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}){imdb}{tvdb}") embed.add_field(name=f"{res} / {meta['type']}{tag}", value=f"```{meta['name']}```", inline=False) if missing != []: - embed.add_field(name=f"POTENTIALLY MISSING INFORMATION:", value="\n".join(missing), inline=False) + embed.add_field(name="POTENTIALLY MISSING INFORMATION:", value="\n".join(missing), inline=False) embed.set_thumbnail(url=f"https://image.tmdb.org/t/p/original{meta['poster']}") embed.set_footer(text=meta['uuid']) - embed.set_author(name="L4G's Upload Assistant", url="https://github.com/L4GSP1KE/Upload-Assistant", icon_url="https://images2.imgbox.com/6e/da/dXfdgNYs_o.png") - + embed.set_author(name="L4G's Upload Assistant", url="https://github.com/Audionut/Upload-Assistant", icon_url="https://images2.imgbox.com/6e/da/dXfdgNYs_o.png") + message = await ctx.fetch_message(meta['embed_msg_id']) await message.edit(embed=embed) - if meta.get('trackers', None) != None: + if meta.get('trackers', None) is not None: trackers = meta['trackers'] else: trackers = config['TRACKERS']['default_trackers'] @@ -423,21 +354,21 @@ async def send_embed_and_upload(self,ctx,meta): await asyncio.sleep(0.3) if "CBR" in each.replace(' ', ''): await message.add_reaction(config['DISCORD']['discord_emojis']['CBR']) - await asyncio.sleep(0.3) + await asyncio.sleep(0.3) await message.add_reaction(config['DISCORD']['discord_emojis']['MANUAL']) await asyncio.sleep(0.3) await message.add_reaction(config['DISCORD']['discord_emojis']['CANCEL']) await asyncio.sleep(0.3) await message.add_reaction(config['DISCORD']['discord_emojis']['UPLOAD']) - #Save meta to json - with open (f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: + # Save meta to json + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: json.dump(meta, f, indent=4) f.close() - + def check(reaction, user): if reaction.message.id == meta['embed_msg_id']: - if str(user.id) == config['DISCORD']['admin_id']: + if str(user.id) == config['DISCORD']['admin_id']: if str(reaction.emoji) == config['DISCORD']['discord_emojis']['UPLOAD']: return reaction if str(reaction.emoji) == config['DISCORD']['discord_emojis']['CANCEL']: @@ -455,7 +386,7 @@ def check(reaction, user): await msg.clear_reactions() await msg.edit(embed=timeout_embed) return - except: + except Exception: print("timeout after edit") pass except CancelException: @@ -464,20 +395,9 @@ def check(reaction, user): await msg.clear_reactions() await msg.edit(embed=cancel_embed) return - # except ManualException: - # msg = await ctx.fetch_message(meta['embed_msg_id']) - # await msg.clear_reactions() - # archive_url = await prep.package(meta) - # if archive_url == False: - # archive_fail_embed = discord.Embed(title="Unable to upload prep files", description=f"The files can be found at `tmp/{meta['title']}.tar`", color=0xff0000) - # await msg.edit(embed=archive_fail_embed) - # else: - # archive_embed = discord.Embed(title="Files can be found at:",description=f"{archive_url} or `tmp/{meta['title']}.tar`", color=0x00ff40) - # await msg.edit(embed=archive_embed) - # return else: - - #Check which are selected and upload to them + + # Check which are selected 
and upload to them msg = await ctx.fetch_message(message.id) tracker_list = list() tracker_emojis = config['DISCORD']['discord_emojis'] @@ -488,62 +408,59 @@ def check(reaction, user): tracker = list(config['DISCORD']['discord_emojis'].keys())[list(config['DISCORD']['discord_emojis'].values()).index(str(each))] if tracker not in ("UPLOAD"): tracker_list.append(tracker) - + upload_embed_description = ' / '.join(tracker_list) upload_embed = discord.Embed(title=f"Uploading `{meta['name']}` to:", description=upload_embed_description, color=0x00ff40) await msg.edit(embed=upload_embed) await msg.clear_reactions() - - - client = Clients(config=config) if "MANUAL" in tracker_list: for manual_tracker in tracker_list: manual_tracker = manual_tracker.replace(" ", "") if manual_tracker.upper() == "BLU": - blu = BLU(config=config) + blu = BLU(config=config) await blu.edit_desc(meta) if manual_tracker.upper() == "BHD": bhd = BHD(config=config) - await bhd.edit_desc(meta) + await bhd.edit_desc(meta) if manual_tracker.upper() == "AITHER": aither = AITHER(config=config) - await aither.edit_desc(meta) + await aither.edit_desc(meta) if manual_tracker.upper() == "STC": stc = STC(config=config) - await stc.edit_desc(meta) + await stc.edit_desc(meta) if manual_tracker.upper() == "LCD": lcd = LCD(config=config) await lcd.edit_desc(meta) if manual_tracker.upper() == "CBR": cbr = CBR(config=config) - await cbr.edit_desc(meta) + await cbr.edit_desc(meta) archive_url = await prep.package(meta) upload_embed_description = upload_embed_description.replace('MANUAL', '~~MANUAL~~') - if archive_url == False: + if archive_url is False: upload_embed = discord.Embed(title=f"Uploaded `{meta['name']}` to:", description=upload_embed_description, color=0xff0000) upload_embed.add_field(name="Unable to upload prep files", value=f"The files can be found at `tmp/{meta['title']}.tar`") await msg.edit(embed=upload_embed) else: upload_embed = discord.Embed(title=f"Uploaded `{meta['name']}` to:", description=upload_embed_description, color=0x00ff40) - upload_embed.add_field(name="Files can be found at:",value=f"{archive_url} or `tmp/{meta['uuid']}`") + upload_embed.add_field(name="Files can be found at:", value=f"{archive_url} or `tmp/{meta['uuid']}`") await msg.edit(embed=upload_embed) if "BLU" in tracker_list: blu = BLU(config=config) dupes = await blu.search_existing(meta) meta = await self.dupe_embed(dupes, meta, tracker_emojis, channel) - if meta['upload'] == True: + if meta['upload'] is True: await blu.upload(meta) await client.add_to_client(meta, "BLU") upload_embed_description = upload_embed_description.replace('BLU', '~~BLU~~') upload_embed = discord.Embed(title=f"Uploaded `{meta['name']}` to:", description=upload_embed_description, color=0x00ff40) - await msg.edit(embed=upload_embed) + await msg.edit(embed=upload_embed) if "BHD" in tracker_list: bhd = BHD(config=config) dupes = await bhd.search_existing(meta) meta = await self.dupe_embed(dupes, meta, tracker_emojis, channel) - if meta['upload'] == True: + if meta['upload'] is True: await bhd.upload(meta) await client.add_to_client(meta, "BHD") upload_embed_description = upload_embed_description.replace('BHD', '~~BHD~~') @@ -553,27 +470,27 @@ def check(reaction, user): aither = AITHER(config=config) dupes = await aither.search_existing(meta) meta = await self.dupe_embed(dupes, meta, tracker_emojis, channel) - if meta['upload'] == True: + if meta['upload'] is True: await aither.upload(meta) await client.add_to_client(meta, "AITHER") upload_embed_description = 
upload_embed_description.replace('AITHER', '~~AITHER~~') upload_embed = discord.Embed(title=f"Uploaded `{meta['name']}` to:", description=upload_embed_description, color=0x00ff40) - await msg.edit(embed=upload_embed) + await msg.edit(embed=upload_embed) if "STC" in tracker_list: stc = STC(config=config) dupes = await stc.search_existing(meta) meta = await self.dupe_embed(dupes, meta, tracker_emojis, channel) - if meta['upload'] == True: + if meta['upload'] is True: await stc.upload(meta) await client.add_to_client(meta, "STC") upload_embed_description = upload_embed_description.replace('STC', '~~STC~~') upload_embed = discord.Embed(title=f"Uploaded `{meta['name']}` to:", description=upload_embed_description, color=0x00ff40) - await msg.edit(embed=upload_embed) + await msg.edit(embed=upload_embed) if "LCD" in tracker_list: lcd = LCD(config=config) dupes = await lcd.search_existing(meta) meta = await self.dupe_embed(dupes, meta, tracker_emojis, channel) - if meta['upload'] == True: + if meta['upload'] is True: await lcd.upload(meta) await client.add_to_client(meta, "LCD") upload_embed_description = upload_embed_description.replace('LCD', '~~LCD~~') @@ -583,26 +500,24 @@ def check(reaction, user): cbr = CBR(config=config) dupes = await cbr.search_existing(meta) meta = await self.dupe_embed(dupes, meta, tracker_emojis, channel) - if meta['upload'] == True: + if meta['upload'] is True: await cbr.upload(meta) await client.add_to_client(meta, "CBR") upload_embed_description = upload_embed_description.replace('CBR', '~~CBR~~') upload_embed = discord.Embed(title=f"Uploaded `{meta['name']}` to:", description=upload_embed_description, color=0x00ff40) - await msg.edit(embed=upload_embed) + await msg.edit(embed=upload_embed) return None - - - + async def dupe_embed(self, dupes, meta, emojis, channel): if not dupes: print("No dupes found") - meta['upload'] = True + meta['upload'] = True return meta else: dupe_text = "\n\n•".join(dupes) dupe_text = f"```•{dupe_text}```" embed = discord.Embed(title="Check if these are actually dupes!", description=dupe_text, color=0xff0000) - embed.set_footer(text=f"{emojis['CANCEL']} to abort upload | {emojis['UPLOAD']} to upload anyways") + embed.set_footer(text=f"{emojis['CANCEL']} to abort upload | {emojis['UPLOAD']} to upload anyways") message = await channel.send(embed=embed) await message.add_reaction(emojis['CANCEL']) await asyncio.sleep(0.3) @@ -610,7 +525,7 @@ async def dupe_embed(self, dupes, meta, emojis, channel): def check(reaction, user): if reaction.message.id == message.id: - if str(user.id) == config['DISCORD']['admin_id']: + if str(user.id) == config['DISCORD']['admin_id']: if str(reaction.emoji) == emojis['UPLOAD']: return reaction if str(reaction.emoji) == emojis['CANCEL']: @@ -624,7 +539,7 @@ def check(reaction, user): try: await channel.send(f"{meta['uuid']} timed out") meta['upload'] = False - except: + except Exception: return except CancelException: await channel.send(f"{meta['title']} cancelled") @@ -644,19 +559,18 @@ async def get_missing(self, meta): missing.append('--imdb') if isinstance(meta['potential_missing'], list) and len(meta['potential_missing']) > 0: for each in meta['potential_missing']: - if meta.get(each, '').replace(' ', '') == "": + if meta.get(each, '').replace(' ', '') == "": missing.append(f"--{each}") return missing + def setup(bot): bot.add_cog(Commands(bot)) - - - class CancelException(Exception): pass + class ManualException(Exception): pass diff --git a/data/example-config.py b/data/example-config.py index 
5013ab63d..4bff88201 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -1,16 +1,16 @@ config = { - "DEFAULT" : { - + "DEFAULT": { + # ------ READ THIS ------ # Any lines starting with the # symbol are commented and will not be used. # If you change any of these options, remove the # # ----------------------- - "tmdb_api" : "tmdb_api key", - "imgbb_api" : "imgbb api key", - "ptpimg_api" : "ptpimg api key", - "lensdump_api" : "lensdump api key", - "ptscreens_api" : "ptscreens api key", + "tmdb_api": "tmdb_api key", + "imgbb_api": "imgbb api key", + "ptpimg_api": "ptpimg api key", + "lensdump_api": "lensdump api key", + "ptscreens_api": "ptscreens api key", # Order of image hosts, and backup image hosts "img_host_1": "imgbb", @@ -21,205 +21,212 @@ "img_host_6": "ptscreens", - "screens" : "6", + "screens": "6", # Enable lossless PNG Compression (True/False) - "optimize_images" : True, + "optimize_images": True, # The name of your default torrent client, set in the torrent client sections below - "default_torrent_client" : "Client1", + "default_torrent_client": "Client1", # Play the bell sound effect when asking for confirmation - "sfx_on_prompt" : True, + "sfx_on_prompt": True, }, - "TRACKERS" : { + "TRACKERS": { # Which trackers do you want to upload to? - # Available tracker: BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL + # Available tracker: BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL, HDB # Remove the ones not used to save being asked everytime - "default_trackers" : "BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL", + "default_trackers": "BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL, HDB", - "BLU" : { - "useAPI" : False, # Set to True if using BLU - "api_key" : "BLU api key", - "announce_url" : "https://blutopia.cc/announce/customannounceurl", + "BLU": { + "useAPI": False, # Set to True if using BLU + "api_key": "BLU api key", + "announce_url": "https://blutopia.cc/announce/customannounceurl", # "anon" : False }, - "BHD" : { - "api_key" : "BHD api key", - "announce_url" : "https://beyond-hd.me/announce/customannounceurl", - "draft_default" : "True", + "BHD": { + "api_key": "BHD api key", + "announce_url": "https://beyond-hd.me/announce/customannounceurl", + "draft_default": "True", # "anon" : False }, "BHDTV": { "api_key": "found under https://www.bit-hdtv.com/my.php", "announce_url": "https://trackerr.bit-hdtv.com/announce", - #passkey found under https://www.bit-hdtv.com/my.php + # passkey found under https://www.bit-hdtv.com/my.php "my_announce_url": "https://trackerr.bit-hdtv.com/passkey/announce", # "anon" : "False" }, - "PTP" : { - "useAPI" : False, # Set to True if using PTP - "add_web_source_to_desc" : True, - "ApiUser" : "ptp api user", - "ApiKey" : 'ptp api key', - "username" : "", - "password" : "", - "announce_url" : "" - }, - "AITHER" :{ - "api_key" : "AITHER api key", - "announce_url" : "https://aither.cc/announce/customannounceurl", + "PTP": { + "useAPI": False, # Set to True if using PTP + "add_web_source_to_desc": True, + "ApiUser": "ptp api user", + "ApiKey": 'ptp api key', + "username": "", + 
"password": "", + "announce_url": "" + }, + "AITHER": { + "api_key": "AITHER api key", + "announce_url": "https://aither.cc/announce/customannounceurl", # "anon" : False }, - "R4E" :{ - "api_key" : "R4E api key", - "announce_url" : "https://racing4everyone.eu/announce/customannounceurl", + "R4E": { + "api_key": "R4E api key", + "announce_url": "https://racing4everyone.eu/announce/customannounceurl", # "anon" : False }, - "HUNO" : { - "api_key" : "HUNO api key", - "announce_url" : "https://hawke.uno/announce/customannounceurl", + "HUNO": { + "api_key": "HUNO api key", + "announce_url": "https://hawke.uno/announce/customannounceurl", # "anon" : False }, "MTV": { - 'api_key' : 'get from security page', - 'username' : '', - 'password' : '', - 'announce_url' : "get from https://www.morethantv.me/upload.php", - 'anon' : False, + 'api_key': 'get from security page', + 'username': '', + 'password': '', + 'announce_url': "get from https://www.morethantv.me/upload.php", + 'anon': False, # 'otp_uri' : 'OTP URI, read the following for more information https://github.com/google/google-authenticator/wiki/Key-Uri-Format' }, - "STC" :{ - "api_key" : "STC", - "announce_url" : "https://skipthecommericals.xyz/announce/customannounceurl", + "STC": { + "api_key": "STC", + "announce_url": "https://skipthecommericals.xyz/announce/customannounceurl", # "anon" : False }, - "STT" :{ - "api_key" : "STC", - "announce_url" : "https://stt.xyz/announce/customannounceurl", + "STT": { + "api_key": "STC", + "announce_url": "https://stt.xyz/announce/customannounceurl", # "anon" : False }, "SN": { "api_key": "SN", "announce_url": "https://tracker.swarmazon.club:8443//announce", }, - "HP" :{ - "api_key" : "HP", - "announce_url" : "https://hidden-palace.net/announce/customannounceurl", + "HP": { + "api_key": "HP", + "announce_url": "https://hidden-palace.net/announce/customannounceurl", # "anon" : False }, - "ACM" :{ - "api_key" : "ACM api key", - "announce_url" : "https://asiancinema.me/announce/customannounceurl", + "ACM": { + "api_key": "ACM api key", + "announce_url": "https://asiancinema.me/announce/customannounceurl", # "anon" : False, # FOR INTERNAL USE ONLY: # "internal" : True, # "internal_groups" : ["What", "Internal", "Groups", "Are", "You", "In"], }, - "NBL" : { - "api_key" : "NBL api key", - "announce_url" : "https://nebulance.io/customannounceurl", + "NBL": { + "api_key": "NBL api key", + "announce_url": "https://nebulance.io/customannounceurl", }, - "ANT" :{ - "api_key" : "ANT api key", - "announce_url" : "https://anthelion.me/announce/customannounceurl", + "ANT": { + "api_key": "ANT api key", + "announce_url": "https://anthelion.me/announce/customannounceurl", # "anon" : False }, - "THR" : { - "username" : "username", - "password" : "password", - "img_api" : "get this from the forum post", - "announce_url" : "http://www.torrenthr.org/announce.php?passkey=yourpasskeyhere", - "pronfo_api_key" : "pronfo api key", - "pronfo_theme" : "pronfo theme code", - "pronfo_rapi_id" : "pronfo remote api id", + "THR": { + "username": "username", + "password": "password", + "img_api": "get this from the forum post", + "announce_url": "http://www.torrenthr.org/announce.php?passkey=yourpasskeyhere", + "pronfo_api_key": "pronfo api key", + "pronfo_theme": "pronfo theme code", + "pronfo_rapi_id": "pronfo remote api id", # "anon" : False }, - "LCD" : { - "api_key" : "LCD api key", - "announce_url" : "https://locadora.cc/announce/customannounceurl", + "LCD": { + "api_key": "LCD api key", + "announce_url": 
"https://locadora.cc/announce/customannounceurl", # "anon" : False }, - "CBR" : { - "api_key" : "CBR api key", - "announce_url" : "https://capybarabr.com/announce/customannounceurl", + "CBR": { + "api_key": "CBR api key", + "announce_url": "https://capybarabr.com/announce/customannounceurl", # "anon" : False - }, - "LST" : { - "api_key" : "LST api key", - "announce_url" : "https://lst.gg/announce/customannounceurl", + }, + "LST": { + "api_key": "LST api key", + "announce_url": "https://lst.gg/announce/customannounceurl", # "anon" : False }, - "LT" : { - "api_key" : "LT api key", - "announce_url" : "https://lat-team.com/announce/customannounceurl", + "LT": { + "api_key": "LT api key", + "announce_url": "https://lat-team.com/announce/customannounceurl", # "anon" : False }, - "PTER" : { - "passkey":'passkey', - "img_rehost" : False, - "username" : "", - "password" : "", + "PTER": { + "passkey": 'passkey', + "img_rehost": False, + "username": "", + "password": "", "ptgen_api": "", "anon": True, }, "TL": { "announce_key": "TL announce key", }, - "TDC" :{ - "api_key" : "TDC api key", - "announce_url" : "https://thedarkcommunity.cc/announce/customannounceurl", + "TDC": { + "api_key": "TDC api key", + "announce_url": "https://thedarkcommunity.cc/announce/customannounceurl", # "anon" : "False" }, - "HDT" : { - "username" : "username", - "password" : "password", + "HDT": { + "username": "username", + "password": "password", "my_announce_url": "https://hdts-announce.ru/announce.php?pid=", # "anon" : "False" - "announce_url" : "https://hdts-announce.ru/announce.php", #DO NOT EDIT THIS LINE + "announce_url": "https://hdts-announce.ru/announce.php", # DO NOT EDIT THIS LINE }, - "OE" : { - "api_key" : "OE api key", - "announce_url" : "https://onlyencodes.cc/announce/customannounceurl", + "OE": { + "api_key": "OE api key", + "announce_url": "https://onlyencodes.cc/announce/customannounceurl", # "anon" : False }, "RTF": { - "username" : "username", - "password" : "password", + "username": "username", + "password": "password", "api_key": 'get_it_by_running_/api/ login command from https://retroflix.club/api/doc', "announce_url": "get from upload page", # "tag": "RetroFlix, nd", "anon": True }, - "RF" : { - "api_key" : "RF api key", - "announce_url" : "https://reelflix.xyz/announce/customannounceurl", + "RF": { + "api_key": "RF api key", + "announce_url": "https://reelflix.xyz/announce/customannounceurl", # "anon" : False }, - "OTW" : { - "api_key" : "OTW api key", - "announce_url" : "https://oldtoons.world/announce/customannounceurl", + "OTW": { + "api_key": "OTW api key", + "announce_url": "https://oldtoons.world/announce/customannounceurl", # "anon" : False }, - "FNP" :{ - "api_key" : "FNP api key", - "announce_url" : "https://fearnopeer.com/announce/customannounceurl", + "FNP": { + "api_key": "FNP api key", + "announce_url": "https://fearnopeer.com/announce/customannounceurl", # "anon" : "False" }, - "UTP" : { - "api_key" : "UTP api key", - "announce_url" : "https://UTP/announce/customannounceurl", + "UTP": { + "api_key": "UTP api key", + "announce_url": "https://UTP/announce/customannounceurl", # "anon" : False }, - "AL" : { - "api_key" : "AL api key", - "announce_url" : "https://animelovers.club/announce/customannounceurl", + "AL": { + "api_key": "AL api key", + "announce_url": "https://animelovers.club/announce/customannounceurl", # "anon" : False }, - "MANUAL" : { + "HDB": { + "useAPI": True, + "username": "HDB username", + "passkey": "HDB passkey", + "announce_url": 
"https://hdbits.org/announce/Custom_Announce_URL", + "anon": False, + }, + "MANUAL": { # Uncomment and replace link with filebrowser (https://github.com/filebrowser/filebrowser) link to the Upload-Assistant directory, this will link to your filebrowser instead of uploading to uguu.se # "filebrowser" : "https://domain.tld/filebrowser/files/Upload-Assistant/" }, @@ -228,40 +235,40 @@ # enable_search to true will automatically try and find a suitable hash to save having to rehash when creating torrents # Should use the qbit API, but will also use the torrent_storage_dir to find suitable hashes # If you find issue, use the "--debug" command option to print out some related details - "TORRENT_CLIENTS" : { + "TORRENT_CLIENTS": { # Name your torrent clients here, for example, this example is named "Client1" and is set as default_torrent_client above # All options relate to the webui, make sure you have the webui secured if it has WAN access # See https://github.com/Audionut/Upload-Assistant/wiki - "Client1" : { - "torrent_client" : "qbit", - # "enable_search" : True, - "qbit_url" : "http://127.0.0.1", - "qbit_port" : "8080", - "qbit_user" : "username", - "qbit_pass" : "password", - # "torrent_storage_dir" : "path/to/BT_backup folder" + "Client1": { + "torrent_client": "qbit", + # "enable_search": True, + "qbit_url": "http://127.0.0.1", + "qbit_port": "8080", + "qbit_user": "username", + "qbit_pass": "password", + # "torrent_storage_dir": "path/to/BT_backup folder" # Remote path mapping (docker/etc.) CASE SENSITIVE - # "local_path" : "/LocalPath", - # "remote_path" : "/RemotePath" - }, - "qbit_sample" : { - "torrent_client" : "qbit", - "enable_search" : True, - "qbit_url" : "http://127.0.0.1", - "qbit_port" : "8080", - "qbit_user" : "username", - "qbit_pass" : "password", - # "torrent_storage_dir" : "path/to/BT_backup folder" - # "qbit_tag" : "tag", - # "qbit_cat" : "category" - + # "local_path": "/LocalPath", + # "remote_path": "/RemotePath" + }, + "qbit_sample": { + "torrent_client": "qbit", + "enable_search": True, + "qbit_url": "http://127.0.0.1", + "qbit_port": "8080", + "qbit_user": "username", + "qbit_pass": "password", + # "torrent_storage_dir": "path/to/BT_backup folder" + # "qbit_tag": "tag", + # "qbit_cat": "category" + # Content Layout for adding .torrents: "Original"(recommended)/"Subfolder"/"NoSubfolder" - "content_layout" : "Original" - + "content_layout": "Original" + # Enable automatic torrent management if listed path(s) are present in the path - # If using remote path mapping, use remote path - # For using multiple paths, use a list ["path1", "path2"] + # If using remote path mapping, use remote path + # For using multiple paths, use a list ["path1", "path2"] # "automatic_management_paths" : "" # Remote path mapping (docker/etc.) 
CASE SENSITIVE # "local_path" : "E:\\downloads\\tv", @@ -271,9 +278,9 @@ # "VERIFY_WEBUI_CERTIFICATE" : True }, - "rtorrent_sample" : { - "torrent_client" : "rtorrent", - "rtorrent_url" : "https://user:password@server.host.tld:443/username/rutorrent/plugins/httprpc/action.php", + "rtorrent_sample": { + "torrent_client": "rtorrent", + "rtorrent_url": "https://user:password@server.host.tld:443/username/rutorrent/plugins/httprpc/action.php", # "torrent_storage_dir" : "path/to/session folder", # "rtorrent_label" : "Add this label to all uploads" @@ -282,48 +289,47 @@ # "remote_path" : "/RemotePath" }, - "deluge_sample" : { - "torrent_client" : "deluge", - "deluge_url" : "localhost", - "deluge_port" : "8080", - "deluge_user" : "username", - "deluge_pass" : "password", + "deluge_sample": { + "torrent_client": "deluge", + "deluge_url": "localhost", + "deluge_port": "8080", + "deluge_user": "username", + "deluge_pass": "password", # "torrent_storage_dir" : "path/to/session folder", - + # Remote path mapping (docker/etc.) CASE SENSITIVE # "local_path" : "/LocalPath", # "remote_path" : "/RemotePath" }, - "watch_sample" : { - "torrent_client" : "watch", - "watch_folder" : "/Path/To/Watch/Folder" + "watch_sample": { + "torrent_client": "watch", + "watch_folder": "/Path/To/Watch/Folder" }, }, - "DISCORD" :{ - "discord_bot_token" : "discord bot token", - "discord_bot_description" : "L4G's Upload Assistant", - "command_prefix" : "!", - "discord_channel_id" : "discord channel id for use", - "admin_id" : "your discord user id", + "DISCORD": { + "discord_bot_token": "discord bot token", + "discord_bot_description": "L4G's Upload Assistant", + "command_prefix": "!", + "discord_channel_id": "discord channel id for use", + "admin_id": "your discord user id", - "search_dir" : "Path/to/downloads/folder/ this is used for search", + "search_dir": "Path/to/downloads/folder/ this is used for search", # Alternatively, search multiple folders: # "search_dir" : [ # "/downloads/dir1", # "/data/dir2", # ] - "discord_emojis" : { + "discord_emojis": { "BLU": "💙", "BHD": "🎉", "AITHER": "🛫", "STC": "📺", "ACM": "🍙", - "MANUAL" : "📩", - "UPLOAD" : "✅", - "CANCEL" : "🚫" + "MANUAL": "📩", + "UPLOAD": "✅", + "CANCEL": "🚫" } } } - diff --git a/discordbot.py b/discordbot.py index 4e6e6d3ae..297c29713 100644 --- a/discordbot.py +++ b/discordbot.py @@ -1,21 +1,18 @@ import asyncio import datetime -import json import logging -import configparser from pathlib import Path import discord from discord.ext import commands - - def config_load(): # Python Config from data.config import config return config + async def run(): """ Where the bot gets started. 
If you wanted to create an database connection pool or other session for the bot to use, @@ -75,7 +72,6 @@ async def load_all_extensions(self): error = f'{extension}\n {type(e).__name__} : {e}' print(f'failed to load extension {error}') print('-' * 10) - async def on_ready(self): """ @@ -102,10 +98,6 @@ async def on_message(self, message): await self.process_commands(message) - - - - if __name__ == '__main__': logging.basicConfig(level=logging.INFO) diff --git a/src/args.py b/src/args.py index b1f55cf3b..485e76235 100644 --- a/src/args.py +++ b/src/args.py @@ -3,7 +3,6 @@ import urllib.parse import os import datetime -import traceback from src.console import console @@ -15,9 +14,7 @@ class Args(): def __init__(self, config): self.config = config pass - - def parse(self, args, meta): input = args parser = argparse.ArgumentParser() @@ -63,7 +60,7 @@ def parse(self, args, meta): parser.add_argument('-webdv', '--webdv', action='store_true', required=False, help="Contains a Dolby Vision layer converted using dovi_tool") parser.add_argument('-hc', '--hardcoded-subs', action='store_true', required=False, help="Contains hardcoded subs", dest="hardcoded-subs") parser.add_argument('-pr', '--personalrelease', action='store_true', required=False, help="Personal Release") - parser.add_argument('-sdc','--skip-dupe-check', action='store_true', required=False, help="Pass if you know this is a dupe (Skips dupe check)", dest="dupe") + parser.add_argument('-sdc', '--skip-dupe-check', action='store_true', required=False, help="Pass if you know this is a dupe (Skips dupe check)", dest="dupe") parser.add_argument('-debug', '--debug', action='store_true', required=False, help="Debug Mode, will run through all the motions providing extra info, but will not upload to trackers.") parser.add_argument('-ffdebug', '--ffdebug', action='store_true', required=False, help="Will show info from ffmpeg while taking screenshots.") parser.add_argument('-m', '--manual', action='store_true', required=False, help="Manual Mode. 
Returns link to ddl screens/base.torrent") @@ -81,7 +78,6 @@ def parse(self, args, meta): parser.add_argument('-ua', '--unattended', action='store_true', required=False, help=argparse.SUPPRESS) parser.add_argument('-vs', '--vapoursynth', action='store_true', required=False, help="Use vapoursynth for screens (requires vs install)") parser.add_argument('-cleanup', '--cleanup', action='store_true', required=False, help="Clean up tmp directory") - parser.add_argument('-fl', '--freeleech', nargs='*', required=False, help="Freeleech Percentage", default=0, dest="freeleech") args, before_args = parser.parse_known_args(input) args = vars(args) @@ -95,8 +91,8 @@ def parse(self, args, meta): break else: break - - if meta.get('tmdb_manual') != None or meta.get('imdb') != None: + + if meta.get('tmdb_manual') is not None or meta.get('imdb') is not None: meta['tmdb_manual'] = meta['imdb'] = None for key in args: value = args.get(key) @@ -104,7 +100,7 @@ def parse(self, args, meta): if isinstance(value, list): value2 = self.list_to_string(value) if key == 'type': - meta[key] = value2.upper().replace('-','') + meta[key] = value2.upper().replace('-', '') elif key == 'tag': meta[key] = f"-{value2}" elif key == 'screens': @@ -122,7 +118,7 @@ def parse(self, args, meta): parsed = urllib.parse.urlparse(value2) try: meta['ptp'] = urllib.parse.parse_qs(parsed.query)['torrentid'][0] - except: + except Exception: console.print('[red]Your terminal ate part of the url, please surround in quotes next time, or pass only the torrentid') console.print('[red]Continuing without -ptp') else: @@ -135,7 +131,7 @@ def parse(self, args, meta): if blupath.endswith('/'): blupath = blupath[:-1] meta['blu'] = blupath.split('/')[-1] - except: + except Exception: console.print('[red]Unable to parse id from url') console.print('[red]Continuing without --blu') else: @@ -145,7 +141,7 @@ def parse(self, args, meta): parsed = urllib.parse.urlparse(value2) try: meta['hdb'] = urllib.parse.parse_qs(parsed.query)['id'][0] - except: + except Exception: console.print('[red]Your terminal ate part of the url, please surround in quotes next time, or pass only the torrentid') console.print('[red]Continuing without -hdb') else: @@ -169,17 +165,15 @@ def parse(self, args, meta): # parser.print_help() return meta, parser, before_args - def list_to_string(self, list): if len(list) == 1: return str(list[0]) try: result = " ".join(list) - except: + except Exception: result = "None" return result - def parse_tmdb_id(self, id, category): id = id.lower().lstrip() if id.startswith('tv'): @@ -191,18 +185,3 @@ def parse_tmdb_id(self, id, category): else: id = id return category, id - - - - - - - - - - - - - - - diff --git a/src/bbcode.py b/src/bbcode.py index 8ddff33c8..0dfa3f352 100644 --- a/src/bbcode.py +++ b/src/bbcode.py @@ -1,6 +1,7 @@ import re import html import urllib.parse +from src.console import console # Bold - KEEP # Italic - KEEP @@ -36,63 +37,62 @@ def __init__(self): pass def clean_ptp_description(self, desc, is_disc): + # console.print(f"[yellow]Cleaning PTP description...") + # Convert Bullet Points to - desc = desc.replace("•", "-") # Unescape html desc = html.unescape(desc) - # End my suffering desc = desc.replace('\r\n', '\n') # Remove url tags with PTP/HDB links - url_tags = re.findall("(\[url[\=\]]https?:\/\/passthepopcorn\.m[^\]]+)([^\[]+)(\[\/url\])?", desc, flags=re.IGNORECASE) - url_tags = url_tags + re.findall("(\[url[\=\]]https?:\/\/hdbits\.o[^\]]+)([^\[]+)(\[\/url\])?", desc, flags=re.IGNORECASE) - if url_tags != []: + url_tags 
= re.findall(r"(\[url[\=\]]https?:\/\/passthepopcorn\.m[^\]]+)([^\[]+)(\[\/url\])?", desc, flags=re.IGNORECASE) + url_tags += re.findall(r"(\[url[\=\]]https?:\/\/hdbits\.o[^\]]+)([^\[]+)(\[\/url\])?", desc, flags=re.IGNORECASE) + if url_tags: for url_tag in url_tags: url_tag = ''.join(url_tag) - url_tag_removed = re.sub("(\[url[\=\]]https?:\/\/passthepopcorn\.m[^\]]+])", "", url_tag, flags=re.IGNORECASE) - url_tag_removed = re.sub("(\[url[\=\]]https?:\/\/hdbits\.o[^\]]+])", "", url_tag_removed, flags=re.IGNORECASE) + url_tag_removed = re.sub(r"(\[url[\=\]]https?:\/\/passthepopcorn\.m[^\]]+])", "", url_tag, flags=re.IGNORECASE) + url_tag_removed = re.sub(r"(\[url[\=\]]https?:\/\/hdbits\.o[^\]]+])", "", url_tag_removed, flags=re.IGNORECASE) url_tag_removed = url_tag_removed.replace("[/url]", "") desc = desc.replace(url_tag, url_tag_removed) - # Remove links to PTP + # Remove links to PTP/HDB desc = desc.replace('http://passthepopcorn.me', 'PTP').replace('https://passthepopcorn.me', 'PTP') desc = desc.replace('http://hdbits.org', 'HDB').replace('https://hdbits.org', 'HDB') # Remove Mediainfo Tags / Attempt to regex out mediainfo - mediainfo_tags = re.findall("\[mediainfo\][\s\S]*?\[\/mediainfo\]", desc) - if len(mediainfo_tags) >= 1: - desc = re.sub("\[mediainfo\][\s\S]*?\[\/mediainfo\]", "", desc) + mediainfo_tags = re.findall(r"\[mediainfo\][\s\S]*?\[\/mediainfo\]", desc) + if mediainfo_tags: + desc = re.sub(r"\[mediainfo\][\s\S]*?\[\/mediainfo\]", "", desc) elif is_disc != "BDMV": - desc = re.sub("(^general\nunique)(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) - desc = re.sub("(^general\ncomplete)(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) - desc = re.sub("(^(Format[\s]{2,}:))(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) - desc = re.sub("(^(video|audio|text)( #\d+)?\nid)(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) - desc = re.sub("(^(menu)( #\d+)?\n)(.*?)^$", "", f"{desc}\n\n", flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) + desc = re.sub(r"(^general\nunique)(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) + desc = re.sub(r"(^general\ncomplete)(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) + desc = re.sub(r"(^(Format[\s]{2,}:))(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) + desc = re.sub(r"(^(video|audio|text)( #\d+)?\nid)(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) + desc = re.sub(r"(^(menu)( #\d+)?\n)(.*?)^$", "", f"{desc}\n\n", flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) elif any(x in is_disc for x in ["BDMV", "DVD"]): - return "" - + return "", [] # Convert Quote tags: - desc = re.sub("\[quote.*?\]", "[code]", desc) + desc = re.sub(r"\[quote.*?\]", "[code]", desc) desc = desc.replace("[/quote]", "[/code]") - + # Remove Alignments: - desc = re.sub("\[align=.*?\]", "", desc) + desc = re.sub(r"\[align=.*?\]", "", desc) desc = desc.replace("[/align]", "") # Remove size tags - desc = re.sub("\[size=.*?\]", "", desc) + desc = re.sub(r"\[size=.*?\]", "", desc) desc = desc.replace("[/size]", "") # Remove Videos - desc = re.sub("\[video\][\s\S]*?\[\/video\]", "", desc) + desc = re.sub(r"\[video\][\s\S]*?\[\/video\]", "", desc) # Remove Staff tags - desc = re.sub("\[staff[\s\S]*?\[\/staff\]", "", desc) + desc = re.sub(r"\[staff[\s\S]*?\[\/staff\]", "", desc) - - #Remove Movie/Person/User/hr/Indent + # Remove Movie/Person/User/hr/Indent remove_list = [ '[movie]', '[/movie]', '[artist]', '[/artist]', @@ -103,34 +103,41 
@@ def clean_ptp_description(self, desc, is_disc): ] for each in remove_list: desc = desc.replace(each, '') - - #Catch Stray Images - comps = re.findall("\[comparison=[\s\S]*?\[\/comparison\]", desc) - hides = re.findall("\[hide[\s\S]*?\[\/hide\]", desc) + + # Catch Stray Images and Prepare Image List + imagelist = [] + comps = re.findall(r"\[comparison=[\s\S]*?\[\/comparison\]", desc) + hides = re.findall(r"\[hide[\s\S]*?\[\/hide\]", desc) comps.extend(hides) nocomp = desc comp_placeholders = [] # Replace comparison/hide tags with placeholder because sometimes uploaders use comp images as loose images - for i in range(len(comps)): - nocomp = nocomp.replace(comps[i], '') - desc = desc.replace(comps[i], f"COMPARISON_PLACEHOLDER-{i} ") - comp_placeholders.append(comps[i]) - + for i, comp in enumerate(comps): + nocomp = nocomp.replace(comp, '') + desc = desc.replace(comp, f"COMPARISON_PLACEHOLDER-{i} ") + comp_placeholders.append(comp) # Remove Images in IMG tags: - desc = re.sub("\[img\][\s\S]*?\[\/img\]", "", desc, flags=re.IGNORECASE) - desc = re.sub("\[img=[\s\S]*?\]", "", desc, flags=re.IGNORECASE) - # Replace Images - loose_images = re.findall("(https?:\/\/.*\.(?:png|jpg))", nocomp, flags=re.IGNORECASE) - if len(loose_images) >= 1: - for image in loose_images: - desc = desc.replace(image, '') + desc = re.sub(r"\[img\][\s\S]*?\[\/img\]", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"\[img=[\s\S]*?\]", "", desc, flags=re.IGNORECASE) + + # Extract loose images and add to imagelist as dictionaries + loose_images = re.findall(r"(https?:\/\/.*\.(?:png|jpg))", nocomp, flags=re.IGNORECASE) + if loose_images: + for img_url in loose_images: + image_dict = { + 'img_url': img_url, + 'raw_url': img_url, + 'web_url': img_url # Since there is no distinction here, use the same URL for all + } + imagelist.append(image_dict) + desc = desc.replace(img_url, '') + # Re-place comparisons - if comp_placeholders != []: - for i, comp in enumerate(comp_placeholders): - comp = re.sub("\[\/?img[\s\S]*?\]", "",comp, flags=re.IGNORECASE) - desc = desc.replace(f"COMPARISON_PLACEHOLDER-{i} ", comp) + for i, comp in enumerate(comp_placeholders): + comp = re.sub(r"\[\/?img[\s\S]*?\]", "", comp, flags=re.IGNORECASE) + desc = desc.replace(f"COMPARISON_PLACEHOLDER-{i} ", comp) # Convert hides with multiple images to comparison desc = self.convert_collapse_to_comparison(desc, "hide", hides) @@ -142,11 +149,12 @@ def clean_ptp_description(self, desc, is_disc): desc = desc.replace('\n', '', 1) desc = desc.strip('\n') - if desc.replace('\n', '') == '': - return "" - return desc + if desc.replace('\n', '').strip() == '': + console.print(f"[yellow]Description is empty after cleaning.") + return "", imagelist + + return desc, imagelist - def clean_unit3d_description(self, desc, site): # Unescape html desc = html.unescape(desc) @@ -155,12 +163,12 @@ def clean_unit3d_description(self, desc, site): # Remove links to site site_netloc = urllib.parse.urlparse(site).netloc - site_regex = f"(\[url[\=\]]https?:\/\/{site_netloc}/[^\]]+])([^\[]+)(\[\/url\])?" + site_regex = rf"(\[url[\=\]]https?:\/\/{site_netloc}/[^\]]+])([^\[]+)(\[\/url\])?" 
site_url_tags = re.findall(site_regex, desc) if site_url_tags != []: for site_url_tag in site_url_tags: site_url_tag = ''.join(site_url_tag) - url_tag_regex = f"(\[url[\=\]]https?:\/\/{site_netloc}[^\]]+])" + url_tag_regex = rf"(\[url[\=\]]https?:\/\/{site_netloc}[^\]]+])" url_tag_removed = re.sub(url_tag_regex, "", site_url_tag) url_tag_removed = url_tag_removed.replace("[/url]", "") desc = desc.replace(site_url_tag, url_tag_removed) @@ -168,32 +176,33 @@ def clean_unit3d_description(self, desc, site): desc = desc.replace(site_netloc, site_netloc.split('.')[0]) # Temporarily hide spoiler tags - spoilers = re.findall("\[spoiler[\s\S]*?\[\/spoiler\]", desc) + spoilers = re.findall(r"\[spoiler[\s\S]*?\[\/spoiler\]", desc) nospoil = desc spoiler_placeholders = [] for i in range(len(spoilers)): nospoil = nospoil.replace(spoilers[i], '') desc = desc.replace(spoilers[i], f"SPOILER_PLACEHOLDER-{i} ") spoiler_placeholders.append(spoilers[i]) - + # Get Images from outside spoilers imagelist = [] - url_tags = re.findall("\[url=[\s\S]*?\[\/url\]", desc) + url_tags = re.findall(r"\[url=[\s\S]*?\[\/url\]", desc) if url_tags != []: for tag in url_tags: - image = re.findall("\[img[\s\S]*?\[\/img\]", tag) + image = re.findall(r"\[img[\s\S]*?\[\/img\]", tag) if len(image) == 1: image_dict = {} img_url = image[0].lower().replace('[img]', '').replace('[/img]', '') - image_dict['img_url'] = image_dict['raw_url'] = re.sub("\[img[\s\S]*\]", "", img_url) + image_dict['img_url'] = image_dict['raw_url'] = re.sub(r"\[img[\s\S]*\]", "", img_url) url_tag = tag.replace(image[0], '') - image_dict['web_url'] = re.match("\[url=[\s\S]*?\]", url_tag, flags=re.IGNORECASE)[0].lower().replace('[url=', '')[:-1] + image_dict['web_url'] = re.match(r"\[url=[\s\S]*?\]", url_tag, flags=re.IGNORECASE)[0].lower().replace('[url=', '')[:-1] imagelist.append(image_dict) desc = desc.replace(tag, '') # Remove bot signatures - desc = desc.replace("[img=35]https://blutopia/favicon.ico[/img] [b]Uploaded Using [url=https://github.com/HDInnovations/UNIT3D]UNIT3D[/url] Auto Uploader[/b] [img=35]https://blutopia/favicon.ico[/img]", '') - desc = re.sub("\[center\].*Created by L4G's Upload Assistant.*\[\/center\]", "", desc, flags=re.IGNORECASE) + bot_signature_regex = r"\[center\]\s*\[img=\d+\]https:\/\/blutopia\.xyz\/favicon\.ico\[\/img\]\s*\[b\]Uploaded Using \[url=https:\/\/github\.com\/HDInnovations\/UNIT3D\]UNIT3D\[\/url\] Auto Uploader\[\/b\]\s*\[img=\d+\]https:\/\/blutopia\.xyz\/favicon\.ico\[\/img\]\s*\[\/center\]" + desc = re.sub(bot_signature_regex, "", desc, flags=re.IGNORECASE) + desc = re.sub(r"\[center\].*Created by L4G's Upload Assistant.*\[\/center\]", "", desc, flags=re.IGNORECASE) # Replace spoiler tags if spoiler_placeholders != []: @@ -201,7 +210,7 @@ def clean_unit3d_description(self, desc, site): desc = desc.replace(f"SPOILER_PLACEHOLDER-{i} ", spoiler) # Check for empty [center] tags - centers = re.findall("\[center[\s\S]*?\[\/center\]", desc) + centers = re.findall(r"\[center[\s\S]*?\[\/center\]", desc) if centers != []: for center in centers: full_center = center @@ -213,7 +222,7 @@ def clean_unit3d_description(self, desc, site): # Convert Comparison spoilers to [comparison=] desc = self.convert_collapse_to_comparison(desc, "spoiler", spoilers) - + # Strip blank lines: desc = desc.strip('\n') desc = re.sub("\n\n+", "\n\n", desc) @@ -225,41 +234,25 @@ def clean_unit3d_description(self, desc, site): return "", imagelist return desc, imagelist - - - - - - - - - - - - - - - def convert_pre_to_code(self, desc): desc = 
desc.replace('[pre]', '[code]') desc = desc.replace('[/pre]', '[/code]') return desc - def convert_hide_to_spoiler(self, desc): desc = desc.replace('[hide', '[spoiler') desc = desc.replace('[/hide]', '[/spoiler]') return desc - + def convert_spoiler_to_hide(self, desc): desc = desc.replace('[spoiler', '[hide') desc = desc.replace('[/spoiler]', '[/hide]') return desc def remove_spoiler(self, desc): - desc = re.sub("\[\/?spoiler[\s\S]*?\]", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"\[\/?spoiler[\s\S]*?\]", "", desc, flags=re.IGNORECASE) return desc - + def convert_spoiler_to_code(self, desc): desc = desc.replace('[spoiler', '[code') desc = desc.replace('[/spoiler]', '[/code]') @@ -269,15 +262,15 @@ def convert_code_to_quote(self, desc): desc = desc.replace('[code', '[quote') desc = desc.replace('[/code]', '[/quote]') return desc - + def convert_comparison_to_collapse(self, desc, max_width): - comparisons = re.findall("\[comparison=[\s\S]*?\[\/comparison\]", desc) + comparisons = re.findall(r"\[comparison=[\s\S]*?\[\/comparison\]", desc) for comp in comparisons: line = [] output = [] comp_sources = comp.split(']', 1)[0].replace('[comparison=', '').replace(' ', '').split(',') comp_images = comp.split(']', 1)[1].replace('[/comparison]', '').replace(',', '\n').replace(' ', '\n') - comp_images = re.findall("(https?:\/\/.*\.(?:png|jpg))", comp_images, flags=re.IGNORECASE) + comp_images = re.findall(r"(https?:\/\/.*\.(?:png|jpg))", comp_images, flags=re.IGNORECASE) screens_per_line = len(comp_sources) img_size = int(max_width / screens_per_line) if img_size > 350: @@ -295,15 +288,14 @@ def convert_comparison_to_collapse(self, desc, max_width): desc = desc.replace(comp, new_bbcode) return desc - def convert_comparison_to_centered(self, desc, max_width): - comparisons = re.findall("\[comparison=[\s\S]*?\[\/comparison\]", desc) + comparisons = re.findall(r"\[comparison=[\s\S]*?\[\/comparison\]", desc) for comp in comparisons: line = [] output = [] comp_sources = comp.split(']', 1)[0].replace('[comparison=', '').replace(' ', '').split(',') comp_images = comp.split(']', 1)[1].replace('[/comparison]', '').replace(',', '\n').replace(' ', '\n') - comp_images = re.findall("(https?:\/\/.*\.(?:png|jpg))", comp_images, flags=re.IGNORECASE) + comp_images = re.findall(r"(https?:\/\/.*\.(?:png|jpg))", comp_images, flags=re.IGNORECASE) screens_per_line = len(comp_sources) img_size = int(max_width / screens_per_line) if img_size > 350: @@ -326,17 +318,17 @@ def convert_collapse_to_comparison(self, desc, spoiler_hide, collapses): if collapses != []: for i in range(len(collapses)): tag = collapses[i] - images = re.findall("\[img[\s\S]*?\[\/img\]", tag, flags=re.IGNORECASE) + images = re.findall(r"\[img[\s\S]*?\[\/img\]", tag, flags=re.IGNORECASE) if len(images) >= 6: comp_images = [] final_sources = [] for image in images: - image_url = re.sub("\[img[\s\S]*\]", "", image.replace('[/img]', ''), flags=re.IGNORECASE) + image_url = re.sub(r"\[img[\s\S]*\]", "", image.replace('[/img]', ''), flags=re.IGNORECASE) comp_images.append(image_url) if spoiler_hide == "spoiler": - sources = re.match("\[spoiler[\s\S]*?\]", tag)[0].replace('[spoiler=', '')[:-1] + sources = re.match(r"\[spoiler[\s\S]*?\]", tag)[0].replace('[spoiler=', '')[:-1] elif spoiler_hide == "hide": - sources = re.match("\[hide[\s\S]*?\]", tag)[0].replace('[hide=', '')[:-1] + sources = re.match(r"\[hide[\s\S]*?\]", tag)[0].replace('[hide=', '')[:-1] sources = re.sub("comparison", "", sources, flags=re.IGNORECASE) for each in ['vs', ',', '|']: sources = 
sources.split(each) @@ -348,4 +340,4 @@ def convert_collapse_to_comparison(self, desc, spoiler_hide, collapses): final_sources = ', '.join(final_sources) spoil2comp = f"[comparison={final_sources}]{comp_images}[/comparison]" desc = desc.replace(tag, spoil2comp) - return desc \ No newline at end of file + return desc diff --git a/src/clients.py b/src/clients.py index 518babd80..a0cc51ea8 100644 --- a/src/clients.py +++ b/src/clients.py @@ -4,7 +4,7 @@ import bencode import os import qbittorrentapi -from deluge_client import DelugeRPCClient, LocalDelugeRPCClient +from deluge_client import DelugeRPCClient import base64 from pyrobase.parts import Bunch import errno @@ -12,7 +12,8 @@ import ssl import shutil import time -from src.console import console +from src.console import console + class Clients(): """ @@ -21,30 +22,30 @@ class Clients(): def __init__(self, config): self.config = config pass - + async def add_to_client(self, meta, tracker): torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]{meta['clean_name']}.torrent" - if meta.get('no_seed', False) == True: - console.print(f"[bold red]--no-seed was passed, so the torrent will not be added to the client") - console.print(f"[bold yellow]Add torrent manually to the client") + if meta.get('no_seed', False) is True: + console.print("[bold red]--no-seed was passed, so the torrent will not be added to the client") + console.print("[bold yellow]Add torrent manually to the client") return if os.path.exists(torrent_path): torrent = Torrent.read(torrent_path) else: return - if meta.get('client', None) == None: + if meta.get('client', None) is None: default_torrent_client = self.config['DEFAULT']['default_torrent_client'] else: default_torrent_client = meta['client'] if meta.get('client', None) == 'none': return if default_torrent_client == "none": - return + return client = self.config['TORRENT_CLIENTS'][default_torrent_client] torrent_client = client['torrent_client'] - + local_path, remote_path = await self.remote_path_map(meta) - + console.print(f"[bold green]Adding to {torrent_client}") if torrent_client.lower() == "rtorrent": self.rtorrent(meta['path'], torrent_path, torrent, meta, local_path, remote_path, client) @@ -57,9 +58,9 @@ async def add_to_client(self, meta, tracker): elif torrent_client.lower() == "watch": shutil.copy(torrent_path, client['watch_folder']) return - + async def find_existing_torrent(self, meta): - if meta.get('client', None) == None: + if meta.get('client', None) is None: default_torrent_client = self.config['DEFAULT']['default_torrent_client'] else: default_torrent_client = meta['client'] @@ -68,22 +69,22 @@ async def find_existing_torrent(self, meta): client = self.config['TORRENT_CLIENTS'][default_torrent_client] torrent_storage_dir = client.get('torrent_storage_dir', None) torrent_client = client.get('torrent_client', None).lower() - if torrent_storage_dir == None and torrent_client != "watch": + if torrent_storage_dir is None and torrent_client != "watch": console.print(f'[bold red]Missing torrent_storage_dir for {default_torrent_client}') return None elif not os.path.exists(str(torrent_storage_dir)) and torrent_client != "watch": console.print(f"[bold red]Invalid torrent_storage_dir path: [bold yellow]{torrent_storage_dir}") torrenthash = None - if torrent_storage_dir != None and os.path.exists(torrent_storage_dir): - if meta.get('torrenthash', None) != None: + if torrent_storage_dir is not None and os.path.exists(torrent_storage_dir): + if meta.get('torrenthash', None) is not None: valid, 
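
add_to_client and find_existing_torrent both resolve the client the same way: a per-run meta['client'] override wins, the configured default is the fallback, and the literal string "none" opts out entirely. Roughly, as a sketch (names are illustrative):

    def resolve_client_name(meta, config):
        # Per-run override beats the configured default; "none" disables
        # client injection altogether.
        name = meta.get('client') or config['DEFAULT']['default_torrent_client']
        return None if name == 'none' else name
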
torrent_path = await self.is_valid_torrent(meta, f"{torrent_storage_dir}/{meta['torrenthash']}.torrent", meta['torrenthash'], torrent_client, print_err=True) if valid: torrenthash = meta['torrenthash'] - elif meta.get('ext_torrenthash', None) != None: + elif meta.get('ext_torrenthash', None) is not None: valid, torrent_path = await self.is_valid_torrent(meta, f"{torrent_storage_dir}/{meta['ext_torrenthash']}.torrent", meta['ext_torrenthash'], torrent_client, print_err=True) if valid: torrenthash = meta['ext_torrenthash'] - if torrent_client == 'qbit' and torrenthash == None and client.get('enable_search') == True: + if torrent_client == 'qbit' and torrenthash is None and client.get('enable_search') is True: torrenthash = await self.search_qbit_for_torrent(meta, client) if not torrenthash: console.print("[bold yellow]No Valid .torrent found") @@ -93,14 +94,14 @@ async def find_existing_torrent(self, meta): valid2, torrent_path = await self.is_valid_torrent(meta, torrent_path, torrenthash, torrent_client, print_err=False) if valid2: return torrent_path - + return None async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client, print_err=False): valid = False wrong_file = False err_print = "" - + # Normalize the torrent hash based on the client if torrent_client in ('qbit', 'deluge'): torrenthash = torrenthash.lower().strip() @@ -108,22 +109,22 @@ async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client elif torrent_client == 'rtorrent': torrenthash = torrenthash.upper().strip() torrent_path = torrent_path.replace(torrenthash.upper(), torrenthash) - + if meta['debug']: console.log(f"[DEBUG] Torrent path after normalization: {torrent_path}") - + # Check if torrent file exists if os.path.exists(torrent_path): torrent = Torrent.read(torrent_path) - + # Reuse if disc and basename matches or --keep-folder was specified - if meta.get('is_disc', None) != None or (meta['keep_folder'] and meta['isdir']): + if meta.get('is_disc', None) is not None or (meta['keep_folder'] and meta['isdir']): torrent_filepath = os.path.commonpath(torrent.files) if os.path.basename(meta['path']) in torrent_filepath: valid = True if meta['debug']: console.log(f"[DEBUG] Torrent is valid based on disc/basename or keep-folder: {valid}") - + # If one file, check for folder if len(torrent.files) == len(meta['filelist']) == 1: if os.path.basename(torrent.files[0]) == os.path.basename(meta['filelist'][0]): @@ -133,36 +134,36 @@ async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client wrong_file = True if meta['debug']: console.log(f"[DEBUG] Single file match status: valid={valid}, wrong_file={wrong_file}") - + # Check if number of files matches number of videos elif len(torrent.files) == len(meta['filelist']): torrent_filepath = os.path.commonpath(torrent.files) actual_filepath = os.path.commonpath(meta['filelist']) local_path, remote_path = await self.remote_path_map(meta) - + if local_path.lower() in meta['path'].lower() and local_path.lower() != remote_path.lower(): actual_filepath = torrent_path.replace(local_path, remote_path) actual_filepath = torrent_path.replace(os.sep, '/') - + if meta['debug']: console.log(f"[DEBUG] torrent_filepath: {torrent_filepath}") console.log(f"[DEBUG] actual_filepath: {actual_filepath}") - + if torrent_filepath in actual_filepath: valid = True if meta['debug']: console.log(f"[DEBUG] Multiple file match status: valid={valid}") - + else: console.print(f'[bold yellow]{torrent_path} was not found') - + # Additional checks if the 
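
The hash normalization above exists because the clients disagree on infohash casing in their torrent storage directories. A condensed sketch of the same rule (assuming only the qbit/deluge/rtorrent cases handled here):

    def normalize_hash(torrenthash, torrent_client):
        # qBittorrent and Deluge store session .torrent files under a
        # lowercase infohash; rTorrent uses uppercase.
        if torrent_client in ('qbit', 'deluge'):
            return torrenthash.lower().strip()
        if torrent_client == 'rtorrent':
            return torrenthash.upper().strip()
        return torrenthash.strip()
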
torrent is valid so far if valid: if os.path.exists(torrent_path): reuse_torrent = Torrent.read(torrent_path) if meta['debug']: console.log(f"[DEBUG] Checking piece size and count: pieces={reuse_torrent.pieces}, piece_size={reuse_torrent.piece_size}") - + if (reuse_torrent.pieces >= 7000 and reuse_torrent.piece_size < 8388608) or (reuse_torrent.pieces >= 4000 and reuse_torrent.piece_size < 4194304): err_print = "[bold yellow]Too many pieces exist in current hash. REHASHING" valid = False @@ -178,11 +179,11 @@ async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client console.log(f"[DEBUG] Final validity after piece checks: valid={valid}") else: err_print = '[bold yellow]Unwanted Files/Folders Identified' - + # Print the error message if needed if print_err: console.print(err_print) - + return valid, torrent_path async def search_qbit_for_torrent(self, meta, client): @@ -193,7 +194,7 @@ async def search_qbit_for_torrent(self, meta, client): console.print(f"Torrent storage directory found: {torrent_storage_dir}") else: console.print("No torrent storage directory found.") - if torrent_storage_dir == None and client.get("torrent_client", None) != "watch": + if torrent_storage_dir is None and client.get("torrent_client", None) != "watch": console.print(f"[bold red]Missing torrent_storage_dir for {self.config['DEFAULT']['default_torrent_client']}") return None @@ -213,7 +214,7 @@ async def search_qbit_for_torrent(self, meta, client): if local_path.lower() in meta['path'].lower() and local_path.lower() != remote_path.lower(): remote_path_map = True if meta['debug']: - console.print(f"Remote path mapping found!") + console.print("Remote path mapping found!") console.print(f"Local path: {local_path}") console.print(f"Remote path: {remote_path}") @@ -251,7 +252,7 @@ def rtorrent(self, path, torrent_path, torrent, meta, local_path, remote_path, c except EnvironmentError as exc: console.print("[red]Error making fast-resume data (%s)" % (exc,)) raise - + new_meta = bencode.bencode(fast_resume) if new_meta != metainfo: fr_file = torrent_path.replace('.torrent', '-resume.torrent') @@ -261,7 +262,7 @@ def rtorrent(self, path, torrent_path, torrent, meta, local_path, remote_path, c isdir = os.path.isdir(path) # if meta['type'] == "DISC": # path = os.path.dirname(path) - #Remote path mount + # Remote path mount modified_fr = False if local_path.lower() in path.lower() and local_path.lower() != remote_path.lower(): path_dir = os.path.dirname(path) @@ -270,16 +271,16 @@ def rtorrent(self, path, torrent_path, torrent, meta, local_path, remote_path, c shutil.copy(fr_file, f"{path_dir}/fr.torrent") fr_file = f"{os.path.dirname(path)}/fr.torrent" modified_fr = True - if isdir == False: + if isdir is False: path = os.path.dirname(path) - + console.print("[bold yellow]Adding and starting torrent") rtorrent.load.start_verbose('', fr_file, f"d.directory_base.set={path}") time.sleep(1) # Add labels - if client.get('rtorrent_label', None) != None: + if client.get('rtorrent_label', None) is not None: rtorrent.d.custom1.set(torrent.infohash, client['rtorrent_label']) - if meta.get('rtorrent_label') != None: + if meta.get('rtorrent_label') is not None: rtorrent.d.custom1.set(torrent.infohash, meta['rtorrent_label']) # Delete modified fr_file location @@ -291,7 +292,7 @@ def rtorrent(self, path, torrent_path, torrent, meta, local_path, remote_path, c async def qbittorrent(self, path, torrent, local_path, remote_path, client, is_disc, filelist, meta): # infohash = torrent.infohash - #Remote path mount 
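
The piece checks above decide when an existing .torrent is worth reusing versus rehashing from scratch. The thresholds as a standalone predicate (a sketch; 8388608 and 4194304 are the 8 MiB and 4 MiB piece sizes from the code):

    def should_rehash(pieces, piece_size):
        # Too many small pieces bloat the torrent: over ~7000 pieces below
        # an 8 MiB piece size, or over ~4000 below 4 MiB, triggers a rehash.
        return (pieces >= 7000 and piece_size < 8 * 1024 * 1024) or \
               (pieces >= 4000 and piece_size < 4 * 1024 * 1024)
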
+        # Remote path mount
         isdir = os.path.isdir(path)
         if not isdir and len(filelist) == 1:
             path = os.path.dirname(path)
@@ -313,15 +314,15 @@ async def qbittorrent(self, path, torrent, local_path, remote_path, client, is_d
         am_config = client.get('automatic_management_paths', '')
         if isinstance(am_config, list):
             for each in am_config:
-                if os.path.normpath(each).lower() in os.path.normpath(path).lower(): 
+                if os.path.normpath(each).lower() in os.path.normpath(path).lower():
                     auto_management = True
         else:
-            if os.path.normpath(am_config).lower() in os.path.normpath(path).lower() and am_config.strip() != "": 
+            if os.path.normpath(am_config).lower() in os.path.normpath(path).lower() and am_config.strip() != "":
                 auto_management = True
         qbt_category = client.get("qbit_cat") if not meta.get("qbit_cat") else meta.get('qbit_cat')
         content_layout = client.get('content_layout', 'Original')
-
+
         qbt_client.torrents_add(torrent_files=torrent.dump(), save_path=path, use_auto_torrent_management=auto_management, is_skip_checking=True, content_layout=content_layout, category=qbt_category)
         # Wait for up to 30 seconds for qbit to actually return the download
         # there's an async race condition within qbt that it will return ok before the torrent is actually added
         if len(qbt_client.torrents_info(torrent_hashes=torrent.infohash)) > 0:
             break
         await asyncio.sleep(1)
         qbt_client.torrents_resume(torrent.infohash)
-        if client.get('qbit_tag', None) != None:
+        if client.get('qbit_tag', None) is not None:
             qbt_client.torrents_add_tags(tags=client.get('qbit_tag'), torrent_hashes=torrent.infohash)
-        if meta.get('qbit_tag') != None:
+        if meta.get('qbit_tag') is not None:
             qbt_client.torrents_add_tags(tags=meta.get('qbit_tag'), torrent_hashes=torrent.infohash)
         console.print(f"Added to: {path}")
-
+
     def deluge(self, path, torrent_path, torrent, local_path, remote_path, client, meta):
         client = DelugeRPCClient(client['deluge_url'], int(client['deluge_port']), client['deluge_user'], client['deluge_pass'])
         # client = LocalDelugeRPCClient()
         client.connect()
-        if client.connected == True:
-            console.print("Connected to Deluge")
+        if client.connected is True:
+            console.print("Connected to Deluge")
         isdir = os.path.isdir(path)
-        #Remote path mount
+        # Remote path mount
         if local_path.lower() in path.lower() and local_path.lower() != remote_path.lower():
             path = path.replace(local_path, remote_path)
             path = path.replace(os.sep, '/')
-
+
         path = os.path.dirname(path)
-        client.call('core.add_torrent_file', torrent_path, base64.b64encode(torrent.dump()), {'download_location' : path, 'seed_mode' : True})
+        client.call('core.add_torrent_file', torrent_path, base64.b64encode(torrent.dump()), {'download_location': path, 'seed_mode': True})
         if meta['debug']:
             console.print(f"[cyan]Path: {path}")
         else:
@@ -393,19 +394,21 @@ def add_fast_resume(self, metainfo, datapath, torrent):
                 resume["files"].append(dict(
                     priority=1,
                     mtime=int(os.path.getmtime(filepath)),
-                    completed=(offset+fileinfo["length"]+piece_length-1) // piece_length -
-                              offset // piece_length,
+                    completed=(
+                        (offset + fileinfo["length"] + piece_length - 1) // piece_length
+                        - offset // piece_length
+                    ),
                 ))
             offset += fileinfo["length"]
         return metainfo

     async def remote_path_map(self, meta):
-        if meta.get('client', None) == None:
+        if meta.get('client', None) is None:
             torrent_client = self.config['DEFAULT']['default_torrent_client']
         else:
             torrent_client = meta['client']
-        local_path = list_local_path =
self.config['TORRENT_CLIENTS'][torrent_client].get('local_path','/LocalPath') + local_path = list_local_path = self.config['TORRENT_CLIENTS'][torrent_client].get('local_path', '/LocalPath') remote_path = list_remote_path = self.config['TORRENT_CLIENTS'][torrent_client].get('remote_path', '/RemotePath') if isinstance(local_path, list): for i in range(len(local_path)): @@ -418,4 +421,4 @@ async def remote_path_map(self, meta): if local_path.endswith(os.sep): remote_path = remote_path + os.sep - return local_path, remote_path \ No newline at end of file + return local_path, remote_path diff --git a/src/console.py b/src/console.py index 61aeecb04..223c51181 100644 --- a/src/console.py +++ b/src/console.py @@ -1,2 +1,2 @@ -from rich.console import Console -console = Console() \ No newline at end of file +from rich.console import Console +console = Console() diff --git a/src/discparse.py b/src/discparse.py index 33d9b8c68..0a1cb28c0 100644 --- a/src/discparse.py +++ b/src/discparse.py @@ -9,8 +9,8 @@ import json from src.console import console - - + + class DiscParse(): def __init__(self): pass @@ -28,7 +28,7 @@ async def get_bdinfo(self, discs, folder_id, base_dir, meta_discs): for file in os.listdir(save_dir): if file == f"BD_SUMMARY_{str(i).zfill(2)}.txt": bdinfo_text = save_dir + "/" + file - if bdinfo_text == None or meta_discs == []: + if bdinfo_text is None or meta_discs == []: if os.path.exists(f"{save_dir}/BD_FULL_{str(i).zfill(2)}.txt"): bdinfo_text = os.path.abspath(f"{save_dir}/BD_FULL_{str(i).zfill(2)}.txt") else: @@ -39,7 +39,7 @@ async def get_bdinfo(self, discs, folder_id, base_dir, meta_discs): console.print(f"[bold green]Scanning {path}") proc = await asyncio.create_subprocess_exec('mono', f"{base_dir}/bin/BDInfo/BDInfo.exe", '-w', path, save_dir) await proc.wait() - except: + except Exception: console.print('[bold red]mono not found, please install mono') elif sys.platform.startswith('win32'): @@ -54,7 +54,7 @@ async def get_bdinfo(self, discs, folder_id, base_dir, meta_discs): try: if bdinfo_text == "": for file in os.listdir(save_dir): - if file.startswith(f"BDINFO"): + if file.startswith("BDINFO"): bdinfo_text = save_dir + "/" + file with open(bdinfo_text, 'r') as f: text = f.read() @@ -64,7 +64,7 @@ async def get_bdinfo(self, discs, folder_id, base_dir, meta_discs): result = result2.split("********************", 1) bd_summary = result[0].rstrip(" \n") f.close() - with open(bdinfo_text, 'r') as f: # parse extended BDInfo + with open(bdinfo_text, 'r') as f: # parse extended BDInfo text = f.read() result = text.split("[code]", 3) result2 = result[2].rstrip(" \n") @@ -84,21 +84,19 @@ async def get_bdinfo(self, discs, folder_id, base_dir, meta_discs): with open(f"{save_dir}/BD_SUMMARY_{str(i).zfill(2)}.txt", 'w') as f: f.write(bd_summary.strip()) f.close() - with open(f"{save_dir}/BD_SUMMARY_EXT.txt", 'w') as f: # write extended BDInfo file + with open(f"{save_dir}/BD_SUMMARY_EXT.txt", 'w') as f: # write extended BDInfo file f.write(ext_bd_summary.strip()) f.close() - + bdinfo = self.parse_bdinfo(bd_summary, files[1], path) - + discs[i]['summary'] = bd_summary.strip() discs[i]['bdinfo'] = bdinfo # shutil.rmtree(f"{base_dir}/tmp") else: discs = meta_discs - + return discs, discs[0]['bdinfo'] - - def parse_bdinfo(self, bdinfo_input, files, path): bdinfo = dict() @@ -113,21 +111,21 @@ def parse_bdinfo(self, bdinfo_input, files, path): line = l.replace("*", "").strip().lower() if line.startswith("playlist:"): playlist = l.split(':', 1)[1] - bdinfo['playlist'] = 
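
The reflowed completed= expression in add_fast_resume counts how many pieces a file spans: one past the last piece the file touches, minus the first piece it starts in. A worked example with the values spelled out (numbers chosen for illustration):

    piece_length = 4 * 1024 * 1024   # 4 MiB pieces
    offset = 10_485_760              # file starts 10 MiB into the torrent
    length = 7_340_032               # 7 MiB file

    first = offset // piece_length                                         # 2
    last_exclusive = (offset + length + piece_length - 1) // piece_length  # 5
    completed = last_exclusive - first                                     # 3 pieces
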
playlist.split('.',1)[0].strip() + bdinfo['playlist'] = playlist.split('.', 1)[0].strip() if line.startswith("disc size:"): size = l.split(':', 1)[1] - size = size.split('bytes', 1)[0].replace(',','') - size = float(size)/float(1<<30) + size = size.split('bytes', 1)[0].replace(',', '') + size = float(size)/float(1 << 30) bdinfo['size'] = size if line.startswith("length:"): length = l.split(':', 1)[1] - bdinfo['length'] = length.split('.',1)[0].strip() + bdinfo['length'] = length.split('.', 1)[0].strip() if line.startswith("video:"): split1 = l.split(':', 1)[1] split2 = split1.split('/', 12) while len(split2) != 9: split2.append("") - n=0 + n = 0 if "Eye" in split2[2].strip(): n = 1 three_dim = split2[2].strip() @@ -137,21 +135,21 @@ def parse_bdinfo(self, bdinfo_input, files, path): bit_depth = split2[n+6].strip() hdr_dv = split2[n+7].strip() color = split2[n+8].strip() - except: + except Exception: bit_depth = "" hdr_dv = "" color = "" bdinfo['video'].append({ - 'codec': split2[0].strip(), - 'bitrate': split2[1].strip(), - 'res': split2[n+2].strip(), - 'fps': split2[n+3].strip(), - 'aspect_ratio' : split2[n+4].strip(), + 'codec': split2[0].strip(), + 'bitrate': split2[1].strip(), + 'res': split2[n+2].strip(), + 'fps': split2[n+3].strip(), + 'aspect_ratio': split2[n+4].strip(), 'profile': split2[n+5].strip(), - 'bit_depth' : bit_depth, - 'hdr_dv' : hdr_dv, - 'color' : color, - '3d' : three_dim, + 'bit_depth': bit_depth, + 'hdr_dv': hdr_dv, + 'color': color, + '3d': three_dim, }) elif line.startswith("audio:"): if "(" in l: @@ -167,15 +165,15 @@ def parse_bdinfo(self, bdinfo_input, files, path): fuckatmos = "" try: bit_depth = split2[n+5].strip() - except: + except Exception: bit_depth = "" bdinfo['audio'].append({ - 'language' : split2[0].strip(), - 'codec' : split2[1].strip(), - 'channels' : split2[n+2].strip(), - 'sample_rate' : split2[n+3].strip(), - 'bitrate' : split2[n+4].strip(), - 'bit_depth' : bit_depth, # Also DialNorm, but is not in use anywhere yet + 'language': split2[0].strip(), + 'codec': split2[1].strip(), + 'channels': split2[n+2].strip(), + 'sample_rate': split2[n+3].strip(), + 'bitrate': split2[n+4].strip(), + 'bit_depth': bit_depth, # Also DialNorm, but is not in use anywhere yet 'atmos_why_you_be_like_this': fuckatmos, }) elif line.startswith("disc title:"): @@ -202,12 +200,10 @@ def parse_bdinfo(self, bdinfo_input, files, path): m2ts['file'] = bd_file m2ts['length'] = bd_length bdinfo['files'].append(m2ts) - except: + except Exception: pass return bdinfo - - """ Parse VIDEO_TS and get mediainfos """ @@ -215,7 +211,7 @@ async def get_dvdinfo(self, discs): for each in discs: path = each.get('path') os.chdir(path) - files = glob(f"VTS_*.VOB") + files = glob("VTS_*.VOB") files.sort() # Switch to ordered dictionary filesdict = OrderedDict() @@ -232,10 +228,9 @@ async def get_dvdinfo(self, discs): vob_set_mi = MediaInfo.parse(f"VTS_{vob_set[0][:2]}_0.IFO", output='JSON') vob_set_mi = json.loads(vob_set_mi) vob_set_duration = vob_set_mi['media']['track'][1]['Duration'] - - + # If the duration of the new vob set > main set by more than 10% then it's our new main set - # This should make it so TV shows pick the first episode + # This should make it so TV shows pick the first episode if (float(vob_set_duration) * 1.00) > (float(main_set_duration) * 1.10) or len(main_set) < 1: main_set = vob_set main_set_duration = vob_set_duration @@ -243,20 +238,19 @@ async def get_dvdinfo(self, discs): set = main_set[0][:2] each['vob'] = vob = f"{path}/VTS_{set}_1.VOB" each['ifo'] = ifo = 
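
The 10% rule above picks the DVD's main title: a VOB set only replaces the current main set when it runs at least 10% longer, which keeps episodic discs on the first episode rather than whichever title happens to be scanned last. As a sketch (signature is illustrative):

    def pick_main_set(vob_sets):
        # vob_sets: iterable of (set_id, duration_seconds) pairs.
        main_id, main_duration = None, 0.0
        for set_id, duration in vob_sets:
            if main_id is None or float(duration) > float(main_duration) * 1.10:
                main_id, main_duration = set_id, duration
        return main_id
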
f"{path}/VTS_{set}_0.IFO" - each['vob_mi'] = MediaInfo.parse(os.path.basename(vob), output='STRING', full=False, mediainfo_options={'inform_version' : '1'}).replace('\r\n', '\n') - each['ifo_mi'] = MediaInfo.parse(os.path.basename(ifo), output='STRING', full=False, mediainfo_options={'inform_version' : '1'}).replace('\r\n', '\n') - each['vob_mi_full'] = MediaInfo.parse(vob, output='STRING', full=False, mediainfo_options={'inform_version' : '1'}).replace('\r\n', '\n') - each['ifo_mi_full'] = MediaInfo.parse(ifo, output='STRING', full=False, mediainfo_options={'inform_version' : '1'}).replace('\r\n', '\n') - + each['vob_mi'] = MediaInfo.parse(os.path.basename(vob), output='STRING', full=False, mediainfo_options={'inform_version': '1'}).replace('\r\n', '\n') + each['ifo_mi'] = MediaInfo.parse(os.path.basename(ifo), output='STRING', full=False, mediainfo_options={'inform_version': '1'}).replace('\r\n', '\n') + each['vob_mi_full'] = MediaInfo.parse(vob, output='STRING', full=False, mediainfo_options={'inform_version': '1'}).replace('\r\n', '\n') + each['ifo_mi_full'] = MediaInfo.parse(ifo, output='STRING', full=False, mediainfo_options={'inform_version': '1'}).replace('\r\n', '\n') - size = sum(os.path.getsize(f) for f in os.listdir('.') if os.path.isfile(f))/float(1<<30) + size = sum(os.path.getsize(f) for f in os.listdir('.') if os.path.isfile(f))/float(1 << 30) if size <= 7.95: dvd_size = "DVD9" if size <= 4.37: dvd_size = "DVD5" each['size'] = dvd_size return discs - + async def get_hddvd_info(self, discs): for each in discs: path = each.get('path') @@ -270,6 +264,6 @@ async def get_hddvd_info(self, discs): if file_size > size: largest = file size = file_size - each['evo_mi'] = MediaInfo.parse(os.path.basename(largest), output='STRING', full=False, mediainfo_options={'inform_version' : '1'}) + each['evo_mi'] = MediaInfo.parse(os.path.basename(largest), output='STRING', full=False, mediainfo_options={'inform_version': '1'}) each['largest_evo'] = os.path.abspath(f"{path}/{largest}") return discs diff --git a/src/exceptions.py b/src/exceptions.py index b4c6dbead..e5de6f944 100644 --- a/src/exceptions.py +++ b/src/exceptions.py @@ -7,9 +7,10 @@ def __init__(self, *args, **kwargs): if args: # ... pass them to the super constructor super().__init__(*args, **kwargs) - else: # else, the exception was raised without arguments ... - # ... pass the default message to the super constructor - super().__init__(default_message, **kwargs) + else: # else, the exception was raised without arguments ... + # ... pass the default message to the super constructor + super().__init__(default_message, **kwargs) + class UploadException(Exception): def __init__(self, *args, **kwargs): @@ -20,14 +21,18 @@ def __init__(self, *args, **kwargs): if args: # ... pass them to the super constructor super().__init__(*args, **kwargs) - else: # else, the exception was raised without arguments ... - # ... pass the default message to the super constructor - super().__init__(default_message, **kwargs) + else: # else, the exception was raised without arguments ... + # ... 
pass the default message to the super constructor + super().__init__(default_message, **kwargs) class XEMNotFound(Exception): pass + + class WeirdSystem(Exception): pass + + class ManualDateException(Exception): - pass \ No newline at end of file + pass diff --git a/src/prep.py b/src/prep.py index 26566927f..b3c5bb1d8 100644 --- a/src/prep.py +++ b/src/prep.py @@ -9,14 +9,11 @@ try: import traceback - import nest_asyncio from src.discparse import DiscParse import multiprocessing import os - from os.path import basename import re import math - import sys from str2bool import str2bool import asyncio from guessit import guessit @@ -32,7 +29,7 @@ import pyimgbox from pymediainfo import MediaInfo import tmdbsimple as tmdb - from datetime import datetime, date + from datetime import datetime from difflib import SequenceMatcher import torf from torf import Torrent @@ -41,8 +38,6 @@ import anitopy import shutil from imdb import Cinemagoer - from subprocess import Popen - import subprocess import itertools import cli_ui from rich.progress import Progress, TextColumn, BarColumn, TimeRemainingColumn @@ -56,9 +51,6 @@ exit() - - - class Prep(): """ Prepare for upload: @@ -73,6 +65,216 @@ def __init__(self, screens, img_host, config): self.img_host = img_host.lower() tmdb.API_KEY = config['DEFAULT']['tmdb_api'] + async def prompt_user_for_id_selection(self, blu_tmdb=None, blu_imdb=None, blu_tvdb=None, blu_filename=None, imdb=None): + if imdb: + imdb = str(imdb).zfill(7) # Convert to string and ensure IMDb ID is 7 characters long by adding leading zeros + console.print(f"[cyan]Found IMDb ID: https://www.imdb.com/title/tt{imdb}") + if blu_tmdb or blu_imdb or blu_tvdb: + if blu_imdb: + blu_imdb = str(blu_imdb).zfill(7) # Convert to string and ensure IMDb ID is 7 characters long by adding leading zeros + console.print("[cyan]Found the following IDs on BLU:") + console.print(f"TMDb ID: {blu_tmdb}") + console.print(f"IMDb ID: https://www.imdb.com/title/tt{blu_imdb}") + console.print(f"TVDb ID: {blu_tvdb}") + console.print(f"Filename: {blu_filename}") + + selection = input("Do you want to use this ID? 
(y/n): ").strip().lower() + return selection == 'y' + + async def prompt_user_for_confirmation(self, message): + selection = input(f"{message} (y/n): ").strip().lower() + return selection == 'y' + + async def update_metadata_from_tracker(self, tracker_name, tracker_instance, meta, search_term, search_file_folder): + tracker_key = tracker_name.lower() + manual_key = f"{tracker_key}_manual" + found_match = False + + # Handle each tracker separately + if tracker_name == "BLU": + if meta.get(tracker_key) is not None: + meta[manual_key] = meta[tracker_key] + console.print(f"[cyan]{tracker_name} ID found in meta, reusing existing ID: {meta[tracker_key]}") + blu_tmdb, blu_imdb, blu_tvdb, blu_mal, blu_desc, blu_category, meta['ext_torrenthash'], blu_imagelist, blu_filename = await COMMON(self.config).unit3d_torrent_info( + "BLU", + tracker_instance.torrent_url, + tracker_instance.search_url, + id=meta[tracker_key] + ) + if blu_tmdb not in [None, '0'] or blu_imdb not in [None, '0'] or blu_tvdb not in [None, '0']: + console.print(f"[green]Valid data found on {tracker_name}, setting meta values") + if await self.prompt_user_for_id_selection(blu_tmdb, blu_imdb, blu_tvdb, blu_filename): + if blu_tmdb not in [None, '0']: + meta['tmdb_manual'] = blu_tmdb + if blu_imdb not in [None, '0']: + meta['imdb'] = str(blu_imdb).zfill(7) # Pad IMDb ID with leading zeros + if blu_tvdb not in [None, '0']: + meta['tvdb_id'] = blu_tvdb + if blu_mal not in [None, '0']: + meta['mal'] = blu_mal + if blu_desc not in [None, '0', '']: + meta['blu_desc'] = blu_desc + if blu_category.upper() in ['MOVIE', 'TV SHOW', 'FANRES']: + meta['category'] = 'TV' if blu_category.upper() == 'TV SHOW' else blu_category.upper() + if not meta.get('image_list'): + meta['image_list'] = blu_imagelist + if blu_filename: + meta['blu_filename'] = blu_filename # Store the filename in meta for later use + found_match = True + else: + console.print(f"[yellow]User skipped the found ID on {tracker_name}, moving to the next site.") + await self.handle_image_list(meta, tracker_name) + return meta, found_match + else: + console.print(f"[yellow]No valid data found on {tracker_name}") + else: + # BLU tracker handling when tracker_key is not in meta + blu_tmdb, blu_imdb, blu_tvdb, blu_mal, blu_desc, blu_category, meta['ext_torrenthash'], blu_imagelist, blu_filename = await COMMON(self.config).unit3d_torrent_info( + "BLU", + tracker_instance.torrent_url, + tracker_instance.search_url, + file_name=search_term + ) + if blu_tmdb not in [None, '0'] or blu_imdb not in [None, '0'] or blu_tvdb not in [None, '0']: + console.print(f"[green]Valid data found on {tracker_name} using file name, setting meta values") + if await self.prompt_user_for_id_selection(blu_tmdb, blu_imdb, blu_tvdb, blu_filename): + if blu_tmdb not in [None, '0']: + meta['tmdb_manual'] = blu_tmdb + if blu_imdb not in [None, '0']: + meta['imdb'] = str(blu_imdb).zfill(7) + if blu_tvdb not in [None, '0']: + meta['tvdb_id'] = blu_tvdb + if blu_mal not in [None, '0']: + meta['mal'] = blu_mal + if blu_desc not in [None, '0', '']: + meta['blu_desc'] = blu_desc + if blu_category.upper() in ['MOVIE', 'TV SHOW', 'FANRES']: + meta['category'] = 'TV' if blu_category.upper() == 'TV SHOW' else blu_category.upper() + if not meta.get('image_list'): + meta['image_list'] = blu_imagelist + if blu_filename: + meta['blu_filename'] = blu_filename + found_match = True + else: + console.print(f"[yellow]User skipped the found ID on {tracker_name}, moving to the next site.") + await self.handle_image_list(meta, tracker_name) 
+ return meta, found_match + else: + console.print(f"[yellow]No valid data found on {tracker_name}") + + elif tracker_name == "PTP": + # Handle PTP separately to avoid duplication + if meta.get('ptp') is None: + # Only fetch if not already in meta + imdb, ptp_torrent_id, meta['ext_torrenthash'] = await tracker_instance.get_ptp_id_imdb(search_term, search_file_folder) + if ptp_torrent_id: + meta['ptp'] = ptp_torrent_id + meta['imdb'] = str(imdb).zfill(7) if imdb else None + + if meta.get('imdb') and await self.prompt_user_for_id_selection(imdb=meta['imdb']): + console.print(f"[green]{tracker_name} IMDb ID found: {meta['imdb']}") + found_match = True + + ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(meta['ptp'], meta.get('is_disc', False)) + if ptp_desc.strip(): + meta['description'] = ptp_desc + meta['image_list'] = ptp_imagelist + meta['skip_gen_desc'] = True + console.print(f"[green]PTP description and images added to metadata.") + + if await self.prompt_user_for_confirmation("Do you want to keep the description from PTP?"): + meta['skip_gen_desc'] = True + found_match = True + else: + console.print(f"[yellow]Description discarded from PTP") + meta['skip_gen_desc'] = True + meta['description'] = None + return meta, found_match + else: + console.print(f"[yellow]User skipped the found IMDb ID on {tracker_name}, moving to the next site.") + meta['skip_gen_desc'] = True + return meta, found_match + + elif tracker_name == "HDB": + if meta.get(tracker_key) is not None: + meta[manual_key] = meta[tracker_key] + console.print(f"[cyan]{tracker_name} ID found in meta, reusing existing ID: {meta[tracker_key]}") + imdb, tvdb_id, hdb_name, meta['ext_torrenthash'], tracker_id = await tracker_instance.search_filename(search_term, search_file_folder) + meta['tvdb_id'] = str(tvdb_id) if tvdb_id else meta.get('tvdb_id') + meta['hdb_name'] = hdb_name + if tracker_id: + meta[tracker_key] = tracker_id + found_match = True + else: + # Handle HDB when tracker_key is not in meta + imdb, tvdb_id, hdb_name, meta['ext_torrenthash'], tracker_id = await tracker_instance.search_filename(search_term, search_file_folder) + meta['tvdb_id'] = str(tvdb_id) if tvdb_id else meta.get('tvdb_id') + meta['hdb_name'] = hdb_name + if tracker_id: + meta[tracker_key] = tracker_id + found_match = True + + if found_match: + if imdb or tvdb_id or hdb_name: + console.print(f"[green]{tracker_name} data found: IMDb ID: {imdb}, TVDb ID: {meta['tvdb_id']}, HDB Name: {meta['hdb_name']}") + if await self.prompt_user_for_confirmation(f"Do you want to keep the data found on {tracker_name}?"): + console.print(f"[green]{tracker_name} data retained.") + else: + console.print(f"[yellow]{tracker_name} data discarded.") + meta[tracker_key] = None + meta['tvdb_id'] = None + meta['hdb_name'] = None + found_match = False + else: + console.print(f"[yellow]Could not find a matching release on {tracker_name}.") + found_match = False + else: + # Handle other trackers if any + meta['imdb'], meta['ext_torrenthash'] = await tracker_instance.get_imdb_from_torrent_id(meta.get(tracker_key)) + if meta['imdb']: + meta['imdb'] = str(meta['imdb']).zfill(7) + if await self.prompt_user_for_id_selection(imdb=meta['imdb']): + console.print(f"[green]{tracker_name} IMDb ID found: {meta['imdb']}") + found_match = True + + # Additional PTP handling if needed + if tracker_name == "PTP": + ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(meta['ptp'], meta.get('is_disc', False)) + if ptp_desc.strip(): + meta['description'] = ptp_desc + 
meta['image_list'] = ptp_imagelist + console.print(f"[green]PTP description and images added to metadata.") + + if await self.prompt_user_for_confirmation("Do you want to keep the description from PTP?"): + meta['skip_gen_desc'] = True + found_match = True + else: + console.print(f"[yellow]Description discarded from PTP") + meta['skip_gen_desc'] = True + meta['description'] = None + return meta, found_match + else: + console.print(f"[yellow]User skipped the found IMDb ID on {tracker_name}, moving to the next site.") + meta['skip_gen_desc'] = True + return meta, found_match + else: + console.print(f"[yellow]No IMDb ID found on {tracker_name}") + + # Handle image list at the end + await self.handle_image_list(meta, tracker_name) + return meta, found_match + + async def handle_image_list(self, meta, tracker_name): + if meta.get('image_list'): + console.print(f"[cyan]Found the following images from {tracker_name}:") + for img in meta['image_list']: + console.print(f"[blue]{img}[/blue]") + keep_images = await self.prompt_user_for_confirmation(f"Do you want to keep the images found on {tracker_name}?") + if not keep_images: + meta['image_list'] = [] + console.print(f"[yellow]Images discarded from {tracker_name}") + else: + console.print(f"[green]Images retained from {tracker_name}") async def gather_prep(self, meta, mode): meta['mode'] = mode @@ -80,258 +282,211 @@ async def gather_prep(self, meta, mode): meta['isdir'] = os.path.isdir(meta['path']) base_dir = meta['base_dir'] - if meta.get('uuid', None) == None: + if meta.get('uuid', None) is None: folder_id = os.path.basename(meta['path']) - meta['uuid'] = folder_id + meta['uuid'] = folder_id if not os.path.exists(f"{base_dir}/tmp/{meta['uuid']}"): Path(f"{base_dir}/tmp/{meta['uuid']}").mkdir(parents=True, exist_ok=True) - + if meta['debug']: console.print(f"[cyan]ID: {meta['uuid']}") - meta['is_disc'], videoloc, bdinfo, meta['discs'] = await self.get_disc(meta) - - # If BD: + + # Debugging information + # console.print(f"Debug: meta['filelist'] before population: {meta.get('filelist', 'Not Set')}") + if meta['is_disc'] == "BDMV": video, meta['scene'], meta['imdb'] = self.is_scene(meta['path'], meta.get('imdb', None)) - meta['filelist'] = [] + meta['filelist'] = [] # No filelist for discs, use path + search_term = os.path.basename(meta['path']) + search_file_folder = 'folder' try: - guess_name = bdinfo['title'].replace('-',' ') - filename = guessit(re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), {"excludes" : ["country", "language"]})['title'] + guess_name = bdinfo['title'].replace('-', ' ') + filename = guessit(re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), {"excludes": ["country", "language"]})['title'] untouched_filename = bdinfo['title'] try: meta['search_year'] = guessit(bdinfo['title'])['year'] except Exception: meta['search_year'] = "" except Exception: - guess_name = bdinfo['label'].replace('-',' ') - filename = guessit(re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), {"excludes" : ["country", "language"]})['title'] + guess_name = bdinfo['label'].replace('-', ' ') + filename = guessit(re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), {"excludes": ["country", "language"]})['title'] untouched_filename = bdinfo['label'] try: meta['search_year'] = guessit(bdinfo['label'])['year'] except Exception: meta['search_year'] = "" - if meta.get('resolution', None) == None: + if meta.get('resolution', None) is None: meta['resolution'] = self.mi_resolution(bdinfo['video'][0]['res'], guessit(video), width="OTHER", scan="p", height="OTHER", 
actual_height=0) - # if meta.get('sd', None) == None: meta['sd'] = self.is_sd(meta['resolution']) mi = None mi_dump = None - #IF DVD + elif meta['is_disc'] == "DVD": video, meta['scene'], meta['imdb'] = self.is_scene(meta['path'], meta.get('imdb', None)) meta['filelist'] = [] - guess_name = meta['discs'][0]['path'].replace('-',' ') - # filename = guessit(re.sub("[^0-9a-zA-Z]+", " ", guess_name))['title'] - filename = guessit(guess_name, {"excludes" : ["country", "language"]})['title'] + search_term = os.path.basename(meta['path']) + search_file_folder = 'folder' + guess_name = meta['discs'][0]['path'].replace('-', ' ') + filename = guessit(guess_name, {"excludes": ["country", "language"]})['title'] untouched_filename = os.path.basename(os.path.dirname(meta['discs'][0]['path'])) try: meta['search_year'] = guessit(meta['discs'][0]['path'])['year'] except Exception: meta['search_year'] = "" - if meta.get('edit', False) == False: + if not meta.get('edit', False): mi = self.exportInfo(f"{meta['discs'][0]['path']}/VTS_{meta['discs'][0]['main_set'][0][:2]}_1.VOB", False, meta['uuid'], meta['base_dir'], export_text=False) meta['mediainfo'] = mi else: mi = meta['mediainfo'] - - #NTSC/PAL + meta['dvd_size'] = await self.get_dvd_size(meta['discs']) meta['resolution'] = self.get_resolution(guessit(video), meta['uuid'], base_dir) meta['sd'] = self.is_sd(meta['resolution']) + elif meta['is_disc'] == "HDDVD": video, meta['scene'], meta['imdb'] = self.is_scene(meta['path'], meta.get('imdb', None)) meta['filelist'] = [] - guess_name = meta['discs'][0]['path'].replace('-','') - filename = guessit(guess_name, {"excludes" : ["country", "language"]})['title'] + search_term = os.path.basename(meta['path']) + search_file_folder = 'folder' + guess_name = meta['discs'][0]['path'].replace('-', '') + filename = guessit(guess_name, {"excludes": ["country", "language"]})['title'] untouched_filename = os.path.basename(meta['discs'][0]['path']) videopath = meta['discs'][0]['largest_evo'] try: meta['search_year'] = guessit(meta['discs'][0]['path'])['year'] except Exception: meta['search_year'] = "" - if meta.get('edit', False) == False: + if not meta.get('edit', False): mi = self.exportInfo(meta['discs'][0]['largest_evo'], False, meta['uuid'], meta['base_dir'], export_text=False) meta['mediainfo'] = mi else: mi = meta['mediainfo'] meta['resolution'] = self.get_resolution(guessit(video), meta['uuid'], base_dir) meta['sd'] = self.is_sd(meta['resolution']) - #If NOT BD/DVD/HDDVD + else: - videopath, meta['filelist'] = self.get_video(videoloc, meta.get('mode', 'discord')) + videopath, meta['filelist'] = self.get_video(videoloc, meta.get('mode', 'discord')) + search_term = os.path.basename(meta['filelist'][0]) if meta['filelist'] else None + search_file_folder = 'file' video, meta['scene'], meta['imdb'] = self.is_scene(videopath, meta.get('imdb', None)) - guess_name = ntpath.basename(video).replace('-',' ') - filename = guessit(re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), {"excludes" : ["country", "language"]}).get("title", guessit(re.sub("[^0-9a-zA-Z]+", " ", guess_name), {"excludes" : ["country", "language"]})["title"]) + guess_name = ntpath.basename(video).replace('-', ' ') + filename = guessit(re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), {"excludes": ["country", "language"]}).get("title", guessit(re.sub("[^0-9a-zA-Z]+", " ", guess_name), {"excludes": ["country", "language"]})["title"]) untouched_filename = os.path.basename(video) try: meta['search_year'] = guessit(video)['year'] except Exception: meta['search_year'] 
= "" - - if meta.get('edit', False) == False: + + if not meta.get('edit', False): mi = self.exportInfo(videopath, meta['isdir'], meta['uuid'], base_dir, export_text=True) meta['mediainfo'] = mi else: mi = meta['mediainfo'] - if meta.get('resolution', None) == None: + if meta.get('resolution', None) is None: meta['resolution'] = self.get_resolution(guessit(video), meta['uuid'], base_dir) - # if meta.get('sd', None) == None: meta['sd'] = self.is_sd(meta['resolution']) - - - if " AKA " in filename.replace('.',' '): + if " AKA " in filename.replace('.', ' '): filename = filename.split('AKA')[0] meta['filename'] = filename meta['bdinfo'] = bdinfo - - + # Debugging information after population + # console.print(f"Debug: meta['filelist'] after population: {meta.get('filelist', 'Not Set')}") + # Reuse information from trackers with fallback + if search_term: # Ensure there's a valid search term + found_match = False - # Reuse information from other trackers - if str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true": - ptp = PTP(config=self.config) - if meta.get('ptp', None) != None: - meta['ptp_manual'] = meta['ptp'] - meta['imdb'], meta['ext_torrenthash'] = await ptp.get_imdb_from_torrent_id(meta['ptp']) - else: - if meta['is_disc'] in [None, ""]: - ptp_search_term = os.path.basename(meta['filelist'][0]) - search_file_folder = 'file' - else: - search_file_folder = 'folder' - ptp_search_term = os.path.basename(meta['path']) - ptp_imdb, ptp_id, meta['ext_torrenthash'] = await ptp.get_ptp_id_imdb(ptp_search_term, search_file_folder) - if ptp_imdb != None: - meta['imdb'] = ptp_imdb - if ptp_id != None: - meta['ptp'] = ptp_id - - if str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": - hdb = HDB(config=self.config) - if meta.get('ptp', None) == None or meta.get('hdb', None) != None: - hdb_imdb = hdb_tvdb = hdb_id = None - hdb_id = meta.get('hdb') - if hdb_id != None: - meta['hdb_manual'] = hdb_id - hdb_imdb, hdb_tvdb, meta['hdb_name'], meta['ext_torrenthash'] = await hdb.get_info_from_torrent_id(hdb_id) - else: - if meta['is_disc'] in [None, ""]: - hdb_imdb, hdb_tvdb, meta['hdb_name'], meta['ext_torrenthash'], hdb_id = await hdb.search_filename(meta['filelist']) - else: - # Somehow search for disc - pass - if hdb_imdb != None: - meta['imdb'] = str(hdb_imdb) - if hdb_tvdb != None: - meta['tvdb_id'] = str(hdb_tvdb) - if hdb_id != None: - meta['hdb'] = hdb_id - - if str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": - blu = BLU(config=self.config) - if meta.get('blu', None) != None: - meta['blu_manual'] = meta['blu'] - blu_tmdb, blu_imdb, blu_tvdb, blu_mal, blu_desc, blu_category, meta['ext_torrenthash'], blu_imagelist = await COMMON(self.config).unit3d_torrent_info("BLU", blu.torrent_url, meta['blu']) - if blu_tmdb not in [None, '0']: - meta['tmdb_manual'] = blu_tmdb - if blu_imdb not in [None, '0']: - meta['imdb'] = str(blu_imdb) - if blu_tvdb not in [None, '0']: - meta['tvdb_id'] = blu_tvdb - if blu_mal not in [None, '0']: - meta['mal'] = blu_mal - if blu_desc not in [None, '0', '']: - meta['blu_desc'] = blu_desc - if blu_category.upper() in ['MOVIE', 'TV SHOW', 'FANRES']: - if blu_category.upper() == 'TV SHOW': - meta['category'] = 'TV' - else: - meta['category'] = blu_category.upper() - if meta.get('image_list', []) == []: - meta['image_list'] = blu_imagelist - else: - # Seach automatically - pass - + if str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true": + ptp = PTP(config=self.config) + # 
console.print(f"[cyan]Attempting to search PTP with search_term: {search_term}[/cyan]") + meta, found_match = await self.update_metadata_from_tracker('PTP', ptp, meta, search_term, search_file_folder) + if not found_match and str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": + # console.print(f"[cyan]Attempting to search HDB with search_term: {search_term}[/cyan]") + hdb = HDB(config=self.config) + meta, found_match = await self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder) + if not found_match and str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": + # console.print(f"[cyan]Attempting to search BLU with search_term: {search_term}[/cyan]") + blu = BLU(config=self.config) + meta, found_match = await self.update_metadata_from_tracker('BLU', blu, meta, search_term, search_file_folder) + if not found_match: + console.print("[yellow]No matches found on any trackers.[/yellow]") + else: + console.print("[yellow]Warning: No valid search term available, skipping tracker updates.[/yellow]") # Take Screenshots if meta['is_disc'] == "BDMV": - if meta.get('edit', False) == False: - if meta.get('vapoursynth', False) == True: + if meta.get('edit', False) is False: + if meta.get('vapoursynth', False) is True: use_vs = True else: use_vs = False try: ds = multiprocessing.Process(target=self.disc_screenshots, args=(filename, bdinfo, meta['uuid'], base_dir, use_vs, meta.get('image_list', []), meta.get('ffdebug', False), None)) ds.start() - while ds.is_alive() == True: + while ds.is_alive() is True: await asyncio.sleep(1) except KeyboardInterrupt: - ds.terminate() + ds.terminate() elif meta['is_disc'] == "DVD": - if meta.get('edit', False) == False: + if meta.get('edit', False) is False: try: ds = multiprocessing.Process(target=self.dvd_screenshots, args=(meta, 0, None)) ds.start() - while ds.is_alive() == True: + while ds.is_alive() is True: await asyncio.sleep(1) except KeyboardInterrupt: ds.terminate() else: - if meta.get('edit', False) == False: + if meta.get('edit', False) is False: try: s = multiprocessing.Process(target=self.screenshots, args=(videopath, filename, meta['uuid'], base_dir, meta)) s.start() - while s.is_alive() == True: + while s.is_alive() is True: await asyncio.sleep(3) except KeyboardInterrupt: s.terminate() - - - meta['tmdb'] = meta.get('tmdb_manual', None) - if meta.get('type', None) == None: + if meta.get('type', None) is None: meta['type'] = self.get_type(video, meta['scene'], meta['is_disc']) - if meta.get('category', None) == None: + if meta.get('category', None) is None: meta['category'] = self.get_cat(video) else: meta['category'] = meta['category'].upper() - if meta.get('tmdb', None) == None and meta.get('imdb', None) == None: - meta['category'], meta['tmdb'], meta['imdb'] = self.get_tmdb_imdb_from_mediainfo(mi, meta['category'], meta['is_disc'], meta['tmdb'], meta['imdb']) - if meta.get('tmdb', None) == None and meta.get('imdb', None) == None: + if meta.get('tmdb', None) is None and meta.get('imdb', None) is None: + meta['category'], meta['tmdb'], meta['imdb'] = self.get_tmdb_imdb_from_mediainfo(mi, meta['category'], meta['is_disc'], meta['tmdb'], meta['imdb']) + if meta.get('tmdb', None) is None and meta.get('imdb', None) is None: meta = await self.get_tmdb_id(filename, meta['search_year'], meta, meta['category'], untouched_filename) - elif meta.get('imdb', None) != None and meta.get('tmdb_manual', None) == None: + elif meta.get('imdb', None) is not None and meta.get('tmdb_manual', None) is 
None: meta['imdb_id'] = str(meta['imdb']).replace('tt', '') meta = await self.get_tmdb_from_imdb(meta, filename) else: meta['tmdb_manual'] = meta.get('tmdb', None) - # If no tmdb, use imdb for meta if int(meta['tmdb']) == 0: meta = await self.imdb_other_meta(meta) else: meta = await self.tmdb_other_meta(meta) # Search tvmaze - meta['tvmaze_id'], meta['imdb_id'], meta['tvdb_id'] = await self.search_tvmaze(filename, meta['search_year'], meta.get('imdb_id','0'), meta.get('tvdb_id', 0)) + meta['tvmaze_id'], meta['imdb_id'], meta['tvdb_id'] = await self.search_tvmaze(filename, meta['search_year'], meta.get('imdb_id', '0'), meta.get('tvdb_id', 0)) # If no imdb, search for it - if meta.get('imdb_id', None) == None: + if meta.get('imdb_id', None) is None: meta['imdb_id'] = await self.search_imdb(filename, meta['search_year']) - if meta.get('imdb_info', None) == None and int(meta['imdb_id']) != 0: + if meta.get('imdb_info', None) is None and int(meta['imdb_id']) != 0: meta['imdb_info'] = await self.get_imdb_info(meta['imdb_id'], meta) - if meta.get('tag', None) == None: + if meta.get('tag', None) is None: meta['tag'] = self.get_tag(video, meta) else: if not meta['tag'].startswith('-') and meta['tag'] != "": @@ -352,33 +507,25 @@ async def gather_prep(self, meta, mode): meta['uhd'] = self.get_uhd(meta['type'], guessit(meta['path']), meta['resolution'], meta['path']) meta['hdr'] = self.get_hdr(mi, bdinfo) meta['distributor'] = self.get_distributor(meta['distributor']) - if meta.get('is_disc', None) == "BDMV": #Blu-ray Specific + if meta.get('is_disc', None) == "BDMV": # Blu-ray Specific meta['region'] = self.get_region(bdinfo, meta.get('region', None)) meta['video_codec'] = self.get_video_codec(bdinfo) else: meta['video_encode'], meta['video_codec'], meta['has_encode_settings'], meta['bit_depth'] = self.get_video_encode(mi, meta['type'], bdinfo) - + meta['edition'], meta['repack'] = self.get_edition(meta['path'], bdinfo, meta['filelist'], meta.get('manual_edition')) if "REPACK" in meta.get('edition', ""): meta['repack'] = re.search(r"REPACK[\d]?", meta['edition'])[0] meta['edition'] = re.sub(r"REPACK[\d]?", "", meta['edition']).strip().replace(' ', ' ') - - - - #WORK ON THIS + + # WORK ON THIS meta.get('stream', False) meta['stream'] = self.stream_optimized(meta['stream']) meta.get('anon', False) meta['anon'] = self.is_anon(meta['anon']) - - - meta = await self.gen_desc(meta) return meta - - - """ Determine if disc and if so, get bdinfo """ @@ -391,40 +538,40 @@ async def get_disc(self, meta): parse = DiscParse() for path, directories, files in os. 
walk(meta['path']): for each in directories: - if each.upper() == "BDMV": #BDMVs + if each.upper() == "BDMV": # BDMVs is_disc = "BDMV" disc = { - 'path' : f"{path}/{each}", - 'name' : os.path.basename(path), - 'type' : 'BDMV', - 'summary' : "", - 'bdinfo' : "" + 'path': f"{path}/{each}", + 'name': os.path.basename(path), + 'type': 'BDMV', + 'summary': "", + 'bdinfo': "" } discs.append(disc) - elif each == "VIDEO_TS": #DVDs + elif each == "VIDEO_TS": # DVDs is_disc = "DVD" disc = { - 'path' : f"{path}/{each}", - 'name' : os.path.basename(path), - 'type' : 'DVD', - 'vob_mi' : '', - 'ifo_mi' : '', - 'main_set' : [], - 'size' : "" + 'path': f"{path}/{each}", + 'name': os.path.basename(path), + 'type': 'DVD', + 'vob_mi': '', + 'ifo_mi': '', + 'main_set': [], + 'size': "" } discs.append(disc) elif each == "HVDVD_TS": is_disc = "HDDVD" disc = { - 'path' : f"{path}/{each}", - 'name' : os.path.basename(path), - 'type' : 'HDDVD', - 'evo_mi' : '', - 'largest_evo' : "" + 'path': f"{path}/{each}", + 'name': os.path.basename(path), + 'type': 'HDDVD', + 'evo_mi': '', + 'largest_evo': "" } discs.append(disc) if is_disc == "BDMV": - if meta.get('edit', False) == False: + if meta.get('edit', False) is False: discs, bdinfo = await parse.get_bdinfo(discs, meta['uuid'], meta['base_dir'], meta.get('discs', [])) else: discs, bdinfo = await parse.get_bdinfo(meta['discs'], meta['uuid'], meta['base_dir'], meta['discs']) @@ -441,9 +588,6 @@ async def get_disc(self, meta): discs = sorted(discs, key=lambda d: d['name']) return is_disc, videoloc, bdinfo, discs - - - """ Get video files @@ -457,7 +601,7 @@ def get_video(self, videoloc, mode): if not file.lower().endswith('sample.mkv') or "!sample" in file.lower(): filelist.append(os.path.abspath(f"{videoloc}{os.sep}{file}")) try: - video = sorted(filelist)[0] + video = sorted(filelist)[0] except IndexError: console.print("[bold red]No Video files found") if mode == 'cli': @@ -468,8 +612,6 @@ def get_video(self, videoloc, mode): filelist = sorted(filelist) return video, filelist - - """ Get and parse mediainfo """ @@ -482,7 +624,7 @@ def filter_mediainfo(data): "track": [] } } - + for track in data["media"]["track"]: if track["@type"] == "General": filtered["media"]["track"].append({ @@ -628,7 +770,7 @@ def filter_mediainfo(data): "@type": track["@type"], "extra": track.get("extra"), }) - + return filtered if not os.path.exists(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt") and export_text: @@ -651,9 +793,8 @@ def filter_mediainfo(data): with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", 'r', encoding='utf-8') as f: mi = json.load(f) - - return mi + return mi """ Get Resolution @@ -665,13 +806,13 @@ def get_resolution(self, guess, folder_id, base_dir): try: width = mi['media']['track'][1]['Width'] height = mi['media']['track'][1]['Height'] - except: + except Exception: width = 0 height = 0 framerate = mi['media']['track'][1].get('FrameRate', '') try: scan = mi['media']['track'][1]['ScanType'] - except: + except Exception: scan = "Progressive" if scan == "Progressive": scan = "p" @@ -700,56 +841,54 @@ def closest(self, lst, K): res = each break return res - + # return lst[min(range(len(lst)), key = lambda i: abs(lst[i]-K))] def mi_resolution(self, res, guess, width, scan, height, actual_height): res_map = { - "3840x2160p" : "2160p", "2160p" : "2160p", - "2560x1440p" : "1440p", "1440p" : "1440p", - "1920x1080p" : "1080p", "1080p" : "1080p", - "1920x1080i" : "1080i", "1080i" : "1080i", - "1280x720p" : "720p", "720p" : "720p", - "1280x540p" : "720p", "1280x576p" : "720p", 
- "1024x576p" : "576p", "576p" : "576p", - "1024x576i" : "576i", "576i" : "576i", - "854x480p" : "480p", "480p" : "480p", - "854x480i" : "480i", "480i" : "480i", - "720x576p" : "576p", "576p" : "576p", - "720x576i" : "576i", "576i" : "576i", - "720x480p" : "480p", "480p" : "480p", - "720x480i" : "480i", "480i" : "480i", - "15360x8640p" : "8640p", "8640p" : "8640p", - "7680x4320p" : "4320p", "4320p" : "4320p", - "OTHER" : "OTHER"} + "3840x2160p": "2160p", "2160p": "2160p", + "2560x1440p": "1440p", "1440p": "1440p", + "1920x1080p": "1080p", "1080p": "1080p", + "1920x1080i": "1080i", "1080i": "1080i", + "1280x720p": "720p", "720p": "720p", + "1280x540p": "720p", "1280x576p": "720p", + "1024x576p": "576p", "576p": "576p", + "1024x576i": "576i", "576i": "576i", + "854x480p": "480p", "480p": "480p", + "854x480i": "480i", "480i": "480i", + "720x576p": "576p", "576p": "576p", + "720x576i": "576i", "576i": "576i", + "720x480p": "480p", "480p": "480p", + "720x480i": "480i", "480i": "480i", + "15360x8640p": "8640p", "8640p": "8640p", + "7680x4320p": "4320p", "4320p": "4320p", + "OTHER": "OTHER"} resolution = res_map.get(res, None) if actual_height == 540: resolution = "OTHER" - if resolution == None: - try: + if resolution is None: + try: resolution = guess['screen_size'] - except: + except Exception: width_map = { - '3840p' : '2160p', - '2560p' : '1550p', - '1920p' : '1080p', - '1920i' : '1080i', - '1280p' : '720p', - '1024p' : '576p', - '1024i' : '576i', - '854p' : '480p', - '854i' : '480i', - '720p' : '576p', - '720i' : '576i', - '15360p' : '4320p', - 'OTHERp' : 'OTHER' + '3840p': '2160p', + '2560p': '1550p', + '1920p': '1080p', + '1920i': '1080i', + '1280p': '720p', + '1024p': '576p', + '1024i': '576i', + '854p': '480p', + '854i': '480i', + '720p': '576p', + '720i': '576i', + '15360p': '4320p', + 'OTHERp': 'OTHER' } resolution = width_map.get(f"{width}{scan}", "OTHER") resolution = self.mi_resolution(resolution, guess, width, scan, height, actual_height) - + return resolution - - def is_sd(self, resolution): if resolution in ("480i", "480p", "576i", "576p", "540p"): @@ -775,7 +914,7 @@ def is_scene(self, video, imdb=None): scene = True r = requests.get(f"https://api.srrdb.com/v1/imdb/{base}") r = r.json() - if r['releases'] != [] and imdb == None: + if r['releases'] != [] and imdb is None: imdb = r['releases'][0].get('imdb', imdb) if r['releases'][0].get('imdb') is not None else imdb console.print(f"[green]SRRDB: Matched to {response['results'][0]['release']}") except Exception: @@ -784,24 +923,17 @@ def is_scene(self, video, imdb=None): console.print("[yellow]SRRDB: No match found, or request has timed out") return video, scene, imdb - - - - - - - """ Generate Screenshots """ def disc_screenshots(self, filename, bdinfo, folder_id, base_dir, use_vs, image_list, ffdebug, num_screens=None): - if num_screens == None: + if num_screens is None: num_screens = self.screens if num_screens == 0 or len(image_list) >= num_screens: return - #Get longest m2ts - length = 0 + # Get longest m2ts + length = 0 for each in bdinfo['files']: int_length = sum(int(float(x)) * 60 ** i for i, x in enumerate(reversed(each['length'].split(':')))) if int_length > length: @@ -810,85 +942,84 @@ def disc_screenshots(self, filename, bdinfo, folder_id, base_dir, use_vs, image_ for name in files: if name.lower() == each['file'].lower(): file = f"{root}/{name}" - - + if "VC-1" in bdinfo['video'][0]['codec'] or bdinfo['video'][0]['hdr_dv'] != "": keyframe = 'nokey' else: keyframe = 'none' - os.chdir(f"{base_dir}/tmp/{folder_id}") - 
@@ -810,85 +942,84 @@ def disc_screenshots(self, filename, bdinfo, folder_id, base_dir, use_vs, image_
            for name in files:
                if name.lower() == each['file'].lower():
                    file = f"{root}/{name}"
-
-
+
        if "VC-1" in bdinfo['video'][0]['codec'] or bdinfo['video'][0]['hdr_dv'] != "":
            keyframe = 'nokey'
        else:
            keyframe = 'none'
-        os.chdir(f"{base_dir}/tmp/{folder_id}")
-
-        i = len(glob.glob(f"{filename}-*.png"))
+        os.chdir(f"{base_dir}/tmp/{folder_id}")
+        i = len(glob.glob(f"{filename}-*.png"))
        if i >= num_screens:
            i = num_screens
            console.print('[bold green]Reusing screenshots')
        else:
            console.print("[bold yellow]Saving Screens...")
-            if use_vs == True:
+            if use_vs is True:
                from src.vs import vs_screengn
                vs_screengn(source=file, encode=None, filter_b_frames=False, num=num_screens, dir=f"{base_dir}/tmp/{folder_id}/")
            else:
-                if bool(ffdebug) == True:
+                if bool(ffdebug) is True:
                    loglevel = 'verbose'
                    debug = False
                else:
                    loglevel = 'quiet'
                    debug = True
-                with Progress(
+                with Progress(
                    TextColumn("[bold green]Saving Screens..."),
                    BarColumn(),
                    "[cyan]{task.completed}/{task.total}",
                    TimeRemainingColumn()
                ) as progress:
-                    screen_task = progress.add_task("[green]Saving Screens...", total=num_screens + 1)
-                    ss_times = []
-                    for i in range(num_screens + 1):
-                        image = f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png"
-                        try:
-                            ss_times = self.valid_ss_time(ss_times, num_screens+1, length)
-                            (
-                                ffmpeg
-                                .input(file, ss=ss_times[-1], skip_frame=keyframe)
-                                .output(image, vframes=1, pix_fmt="rgb24")
-                                .overwrite_output()
-                                .global_args('-loglevel', loglevel)
-                                .run(quiet=debug)
-                            )
-                        except Exception:
-                            console.print(traceback.format_exc())
-
-                        self.optimize_images(image)
-                        if os.path.getsize(Path(image)) <= 31000000 and self.img_host == "imgbb":
-                            i += 1
-                        elif os.path.getsize(Path(image)) <= 10000000 and self.img_host in ["imgbox", 'pixhost']:
-                            i += 1
-                        elif os.path.getsize(Path(image)) <= 75000:
-                            console.print("[bold yellow]Image is incredibly small, retaking")
-                            time.sleep(1)
-                        elif self.img_host == "ptpimg":
-                            i += 1
-                        elif self.img_host == "lensdump":
-                            i += 1
-                        else:
-                            console.print("[red]Image too large for your image host, retaking")
-                            time.sleep(1)
-                        progress.advance(screen_task)
-                    #remove smallest image
+                    screen_task = progress.add_task("[green]Saving Screens...", total=num_screens + 1)
+                    ss_times = []
+                    for i in range(num_screens + 1):
+                        image = f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png"
+                        try:
+                            ss_times = self.valid_ss_time(ss_times, num_screens + 1, length)
+                            (
+                                ffmpeg
+                                .input(file, ss=ss_times[-1], skip_frame=keyframe)
+                                .output(image, vframes=1, pix_fmt="rgb24")
+                                .overwrite_output()
+                                .global_args('-loglevel', loglevel)
+                                .run(quiet=debug)
+                            )
+                        except Exception:
+                            console.print(traceback.format_exc())
+
+                        self.optimize_images(image)
+                        if os.path.getsize(Path(image)) <= 31000000 and self.img_host == "imgbb":
+                            i += 1
+                        elif os.path.getsize(Path(image)) <= 10000000 and self.img_host in ["imgbox", 'pixhost']:
+                            i += 1
+                        elif os.path.getsize(Path(image)) <= 75000:
+                            console.print("[bold yellow]Image is incredibly small, retaking")
+                            time.sleep(1)
+                        elif self.img_host == "ptpimg":
+                            i += 1
+                        elif self.img_host == "lensdump":
+                            i += 1
+                        else:
+                            console.print("[red]Image too large for your image host, retaking")
+                            time.sleep(1)
+                        progress.advance(screen_task)
+                    # remove smallest image
        smallest = ""
        smallestsize = 99 ** 99
-        for screens in glob.glob1(f"{base_dir}/tmp/{folder_id}/", f"{filename}-*"):
+        for screens in glob.glob1(f"{base_dir}/tmp/{folder_id}/", f"{filename}-*"):
            screensize = os.path.getsize(screens)
            if screensize < smallestsize:
                smallestsize = screensize
                smallest = screens
-        os.remove(smallest)
-
+        os.remove(smallest)
+
    def dvd_screenshots(self, meta, disc_num, num_screens=None):
-        if num_screens == None:
+        if num_screens is None:
            num_screens = self.screens
        if num_screens == 0 or (len(meta.get('image_list', [])) >= num_screens and disc_num
== 0): return - ifo_mi = MediaInfo.parse(f"{meta['discs'][disc_num]['path']}/VTS_{meta['discs'][disc_num]['main_set'][0][:2]}_0.IFO", mediainfo_options={'inform_version' : '1'}) + ifo_mi = MediaInfo.parse(f"{meta['discs'][disc_num]['path']}/VTS_{meta['discs'][disc_num]['main_set'][0][:2]}_0.IFO", mediainfo_options={'inform_version': '1'}) sar = 1 for track in ifo_mi.tracks: if track.track_type == "Video": @@ -907,7 +1038,7 @@ def dvd_screenshots(self, meta, disc_num, num_screens=None): sar = par w_sar = sar h_sar = 1 - + main_set_length = len(meta['discs'][disc_num]['main_set']) if main_set_length >= 3: main_set = meta['discs'][disc_num]['main_set'][1:-1] @@ -917,63 +1048,64 @@ def dvd_screenshots(self, meta, disc_num, num_screens=None): main_set = meta['discs'][disc_num]['main_set'] n = 0 os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") - i = 0 + i = 0 if len(glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][disc_num]['name']}-*.png")) >= num_screens: i = num_screens console.print('[bold green]Reusing screenshots') else: - if bool(meta.get('ffdebug', False)) == True: + if bool(meta.get('ffdebug', False)) is True: loglevel = 'verbose' debug = False looped = 0 retake = False with Progress( - TextColumn("[bold green]Saving Screens..."), - BarColumn(), - "[cyan]{task.completed}/{task.total}", - TimeRemainingColumn() - ) as progress: - screen_task = progress.add_task("[green]Saving Screens...", total=num_screens + 1) - ss_times = [] - for i in range(num_screens + 1): - if n >= len(main_set): - n = 0 - if n >= num_screens: - n -= num_screens - image = f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][disc_num]['name']}-{i}.png" - if not os.path.exists(image) or retake != False: - retake = False - loglevel = 'quiet' - debug = True - if bool(meta.get('debug', False)): - loglevel = 'error' - debug = False - def _is_vob_good(n, loops, num_screens): - voblength = 300 - vob_mi = MediaInfo.parse(f"{meta['discs'][disc_num]['path']}/VTS_{main_set[n]}", output='JSON') - vob_mi = json.loads(vob_mi) + TextColumn("[bold green]Saving Screens..."), + BarColumn(), + "[cyan]{task.completed}/{task.total}", + TimeRemainingColumn() + ) as progress: + screen_task = progress.add_task("[green]Saving Screens...", total=num_screens + 1) + ss_times = [] + for i in range(num_screens + 1): + if n >= len(main_set): + n = 0 + if n >= num_screens: + n -= num_screens + image = f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][disc_num]['name']}-{i}.png" + if not os.path.exists(image) or retake is not False: + retake = False + loglevel = 'quiet' + debug = True + if bool(meta.get('debug', False)): + loglevel = 'error' + debug = False + + def _is_vob_good(n, loops, num_screens): + voblength = 300 + vob_mi = MediaInfo.parse(f"{meta['discs'][disc_num]['path']}/VTS_{main_set[n]}", output='JSON') + vob_mi = json.loads(vob_mi) + try: + voblength = float(vob_mi['media']['track'][1]['Duration']) + return voblength, n + except Exception: try: - voblength = float(vob_mi['media']['track'][1]['Duration']) + voblength = float(vob_mi['media']['track'][2]['Duration']) return voblength, n except Exception: - try: - voblength = float(vob_mi['media']['track'][2]['Duration']) + n += 1 + if n >= len(main_set): + n = 0 + if n >= num_screens: + n -= num_screens + if loops < 6: + loops = loops + 1 + voblength, n = _is_vob_good(n, loops, num_screens) return voblength, n - except Exception: - n += 1 - if n >= len(main_set): - n = 0 - if n >= num_screens: - n -= num_screens - if loops < 6: - loops = loops + 1 - voblength, n = 
_is_vob_good(n, loops, num_screens)
-                                    return voblength, n
-                                else:
-                                    return 300, n
+                                    else:
+                                        return 300, n
                        try:
                            voblength, n = _is_vob_good(n, 0, num_screens)
-                            img_time = random.randint(round(voblength/5) , round(voblength - voblength/5))
+                            img_time = random.randint(round(voblength/5), round(voblength - voblength/5))
                            ss_times = self.valid_ss_time(ss_times, num_screens+1, voblength)
                            ff = ffmpeg.input(f"{meta['discs'][disc_num]['path']}/VTS_{main_set[n]}", ss=ss_times[-1])
                            if w_sar != 1 or h_sar != 1:
@@ -989,7 +1121,7 @@ def _is_vob_good(n, loops, num_screens):
                            console.print(traceback.format_exc())
                        self.optimize_images(image)
                        n += 1
-                        try:
+                        try:
                            if os.path.getsize(Path(image)) <= 31000000 and self.img_host == "imgbb":
                                i += 1
                            elif os.path.getsize(Path(image)) <= 10000000 and self.img_host in ["imgbox", 'pixhost']:
@@ -1015,10 +1147,10 @@ def _is_vob_good(n, loops, num_screens):
                                exit()
                            looped += 1
                        progress.advance(screen_task)
-            #remove smallest image
+            # remove smallest image
            smallest = ""
            smallestsize = 99**99
-            for screens in glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}/", f"{meta['discs'][disc_num]['name']}-*"):
+            for screens in glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}/", f"{meta['discs'][disc_num]['name']}-*"):
                screensize = os.path.getsize(screens)
                if screensize < smallestsize:
                    smallestsize = screensize
                    smallest = screens
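The screenshots() hunk below short-circuits when enough uploaded images already exist: it counts only entries that are dicts whose img_url is an http(s) link, so local paths and malformed entries do not satisfy the threshold. A standalone sketch of that filter (the sample meta dict is made up for illustration):

# Sketch of the existing-image filter used in the hunk below; meta is illustrative.
meta = {'image_list': [
    {'img_url': 'https://ptpimg.me/abc123.png'},   # counts: already-uploaded image
    {'img_url': 'screenshot-0.png'},               # skipped: local path, not a URL
    'not-a-dict',                                  # skipped: wrong shape
]}
existing_images = [
    img for img in meta['image_list']
    if isinstance(img, dict) and img.get('img_url', '').startswith('http')
]
print(len(existing_images))  # -> 1, fewer than 3, so screenshots would still be taken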
@@ -1026,11 +1158,22 @@ def _is_vob_good(n, loops, num_screens):
            os.remove(smallest)

    def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=None):
-        if num_screens == None:
-            num_screens = self.screens - len(meta.get('image_list', []))
-        if num_screens == 0:
-            # or len(meta.get('image_list', [])) >= num_screens:
+        # Ensure the image list is initialized and preserve existing images
+        if 'image_list' not in meta:
+            meta['image_list'] = []
+
+        # Check if there are already at least 3 image links in the image list
+        existing_images = [img for img in meta['image_list'] if isinstance(img, dict) and img.get('img_url', '').startswith('http')]
+        if len(existing_images) >= 3:
+            console.print("[yellow]There are already at least 3 images in the image list. Skipping additional screenshots.")
+            return
+
+        # Determine the number of screenshots to take
+        if num_screens is None:
+            num_screens = self.screens - len(existing_images)
+        if num_screens <= 0:
            return
+
        with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", encoding='utf-8') as f:
            mi = json.load(f)
            video_track = mi['media']['track'][1]
@@ -1048,7 +1191,7 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non
                w_sar = 1
                h_sar = sar
            else:
-                sar = w_sar = par 
+                sar = w_sar = par
                h_sar = 1
        length = round(float(length))
        os.chdir(f"{base_dir}/tmp/{folder_id}")
@@ -1059,10 +1202,10 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non
        else:
            loglevel = 'quiet'
            debug = True
-        if bool(meta.get('ffdebug', False)) == True:
+        if bool(meta.get('ffdebug', False)) is True:
            loglevel = 'verbose'
            debug = False
-        if meta.get('vapoursynth', False) == True:
+        if meta.get('vapoursynth', False) is True:
            from src.vs import vs_screengn
            vs_screengn(source=path, encode=None, filter_b_frames=False, num=num_screens, dir=f"{base_dir}/tmp/{folder_id}/")
        else:
@@ -1076,60 +1219,67 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non
            ss_times = []
            screen_task = progress.add_task("[green]Saving Screens...", total=num_screens + 1)
            for i in range(num_screens + 1):
-                image = os.path.abspath(f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png")
-                if not os.path.exists(image) or retake != False:
+                image_path = os.path.abspath(f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png")
+                if not os.path.exists(image_path) or retake is not False:
                    retake = False
                    try:
-                        ss_times = self.valid_ss_time(ss_times, num_screens+1, length)
+                        ss_times = self.valid_ss_time(ss_times, num_screens + 1, length)
                        ff = ffmpeg.input(path, ss=ss_times[-1])
                        if w_sar != 1 or h_sar != 1:
                            ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar)))
                        (
                            ff
-                            .output(image, vframes=1, pix_fmt="rgb24")
+                            .output(image_path, vframes=1, pix_fmt="rgb24")
                            .overwrite_output()
                            .global_args('-loglevel', loglevel)
                            .run(quiet=debug)
                        )
                    except Exception:
                        console.print(traceback.format_exc())
-
-                    self.optimize_images(image)
-                    if os.path.getsize(Path(image)) <= 75000:
+
+                    self.optimize_images(image_path)
+                    if os.path.getsize(Path(image_path)) <= 75000:
                        console.print("[yellow]Image is incredibly small, retaking")
                        retake = True
                        time.sleep(1)
-                    if os.path.getsize(Path(image)) <= 31000000 and self.img_host == "imgbb" and retake == False:
+                    if os.path.getsize(Path(image_path)) <= 31000000 and self.img_host == "imgbb" and retake is False:
                        i += 1
-                    elif os.path.getsize(Path(image)) <= 10000000 and self.img_host in ["imgbox", 'pixhost'] and retake == False:
+                    elif os.path.getsize(Path(image_path)) <= 10000000 and self.img_host in ["imgbox", 'pixhost'] and retake is False:
                        i += 1
-                    elif self.img_host in ["ptpimg", "lensdump", "ptscreens"] and retake == False:
+                    elif self.img_host in ["ptpimg", "lensdump", "ptscreens"] and retake is False:
                        i += 1
                    elif self.img_host == "freeimage.host":
                        console.print("[bold red]Support for freeimage.host has been removed.
Please remove from your config") exit() - elif retake == True: + elif retake is True: pass else: console.print("[red]Image too large for your image host, retaking") retake = True - time.sleep(1) + time.sleep(1) else: i += 1 progress.advance(screen_task) - #remove smallest image - smallest = "" - smallestsize = 99 ** 99 - for screens in glob.glob1(f"{base_dir}/tmp/{folder_id}/", f"{filename}-*"): - screensize = os.path.getsize(screens) - if screensize < smallestsize: - smallestsize = screensize - smallest = screens - os.remove(smallest) + + # Add new images to the meta['image_list'] as dictionaries + new_images = glob.glob(f"{filename}-*.png") + for image in new_images: + img_dict = { + 'img_url': image, + 'raw_url': image, + 'web_url': image # Assuming local path, but you might need to update this if uploading + } + meta['image_list'].append(img_dict) + + # Remove the smallest image if there are more than needed + if len(meta['image_list']) > self.screens: + smallest = min(meta['image_list'], key=lambda x: os.path.getsize(x['img_url'])) + os.remove(smallest['img_url']) + meta['image_list'].remove(smallest) def valid_ss_time(self, ss_times, num_screens, length): valid_time = False - while valid_time != True: + while valid_time is not True: valid_time = True if ss_times != []: sst = random.randint(round(length/5), round(length/2)) @@ -1137,26 +1287,27 @@ def valid_ss_time(self, ss_times, num_screens, length): tolerance = length / 10 / num_screens if abs(sst - each) <= tolerance: valid_time = False - if valid_time == True: + if valid_time is True: ss_times.append(sst) else: ss_times.append(random.randint(round(length/5), round(length/2))) return ss_times def optimize_images(self, image): - if self.config['DEFAULT'].get('optimize_images', True) == True: + if self.config['DEFAULT'].get('optimize_images', True) is True: if os.path.exists(image): try: pyver = platform.python_version_tuple() if int(pyver[0]) == 3 and int(pyver[1]) >= 7: - import oxipng + import oxipng if os.path.getsize(image) >= 16000000: oxipng.optimize(image, level=6) else: oxipng.optimize(image, level=3) - except: + except Exception: pass return + """ Get type and category """ @@ -1173,7 +1324,7 @@ def get_type(self, video, scene, is_disc): # type = "ENCODE" elif "hdtv" in filename: type = "HDTV" - elif is_disc != None: + elif is_disc is not None: type = "DISC" elif "dvdrip" in filename: console.print("[bold red]DVDRip Detected, exiting") @@ -1186,15 +1337,15 @@ def get_cat(self, video): # if category is None: category = guessit(video.replace('1.0', ''))['type'] if category.lower() == "movie": - category = "MOVIE" #1 + category = "MOVIE" # 1 elif category.lower() in ("tv", "episode"): - category = "TV" #2 + category = "TV" # 2 else: category = "MOVIE" return category async def get_tmdb_from_imdb(self, meta, filename): - if meta.get('tmdb_manual') != None: + if meta.get('tmdb_manual') is not None: meta['tmdb'] = meta['tmdb_manual'] return meta imdb_id = meta['imdb'] @@ -1204,17 +1355,17 @@ async def get_tmdb_from_imdb(self, meta, filename): info = find.info(external_source="imdb_id") if len(info['movie_results']) >= 1: meta['category'] = "MOVIE" - meta['tmdb'] = info['movie_results'][0]['id'] + meta['tmdb'] = info['movie_results'][0]['id'] elif len(info['tv_results']) >= 1: meta['category'] = "TV" - meta['tmdb'] = info['tv_results'][0]['id'] + meta['tmdb'] = info['tv_results'][0]['id'] else: imdb_info = await self.get_imdb_info(imdb_id.replace('tt', ''), meta) title = imdb_info.get("title") - if title == None: + if title is 
None: title = filename year = imdb_info.get('year') - if year == None: + if year is None: year = meta['search_year'] console.print(f"[yellow]TMDb was unable to find anything with that IMDb, searching TMDb for {title}") meta = await self.get_tmdb_id(title, year, meta, meta['category'], imdb_info.get('original title', imdb_info.get('localized title', meta['uuid']))) @@ -1234,11 +1385,11 @@ async def get_tmdb_id(self, filename, search_year, meta, category, untouched_fil search.movie(query=filename, year=search_year) elif category == "TV": search.tv(query=filename, first_air_date_year=search_year) - if meta.get('tmdb_manual') != None: + if meta.get('tmdb_manual') is not None: meta['tmdb'] = meta['tmdb_manual'] else: meta['tmdb'] = search.results[0]['id'] - meta['category'] = category + meta['category'] = category except IndexError: try: if category == "MOVIE": @@ -1257,7 +1408,7 @@ async def get_tmdb_id(self, filename, search_year, meta, category, untouched_fil meta = await self.get_tmdb_id(filename, search_year, meta, category, untouched_filename, attempted) elif attempted == 2: attempted += 1 - meta = await self.get_tmdb_id(anitopy.parse(guessit(untouched_filename, {"excludes" : ["country", "language"]})['title'])['anime_title'], search_year, meta, meta['category'], untouched_filename, attempted) + meta = await self.get_tmdb_id(anitopy.parse(guessit(untouched_filename, {"excludes": ["country", "language"]})['title'])['anime_title'], search_year, meta, meta['category'], untouched_filename, attempted) if meta['tmdb'] in (None, ""): console.print(f"[red]Unable to find TMDb match for {filename}") if meta.get('mode', 'discord') == 'cli': @@ -1268,17 +1419,17 @@ async def get_tmdb_id(self, filename, search_year, meta, category, untouched_fil return meta return meta - + async def tmdb_other_meta(self, meta): - + if meta['tmdb'] == "0": try: - title = guessit(meta['path'], {"excludes" : ["country", "language"]})['title'].lower() + title = guessit(meta['path'], {"excludes": ["country", "language"]})['title'].lower() title = title.split('aka')[0] - meta = await self.get_tmdb_id(guessit(title, {"excludes" : ["country", "language"]})['title'], meta['search_year'], meta) + meta = await self.get_tmdb_id(guessit(title, {"excludes": ["country", "language"]})['title'], meta['search_year'], meta) if meta['tmdb'] == "0": meta = await self.get_tmdb_id(title, "", meta, meta['category']) - except: + except Exception: if meta.get('mode', 'discord') == 'cli': console.print("[bold red]Unable to find tmdb entry. 
Exiting.") exit() @@ -1290,14 +1441,14 @@ async def tmdb_other_meta(self, meta): response = movie.info() meta['title'] = response['title'] if response['release_date']: - meta['year'] = datetime.strptime(response['release_date'],'%Y-%m-%d').year + meta['year'] = datetime.strptime(response['release_date'], '%Y-%m-%d').year else: console.print('[yellow]TMDB does not have a release date, using year from filename instead (if it exists)') meta['year'] = meta['search_year'] external = movie.external_ids() - if meta.get('imdb', None) == None: + if meta.get('imdb', None) is None: imdb_id = external.get('imdb_id', "0") - if imdb_id == "" or imdb_id == None: + if imdb_id == "" or imdb_id is None: meta['imdb_id'] = '0' else: meta['imdb_id'] = str(int(imdb_id.replace('tt', ''))).zfill(7) @@ -1315,9 +1466,9 @@ async def tmdb_other_meta(self, meta): break except Exception: console.print('[yellow]Unable to grab videos from TMDb.') - + meta['aka'], original_language = await self.get_imdb_aka(meta['imdb_id']) - if original_language != None: + if original_language is not None: meta['original_language'] = original_language else: meta['original_language'] = response['original_language'] @@ -1326,7 +1477,7 @@ async def tmdb_other_meta(self, meta): meta['keywords'] = self.get_keywords(movie) meta['genres'] = self.get_genres(response) meta['tmdb_directors'] = self.get_directors(movie) - if meta.get('anime', False) == False: + if meta.get('anime', False) is False: meta['mal_id'], meta['aka'], meta['anime'] = self.get_anime(response, meta) meta['poster'] = response.get('poster_path', "") meta['overview'] = response['overview'] @@ -1337,14 +1488,14 @@ async def tmdb_other_meta(self, meta): response = tv.info() meta['title'] = response['name'] if response['first_air_date']: - meta['year'] = datetime.strptime(response['first_air_date'],'%Y-%m-%d').year + meta['year'] = datetime.strptime(response['first_air_date'], '%Y-%m-%d').year else: console.print('[yellow]TMDB does not have a release date, using year from filename instead (if it exists)') meta['year'] = meta['search_year'] external = tv.external_ids() - if meta.get('imdb', None) == None: + if meta.get('imdb', None) is None: imdb_id = external.get('imdb_id', "0") - if imdb_id == "" or imdb_id == None: + if imdb_id == "" or imdb_id is None: meta['imdb_id'] = '0' else: meta['imdb_id'] = str(int(imdb_id.replace('tt', ''))).zfill(7) @@ -1365,7 +1516,7 @@ async def tmdb_other_meta(self, meta): # meta['aka'] = f" AKA {response['original_name']}" meta['aka'], original_language = await self.get_imdb_aka(meta['imdb_id']) - if original_language != None: + if original_language is not None: meta['original_language'] = original_language else: meta['original_language'] = response['original_language'] @@ -1390,20 +1541,17 @@ async def tmdb_other_meta(self, meta): meta['aka'] = "" if f"({meta['year']})" in meta['aka']: meta['aka'] = meta['aka'].replace(f"({meta['year']})", "").strip() - - - return meta - + return meta def get_keywords(self, tmdb_info): if tmdb_info is not None: tmdb_keywords = tmdb_info.keywords() if tmdb_keywords.get('keywords') is not None: - keywords=[f"{keyword['name'].replace(',',' ')}" for keyword in tmdb_keywords.get('keywords')] + keywords = [f"{keyword['name'].replace(',', ' ')}" for keyword in tmdb_keywords.get('keywords')] elif tmdb_keywords.get('results') is not None: - keywords=[f"{keyword['name'].replace(',',' ')}" for keyword in tmdb_keywords.get('results')] - return(', '.join(keywords)) + keywords = [f"{keyword['name'].replace(',', ' ')}" for 
keyword in tmdb_keywords.get('results')]
+            return (', '.join(keywords))
        else:
            return ''

@@ -1411,8 +1559,8 @@ def get_genres(self, tmdb_info):
        if tmdb_info is not None:
            tmdb_genres = tmdb_info.get('genres', [])
            if tmdb_genres is not []:
-                genres=[f"{genre['name'].replace(',',' ')}" for genre in tmdb_genres]
-                return(', '.join(genres))
+                genres = [f"{genre['name'].replace(',', ' ')}" for genre in tmdb_genres]
+                return (', '.join(genres))
        else:
            return ''

@@ -1439,10 +1587,10 @@ def get_anime(self, response, meta):
        for each in response['genres']:
            if each['id'] == 16:
                animation = True
-        if response['original_language'] == 'ja' and animation == True:
+        if response['original_language'] == 'ja' and animation is True:
            romaji, mal_id, eng_title, season_year, episodes = self.get_romaji(tmdb_name, meta.get('mal', None))
            alt_name = f" AKA {romaji}"
-
+
            anime = True
            # mal = AnimeSearch(romaji)
            # mal_id = mal.results[0].mal_id
@@ -1455,7 +1603,7 @@ def get_anime(self, response, meta):
        return mal_id, alt_name, anime

    def get_romaji(self, tmdb_name, mal):
-        if mal == None:
+        if mal is None:
            mal = 0
        tmdb_name = tmdb_name.replace('-', "").replace("The Movie", "")
        tmdb_name = ' '.join(tmdb_name.split())
@@ -1515,16 +1663,16 @@ def get_romaji(self, tmdb_name, mal):
            response = requests.post(url, json={'query': query, 'variables': variables})
            json = response.json()
            media = json['data']['Page']['media']
-        except:
+        except Exception:
            console.print('[red]Failed to get anime specific info from anilist. Continuing without it...')
            media = []
        if media not in (None, []):
-            result = {'title' : {}}
+            result = {'title': {}}
            difference = 0
            for anime in media:
                search_name = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", tmdb_name.lower().replace(' ', ''))
                for title in anime['title'].values():
-                    if title != None:
+                    if title is not None:
                        title = re.sub(u'[\u3000-\u303f\u3040-\u309f\u30a0-\u30ff\uff00-\uff9f\u4e00-\u9faf\u3400-\u4dbf]+ (?=[A-Za-z ]+–)', "", title.lower().replace(' ', ''), re.U)
                        diff = SequenceMatcher(None, title, search_name).ratio()
                        if diff >= difference:
@@ -1537,7 +1685,7 @@ def get_romaji(self, tmdb_name, mal):
            season_year = result.get('season_year', "")
            episodes = result.get('episodes', 0)
        else:
-            romaji = eng_title = season_year = "" 
+            romaji = eng_title = season_year = ""
            episodes = mal_id = 0
        if mal_id in [None, 0]:
            mal_id = mal
@@ -1545,13 +1693,6 @@ def get_romaji(self, tmdb_name, mal):
            episodes = 0
        return romaji, mal_id, eng_title, season_year, episodes

-
-
-
-
-
-
-
    """
    Mediainfo/Bdinfo > meta
    """
@@ -1582,7 +1723,7 @@ def get_audio_v2(self, mi, meta, bdinfo):
        track = tracks[track_num] if len(tracks) > track_num else {}
        format = track.get('Format', '')
        commercial = track.get('Format_Commercial', '')
-
+
        if track.get('Language', '') == "zxx":
            meta['silent'] = True
@@ -1591,16 +1732,16 @@ def get_audio_v2(self, mi, meta, bdinfo):
        format_settings = track.get('Format_Settings', '')
        if format_settings in ['Explicit']:
            format_settings = ""
-        #Channels
+        # Channels
        channels = mi['media']['track'][track_num].get('Channels_Original', mi['media']['track'][track_num]['Channels'])
        if not str(channels).isnumeric():
            channels = mi['media']['track'][track_num]['Channels']
        try:
            channel_layout = mi['media']['track'][track_num]['ChannelLayout']
-        except:
+        except Exception:
            try:
                channel_layout = mi['media']['track'][track_num]['ChannelLayout_Original']
-            except:
+            except Exception:
                channel_layout = ""

        # Ensure channel_layout is not None or an empty string before iterating
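The hunk that follows derives the familiar "X.Y" channel string from the MediaInfo channel count and layout: one channel is carved out as the ".1" when the layout includes a subwoofer entry, otherwise the count becomes a plain ".0" bed. A minimal sketch of one plausible reading of that logic, assuming the branch is keyed on an "LFE" entry in the layout string (the helper name and sample values are illustrative, not part of the patch):

# Sketch: derive an "X.Y" channel string from MediaInfo-style fields.
# Assumes the branch below is keyed on an LFE channel in the layout (illustrative).
def channel_string(channels, channel_layout):
    # An LFE channel in the layout becomes the ".1"; otherwise it's a plain ".0" bed.
    if "LFE" in channel_layout:
        return f"{int(channels) - 1}.1"
    return f"{channels}.0"

print(channel_string(6, "L R C LFE Ls Rs"))  # -> 5.1
print(channel_string(2, "L R"))              # -> 2.0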
@@ -1613,7 +1754,7 @@ def get_audio_v2(self, mi, meta, bdinfo):
                chan = f"{int(channels) - 1}.1"
            else:
                chan = f"{channels}.0"
-
+
        if meta.get('original_language', '') != 'en':
            eng, orig = False, False
            try:
@@ -1657,10 +1798,10 @@ def get_audio_v2(self, mi, meta, bdinfo):
                    if "commentary" in t.get('Title', '').lower():
                        has_commentary = True

-
-        #Convert commercial name to naming conventions
+
+        # Convert commercial name to naming conventions
        audio = {
-            #Format
+            # Format
            "DTS": "DTS",
            "AAC": "AAC",
            "AAC LC": "AAC",

            "Vorbis": "VORBIS",
            "PCM": "LPCM",

-            #BDINFO AUDIOS
-            "LPCM Audio" : "LPCM",
-            "Dolby Digital Audio" : "DD",
-            "Dolby Digital Plus Audio" : "DD+",
            # "Dolby TrueHD" : "TrueHD",
-            "Dolby TrueHD Audio" : "TrueHD",
-            "DTS Audio" : "DTS",
-            "DTS-HD Master Audio" : "DTS-HD MA",
-            "DTS-HD High-Res Audio" : "DTS-HD HRA",
-            "DTS:X Master Audio" : "DTS:X"
+            # BDINFO AUDIOS
+            "LPCM Audio": "LPCM",
+            "Dolby Digital Audio": "DD",
+            "Dolby Digital Plus Audio": "DD+",
+            "Dolby TrueHD Audio": "TrueHD",
+            "DTS Audio": "DTS",
+            "DTS-HD Master Audio": "DTS-HD MA",
+            "DTS-HD High-Res Audio": "DTS-HD HRA",
+            "DTS:X Master Audio": "DTS:X"
        }
        audio_extra = {
            "XLL": "-HD MA",

            "Atmos Audio": " Atmos",
        }
        format_settings_extra = {
-            "Dolby Surround EX" : "EX"
+            "Dolby Surround EX": "EX"
        }
        commercial_names = {
-            "Dolby Digital" : "DD",
-            "Dolby Digital Plus" : "DD+",
-            "Dolby TrueHD" : "TrueHD",
-            "DTS-ES" : "DTS-ES",
-            "DTS-HD High" : "DTS-HD HRA",
-            "Free Lossless Audio Codec" : "FLAC",
-            "DTS-HD Master Audio" : "DTS-HD MA"
+            "Dolby Digital": "DD",
+            "Dolby Digital Plus": "DD+",
+            "Dolby TrueHD": "TrueHD",
+            "DTS-ES": "DTS-ES",
+            "DTS-HD High": "DTS-HD HRA",
+            "Free Lossless Audio Codec": "FLAC",
+            "DTS-HD Master Audio": "DTS-HD MA"
        }
-
        search_format = True

        # Ensure commercial and additional are not None before iterating
        if commercial:
@@ -1746,9 +1886,8 @@ def get_audio_v2(self, mi, meta, bdinfo):
        audio = ' '.join(audio.split())
        return audio, chan, has_commentary
-
    def is_3d(self, mi, bdinfo):
-        if bdinfo != None:
+        if bdinfo is not None:
            if bdinfo['video'][0]['3d'] != "":
                return "3D"
            else:
@@ -1760,7 +1899,7 @@ def get_tag(self, video, meta):
        try:
            tag = guessit(video)['release_group']
            tag = f"-{tag}"
-        except:
+        except Exception:
            tag = ""
        if tag == "-":
            tag = ""
@@ -1768,15 +1907,14 @@ def get_tag(self, video, meta):
            tag = ""
        return tag
-
    def get_source(self, type, video, path, is_disc, meta):
        try:
            try:
                source = guessit(video)['source']
-            except:
+            except Exception:
                try:
                    source = guessit(path)['source']
-                except:
+                except Exception:
                    source = "BluRay"
            if meta.get('manual_source', None):
                source = meta['manual_source']
@@ -1796,18 +1934,18 @@ def get_source(self, type, video, path, is_disc, meta):
                    system = track.standard
                if system not in ("PAL", "NTSC"):
                    raise WeirdSystem
-            except:
+            except Exception:
                try:
                    other = guessit(video)['other']
                    if "PAL" in other:
                        system = "PAL"
                    elif "NTSC" in other:
                        system = "NTSC"
-                except:
+                except Exception:
                    system = ""
            finally:
-                if system == None:
-                    system = "" 
+                if system is None:
+                    system = ""
            if type == "REMUX":
                system = f"{system} DVD".strip()
            source = system
@@ -1833,7 +1971,7 @@ def get_uhd(self, type, guess, resolution, path):
        try:
            source = guess['Source']
            other = guess['Other']
-        except:
+        except Exception:
            source = ""
            other = ""
        uhd = ""
@@ -1843,7 +1981,7 @@ def get_uhd(self, type, guess, resolution, path):
            uhd = "UHD"
        elif type in ("DISC", "REMUX", "ENCODE", "WEBRIP"):
            uhd = ""
-
+
        if type
in ("DISC", "REMUX", "ENCODE") and resolution == "2160p": uhd = "UHD" @@ -1852,7 +1990,7 @@ def get_uhd(self, type, guess, resolution, path): def get_hdr(self, mi, bdinfo): hdr = "" dv = "" - if bdinfo != None: #Disks + if bdinfo is not None: # Disks hdr_mi = bdinfo['video'][0]['hdr_dv'] if "HDR10+" in hdr_mi: hdr = "HDR10+" @@ -1861,9 +1999,9 @@ def get_hdr(self, mi, bdinfo): try: if bdinfo['video'][1]['hdr_dv'] == "Dolby Vision": dv = "DV" - except: + except Exception: pass - else: + else: video_track = mi['media']['track'][1] try: hdr_mi = video_track['colour_primaries'] @@ -1881,13 +2019,13 @@ def get_hdr(self, mi, bdinfo): hdr = "HLG" if hdr != "HLG" and "BT.2020 (10-bit)" in transfer_characteristics: hdr = "WCG" - except: + except Exception: pass try: if "Dolby Vision" in video_track.get('HDR_Format', '') or "Dolby Vision" in video_track.get('HDR_Format_String', ''): dv = "DV" - except: + except Exception: pass hdr = f"{dv} {hdr}".strip() @@ -1895,68 +2033,68 @@ def get_hdr(self, mi, bdinfo): def get_region(self, bdinfo, region=None): label = bdinfo.get('label', bdinfo.get('title', bdinfo.get('path', ''))).replace('.', ' ') - if region != None: + if region is not None: region = region.upper() - else: + else: regions = { - 'AFG': 'AFG', 'AIA': 'AIA', 'ALA': 'ALA', 'ALG': 'ALG', 'AND': 'AND', 'ANG': 'ANG', 'ARG': 'ARG', - 'ARM': 'ARM', 'ARU': 'ARU', 'ASA': 'ASA', 'ATA': 'ATA', 'ATF': 'ATF', 'ATG': 'ATG', 'AUS': 'AUS', - 'AUT': 'AUT', 'AZE': 'AZE', 'BAH': 'BAH', 'BAN': 'BAN', 'BDI': 'BDI', 'BEL': 'BEL', 'BEN': 'BEN', - 'BER': 'BER', 'BES': 'BES', 'BFA': 'BFA', 'BHR': 'BHR', 'BHU': 'BHU', 'BIH': 'BIH', 'BLM': 'BLM', - 'BLR': 'BLR', 'BLZ': 'BLZ', 'BOL': 'BOL', 'BOT': 'BOT', 'BRA': 'BRA', 'BRB': 'BRB', 'BRU': 'BRU', - 'BVT': 'BVT', 'CAM': 'CAM', 'CAN': 'CAN', 'CAY': 'CAY', 'CCK': 'CCK', 'CEE': 'CEE', 'CGO': 'CGO', - 'CHA': 'CHA', 'CHI': 'CHI', 'CHN': 'CHN', 'CIV': 'CIV', 'CMR': 'CMR', 'COD': 'COD', 'COK': 'COK', - 'COL': 'COL', 'COM': 'COM', 'CPV': 'CPV', 'CRC': 'CRC', 'CRO': 'CRO', 'CTA': 'CTA', 'CUB': 'CUB', - 'CUW': 'CUW', 'CXR': 'CXR', 'CYP': 'CYP', 'DJI': 'DJI', 'DMA': 'DMA', 'DOM': 'DOM', 'ECU': 'ECU', - 'EGY': 'EGY', 'ENG': 'ENG', 'EQG': 'EQG', 'ERI': 'ERI', 'ESH': 'ESH', 'ESP': 'ESP', 'ETH': 'ETH', - 'FIJ': 'FIJ', 'FLK': 'FLK', 'FRA': 'FRA', 'FRO': 'FRO', 'FSM': 'FSM', 'GAB': 'GAB', 'GAM': 'GAM', - 'GBR': 'GBR', 'GEO': 'GEO', 'GER': 'GER', 'GGY': 'GGY', 'GHA': 'GHA', 'GIB': 'GIB', 'GLP': 'GLP', - 'GNB': 'GNB', 'GRE': 'GRE', 'GRL': 'GRL', 'GRN': 'GRN', 'GUA': 'GUA', 'GUF': 'GUF', 'GUI': 'GUI', - 'GUM': 'GUM', 'GUY': 'GUY', 'HAI': 'HAI', 'HKG': 'HKG', 'HMD': 'HMD', 'HON': 'HON', 'HUN': 'HUN', - 'IDN': 'IDN', 'IMN': 'IMN', 'IND': 'IND', 'IOT': 'IOT', 'IRL': 'IRL', 'IRN': 'IRN', 'IRQ': 'IRQ', - 'ISL': 'ISL', 'ISR': 'ISR', 'ITA': 'ITA', 'JAM': 'JAM', 'JEY': 'JEY', 'JOR': 'JOR', 'JPN': 'JPN', - 'KAZ': 'KAZ', 'KEN': 'KEN', 'KGZ': 'KGZ', 'KIR': 'KIR', 'KNA': 'KNA', 'KOR': 'KOR', 'KSA': 'KSA', - 'KUW': 'KUW', 'KVX': 'KVX', 'LAO': 'LAO', 'LBN': 'LBN', 'LBR': 'LBR', 'LBY': 'LBY', 'LCA': 'LCA', - 'LES': 'LES', 'LIE': 'LIE', 'LKA': 'LKA', 'LUX': 'LUX', 'MAC': 'MAC', 'MAD': 'MAD', 'MAF': 'MAF', - 'MAR': 'MAR', 'MAS': 'MAS', 'MDA': 'MDA', 'MDV': 'MDV', 'MEX': 'MEX', 'MHL': 'MHL', 'MKD': 'MKD', - 'MLI': 'MLI', 'MLT': 'MLT', 'MNG': 'MNG', 'MNP': 'MNP', 'MON': 'MON', 'MOZ': 'MOZ', 'MRI': 'MRI', - 'MSR': 'MSR', 'MTN': 'MTN', 'MTQ': 'MTQ', 'MWI': 'MWI', 'MYA': 'MYA', 'MYT': 'MYT', 'NAM': 'NAM', - 'NCA': 'NCA', 'NCL': 'NCL', 'NEP': 'NEP', 'NFK': 'NFK', 'NIG': 'NIG', 'NIR': 'NIR', 'NIU': 'NIU', - 'NLD': 
'NLD', 'NOR': 'NOR', 'NRU': 'NRU', 'NZL': 'NZL', 'OMA': 'OMA', 'PAK': 'PAK', 'PAN': 'PAN', - 'PAR': 'PAR', 'PCN': 'PCN', 'PER': 'PER', 'PHI': 'PHI', 'PLE': 'PLE', 'PLW': 'PLW', 'PNG': 'PNG', - 'POL': 'POL', 'POR': 'POR', 'PRK': 'PRK', 'PUR': 'PUR', 'QAT': 'QAT', 'REU': 'REU', 'ROU': 'ROU', - 'RSA': 'RSA', 'RUS': 'RUS', 'RWA': 'RWA', 'SAM': 'SAM', 'SCO': 'SCO', 'SDN': 'SDN', 'SEN': 'SEN', - 'SEY': 'SEY', 'SGS': 'SGS', 'SHN': 'SHN', 'SIN': 'SIN', 'SJM': 'SJM', 'SLE': 'SLE', 'SLV': 'SLV', - 'SMR': 'SMR', 'SOL': 'SOL', 'SOM': 'SOM', 'SPM': 'SPM', 'SRB': 'SRB', 'SSD': 'SSD', 'STP': 'STP', - 'SUI': 'SUI', 'SUR': 'SUR', 'SWZ': 'SWZ', 'SXM': 'SXM', 'SYR': 'SYR', 'TAH': 'TAH', 'TAN': 'TAN', - 'TCA': 'TCA', 'TGA': 'TGA', 'THA': 'THA', 'TJK': 'TJK', 'TKL': 'TKL', 'TKM': 'TKM', 'TLS': 'TLS', - 'TOG': 'TOG', 'TRI': 'TRI', 'TUN': 'TUN', 'TUR': 'TUR', 'TUV': 'TUV', 'TWN': 'TWN', 'UAE': 'UAE', - 'UGA': 'UGA', 'UKR': 'UKR', 'UMI': 'UMI', 'URU': 'URU', 'USA': 'USA', 'UZB': 'UZB', 'VAN': 'VAN', - 'VAT': 'VAT', 'VEN': 'VEN', 'VGB': 'VGB', 'VIE': 'VIE', 'VIN': 'VIN', 'VIR': 'VIR', 'WAL': 'WAL', - 'WLF': 'WLF', 'YEM': 'YEM', 'ZAM': 'ZAM', 'ZIM': 'ZIM', "EUR" : "EUR" + 'AFG': 'AFG', 'AIA': 'AIA', 'ALA': 'ALA', 'ALG': 'ALG', 'AND': 'AND', 'ANG': 'ANG', 'ARG': 'ARG', + 'ARM': 'ARM', 'ARU': 'ARU', 'ASA': 'ASA', 'ATA': 'ATA', 'ATF': 'ATF', 'ATG': 'ATG', 'AUS': 'AUS', + 'AUT': 'AUT', 'AZE': 'AZE', 'BAH': 'BAH', 'BAN': 'BAN', 'BDI': 'BDI', 'BEL': 'BEL', 'BEN': 'BEN', + 'BER': 'BER', 'BES': 'BES', 'BFA': 'BFA', 'BHR': 'BHR', 'BHU': 'BHU', 'BIH': 'BIH', 'BLM': 'BLM', + 'BLR': 'BLR', 'BLZ': 'BLZ', 'BOL': 'BOL', 'BOT': 'BOT', 'BRA': 'BRA', 'BRB': 'BRB', 'BRU': 'BRU', + 'BVT': 'BVT', 'CAM': 'CAM', 'CAN': 'CAN', 'CAY': 'CAY', 'CCK': 'CCK', 'CEE': 'CEE', 'CGO': 'CGO', + 'CHA': 'CHA', 'CHI': 'CHI', 'CHN': 'CHN', 'CIV': 'CIV', 'CMR': 'CMR', 'COD': 'COD', 'COK': 'COK', + 'COL': 'COL', 'COM': 'COM', 'CPV': 'CPV', 'CRC': 'CRC', 'CRO': 'CRO', 'CTA': 'CTA', 'CUB': 'CUB', + 'CUW': 'CUW', 'CXR': 'CXR', 'CYP': 'CYP', 'DJI': 'DJI', 'DMA': 'DMA', 'DOM': 'DOM', 'ECU': 'ECU', + 'EGY': 'EGY', 'ENG': 'ENG', 'EQG': 'EQG', 'ERI': 'ERI', 'ESH': 'ESH', 'ESP': 'ESP', 'ETH': 'ETH', + 'FIJ': 'FIJ', 'FLK': 'FLK', 'FRA': 'FRA', 'FRO': 'FRO', 'FSM': 'FSM', 'GAB': 'GAB', 'GAM': 'GAM', + 'GBR': 'GBR', 'GEO': 'GEO', 'GER': 'GER', 'GGY': 'GGY', 'GHA': 'GHA', 'GIB': 'GIB', 'GLP': 'GLP', + 'GNB': 'GNB', 'GRE': 'GRE', 'GRL': 'GRL', 'GRN': 'GRN', 'GUA': 'GUA', 'GUF': 'GUF', 'GUI': 'GUI', + 'GUM': 'GUM', 'GUY': 'GUY', 'HAI': 'HAI', 'HKG': 'HKG', 'HMD': 'HMD', 'HON': 'HON', 'HUN': 'HUN', + 'IDN': 'IDN', 'IMN': 'IMN', 'IND': 'IND', 'IOT': 'IOT', 'IRL': 'IRL', 'IRN': 'IRN', 'IRQ': 'IRQ', + 'ISL': 'ISL', 'ISR': 'ISR', 'ITA': 'ITA', 'JAM': 'JAM', 'JEY': 'JEY', 'JOR': 'JOR', 'JPN': 'JPN', + 'KAZ': 'KAZ', 'KEN': 'KEN', 'KGZ': 'KGZ', 'KIR': 'KIR', 'KNA': 'KNA', 'KOR': 'KOR', 'KSA': 'KSA', + 'KUW': 'KUW', 'KVX': 'KVX', 'LAO': 'LAO', 'LBN': 'LBN', 'LBR': 'LBR', 'LBY': 'LBY', 'LCA': 'LCA', + 'LES': 'LES', 'LIE': 'LIE', 'LKA': 'LKA', 'LUX': 'LUX', 'MAC': 'MAC', 'MAD': 'MAD', 'MAF': 'MAF', + 'MAR': 'MAR', 'MAS': 'MAS', 'MDA': 'MDA', 'MDV': 'MDV', 'MEX': 'MEX', 'MHL': 'MHL', 'MKD': 'MKD', + 'MLI': 'MLI', 'MLT': 'MLT', 'MNG': 'MNG', 'MNP': 'MNP', 'MON': 'MON', 'MOZ': 'MOZ', 'MRI': 'MRI', + 'MSR': 'MSR', 'MTN': 'MTN', 'MTQ': 'MTQ', 'MWI': 'MWI', 'MYA': 'MYA', 'MYT': 'MYT', 'NAM': 'NAM', + 'NCA': 'NCA', 'NCL': 'NCL', 'NEP': 'NEP', 'NFK': 'NFK', 'NIG': 'NIG', 'NIR': 'NIR', 'NIU': 'NIU', + 'NLD': 'NLD', 'NOR': 'NOR', 'NRU': 'NRU', 'NZL': 'NZL', 'OMA': 'OMA', 'PAK': 'PAK', 
'PAN': 'PAN', + 'PAR': 'PAR', 'PCN': 'PCN', 'PER': 'PER', 'PHI': 'PHI', 'PLE': 'PLE', 'PLW': 'PLW', 'PNG': 'PNG', + 'POL': 'POL', 'POR': 'POR', 'PRK': 'PRK', 'PUR': 'PUR', 'QAT': 'QAT', 'REU': 'REU', 'ROU': 'ROU', + 'RSA': 'RSA', 'RUS': 'RUS', 'RWA': 'RWA', 'SAM': 'SAM', 'SCO': 'SCO', 'SDN': 'SDN', 'SEN': 'SEN', + 'SEY': 'SEY', 'SGS': 'SGS', 'SHN': 'SHN', 'SIN': 'SIN', 'SJM': 'SJM', 'SLE': 'SLE', 'SLV': 'SLV', + 'SMR': 'SMR', 'SOL': 'SOL', 'SOM': 'SOM', 'SPM': 'SPM', 'SRB': 'SRB', 'SSD': 'SSD', 'STP': 'STP', + 'SUI': 'SUI', 'SUR': 'SUR', 'SWZ': 'SWZ', 'SXM': 'SXM', 'SYR': 'SYR', 'TAH': 'TAH', 'TAN': 'TAN', + 'TCA': 'TCA', 'TGA': 'TGA', 'THA': 'THA', 'TJK': 'TJK', 'TKL': 'TKL', 'TKM': 'TKM', 'TLS': 'TLS', + 'TOG': 'TOG', 'TRI': 'TRI', 'TUN': 'TUN', 'TUR': 'TUR', 'TUV': 'TUV', 'TWN': 'TWN', 'UAE': 'UAE', + 'UGA': 'UGA', 'UKR': 'UKR', 'UMI': 'UMI', 'URU': 'URU', 'USA': 'USA', 'UZB': 'UZB', 'VAN': 'VAN', + 'VAT': 'VAT', 'VEN': 'VEN', 'VGB': 'VGB', 'VIE': 'VIE', 'VIN': 'VIN', 'VIR': 'VIR', 'WAL': 'WAL', + 'WLF': 'WLF', 'YEM': 'YEM', 'ZAM': 'ZAM', 'ZIM': 'ZIM', "EUR": "EUR" } for key, value in regions.items(): if f" {key} " in label: region = value - - if region == None: + + if region is None: region = "" return region def get_distributor(self, distributor_in): distributor_list = [ - '01 DISTRIBUTION', '100 DESTINATIONS TRAVEL FILM', '101 FILMS', '1FILMS', '2 ENTERTAIN VIDEO', '20TH CENTURY FOX', '2L', '3D CONTENT HUB', '3D MEDIA', '3L FILM', '4DIGITAL', '4DVD', '4K ULTRA HD MOVIES', '4K UHD', '8-FILMS', '84 ENTERTAINMENT', '88 FILMS', '@ANIME', 'ANIME', 'A CONTRACORRIENTE', 'A CONTRACORRIENTE FILMS', 'A&E HOME VIDEO', 'A&E', 'A&M RECORDS', 'A+E NETWORKS', 'A+R', 'A-FILM', 'AAA', 'AB VIDÉO', 'AB VIDEO', 'ABC - (AUSTRALIAN BROADCASTING CORPORATION)', 'ABC', 'ABKCO', 'ABSOLUT MEDIEN', 'ABSOLUTE', 'ACCENT FILM ENTERTAINMENT', 'ACCENTUS', 'ACORN MEDIA', 'AD VITAM', 'ADA', 'ADITYA VIDEOS', 'ADSO FILMS', 'AFM RECORDS', 'AGFA', 'AIX RECORDS', - 'ALAMODE FILM', 'ALBA RECORDS', 'ALBANY RECORDS', 'ALBATROS', 'ALCHEMY', 'ALIVE', 'ALL ANIME', 'ALL INTERACTIVE ENTERTAINMENT', 'ALLEGRO', 'ALLIANCE', 'ALPHA MUSIC', 'ALTERDYSTRYBUCJA', 'ALTERED INNOCENCE', 'ALTITUDE FILM DISTRIBUTION', 'ALUCARD RECORDS', 'AMAZING D.C.', 'AMAZING DC', 'AMMO CONTENT', 'AMUSE SOFT ENTERTAINMENT', 'ANCONNECT', 'ANEC', 'ANIMATSU', 'ANIME HOUSE', 'ANIME LTD', 'ANIME WORKS', 'ANIMEIGO', 'ANIPLEX', 'ANOLIS ENTERTAINMENT', 'ANOTHER WORLD ENTERTAINMENT', 'AP INTERNATIONAL', 'APPLE', 'ARA MEDIA', 'ARBELOS', 'ARC ENTERTAINMENT', 'ARP SÉLECTION', 'ARP SELECTION', 'ARROW', 'ART SERVICE', 'ART VISION', 'ARTE ÉDITIONS', 'ARTE EDITIONS', 'ARTE VIDÉO', - 'ARTE VIDEO', 'ARTHAUS MUSIK', 'ARTIFICIAL EYE', 'ARTSPLOITATION FILMS', 'ARTUS FILMS', 'ASCOT ELITE HOME ENTERTAINMENT', 'ASIA VIDEO', 'ASMIK ACE', 'ASTRO RECORDS & FILMWORKS', 'ASYLUM', 'ATLANTIC FILM', 'ATLANTIC RECORDS', 'ATLAS FILM', 'AUDIO VISUAL ENTERTAINMENT', 'AURO-3D CREATIVE LABEL', 'AURUM', 'AV VISIONEN', 'AV-JET', 'AVALON', 'AVENTI', 'AVEX TRAX', 'AXIOM', 'AXIS RECORDS', 'AYNGARAN', 'BAC FILMS', 'BACH FILMS', 'BANDAI VISUAL', 'BARCLAY', 'BBC', 'BRITISH BROADCASTING CORPORATION', 'BBI FILMS', 'BBI', 'BCI HOME ENTERTAINMENT', 'BEGGARS BANQUET', 'BEL AIR CLASSIQUES', 'BELGA FILMS', 'BELVEDERE', 'BENELUX FILM DISTRIBUTORS', 'BENNETT-WATT MEDIA', 'BERLIN CLASSICS', 'BERLINER PHILHARMONIKER RECORDINGS', 'BEST ENTERTAINMENT', 'BEYOND HOME ENTERTAINMENT', 'BFI VIDEO', 'BFI', 'BRITISH FILM INSTITUTE', 'BFS ENTERTAINMENT', 'BFS', 'BHAVANI', 'BIBER RECORDS', 'BIG HOME VIDEO', 'BILDSTÖRUNG', - 
'BILDSTORUNG', 'BILL ZEBUB', 'BIRNENBLATT', 'BIT WEL', 'BLACK BOX', 'BLACK HILL PICTURES', 'BLACK HILL', 'BLACK HOLE RECORDINGS', 'BLACK HOLE', 'BLAQOUT', 'BLAUFIELD MUSIC', 'BLAUFIELD', 'BLOCKBUSTER ENTERTAINMENT', 'BLOCKBUSTER', 'BLU PHASE MEDIA', 'BLU-RAY ONLY', 'BLU-RAY', 'BLURAY ONLY', 'BLURAY', 'BLUE GENTIAN RECORDS', 'BLUE KINO', 'BLUE UNDERGROUND', 'BMG/ARISTA', 'BMG', 'BMGARISTA', 'BMG ARISTA', 'ARISTA', 'ARISTA/BMG', 'ARISTABMG', 'ARISTA BMG', 'BONTON FILM', 'BONTON', 'BOOMERANG PICTURES', 'BOOMERANG', 'BQHL ÉDITIONS', 'BQHL EDITIONS', 'BQHL', 'BREAKING GLASS', 'BRIDGESTONE', 'BRINK', 'BROAD GREEN PICTURES', 'BROAD GREEN', 'BUSCH MEDIA GROUP', 'BUSCH', 'C MAJOR', 'C.B.S.', 'CAICHANG', 'CALIFÓRNIA FILMES', 'CALIFORNIA FILMES', 'CALIFORNIA', 'CAMEO', 'CAMERA OBSCURA', 'CAMERATA', 'CAMP MOTION PICTURES', 'CAMP MOTION', 'CAPELIGHT PICTURES', 'CAPELIGHT', 'CAPITOL', 'CAPITOL RECORDS', 'CAPRICCI', 'CARGO RECORDS', 'CARLOTTA FILMS', 'CARLOTTA', 'CARLOTA', 'CARMEN FILM', 'CASCADE', 'CATCHPLAY', 'CAULDRON FILMS', 'CAULDRON', 'CBS TELEVISION STUDIOS', 'CBS', 'CCTV', 'CCV ENTERTAINMENT', 'CCV', 'CD BABY', 'CD LAND', 'CECCHI GORI', 'CENTURY MEDIA', 'CHUAN XUN SHI DAI MULTIMEDIA', 'CINE-ASIA', 'CINÉART', 'CINEART', 'CINEDIGM', 'CINEFIL IMAGICA', 'CINEMA EPOCH', 'CINEMA GUILD', 'CINEMA LIBRE STUDIOS', 'CINEMA MONDO', 'CINEMATIC VISION', 'CINEPLOIT RECORDS', 'CINESTRANGE EXTREME', 'CITEL VIDEO', 'CITEL', 'CJ ENTERTAINMENT', 'CJ', 'CLASSIC MEDIA', 'CLASSICFLIX', 'CLASSICLINE', 'CLAUDIO RECORDS', 'CLEAR VISION', 'CLEOPATRA', 'CLOSE UP', 'CMS MEDIA LIMITED', 'CMV LASERVISION', 'CN ENTERTAINMENT', 'CODE RED', 'COHEN MEDIA GROUP', 'COHEN', 'COIN DE MIRE CINÉMA', 'COIN DE MIRE CINEMA', 'COLOSSEO FILM', 'COLUMBIA', 'COLUMBIA PICTURES', 'COLUMBIA/TRI-STAR', 'TRI-STAR', 'COMMERCIAL MARKETING', 'CONCORD MUSIC GROUP', 'CONCORDE VIDEO', 'CONDOR', 'CONSTANTIN FILM', 'CONSTANTIN', 'CONSTANTINO FILMES', 'CONSTANTINO', 'CONSTRUCTIVE MEDIA SERVICE', 'CONSTRUCTIVE', 'CONTENT ZONE', 'CONTENTS GATE', 'COQUEIRO VERDE', 'CORNERSTONE MEDIA', 'CORNERSTONE', 'CP DIGITAL', 'CREST MOVIES', 'CRITERION', 'CRITERION COLLECTION', 'CC', 'CRYSTAL CLASSICS', 'CULT EPICS', 'CULT FILMS', 'CULT VIDEO', 'CURZON FILM WORLD', 'D FILMS', "D'AILLY COMPANY", 'DAILLY COMPANY', 'D AILLY COMPANY', "D'AILLY", 'DAILLY', 'D AILLY', 'DA CAPO', 'DA MUSIC', "DALL'ANGELO PICTURES", 'DALLANGELO PICTURES', "DALL'ANGELO", 'DALL ANGELO PICTURES', 'DALL ANGELO', 'DAREDO', 'DARK FORCE ENTERTAINMENT', 'DARK FORCE', 'DARK SIDE RELEASING', 'DARK SIDE', 'DAZZLER MEDIA', 'DAZZLER', 'DCM PICTURES', 'DCM', 'DEAPLANETA', 'DECCA', 'DEEPJOY', 'DEFIANT SCREEN ENTERTAINMENT', 'DEFIANT SCREEN', 'DEFIANT', 'DELOS', 'DELPHIAN RECORDS', 'DELPHIAN', 'DELTA MUSIC & ENTERTAINMENT', 'DELTA MUSIC AND ENTERTAINMENT', 'DELTA MUSIC ENTERTAINMENT', 'DELTA MUSIC', 'DELTAMAC CO. 
LTD.', 'DELTAMAC CO LTD', 'DELTAMAC CO', 'DELTAMAC', 'DEMAND MEDIA', 'DEMAND', 'DEP', 'DEUTSCHE GRAMMOPHON', 'DFW', 'DGM', 'DIAPHANA', 'DIGIDREAMS STUDIOS', 'DIGIDREAMS', 'DIGITAL ENVIRONMENTS', 'DIGITAL', 'DISCOTEK MEDIA', 'DISCOVERY CHANNEL', 'DISCOVERY', 'DISK KINO', 'DISNEY / BUENA VISTA', 'DISNEY', 'BUENA VISTA', 'DISNEY BUENA VISTA', 'DISTRIBUTION SELECT', 'DIVISA', 'DNC ENTERTAINMENT', 'DNC', 'DOGWOOF', 'DOLMEN HOME VIDEO', 'DOLMEN', 'DONAU FILM', 'DONAU', 'DORADO FILMS', 'DORADO', 'DRAFTHOUSE FILMS', 'DRAFTHOUSE', 'DRAGON FILM ENTERTAINMENT', 'DRAGON ENTERTAINMENT', 'DRAGON FILM', 'DRAGON', 'DREAMWORKS', 'DRIVE ON RECORDS', 'DRIVE ON', 'DRIVE-ON', 'DRIVEON', 'DS MEDIA', 'DTP ENTERTAINMENT AG', 'DTP ENTERTAINMENT', 'DTP AG', 'DTP', 'DTS ENTERTAINMENT', 'DTS', 'DUKE MARKETING', 'DUKE VIDEO DISTRIBUTION', 'DUKE', 'DUTCH FILMWORKS', 'DUTCH', 'DVD INTERNATIONAL', 'DVD', 'DYBEX', 'DYNAMIC', 'DYNIT', 'E1 ENTERTAINMENT', 'E1', 'EAGLE ENTERTAINMENT', 'EAGLE HOME ENTERTAINMENT PVT.LTD.', 'EAGLE HOME ENTERTAINMENT PVTLTD', 'EAGLE HOME ENTERTAINMENT PVT LTD', 'EAGLE HOME ENTERTAINMENT', 'EAGLE PICTURES', 'EAGLE ROCK ENTERTAINMENT', 'EAGLE ROCK', 'EAGLE VISION MEDIA', 'EAGLE VISION', 'EARMUSIC', 'EARTH ENTERTAINMENT', 'EARTH', 'ECHO BRIDGE ENTERTAINMENT', 'ECHO BRIDGE', 'EDEL GERMANY GMBH', 'EDEL GERMANY', 'EDEL RECORDS', 'EDITION TONFILM', 'EDITIONS MONTPARNASSE', 'EDKO FILMS LTD.', 'EDKO FILMS LTD', 'EDKO FILMS', - 'EDKO', "EIN'S M&M CO", 'EINS M&M CO', "EIN'S M&M", 'EINS M&M', 'ELEA-MEDIA', 'ELEA MEDIA', 'ELEA', 'ELECTRIC PICTURE', 'ELECTRIC', 'ELEPHANT FILMS', 'ELEPHANT', 'ELEVATION', 'EMI', 'EMON', 'EMS', 'EMYLIA', 'ENE MEDIA', 'ENE', 'ENTERTAINMENT IN VIDEO', 'ENTERTAINMENT IN', 'ENTERTAINMENT ONE', 'ENTERTAINMENT ONE FILMS CANADA INC.', 'ENTERTAINMENT ONE FILMS CANADA INC', 'ENTERTAINMENT ONE FILMS CANADA', 'ENTERTAINMENT ONE CANADA INC', 'ENTERTAINMENT ONE CANADA', 'ENTERTAINMENTONE', 'EONE', 'EOS', 'EPIC PICTURES', 'EPIC', 'EPIC RECORDS', 'ERATO', 'EROS', 'ESC EDITIONS', 'ESCAPI MEDIA BV', 'ESOTERIC RECORDINGS', 'ESPN FILMS', 'EUREKA ENTERTAINMENT', 'EUREKA', 'EURO PICTURES', 'EURO VIDEO', 'EUROARTS', 'EUROPA FILMES', 'EUROPA', 'EUROPACORP', 'EUROZOOM', 'EXCEL', 'EXPLOSIVE MEDIA', 'EXPLOSIVE', 'EXTRALUCID FILMS', 'EXTRALUCID', 'EYE SEE MOVIES', 'EYE SEE', 'EYK MEDIA', 'EYK', 'FABULOUS FILMS', 'FABULOUS', 'FACTORIS FILMS', 'FACTORIS', 'FARAO RECORDS', 'FARBFILM HOME ENTERTAINMENT', 'FARBFILM ENTERTAINMENT', 'FARBFILM HOME', 'FARBFILM', 'FEELGOOD ENTERTAINMENT', 'FEELGOOD', 'FERNSEHJUWELEN', 'FILM CHEST', 'FILM MEDIA', 'FILM MOVEMENT', 'FILM4', 'FILMART', 'FILMAURO', 'FILMAX', 'FILMCONFECT HOME ENTERTAINMENT', 'FILMCONFECT ENTERTAINMENT', 'FILMCONFECT HOME', 'FILMCONFECT', 'FILMEDIA', 'FILMJUWELEN', 'FILMOTEKA NARODAWA', 'FILMRISE', 'FINAL CUT ENTERTAINMENT', 'FINAL CUT', 'FIREHOUSE 12 RECORDS', 'FIREHOUSE 12', 'FIRST INTERNATIONAL PRODUCTION', 'FIRST INTERNATIONAL', 'FIRST LOOK STUDIOS', 'FIRST LOOK', 'FLAGMAN TRADE', 'FLASHSTAR FILMES', 'FLASHSTAR', 'FLICKER ALLEY', 'FNC ADD CULTURE', 'FOCUS FILMES', 'FOCUS', 'FOKUS MEDIA', 'FOKUSA', 'FOX PATHE EUROPA', 'FOX PATHE', 'FOX EUROPA', 'FOX/MGM', 'FOX MGM', 'MGM', 'MGM/FOX', 'FOX', 'FPE', 'FRANCE TÉLÉVISIONS DISTRIBUTION', 'FRANCE TELEVISIONS DISTRIBUTION', 'FRANCE TELEVISIONS', 'FRANCE', 'FREE DOLPHIN ENTERTAINMENT', 'FREE DOLPHIN', 'FREESTYLE DIGITAL MEDIA', 'FREESTYLE DIGITAL', 'FREESTYLE', 'FREMANTLE HOME ENTERTAINMENT', 'FREMANTLE ENTERTAINMENT', 'FREMANTLE HOME', 'FREMANTL', 'FRENETIC FILMS', 'FRENETIC', 'FRONTIER WORKS', 'FRONTIER', 
'FRONTIERS MUSIC', 'FRONTIERS RECORDS', 'FS FILM OY', 'FS FILM', 'FULL MOON FEATURES', 'FULL MOON', 'FUN CITY EDITIONS', 'FUN CITY', - 'FUNIMATION ENTERTAINMENT', 'FUNIMATION', 'FUSION', 'FUTUREFILM', 'G2 PICTURES', 'G2', 'GAGA COMMUNICATIONS', 'GAGA', 'GAIAM', 'GALAPAGOS', 'GAMMA HOME ENTERTAINMENT', 'GAMMA ENTERTAINMENT', 'GAMMA HOME', 'GAMMA', 'GARAGEHOUSE PICTURES', 'GARAGEHOUSE', 'GARAGEPLAY (車庫娛樂)', '車庫娛樂', 'GARAGEPLAY (Che Ku Yu Le )', 'GARAGEPLAY', 'Che Ku Yu Le', 'GAUMONT', 'GEFFEN', 'GENEON ENTERTAINMENT', 'GENEON', 'GENEON UNIVERSAL ENTERTAINMENT', 'GENERAL VIDEO RECORDING', 'GLASS DOLL FILMS', 'GLASS DOLL', 'GLOBE MUSIC MEDIA', 'GLOBE MUSIC', 'GLOBE MEDIA', 'GLOBE', 'GO ENTERTAIN', 'GO', 'GOLDEN HARVEST', 'GOOD!MOVIES', 'GOOD! MOVIES', 'GOOD MOVIES', 'GRAPEVINE VIDEO', 'GRAPEVINE', 'GRASSHOPPER FILM', 'GRASSHOPPER FILMS', 'GRASSHOPPER', 'GRAVITAS VENTURES', 'GRAVITAS', 'GREAT MOVIES', 'GREAT', 'GREEN APPLE ENTERTAINMENT', 'GREEN ENTERTAINMENT', 'GREEN APPLE', 'GREEN', 'GREENNARAE MEDIA', 'GREENNARAE', 'GRINDHOUSE RELEASING', 'GRINDHOUSE', 'GRIND HOUSE', 'GRYPHON ENTERTAINMENT', 'GRYPHON', 'GUNPOWDER & SKY', 'GUNPOWDER AND SKY', 'GUNPOWDER SKY', 'GUNPOWDER + SKY', 'GUNPOWDER', 'HANABEE ENTERTAINMENT', 'HANABEE', 'HANNOVER HOUSE', 'HANNOVER', 'HANSESOUND', 'HANSE SOUND', 'HANSE', 'HAPPINET', 'HARMONIA MUNDI', 'HARMONIA', 'HBO', 'HDC', 'HEC', 'HELL & BACK RECORDINGS', 'HELL AND BACK RECORDINGS', 'HELL & BACK', 'HELL AND BACK', "HEN'S TOOTH VIDEO", 'HENS TOOTH VIDEO', "HEN'S TOOTH", 'HENS TOOTH', 'HIGH FLIERS', 'HIGHLIGHT', 'HILLSONG', 'HISTORY CHANNEL', 'HISTORY', 'HK VIDÉO', 'HK VIDEO', 'HK', 'HMH HAMBURGER MEDIEN HAUS', 'HAMBURGER MEDIEN HAUS', 'HMH HAMBURGER MEDIEN', 'HMH HAMBURGER', 'HMH', 'HOLLYWOOD CLASSIC ENTERTAINMENT', 'HOLLYWOOD CLASSIC', 'HOLLYWOOD PICTURES', 'HOLLYWOOD', 'HOPSCOTCH ENTERTAINMENT', 'HOPSCOTCH', 'HPM', 'HÄNNSLER CLASSIC', 'HANNSLER CLASSIC', 'HANNSLER', 'I-CATCHER', 'I CATCHER', 'ICATCHER', 'I-ON NEW MEDIA', 'I ON NEW MEDIA', 'ION NEW MEDIA', 'ION MEDIA', 'I-ON', 'ION', 'IAN PRODUCTIONS', 'IAN', 'ICESTORM', 'ICON FILM DISTRIBUTION', 'ICON DISTRIBUTION', 'ICON FILM', 'ICON', 'IDEALE AUDIENCE', 'IDEALE', 'IFC FILMS', 'IFC', 'IFILM', 'ILLUSIONS UNLTD.', 'ILLUSIONS UNLTD', 'ILLUSIONS', 'IMAGE ENTERTAINMENT', 'IMAGE', 'IMAGEM FILMES', 'IMAGEM', 'IMOVISION', 'IMPERIAL CINEPIX', 'IMPRINT', 'IMPULS HOME ENTERTAINMENT', 'IMPULS ENTERTAINMENT', 'IMPULS HOME', 'IMPULS', 'IN-AKUSTIK', 'IN AKUSTIK', 'INAKUSTIK', 'INCEPTION MEDIA GROUP', 'INCEPTION MEDIA', 'INCEPTION GROUP', 'INCEPTION', 'INDEPENDENT', 'INDICAN', 'INDIE RIGHTS', 'INDIE', 'INDIGO', 'INFO', 'INJOINGAN', 'INKED PICTURES', 'INKED', 'INSIDE OUT MUSIC', 'INSIDE MUSIC', 'INSIDE OUT', 'INSIDE', 'INTERCOM', 'INTERCONTINENTAL VIDEO', 'INTERCONTINENTAL', 'INTERGROOVE', 'INTERSCOPE', 'INVINCIBLE PICTURES', 'INVINCIBLE', 'ISLAND/MERCURY', 'ISLAND MERCURY', 'ISLANDMERCURY', 'ISLAND & MERCURY', 'ISLAND AND MERCURY', 'ISLAND', 'ITN', 'ITV DVD', 'ITV', 'IVC', 'IVE ENTERTAINMENT', 'IVE', 'J&R ADVENTURES', 'J&R', 'JR', 'JAKOB', 'JONU MEDIA', 'JONU', 'JRB PRODUCTIONS', 'JRB', 'JUST BRIDGE ENTERTAINMENT', 'JUST BRIDGE', 'JUST ENTERTAINMENT', 'JUST', 'KABOOM ENTERTAINMENT', 'KABOOM', 'KADOKAWA ENTERTAINMENT', 'KADOKAWA', 'KAIROS', 'KALEIDOSCOPE ENTERTAINMENT', 'KALEIDOSCOPE', 'KAM & RONSON ENTERPRISES', 'KAM & RONSON', 'KAM&RONSON ENTERPRISES', 'KAM&RONSON', 'KAM AND RONSON ENTERPRISES', 'KAM AND RONSON', 'KANA HOME VIDEO', 'KARMA FILMS', 'KARMA', 'KATZENBERGER', 'KAZE', - 'KBS MEDIA', 'KBS', 'KD MEDIA', 'KD', 'KING 
MEDIA', 'KING', 'KING RECORDS', 'KINO LORBER', 'KINO', 'KINO SWIAT', 'KINOKUNIYA', 'KINOWELT HOME ENTERTAINMENT/DVD', 'KINOWELT HOME ENTERTAINMENT', 'KINOWELT ENTERTAINMENT', 'KINOWELT HOME DVD', 'KINOWELT ENTERTAINMENT/DVD', 'KINOWELT DVD', 'KINOWELT', 'KIT PARKER FILMS', 'KIT PARKER', 'KITTY MEDIA', 'KNM HOME ENTERTAINMENT', 'KNM ENTERTAINMENT', 'KNM HOME', 'KNM', 'KOBA FILMS', 'KOBA', 'KOCH ENTERTAINMENT', 'KOCH MEDIA', 'KOCH', 'KRAKEN RELEASING', 'KRAKEN', 'KSCOPE', 'KSM', 'KULTUR', "L'ATELIER D'IMAGES", "LATELIER D'IMAGES", "L'ATELIER DIMAGES", 'LATELIER DIMAGES', "L ATELIER D'IMAGES", "L'ATELIER D IMAGES", - 'L ATELIER D IMAGES', "L'ATELIER", 'L ATELIER', 'LATELIER', 'LA AVENTURA AUDIOVISUAL', 'LA AVENTURA', 'LACE GROUP', 'LACE', 'LASER PARADISE', 'LAYONS', 'LCJ EDITIONS', 'LCJ', 'LE CHAT QUI FUME', 'LE PACTE', 'LEDICK FILMHANDEL', 'LEGEND', 'LEOMARK STUDIOS', 'LEOMARK', 'LEONINE FILMS', 'LEONINE', 'LICHTUNG MEDIA LTD', 'LICHTUNG LTD', 'LICHTUNG MEDIA LTD.', 'LICHTUNG LTD.', 'LICHTUNG MEDIA', 'LICHTUNG', 'LIGHTHOUSE HOME ENTERTAINMENT', 'LIGHTHOUSE ENTERTAINMENT', 'LIGHTHOUSE HOME', 'LIGHTHOUSE', 'LIGHTYEAR', 'LIONSGATE FILMS', 'LIONSGATE', 'LIZARD CINEMA TRADE', 'LLAMENTOL', 'LOBSTER FILMS', 'LOBSTER', 'LOGON', 'LORBER FILMS', 'LORBER', 'LOS BANDITOS FILMS', 'LOS BANDITOS', 'LOUD & PROUD RECORDS', 'LOUD AND PROUD RECORDS', 'LOUD & PROUD', 'LOUD AND PROUD', 'LSO LIVE', 'LUCASFILM', 'LUCKY RED', 'LUMIÈRE HOME ENTERTAINMENT', 'LUMIERE HOME ENTERTAINMENT', 'LUMIERE ENTERTAINMENT', 'LUMIERE HOME', 'LUMIERE', 'M6 VIDEO', 'M6', 'MAD DIMENSION', 'MADMAN ENTERTAINMENT', 'MADMAN', 'MAGIC BOX', 'MAGIC PLAY', 'MAGNA HOME ENTERTAINMENT', 'MAGNA ENTERTAINMENT', 'MAGNA HOME', 'MAGNA', 'MAGNOLIA PICTURES', 'MAGNOLIA', 'MAIDEN JAPAN', 'MAIDEN', 'MAJENG MEDIA', 'MAJENG', 'MAJESTIC HOME ENTERTAINMENT', 'MAJESTIC ENTERTAINMENT', 'MAJESTIC HOME', 'MAJESTIC', 'MANGA HOME ENTERTAINMENT', 'MANGA ENTERTAINMENT', 'MANGA HOME', 'MANGA', 'MANTA LAB', 'MAPLE STUDIOS', 'MAPLE', 'MARCO POLO PRODUCTION', 'MARCO POLO', 'MARIINSKY', 'MARVEL STUDIOS', 'MARVEL', 'MASCOT RECORDS', 'MASCOT', 'MASSACRE VIDEO', 'MASSACRE', 'MATCHBOX', 'MATRIX D', 'MAXAM', 'MAYA HOME ENTERTAINMENT', 'MAYA ENTERTAINMENT', 'MAYA HOME', 'MAYAT', 'MDG', 'MEDIA BLASTERS', 'MEDIA FACTORY', 'MEDIA TARGET DISTRIBUTION', 'MEDIA TARGET', 'MEDIAINVISION', 'MEDIATOON', 'MEDIATRES ESTUDIO', 'MEDIATRES STUDIO', 'MEDIATRES', 'MEDICI ARTS', 'MEDICI CLASSICS', 'MEDIUMRARE ENTERTAINMENT', 'MEDIUMRARE', 'MEDUSA', 'MEGASTAR', 'MEI AH', 'MELI MÉDIAS', 'MELI MEDIAS', 'MEMENTO FILMS', 'MEMENTO', 'MENEMSHA FILMS', 'MENEMSHA', 'MERCURY', 'MERCURY STUDIOS', 'MERGE SOFT PRODUCTIONS', 'MERGE PRODUCTIONS', 'MERGE SOFT', 'MERGE', 'METAL BLADE RECORDS', 'METAL BLADE', 'METEOR', 'METRO-GOLDWYN-MAYER', 'METRO GOLDWYN MAYER', 'METROGOLDWYNMAYER', 'METRODOME VIDEO', 'METRODOME', 'METROPOLITAN', 'MFA+', 'MFA', 'MIG FILMGROUP', 'MIG', 'MILESTONE', 'MILL CREEK ENTERTAINMENT', 'MILL CREEK', 'MILLENNIUM MEDIA', 'MILLENNIUM', 'MIRAGE ENTERTAINMENT', 'MIRAGE', 'MIRAMAX', 'MISTERIYA ZVUKA', 'MK2', 'MODE RECORDS', 'MODE', 'MOMENTUM PICTURES', 'MONDO HOME ENTERTAINMENT', 'MONDO ENTERTAINMENT', 'MONDO HOME', 'MONDO MACABRO', 'MONGREL MEDIA', 'MONOLIT', 'MONOLITH VIDEO', 'MONOLITH', 'MONSTER PICTURES', 'MONSTER', 'MONTEREY VIDEO', 'MONTEREY', 'MONUMENT RELEASING', 'MONUMENT', 'MORNINGSTAR', 'MORNING STAR', 'MOSERBAER', 'MOVIEMAX', 'MOVINSIDE', 'MPI MEDIA GROUP', 'MPI MEDIA', 'MPI', 'MR. 
BONGO FILMS', 'MR BONGO FILMS', 'MR BONGO', 'MRG (MERIDIAN)', 'MRG MERIDIAN', 'MRG', 'MERIDIAN', 'MUBI', 'MUG SHOT PRODUCTIONS', 'MUG SHOT', 'MULTIMUSIC', 'MULTI-MUSIC', 'MULTI MUSIC', 'MUSE', 'MUSIC BOX FILMS', 'MUSIC BOX', 'MUSICBOX', 'MUSIC BROKERS', 'MUSIC THEORIES', 'MUSIC VIDEO DISTRIBUTORS', 'MUSIC VIDEO', 'MUSTANG ENTERTAINMENT', 'MUSTANG', 'MVD VISUAL', 'MVD', 'MVD/VSC', 'MVL', 'MVM ENTERTAINMENT', 'MVM', 'MYNDFORM', 'MYSTIC NIGHT PICTURES', 'MYSTIC NIGHT', 'NAMELESS MEDIA', 'NAMELESS', 'NAPALM RECORDS', 'NAPALM', 'NATIONAL ENTERTAINMENT MEDIA', 'NATIONAL ENTERTAINMENT', 'NATIONAL MEDIA', 'NATIONAL FILM ARCHIVE', 'NATIONAL ARCHIVE', 'NATIONAL FILM', 'NATIONAL GEOGRAPHIC', 'NAT GEO TV', 'NAT GEO', 'NGO', 'NAXOS', 'NBCUNIVERSAL ENTERTAINMENT JAPAN', 'NBC UNIVERSAL ENTERTAINMENT JAPAN', 'NBCUNIVERSAL JAPAN', 'NBC UNIVERSAL JAPAN', 'NBC JAPAN', 'NBO ENTERTAINMENT', 'NBO', 'NEOS', 'NETFLIX', 'NETWORK', 'NEW BLOOD', 'NEW DISC', 'NEW KSM', 'NEW LINE CINEMA', 'NEW LINE', 'NEW MOVIE TRADING CO. LTD', 'NEW MOVIE TRADING CO LTD', 'NEW MOVIE TRADING CO', 'NEW MOVIE TRADING', 'NEW WAVE FILMS', 'NEW WAVE', 'NFI', 'NHK', 'NIPPONART', 'NIS AMERICA', 'NJUTAFILMS', 'NOBLE ENTERTAINMENT', 'NOBLE', 'NORDISK FILM', 'NORDISK', 'NORSK FILM', 'NORSK', 'NORTH AMERICAN MOTION PICTURES', 'NOS AUDIOVISUAIS', 'NOTORIOUS PICTURES', 'NOTORIOUS', 'NOVA MEDIA', 'NOVA', 'NOVA SALES AND DISTRIBUTION', 'NOVA SALES & DISTRIBUTION', 'NSM', 'NSM RECORDS', 'NUCLEAR BLAST', 'NUCLEUS FILMS', 'NUCLEUS', 'OBERLIN MUSIC', 'OBERLIN', 'OBRAS-PRIMAS DO CINEMA', 'OBRAS PRIMAS DO CINEMA', 'OBRASPRIMAS DO CINEMA', 'OBRAS-PRIMAS CINEMA', 'OBRAS PRIMAS CINEMA', 'OBRASPRIMAS CINEMA', 'OBRAS-PRIMAS', 'OBRAS PRIMAS', 'OBRASPRIMAS', 'ODEON', 'OFDB FILMWORKS', 'OFDB', 'OLIVE FILMS', 'OLIVE', 'ONDINE', 'ONSCREEN FILMS', 'ONSCREEN', 'OPENING DISTRIBUTION', 'OPERA AUSTRALIA', 'OPTIMUM HOME ENTERTAINMENT', 'OPTIMUM ENTERTAINMENT', 'OPTIMUM HOME', 'OPTIMUM', 'OPUS ARTE', 'ORANGE STUDIO', 'ORANGE', 'ORLANDO EASTWOOD FILMS', 'ORLANDO FILMS', 'ORLANDO EASTWOOD', 'ORLANDO', 'ORUSTAK PICTURES', 'ORUSTAK', 'OSCILLOSCOPE PICTURES', 'OSCILLOSCOPE', 'OUTPLAY', 'PALISADES TARTAN', 'PAN VISION', 'PANVISION', 'PANAMINT CINEMA', 'PANAMINT', 'PANDASTORM ENTERTAINMENT', 'PANDA STORM ENTERTAINMENT', 'PANDASTORM', 'PANDA STORM', 'PANDORA FILM', 'PANDORA', 'PANEGYRIC', 'PANORAMA', 'PARADE DECK FILMS', 'PARADE DECK', 'PARADISE', 'PARADISO FILMS', 'PARADOX', 'PARAMOUNT PICTURES', 'PARAMOUNT', 'PARIS FILMES', 'PARIS FILMS', 'PARIS', 'PARK CIRCUS', 'PARLOPHONE', 'PASSION RIVER', 'PATHE DISTRIBUTION', 'PATHE', 'PBS', 'PEACE ARCH TRINITY', 'PECCADILLO PICTURES', 'PEPPERMINT', 'PHASE 4 FILMS', 'PHASE 4', 'PHILHARMONIA BAROQUE', 'PICTURE HOUSE ENTERTAINMENT', 'PICTURE ENTERTAINMENT', 'PICTURE HOUSE', 'PICTURE', 'PIDAX', - 'PINK FLOYD RECORDS', 'PINK FLOYD', 'PINNACLE FILMS', 'PINNACLE', 'PLAIN', 'PLATFORM ENTERTAINMENT LIMITED', 'PLATFORM ENTERTAINMENT LTD', 'PLATFORM ENTERTAINMENT LTD.', 'PLATFORM ENTERTAINMENT', 'PLATFORM', 'PLAYARTE', 'PLG UK CLASSICS', 'PLG UK', 'PLG', 'POLYBAND & TOPPIC VIDEO/WVG', 'POLYBAND AND TOPPIC VIDEO/WVG', 'POLYBAND & TOPPIC VIDEO WVG', 'POLYBAND & TOPPIC VIDEO AND WVG', 'POLYBAND & TOPPIC VIDEO & WVG', 'POLYBAND AND TOPPIC VIDEO WVG', 'POLYBAND AND TOPPIC VIDEO AND WVG', 'POLYBAND AND TOPPIC VIDEO & WVG', 'POLYBAND & TOPPIC VIDEO', 'POLYBAND AND TOPPIC VIDEO', 'POLYBAND & TOPPIC', 'POLYBAND AND TOPPIC', 'POLYBAND', 'WVG', 'POLYDOR', 'PONY', 'PONY CANYON', 'POTEMKINE', 'POWERHOUSE FILMS', 'POWERHOUSE', 'POWERSTATIOM', 'PRIDE & JOY', 
'PRIDE AND JOY', 'PRINZ MEDIA', 'PRINZ', 'PRIS AUDIOVISUAIS', 'PRO VIDEO', 'PRO-VIDEO', 'PRO-MOTION', 'PRO MOTION', 'PROD. JRB', 'PROD JRB', 'PRODISC', 'PROKINO', 'PROVOGUE RECORDS', 'PROVOGUE', 'PROWARE', 'PULP VIDEO', 'PULP', 'PULSE VIDEO', 'PULSE', 'PURE AUDIO RECORDINGS', 'PURE AUDIO', 'PURE FLIX ENTERTAINMENT', 'PURE FLIX', 'PURE ENTERTAINMENT', 'PYRAMIDE VIDEO', 'PYRAMIDE', 'QUALITY FILMS', 'QUALITY', 'QUARTO VALLEY RECORDS', 'QUARTO VALLEY', 'QUESTAR', 'R SQUARED FILMS', 'R SQUARED', 'RAPID EYE MOVIES', 'RAPID EYE', 'RARO VIDEO', 'RARO', 'RAROVIDEO U.S.', 'RAROVIDEO US', 'RARO VIDEO US', 'RARO VIDEO U.S.', 'RARO U.S.', 'RARO US', 'RAVEN BANNER RELEASING', 'RAVEN BANNER', 'RAVEN', 'RAZOR DIGITAL ENTERTAINMENT', 'RAZOR DIGITAL', 'RCA', 'RCO LIVE', 'RCO', 'RCV', 'REAL GONE MUSIC', 'REAL GONE', 'REANIMEDIA', 'REANI MEDIA', 'REDEMPTION', 'REEL', 'RELIANCE HOME VIDEO & GAMES', 'RELIANCE HOME VIDEO AND GAMES', 'RELIANCE HOME VIDEO', 'RELIANCE VIDEO', 'RELIANCE HOME', 'RELIANCE', 'REM CULTURE', 'REMAIN IN LIGHT', 'REPRISE', 'RESEN', 'RETROMEDIA', 'REVELATION FILMS LTD.', 'REVELATION FILMS LTD', 'REVELATION FILMS', 'REVELATION LTD.', 'REVELATION LTD', 'REVELATION', 'REVOLVER ENTERTAINMENT', 'REVOLVER', 'RHINO MUSIC', 'RHINO', 'RHV', 'RIGHT STUF', 'RIMINI EDITIONS', 'RISING SUN MEDIA', 'RLJ ENTERTAINMENT', 'RLJ', 'ROADRUNNER RECORDS', 'ROADSHOW ENTERTAINMENT', 'ROADSHOW', 'RONE', 'RONIN FLIX', 'ROTANA HOME ENTERTAINMENT', 'ROTANA ENTERTAINMENT', 'ROTANA HOME', 'ROTANA', 'ROUGH TRADE', - 'ROUNDER', 'SAFFRON HILL FILMS', 'SAFFRON HILL', 'SAFFRON', 'SAMUEL GOLDWYN FILMS', 'SAMUEL GOLDWYN', 'SAN FRANCISCO SYMPHONY', 'SANDREW METRONOME', 'SAPHRANE', 'SAVOR', 'SCANBOX ENTERTAINMENT', 'SCANBOX', 'SCENIC LABS', 'SCHRÖDERMEDIA', 'SCHRODERMEDIA', 'SCHRODER MEDIA', 'SCORPION RELEASING', 'SCORPION', 'SCREAM TEAM RELEASING', 'SCREAM TEAM', 'SCREEN MEDIA', 'SCREEN', 'SCREENBOUND PICTURES', 'SCREENBOUND', 'SCREENWAVE MEDIA', 'SCREENWAVE', 'SECOND RUN', 'SECOND SIGHT', 'SEEDSMAN GROUP', 'SELECT VIDEO', 'SELECTA VISION', 'SENATOR', 'SENTAI FILMWORKS', 'SENTAI', 'SEVEN7', 'SEVERIN FILMS', 'SEVERIN', 'SEVILLE', 'SEYONS ENTERTAINMENT', 'SEYONS', 'SF STUDIOS', 'SGL ENTERTAINMENT', 'SGL', 'SHAMELESS', 'SHAMROCK MEDIA', 'SHAMROCK', 'SHANGHAI EPIC MUSIC ENTERTAINMENT', 'SHANGHAI EPIC ENTERTAINMENT', 'SHANGHAI EPIC MUSIC', 'SHANGHAI MUSIC ENTERTAINMENT', 'SHANGHAI ENTERTAINMENT', 'SHANGHAI MUSIC', 'SHANGHAI', 'SHEMAROO', 'SHOCHIKU', 'SHOCK', 'SHOGAKU KAN', 'SHOUT FACTORY', 'SHOUT! 
FACTORY', 'SHOUT', 'SHOUT!', 'SHOWBOX', 'SHOWTIME ENTERTAINMENT', 'SHOWTIME', 'SHRIEK SHOW', 'SHUDDER', 'SIDONIS', 'SIDONIS CALYSTA', 'SIGNAL ONE ENTERTAINMENT', 'SIGNAL ONE', 'SIGNATURE ENTERTAINMENT', 'SIGNATURE', 'SILVER VISION', 'SINISTER FILM', 'SINISTER', 'SIREN VISUAL ENTERTAINMENT', 'SIREN VISUAL', 'SIREN ENTERTAINMENT', 'SIREN', 'SKANI', 'SKY DIGI', - 'SLASHER // VIDEO', 'SLASHER / VIDEO', 'SLASHER VIDEO', 'SLASHER', 'SLOVAK FILM INSTITUTE', 'SLOVAK FILM', 'SFI', 'SM LIFE DESIGN GROUP', 'SMOOTH PICTURES', 'SMOOTH', 'SNAPPER MUSIC', 'SNAPPER', 'SODA PICTURES', 'SODA', 'SONO LUMINUS', 'SONY MUSIC', 'SONY PICTURES', 'SONY', 'SONY PICTURES CLASSICS', 'SONY CLASSICS', 'SOUL MEDIA', 'SOUL', 'SOULFOOD MUSIC DISTRIBUTION', 'SOULFOOD DISTRIBUTION', 'SOULFOOD MUSIC', 'SOULFOOD', 'SOYUZ', 'SPECTRUM', 'SPENTZOS FILM', 'SPENTZOS', 'SPIRIT ENTERTAINMENT', 'SPIRIT', 'SPIRIT MEDIA GMBH', 'SPIRIT MEDIA', 'SPLENDID ENTERTAINMENT', 'SPLENDID FILM', 'SPO', 'SQUARE ENIX', 'SRI BALAJI VIDEO', 'SRI BALAJI', 'SRI', 'SRI VIDEO', 'SRS CINEMA', 'SRS', 'SSO RECORDINGS', 'SSO', 'ST2 MUSIC', 'ST2', 'STAR MEDIA ENTERTAINMENT', 'STAR ENTERTAINMENT', 'STAR MEDIA', 'STAR', 'STARLIGHT', 'STARZ / ANCHOR BAY', 'STARZ ANCHOR BAY', 'STARZ', 'ANCHOR BAY', 'STER KINEKOR', 'STERLING ENTERTAINMENT', 'STERLING', 'STINGRAY', 'STOCKFISCH RECORDS', 'STOCKFISCH', 'STRAND RELEASING', 'STRAND', 'STUDIO 4K', 'STUDIO CANAL', 'STUDIO GHIBLI', 'GHIBLI', 'STUDIO HAMBURG ENTERPRISES', 'HAMBURG ENTERPRISES', 'STUDIO HAMBURG', 'HAMBURG', 'STUDIO S', 'SUBKULTUR ENTERTAINMENT', 'SUBKULTUR', 'SUEVIA FILMS', 'SUEVIA', 'SUMMIT ENTERTAINMENT', 'SUMMIT', 'SUNFILM ENTERTAINMENT', 'SUNFILM', 'SURROUND RECORDS', 'SURROUND', 'SVENSK FILMINDUSTRI', 'SVENSK', 'SWEN FILMES', 'SWEN FILMS', 'SWEN', 'SYNAPSE FILMS', 'SYNAPSE', 'SYNDICADO', 'SYNERGETIC', 'T- SERIES', 'T-SERIES', 'T SERIES', 'TSERIES', 'T.V.P.', 'TVP', 'TACET RECORDS', 'TACET', 'TAI SENG', 'TAI SHENG', 'TAKEONE', 'TAKESHOBO', 'TAMASA DIFFUSION', 'TC ENTERTAINMENT', 'TC', 'TDK', 'TEAM MARKETING', 'TEATRO REAL', 'TEMA DISTRIBUCIONES', 'TEMPE DIGITAL', 'TF1 VIDÉO', 'TF1 VIDEO', 'TF1', 'THE BLU', 'BLU', 'THE ECSTASY OF FILMS', 'THE FILM DETECTIVE', 'FILM DETECTIVE', 'THE JOKERS', 'JOKERS', 'THE ON', 'ON', 'THIMFILM', 'THIM FILM', 'THIM', 'THIRD WINDOW FILMS', 'THIRD WINDOW', '3RD WINDOW FILMS', '3RD WINDOW', 'THUNDERBEAN ANIMATION', 'THUNDERBEAN', 'THUNDERBIRD RELEASING', 'THUNDERBIRD', 'TIBERIUS FILM', 'TIME LIFE', 'TIMELESS MEDIA GROUP', 'TIMELESS MEDIA', 'TIMELESS GROUP', 'TIMELESS', 'TLA RELEASING', 'TLA', 'TOBIS FILM', 'TOBIS', 'TOEI', 'TOHO', 'TOKYO SHOCK', 'TOKYO', 'TONPOOL MEDIEN GMBH', 'TONPOOL MEDIEN', 'TOPICS ENTERTAINMENT', 'TOPICS', 'TOUCHSTONE PICTURES', 'TOUCHSTONE', 'TRANSMISSION FILMS', 'TRANSMISSION', 'TRAVEL VIDEO STORE', 'TRIART', 'TRIGON FILM', 'TRIGON', 'TRINITY HOME ENTERTAINMENT', 'TRINITY ENTERTAINMENT', 'TRINITY HOME', 'TRINITY', 'TRIPICTURES', 'TRI-PICTURES', 'TRI PICTURES', 'TROMA', 'TURBINE MEDIEN', 'TURTLE RECORDS', 'TURTLE', 'TVA FILMS', 'TVA', 'TWILIGHT TIME', 'TWILIGHT', 'TT', 'TWIN CO., LTD.', 'TWIN CO, LTD.', 'TWIN CO., LTD', 'TWIN CO, LTD', 'TWIN CO LTD', 'TWIN LTD', 'TWIN CO.', 'TWIN CO', 'TWIN', 'UCA', 'UDR', 'UEK', 'UFA/DVD', 'UFA DVD', 'UFADVD', 'UGC PH', 'ULTIMATE3DHEAVEN', 'ULTRA', 'UMBRELLA ENTERTAINMENT', 'UMBRELLA', 'UMC', "UNCORK'D ENTERTAINMENT", 'UNCORKD ENTERTAINMENT', 'UNCORK D ENTERTAINMENT', "UNCORK'D", 'UNCORK D', 'UNCORKD', 'UNEARTHED FILMS', 'UNEARTHED', 'UNI DISC', 'UNIMUNDOS', 'UNITEL', 'UNIVERSAL MUSIC', 'UNIVERSAL SONY PICTURES HOME 
ENTERTAINMENT', 'UNIVERSAL SONY PICTURES ENTERTAINMENT', 'UNIVERSAL SONY PICTURES HOME', 'UNIVERSAL SONY PICTURES', 'UNIVERSAL HOME ENTERTAINMENT', 'UNIVERSAL ENTERTAINMENT', - 'UNIVERSAL HOME', 'UNIVERSAL STUDIOS', 'UNIVERSAL', 'UNIVERSE LASER & VIDEO CO.', 'UNIVERSE LASER AND VIDEO CO.', 'UNIVERSE LASER & VIDEO CO', 'UNIVERSE LASER AND VIDEO CO', 'UNIVERSE LASER CO.', 'UNIVERSE LASER CO', 'UNIVERSE LASER', 'UNIVERSUM FILM', 'UNIVERSUM', 'UTV', 'VAP', 'VCI', 'VENDETTA FILMS', 'VENDETTA', 'VERSÁTIL HOME VIDEO', 'VERSÁTIL VIDEO', 'VERSÁTIL HOME', 'VERSÁTIL', 'VERSATIL HOME VIDEO', 'VERSATIL VIDEO', 'VERSATIL HOME', 'VERSATIL', 'VERTICAL ENTERTAINMENT', 'VERTICAL', 'VÉRTICE 360º', 'VÉRTICE 360', 'VERTICE 360o', 'VERTICE 360', 'VERTIGO BERLIN', 'VÉRTIGO FILMS', 'VÉRTIGO', 'VERTIGO FILMS', 'VERTIGO', 'VERVE PICTURES', 'VIA VISION ENTERTAINMENT', 'VIA VISION', 'VICOL ENTERTAINMENT', 'VICOL', 'VICOM', 'VICTOR ENTERTAINMENT', 'VICTOR', 'VIDEA CDE', 'VIDEO FILM EXPRESS', 'VIDEO FILM', 'VIDEO EXPRESS', 'VIDEO MUSIC, INC.', 'VIDEO MUSIC, INC', 'VIDEO MUSIC INC.', 'VIDEO MUSIC INC', 'VIDEO MUSIC', 'VIDEO SERVICE CORP.', 'VIDEO SERVICE CORP', 'VIDEO SERVICE', 'VIDEO TRAVEL', 'VIDEOMAX', 'VIDEO MAX', 'VII PILLARS ENTERTAINMENT', 'VII PILLARS', 'VILLAGE FILMS', 'VINEGAR SYNDROME', 'VINEGAR', 'VS', 'VINNY MOVIES', 'VINNY', 'VIRGIL FILMS & ENTERTAINMENT', 'VIRGIL FILMS AND ENTERTAINMENT', 'VIRGIL ENTERTAINMENT', 'VIRGIL FILMS', 'VIRGIL', 'VIRGIN RECORDS', 'VIRGIN', 'VISION FILMS', 'VISION', 'VISUAL ENTERTAINMENT GROUP', + '01 DISTRIBUTION', '100 DESTINATIONS TRAVEL FILM', '101 FILMS', '1FILMS', '2 ENTERTAIN VIDEO', '20TH CENTURY FOX', '2L', '3D CONTENT HUB', '3D MEDIA', '3L FILM', '4DIGITAL', '4DVD', '4K ULTRA HD MOVIES', '4K UHD', '8-FILMS', '84 ENTERTAINMENT', '88 FILMS', '@ANIME', 'ANIME', 'A CONTRACORRIENTE', 'A CONTRACORRIENTE FILMS', 'A&E HOME VIDEO', 'A&E', 'A&M RECORDS', 'A+E NETWORKS', 'A+R', 'A-FILM', 'AAA', 'AB VIDÉO', 'AB VIDEO', 'ABC - (AUSTRALIAN BROADCASTING CORPORATION)', 'ABC', 'ABKCO', 'ABSOLUT MEDIEN', 'ABSOLUTE', 'ACCENT FILM ENTERTAINMENT', 'ACCENTUS', 'ACORN MEDIA', 'AD VITAM', 'ADA', 'ADITYA VIDEOS', 'ADSO FILMS', 'AFM RECORDS', 'AGFA', 'AIX RECORDS', + 'ALAMODE FILM', 'ALBA RECORDS', 'ALBANY RECORDS', 'ALBATROS', 'ALCHEMY', 'ALIVE', 'ALL ANIME', 'ALL INTERACTIVE ENTERTAINMENT', 'ALLEGRO', 'ALLIANCE', 'ALPHA MUSIC', 'ALTERDYSTRYBUCJA', 'ALTERED INNOCENCE', 'ALTITUDE FILM DISTRIBUTION', 'ALUCARD RECORDS', 'AMAZING D.C.', 'AMAZING DC', 'AMMO CONTENT', 'AMUSE SOFT ENTERTAINMENT', 'ANCONNECT', 'ANEC', 'ANIMATSU', 'ANIME HOUSE', 'ANIME LTD', 'ANIME WORKS', 'ANIMEIGO', 'ANIPLEX', 'ANOLIS ENTERTAINMENT', 'ANOTHER WORLD ENTERTAINMENT', 'AP INTERNATIONAL', 'APPLE', 'ARA MEDIA', 'ARBELOS', 'ARC ENTERTAINMENT', 'ARP SÉLECTION', 'ARP SELECTION', 'ARROW', 'ART SERVICE', 'ART VISION', 'ARTE ÉDITIONS', 'ARTE EDITIONS', 'ARTE VIDÉO', + 'ARTE VIDEO', 'ARTHAUS MUSIK', 'ARTIFICIAL EYE', 'ARTSPLOITATION FILMS', 'ARTUS FILMS', 'ASCOT ELITE HOME ENTERTAINMENT', 'ASIA VIDEO', 'ASMIK ACE', 'ASTRO RECORDS & FILMWORKS', 'ASYLUM', 'ATLANTIC FILM', 'ATLANTIC RECORDS', 'ATLAS FILM', 'AUDIO VISUAL ENTERTAINMENT', 'AURO-3D CREATIVE LABEL', 'AURUM', 'AV VISIONEN', 'AV-JET', 'AVALON', 'AVENTI', 'AVEX TRAX', 'AXIOM', 'AXIS RECORDS', 'AYNGARAN', 'BAC FILMS', 'BACH FILMS', 'BANDAI VISUAL', 'BARCLAY', 'BBC', 'BRITISH BROADCASTING CORPORATION', 'BBI FILMS', 'BBI', 'BCI HOME ENTERTAINMENT', 'BEGGARS BANQUET', 'BEL AIR CLASSIQUES', 'BELGA FILMS', 'BELVEDERE', 'BENELUX FILM DISTRIBUTORS', 'BENNETT-WATT MEDIA', 'BERLIN 
CLASSICS', 'BERLINER PHILHARMONIKER RECORDINGS', 'BEST ENTERTAINMENT', 'BEYOND HOME ENTERTAINMENT', 'BFI VIDEO', 'BFI', 'BRITISH FILM INSTITUTE', 'BFS ENTERTAINMENT', 'BFS', 'BHAVANI', 'BIBER RECORDS', 'BIG HOME VIDEO', 'BILDSTÖRUNG', + 'BILDSTORUNG', 'BILL ZEBUB', 'BIRNENBLATT', 'BIT WEL', 'BLACK BOX', 'BLACK HILL PICTURES', 'BLACK HILL', 'BLACK HOLE RECORDINGS', 'BLACK HOLE', 'BLAQOUT', 'BLAUFIELD MUSIC', 'BLAUFIELD', 'BLOCKBUSTER ENTERTAINMENT', 'BLOCKBUSTER', 'BLU PHASE MEDIA', 'BLU-RAY ONLY', 'BLU-RAY', 'BLURAY ONLY', 'BLURAY', 'BLUE GENTIAN RECORDS', 'BLUE KINO', 'BLUE UNDERGROUND', 'BMG/ARISTA', 'BMG', 'BMGARISTA', 'BMG ARISTA', 'ARISTA', 'ARISTA/BMG', 'ARISTABMG', 'ARISTA BMG', 'BONTON FILM', 'BONTON', 'BOOMERANG PICTURES', 'BOOMERANG', 'BQHL ÉDITIONS', 'BQHL EDITIONS', 'BQHL', 'BREAKING GLASS', 'BRIDGESTONE', 'BRINK', 'BROAD GREEN PICTURES', 'BROAD GREEN', 'BUSCH MEDIA GROUP', 'BUSCH', 'C MAJOR', 'C.B.S.', 'CAICHANG', 'CALIFÓRNIA FILMES', 'CALIFORNIA FILMES', 'CALIFORNIA', 'CAMEO', 'CAMERA OBSCURA', 'CAMERATA', 'CAMP MOTION PICTURES', 'CAMP MOTION', 'CAPELIGHT PICTURES', 'CAPELIGHT', 'CAPITOL', 'CAPITOL RECORDS', 'CAPRICCI', 'CARGO RECORDS', 'CARLOTTA FILMS', 'CARLOTTA', 'CARLOTA', 'CARMEN FILM', 'CASCADE', 'CATCHPLAY', 'CAULDRON FILMS', 'CAULDRON', 'CBS TELEVISION STUDIOS', 'CBS', 'CCTV', 'CCV ENTERTAINMENT', 'CCV', 'CD BABY', 'CD LAND', 'CECCHI GORI', 'CENTURY MEDIA', 'CHUAN XUN SHI DAI MULTIMEDIA', 'CINE-ASIA', 'CINÉART', 'CINEART', 'CINEDIGM', 'CINEFIL IMAGICA', 'CINEMA EPOCH', 'CINEMA GUILD', 'CINEMA LIBRE STUDIOS', 'CINEMA MONDO', 'CINEMATIC VISION', 'CINEPLOIT RECORDS', 'CINESTRANGE EXTREME', 'CITEL VIDEO', 'CITEL', 'CJ ENTERTAINMENT', 'CJ', 'CLASSIC MEDIA', 'CLASSICFLIX', 'CLASSICLINE', 'CLAUDIO RECORDS', 'CLEAR VISION', 'CLEOPATRA', 'CLOSE UP', 'CMS MEDIA LIMITED', 'CMV LASERVISION', 'CN ENTERTAINMENT', 'CODE RED', 'COHEN MEDIA GROUP', 'COHEN', 'COIN DE MIRE CINÉMA', 'COIN DE MIRE CINEMA', 'COLOSSEO FILM', 'COLUMBIA', 'COLUMBIA PICTURES', 'COLUMBIA/TRI-STAR', 'TRI-STAR', 'COMMERCIAL MARKETING', 'CONCORD MUSIC GROUP', 'CONCORDE VIDEO', 'CONDOR', 'CONSTANTIN FILM', 'CONSTANTIN', 'CONSTANTINO FILMES', 'CONSTANTINO', 'CONSTRUCTIVE MEDIA SERVICE', 'CONSTRUCTIVE', 'CONTENT ZONE', 'CONTENTS GATE', 'COQUEIRO VERDE', 'CORNERSTONE MEDIA', 'CORNERSTONE', 'CP DIGITAL', 'CREST MOVIES', 'CRITERION', 'CRITERION COLLECTION', 'CC', 'CRYSTAL CLASSICS', 'CULT EPICS', 'CULT FILMS', 'CULT VIDEO', 'CURZON FILM WORLD', 'D FILMS', "D'AILLY COMPANY", 'DAILLY COMPANY', 'D AILLY COMPANY', "D'AILLY", 'DAILLY', 'D AILLY', 'DA CAPO', 'DA MUSIC', "DALL'ANGELO PICTURES", 'DALLANGELO PICTURES', "DALL'ANGELO", 'DALL ANGELO PICTURES', 'DALL ANGELO', 'DAREDO', 'DARK FORCE ENTERTAINMENT', 'DARK FORCE', 'DARK SIDE RELEASING', 'DARK SIDE', 'DAZZLER MEDIA', 'DAZZLER', 'DCM PICTURES', 'DCM', 'DEAPLANETA', 'DECCA', 'DEEPJOY', 'DEFIANT SCREEN ENTERTAINMENT', 'DEFIANT SCREEN', 'DEFIANT', 'DELOS', 'DELPHIAN RECORDS', 'DELPHIAN', 'DELTA MUSIC & ENTERTAINMENT', 'DELTA MUSIC AND ENTERTAINMENT', 'DELTA MUSIC ENTERTAINMENT', 'DELTA MUSIC', 'DELTAMAC CO. 
LTD.', 'DELTAMAC CO LTD', 'DELTAMAC CO', 'DELTAMAC', 'DEMAND MEDIA', 'DEMAND', 'DEP', 'DEUTSCHE GRAMMOPHON', 'DFW', 'DGM', 'DIAPHANA', 'DIGIDREAMS STUDIOS', 'DIGIDREAMS', 'DIGITAL ENVIRONMENTS', 'DIGITAL', 'DISCOTEK MEDIA', 'DISCOVERY CHANNEL', 'DISCOVERY', 'DISK KINO', 'DISNEY / BUENA VISTA', 'DISNEY', 'BUENA VISTA', 'DISNEY BUENA VISTA', 'DISTRIBUTION SELECT', 'DIVISA', 'DNC ENTERTAINMENT', 'DNC', 'DOGWOOF', 'DOLMEN HOME VIDEO', 'DOLMEN', 'DONAU FILM', 'DONAU', 'DORADO FILMS', 'DORADO', 'DRAFTHOUSE FILMS', 'DRAFTHOUSE', 'DRAGON FILM ENTERTAINMENT', 'DRAGON ENTERTAINMENT', 'DRAGON FILM', 'DRAGON', 'DREAMWORKS', 'DRIVE ON RECORDS', 'DRIVE ON', 'DRIVE-ON', 'DRIVEON', 'DS MEDIA', 'DTP ENTERTAINMENT AG', 'DTP ENTERTAINMENT', 'DTP AG', 'DTP', 'DTS ENTERTAINMENT', 'DTS', 'DUKE MARKETING', 'DUKE VIDEO DISTRIBUTION', 'DUKE', 'DUTCH FILMWORKS', 'DUTCH', 'DVD INTERNATIONAL', 'DVD', 'DYBEX', 'DYNAMIC', 'DYNIT', 'E1 ENTERTAINMENT', 'E1', 'EAGLE ENTERTAINMENT', 'EAGLE HOME ENTERTAINMENT PVT.LTD.', 'EAGLE HOME ENTERTAINMENT PVTLTD', 'EAGLE HOME ENTERTAINMENT PVT LTD', 'EAGLE HOME ENTERTAINMENT', 'EAGLE PICTURES', 'EAGLE ROCK ENTERTAINMENT', 'EAGLE ROCK', 'EAGLE VISION MEDIA', 'EAGLE VISION', 'EARMUSIC', 'EARTH ENTERTAINMENT', 'EARTH', 'ECHO BRIDGE ENTERTAINMENT', 'ECHO BRIDGE', 'EDEL GERMANY GMBH', 'EDEL GERMANY', 'EDEL RECORDS', 'EDITION TONFILM', 'EDITIONS MONTPARNASSE', 'EDKO FILMS LTD.', 'EDKO FILMS LTD', 'EDKO FILMS', + 'EDKO', "EIN'S M&M CO", 'EINS M&M CO', "EIN'S M&M", 'EINS M&M', 'ELEA-MEDIA', 'ELEA MEDIA', 'ELEA', 'ELECTRIC PICTURE', 'ELECTRIC', 'ELEPHANT FILMS', 'ELEPHANT', 'ELEVATION', 'EMI', 'EMON', 'EMS', 'EMYLIA', 'ENE MEDIA', 'ENE', 'ENTERTAINMENT IN VIDEO', 'ENTERTAINMENT IN', 'ENTERTAINMENT ONE', 'ENTERTAINMENT ONE FILMS CANADA INC.', 'ENTERTAINMENT ONE FILMS CANADA INC', 'ENTERTAINMENT ONE FILMS CANADA', 'ENTERTAINMENT ONE CANADA INC', 'ENTERTAINMENT ONE CANADA', 'ENTERTAINMENTONE', 'EONE', 'EOS', 'EPIC PICTURES', 'EPIC', 'EPIC RECORDS', 'ERATO', 'EROS', 'ESC EDITIONS', 'ESCAPI MEDIA BV', 'ESOTERIC RECORDINGS', 'ESPN FILMS', 'EUREKA ENTERTAINMENT', 'EUREKA', 'EURO PICTURES', 'EURO VIDEO', 'EUROARTS', 'EUROPA FILMES', 'EUROPA', 'EUROPACORP', 'EUROZOOM', 'EXCEL', 'EXPLOSIVE MEDIA', 'EXPLOSIVE', 'EXTRALUCID FILMS', 'EXTRALUCID', 'EYE SEE MOVIES', 'EYE SEE', 'EYK MEDIA', 'EYK', 'FABULOUS FILMS', 'FABULOUS', 'FACTORIS FILMS', 'FACTORIS', 'FARAO RECORDS', 'FARBFILM HOME ENTERTAINMENT', 'FARBFILM ENTERTAINMENT', 'FARBFILM HOME', 'FARBFILM', 'FEELGOOD ENTERTAINMENT', 'FEELGOOD', 'FERNSEHJUWELEN', 'FILM CHEST', 'FILM MEDIA', 'FILM MOVEMENT', 'FILM4', 'FILMART', 'FILMAURO', 'FILMAX', 'FILMCONFECT HOME ENTERTAINMENT', 'FILMCONFECT ENTERTAINMENT', 'FILMCONFECT HOME', 'FILMCONFECT', 'FILMEDIA', 'FILMJUWELEN', 'FILMOTEKA NARODAWA', 'FILMRISE', 'FINAL CUT ENTERTAINMENT', 'FINAL CUT', 'FIREHOUSE 12 RECORDS', 'FIREHOUSE 12', 'FIRST INTERNATIONAL PRODUCTION', 'FIRST INTERNATIONAL', 'FIRST LOOK STUDIOS', 'FIRST LOOK', 'FLAGMAN TRADE', 'FLASHSTAR FILMES', 'FLASHSTAR', 'FLICKER ALLEY', 'FNC ADD CULTURE', 'FOCUS FILMES', 'FOCUS', 'FOKUS MEDIA', 'FOKUSA', 'FOX PATHE EUROPA', 'FOX PATHE', 'FOX EUROPA', 'FOX/MGM', 'FOX MGM', 'MGM', 'MGM/FOX', 'FOX', 'FPE', 'FRANCE TÉLÉVISIONS DISTRIBUTION', 'FRANCE TELEVISIONS DISTRIBUTION', 'FRANCE TELEVISIONS', 'FRANCE', 'FREE DOLPHIN ENTERTAINMENT', 'FREE DOLPHIN', 'FREESTYLE DIGITAL MEDIA', 'FREESTYLE DIGITAL', 'FREESTYLE', 'FREMANTLE HOME ENTERTAINMENT', 'FREMANTLE ENTERTAINMENT', 'FREMANTLE HOME', 'FREMANTL', 'FRENETIC FILMS', 'FRENETIC', 'FRONTIER WORKS', 'FRONTIER', 
'FRONTIERS MUSIC', 'FRONTIERS RECORDS', 'FS FILM OY', 'FS FILM', 'FULL MOON FEATURES', 'FULL MOON', 'FUN CITY EDITIONS', 'FUN CITY', + 'FUNIMATION ENTERTAINMENT', 'FUNIMATION', 'FUSION', 'FUTUREFILM', 'G2 PICTURES', 'G2', 'GAGA COMMUNICATIONS', 'GAGA', 'GAIAM', 'GALAPAGOS', 'GAMMA HOME ENTERTAINMENT', 'GAMMA ENTERTAINMENT', 'GAMMA HOME', 'GAMMA', 'GARAGEHOUSE PICTURES', 'GARAGEHOUSE', 'GARAGEPLAY (車庫娛樂)', '車庫娛樂', 'GARAGEPLAY (Che Ku Yu Le )', 'GARAGEPLAY', 'Che Ku Yu Le', 'GAUMONT', 'GEFFEN', 'GENEON ENTERTAINMENT', 'GENEON', 'GENEON UNIVERSAL ENTERTAINMENT', 'GENERAL VIDEO RECORDING', 'GLASS DOLL FILMS', 'GLASS DOLL', 'GLOBE MUSIC MEDIA', 'GLOBE MUSIC', 'GLOBE MEDIA', 'GLOBE', 'GO ENTERTAIN', 'GO', 'GOLDEN HARVEST', 'GOOD!MOVIES', 'GOOD! MOVIES', 'GOOD MOVIES', 'GRAPEVINE VIDEO', 'GRAPEVINE', 'GRASSHOPPER FILM', 'GRASSHOPPER FILMS', 'GRASSHOPPER', 'GRAVITAS VENTURES', 'GRAVITAS', 'GREAT MOVIES', 'GREAT', 'GREEN APPLE ENTERTAINMENT', 'GREEN ENTERTAINMENT', 'GREEN APPLE', 'GREEN', 'GREENNARAE MEDIA', 'GREENNARAE', 'GRINDHOUSE RELEASING', 'GRINDHOUSE', 'GRIND HOUSE', 'GRYPHON ENTERTAINMENT', 'GRYPHON', 'GUNPOWDER & SKY', 'GUNPOWDER AND SKY', 'GUNPOWDER SKY', 'GUNPOWDER + SKY', 'GUNPOWDER', 'HANABEE ENTERTAINMENT', 'HANABEE', 'HANNOVER HOUSE', 'HANNOVER', 'HANSESOUND', 'HANSE SOUND', 'HANSE', 'HAPPINET', 'HARMONIA MUNDI', 'HARMONIA', 'HBO', 'HDC', 'HEC', 'HELL & BACK RECORDINGS', 'HELL AND BACK RECORDINGS', 'HELL & BACK', 'HELL AND BACK', "HEN'S TOOTH VIDEO", 'HENS TOOTH VIDEO', "HEN'S TOOTH", 'HENS TOOTH', 'HIGH FLIERS', 'HIGHLIGHT', 'HILLSONG', 'HISTORY CHANNEL', 'HISTORY', 'HK VIDÉO', 'HK VIDEO', 'HK', 'HMH HAMBURGER MEDIEN HAUS', 'HAMBURGER MEDIEN HAUS', 'HMH HAMBURGER MEDIEN', 'HMH HAMBURGER', 'HMH', 'HOLLYWOOD CLASSIC ENTERTAINMENT', 'HOLLYWOOD CLASSIC', 'HOLLYWOOD PICTURES', 'HOLLYWOOD', 'HOPSCOTCH ENTERTAINMENT', 'HOPSCOTCH', 'HPM', 'HÄNNSLER CLASSIC', 'HANNSLER CLASSIC', 'HANNSLER', 'I-CATCHER', 'I CATCHER', 'ICATCHER', 'I-ON NEW MEDIA', 'I ON NEW MEDIA', 'ION NEW MEDIA', 'ION MEDIA', 'I-ON', 'ION', 'IAN PRODUCTIONS', 'IAN', 'ICESTORM', 'ICON FILM DISTRIBUTION', 'ICON DISTRIBUTION', 'ICON FILM', 'ICON', 'IDEALE AUDIENCE', 'IDEALE', 'IFC FILMS', 'IFC', 'IFILM', 'ILLUSIONS UNLTD.', 'ILLUSIONS UNLTD', 'ILLUSIONS', 'IMAGE ENTERTAINMENT', 'IMAGE', 'IMAGEM FILMES', 'IMAGEM', 'IMOVISION', 'IMPERIAL CINEPIX', 'IMPRINT', 'IMPULS HOME ENTERTAINMENT', 'IMPULS ENTERTAINMENT', 'IMPULS HOME', 'IMPULS', 'IN-AKUSTIK', 'IN AKUSTIK', 'INAKUSTIK', 'INCEPTION MEDIA GROUP', 'INCEPTION MEDIA', 'INCEPTION GROUP', 'INCEPTION', 'INDEPENDENT', 'INDICAN', 'INDIE RIGHTS', 'INDIE', 'INDIGO', 'INFO', 'INJOINGAN', 'INKED PICTURES', 'INKED', 'INSIDE OUT MUSIC', 'INSIDE MUSIC', 'INSIDE OUT', 'INSIDE', 'INTERCOM', 'INTERCONTINENTAL VIDEO', 'INTERCONTINENTAL', 'INTERGROOVE', 'INTERSCOPE', 'INVINCIBLE PICTURES', 'INVINCIBLE', 'ISLAND/MERCURY', 'ISLAND MERCURY', 'ISLANDMERCURY', 'ISLAND & MERCURY', 'ISLAND AND MERCURY', 'ISLAND', 'ITN', 'ITV DVD', 'ITV', 'IVC', 'IVE ENTERTAINMENT', 'IVE', 'J&R ADVENTURES', 'J&R', 'JR', 'JAKOB', 'JONU MEDIA', 'JONU', 'JRB PRODUCTIONS', 'JRB', 'JUST BRIDGE ENTERTAINMENT', 'JUST BRIDGE', 'JUST ENTERTAINMENT', 'JUST', 'KABOOM ENTERTAINMENT', 'KABOOM', 'KADOKAWA ENTERTAINMENT', 'KADOKAWA', 'KAIROS', 'KALEIDOSCOPE ENTERTAINMENT', 'KALEIDOSCOPE', 'KAM & RONSON ENTERPRISES', 'KAM & RONSON', 'KAM&RONSON ENTERPRISES', 'KAM&RONSON', 'KAM AND RONSON ENTERPRISES', 'KAM AND RONSON', 'KANA HOME VIDEO', 'KARMA FILMS', 'KARMA', 'KATZENBERGER', 'KAZE', + 'KBS MEDIA', 'KBS', 'KD MEDIA', 'KD', 'KING 
MEDIA', 'KING', 'KING RECORDS', 'KINO LORBER', 'KINO', 'KINO SWIAT', 'KINOKUNIYA', 'KINOWELT HOME ENTERTAINMENT/DVD', 'KINOWELT HOME ENTERTAINMENT', 'KINOWELT ENTERTAINMENT', 'KINOWELT HOME DVD', 'KINOWELT ENTERTAINMENT/DVD', 'KINOWELT DVD', 'KINOWELT', 'KIT PARKER FILMS', 'KIT PARKER', 'KITTY MEDIA', 'KNM HOME ENTERTAINMENT', 'KNM ENTERTAINMENT', 'KNM HOME', 'KNM', 'KOBA FILMS', 'KOBA', 'KOCH ENTERTAINMENT', 'KOCH MEDIA', 'KOCH', 'KRAKEN RELEASING', 'KRAKEN', 'KSCOPE', 'KSM', 'KULTUR', "L'ATELIER D'IMAGES", "LATELIER D'IMAGES", "L'ATELIER DIMAGES", 'LATELIER DIMAGES', "L ATELIER D'IMAGES", "L'ATELIER D IMAGES", + 'L ATELIER D IMAGES', "L'ATELIER", 'L ATELIER', 'LATELIER', 'LA AVENTURA AUDIOVISUAL', 'LA AVENTURA', 'LACE GROUP', 'LACE', 'LASER PARADISE', 'LAYONS', 'LCJ EDITIONS', 'LCJ', 'LE CHAT QUI FUME', 'LE PACTE', 'LEDICK FILMHANDEL', 'LEGEND', 'LEOMARK STUDIOS', 'LEOMARK', 'LEONINE FILMS', 'LEONINE', 'LICHTUNG MEDIA LTD', 'LICHTUNG LTD', 'LICHTUNG MEDIA LTD.', 'LICHTUNG LTD.', 'LICHTUNG MEDIA', 'LICHTUNG', 'LIGHTHOUSE HOME ENTERTAINMENT', 'LIGHTHOUSE ENTERTAINMENT', 'LIGHTHOUSE HOME', 'LIGHTHOUSE', 'LIGHTYEAR', 'LIONSGATE FILMS', 'LIONSGATE', 'LIZARD CINEMA TRADE', 'LLAMENTOL', 'LOBSTER FILMS', 'LOBSTER', 'LOGON', 'LORBER FILMS', 'LORBER', 'LOS BANDITOS FILMS', 'LOS BANDITOS', 'LOUD & PROUD RECORDS', 'LOUD AND PROUD RECORDS', 'LOUD & PROUD', 'LOUD AND PROUD', 'LSO LIVE', 'LUCASFILM', 'LUCKY RED', 'LUMIÈRE HOME ENTERTAINMENT', 'LUMIERE HOME ENTERTAINMENT', 'LUMIERE ENTERTAINMENT', 'LUMIERE HOME', 'LUMIERE', 'M6 VIDEO', 'M6', 'MAD DIMENSION', 'MADMAN ENTERTAINMENT', 'MADMAN', 'MAGIC BOX', 'MAGIC PLAY', 'MAGNA HOME ENTERTAINMENT', 'MAGNA ENTERTAINMENT', 'MAGNA HOME', 'MAGNA', 'MAGNOLIA PICTURES', 'MAGNOLIA', 'MAIDEN JAPAN', 'MAIDEN', 'MAJENG MEDIA', 'MAJENG', 'MAJESTIC HOME ENTERTAINMENT', 'MAJESTIC ENTERTAINMENT', 'MAJESTIC HOME', 'MAJESTIC', 'MANGA HOME ENTERTAINMENT', 'MANGA ENTERTAINMENT', 'MANGA HOME', 'MANGA', 'MANTA LAB', 'MAPLE STUDIOS', 'MAPLE', 'MARCO POLO PRODUCTION', 'MARCO POLO', 'MARIINSKY', 'MARVEL STUDIOS', 'MARVEL', 'MASCOT RECORDS', 'MASCOT', 'MASSACRE VIDEO', 'MASSACRE', 'MATCHBOX', 'MATRIX D', 'MAXAM', 'MAYA HOME ENTERTAINMENT', 'MAYA ENTERTAINMENT', 'MAYA HOME', 'MAYAT', 'MDG', 'MEDIA BLASTERS', 'MEDIA FACTORY', 'MEDIA TARGET DISTRIBUTION', 'MEDIA TARGET', 'MEDIAINVISION', 'MEDIATOON', 'MEDIATRES ESTUDIO', 'MEDIATRES STUDIO', 'MEDIATRES', 'MEDICI ARTS', 'MEDICI CLASSICS', 'MEDIUMRARE ENTERTAINMENT', 'MEDIUMRARE', 'MEDUSA', 'MEGASTAR', 'MEI AH', 'MELI MÉDIAS', 'MELI MEDIAS', 'MEMENTO FILMS', 'MEMENTO', 'MENEMSHA FILMS', 'MENEMSHA', 'MERCURY', 'MERCURY STUDIOS', 'MERGE SOFT PRODUCTIONS', 'MERGE PRODUCTIONS', 'MERGE SOFT', 'MERGE', 'METAL BLADE RECORDS', 'METAL BLADE', 'METEOR', 'METRO-GOLDWYN-MAYER', 'METRO GOLDWYN MAYER', 'METROGOLDWYNMAYER', 'METRODOME VIDEO', 'METRODOME', 'METROPOLITAN', 'MFA+', 'MFA', 'MIG FILMGROUP', 'MIG', 'MILESTONE', 'MILL CREEK ENTERTAINMENT', 'MILL CREEK', 'MILLENNIUM MEDIA', 'MILLENNIUM', 'MIRAGE ENTERTAINMENT', 'MIRAGE', 'MIRAMAX', 'MISTERIYA ZVUKA', 'MK2', 'MODE RECORDS', 'MODE', 'MOMENTUM PICTURES', 'MONDO HOME ENTERTAINMENT', 'MONDO ENTERTAINMENT', 'MONDO HOME', 'MONDO MACABRO', 'MONGREL MEDIA', 'MONOLIT', 'MONOLITH VIDEO', 'MONOLITH', 'MONSTER PICTURES', 'MONSTER', 'MONTEREY VIDEO', 'MONTEREY', 'MONUMENT RELEASING', 'MONUMENT', 'MORNINGSTAR', 'MORNING STAR', 'MOSERBAER', 'MOVIEMAX', 'MOVINSIDE', 'MPI MEDIA GROUP', 'MPI MEDIA', 'MPI', 'MR. 
BONGO FILMS', 'MR BONGO FILMS', 'MR BONGO', 'MRG (MERIDIAN)', 'MRG MERIDIAN', 'MRG', 'MERIDIAN', 'MUBI', 'MUG SHOT PRODUCTIONS', 'MUG SHOT', 'MULTIMUSIC', 'MULTI-MUSIC', 'MULTI MUSIC', 'MUSE', 'MUSIC BOX FILMS', 'MUSIC BOX', 'MUSICBOX', 'MUSIC BROKERS', 'MUSIC THEORIES', 'MUSIC VIDEO DISTRIBUTORS', 'MUSIC VIDEO', 'MUSTANG ENTERTAINMENT', 'MUSTANG', 'MVD VISUAL', 'MVD', 'MVD/VSC', 'MVL', 'MVM ENTERTAINMENT', 'MVM', 'MYNDFORM', 'MYSTIC NIGHT PICTURES', 'MYSTIC NIGHT', 'NAMELESS MEDIA', 'NAMELESS', 'NAPALM RECORDS', 'NAPALM', 'NATIONAL ENTERTAINMENT MEDIA', 'NATIONAL ENTERTAINMENT', 'NATIONAL MEDIA', 'NATIONAL FILM ARCHIVE', 'NATIONAL ARCHIVE', 'NATIONAL FILM', 'NATIONAL GEOGRAPHIC', 'NAT GEO TV', 'NAT GEO', 'NGO', 'NAXOS', 'NBCUNIVERSAL ENTERTAINMENT JAPAN', 'NBC UNIVERSAL ENTERTAINMENT JAPAN', 'NBCUNIVERSAL JAPAN', 'NBC UNIVERSAL JAPAN', 'NBC JAPAN', 'NBO ENTERTAINMENT', 'NBO', 'NEOS', 'NETFLIX', 'NETWORK', 'NEW BLOOD', 'NEW DISC', 'NEW KSM', 'NEW LINE CINEMA', 'NEW LINE', 'NEW MOVIE TRADING CO. LTD', 'NEW MOVIE TRADING CO LTD', 'NEW MOVIE TRADING CO', 'NEW MOVIE TRADING', 'NEW WAVE FILMS', 'NEW WAVE', 'NFI', 'NHK', 'NIPPONART', 'NIS AMERICA', 'NJUTAFILMS', 'NOBLE ENTERTAINMENT', 'NOBLE', 'NORDISK FILM', 'NORDISK', 'NORSK FILM', 'NORSK', 'NORTH AMERICAN MOTION PICTURES', 'NOS AUDIOVISUAIS', 'NOTORIOUS PICTURES', 'NOTORIOUS', 'NOVA MEDIA', 'NOVA', 'NOVA SALES AND DISTRIBUTION', 'NOVA SALES & DISTRIBUTION', 'NSM', 'NSM RECORDS', 'NUCLEAR BLAST', 'NUCLEUS FILMS', 'NUCLEUS', 'OBERLIN MUSIC', 'OBERLIN', 'OBRAS-PRIMAS DO CINEMA', 'OBRAS PRIMAS DO CINEMA', 'OBRASPRIMAS DO CINEMA', 'OBRAS-PRIMAS CINEMA', 'OBRAS PRIMAS CINEMA', 'OBRASPRIMAS CINEMA', 'OBRAS-PRIMAS', 'OBRAS PRIMAS', 'OBRASPRIMAS', 'ODEON', 'OFDB FILMWORKS', 'OFDB', 'OLIVE FILMS', 'OLIVE', 'ONDINE', 'ONSCREEN FILMS', 'ONSCREEN', 'OPENING DISTRIBUTION', 'OPERA AUSTRALIA', 'OPTIMUM HOME ENTERTAINMENT', 'OPTIMUM ENTERTAINMENT', 'OPTIMUM HOME', 'OPTIMUM', 'OPUS ARTE', 'ORANGE STUDIO', 'ORANGE', 'ORLANDO EASTWOOD FILMS', 'ORLANDO FILMS', 'ORLANDO EASTWOOD', 'ORLANDO', 'ORUSTAK PICTURES', 'ORUSTAK', 'OSCILLOSCOPE PICTURES', 'OSCILLOSCOPE', 'OUTPLAY', 'PALISADES TARTAN', 'PAN VISION', 'PANVISION', 'PANAMINT CINEMA', 'PANAMINT', 'PANDASTORM ENTERTAINMENT', 'PANDA STORM ENTERTAINMENT', 'PANDASTORM', 'PANDA STORM', 'PANDORA FILM', 'PANDORA', 'PANEGYRIC', 'PANORAMA', 'PARADE DECK FILMS', 'PARADE DECK', 'PARADISE', 'PARADISO FILMS', 'PARADOX', 'PARAMOUNT PICTURES', 'PARAMOUNT', 'PARIS FILMES', 'PARIS FILMS', 'PARIS', 'PARK CIRCUS', 'PARLOPHONE', 'PASSION RIVER', 'PATHE DISTRIBUTION', 'PATHE', 'PBS', 'PEACE ARCH TRINITY', 'PECCADILLO PICTURES', 'PEPPERMINT', 'PHASE 4 FILMS', 'PHASE 4', 'PHILHARMONIA BAROQUE', 'PICTURE HOUSE ENTERTAINMENT', 'PICTURE ENTERTAINMENT', 'PICTURE HOUSE', 'PICTURE', 'PIDAX', + 'PINK FLOYD RECORDS', 'PINK FLOYD', 'PINNACLE FILMS', 'PINNACLE', 'PLAIN', 'PLATFORM ENTERTAINMENT LIMITED', 'PLATFORM ENTERTAINMENT LTD', 'PLATFORM ENTERTAINMENT LTD.', 'PLATFORM ENTERTAINMENT', 'PLATFORM', 'PLAYARTE', 'PLG UK CLASSICS', 'PLG UK', 'PLG', 'POLYBAND & TOPPIC VIDEO/WVG', 'POLYBAND AND TOPPIC VIDEO/WVG', 'POLYBAND & TOPPIC VIDEO WVG', 'POLYBAND & TOPPIC VIDEO AND WVG', 'POLYBAND & TOPPIC VIDEO & WVG', 'POLYBAND AND TOPPIC VIDEO WVG', 'POLYBAND AND TOPPIC VIDEO AND WVG', 'POLYBAND AND TOPPIC VIDEO & WVG', 'POLYBAND & TOPPIC VIDEO', 'POLYBAND AND TOPPIC VIDEO', 'POLYBAND & TOPPIC', 'POLYBAND AND TOPPIC', 'POLYBAND', 'WVG', 'POLYDOR', 'PONY', 'PONY CANYON', 'POTEMKINE', 'POWERHOUSE FILMS', 'POWERHOUSE', 'POWERSTATIOM', 'PRIDE & JOY', 
'PRIDE AND JOY', 'PRINZ MEDIA', 'PRINZ', 'PRIS AUDIOVISUAIS', 'PRO VIDEO', 'PRO-VIDEO', 'PRO-MOTION', 'PRO MOTION', 'PROD. JRB', 'PROD JRB', 'PRODISC', 'PROKINO', 'PROVOGUE RECORDS', 'PROVOGUE', 'PROWARE', 'PULP VIDEO', 'PULP', 'PULSE VIDEO', 'PULSE', 'PURE AUDIO RECORDINGS', 'PURE AUDIO', 'PURE FLIX ENTERTAINMENT', 'PURE FLIX', 'PURE ENTERTAINMENT', 'PYRAMIDE VIDEO', 'PYRAMIDE', 'QUALITY FILMS', 'QUALITY', 'QUARTO VALLEY RECORDS', 'QUARTO VALLEY', 'QUESTAR', 'R SQUARED FILMS', 'R SQUARED', 'RAPID EYE MOVIES', 'RAPID EYE', 'RARO VIDEO', 'RARO', 'RAROVIDEO U.S.', 'RAROVIDEO US', 'RARO VIDEO US', 'RARO VIDEO U.S.', 'RARO U.S.', 'RARO US', 'RAVEN BANNER RELEASING', 'RAVEN BANNER', 'RAVEN', 'RAZOR DIGITAL ENTERTAINMENT', 'RAZOR DIGITAL', 'RCA', 'RCO LIVE', 'RCO', 'RCV', 'REAL GONE MUSIC', 'REAL GONE', 'REANIMEDIA', 'REANI MEDIA', 'REDEMPTION', 'REEL', 'RELIANCE HOME VIDEO & GAMES', 'RELIANCE HOME VIDEO AND GAMES', 'RELIANCE HOME VIDEO', 'RELIANCE VIDEO', 'RELIANCE HOME', 'RELIANCE', 'REM CULTURE', 'REMAIN IN LIGHT', 'REPRISE', 'RESEN', 'RETROMEDIA', 'REVELATION FILMS LTD.', 'REVELATION FILMS LTD', 'REVELATION FILMS', 'REVELATION LTD.', 'REVELATION LTD', 'REVELATION', 'REVOLVER ENTERTAINMENT', 'REVOLVER', 'RHINO MUSIC', 'RHINO', 'RHV', 'RIGHT STUF', 'RIMINI EDITIONS', 'RISING SUN MEDIA', 'RLJ ENTERTAINMENT', 'RLJ', 'ROADRUNNER RECORDS', 'ROADSHOW ENTERTAINMENT', 'ROADSHOW', 'RONE', 'RONIN FLIX', 'ROTANA HOME ENTERTAINMENT', 'ROTANA ENTERTAINMENT', 'ROTANA HOME', 'ROTANA', 'ROUGH TRADE', + 'ROUNDER', 'SAFFRON HILL FILMS', 'SAFFRON HILL', 'SAFFRON', 'SAMUEL GOLDWYN FILMS', 'SAMUEL GOLDWYN', 'SAN FRANCISCO SYMPHONY', 'SANDREW METRONOME', 'SAPHRANE', 'SAVOR', 'SCANBOX ENTERTAINMENT', 'SCANBOX', 'SCENIC LABS', 'SCHRÖDERMEDIA', 'SCHRODERMEDIA', 'SCHRODER MEDIA', 'SCORPION RELEASING', 'SCORPION', 'SCREAM TEAM RELEASING', 'SCREAM TEAM', 'SCREEN MEDIA', 'SCREEN', 'SCREENBOUND PICTURES', 'SCREENBOUND', 'SCREENWAVE MEDIA', 'SCREENWAVE', 'SECOND RUN', 'SECOND SIGHT', 'SEEDSMAN GROUP', 'SELECT VIDEO', 'SELECTA VISION', 'SENATOR', 'SENTAI FILMWORKS', 'SENTAI', 'SEVEN7', 'SEVERIN FILMS', 'SEVERIN', 'SEVILLE', 'SEYONS ENTERTAINMENT', 'SEYONS', 'SF STUDIOS', 'SGL ENTERTAINMENT', 'SGL', 'SHAMELESS', 'SHAMROCK MEDIA', 'SHAMROCK', 'SHANGHAI EPIC MUSIC ENTERTAINMENT', 'SHANGHAI EPIC ENTERTAINMENT', 'SHANGHAI EPIC MUSIC', 'SHANGHAI MUSIC ENTERTAINMENT', 'SHANGHAI ENTERTAINMENT', 'SHANGHAI MUSIC', 'SHANGHAI', 'SHEMAROO', 'SHOCHIKU', 'SHOCK', 'SHOGAKU KAN', 'SHOUT FACTORY', 'SHOUT! 
FACTORY', 'SHOUT', 'SHOUT!', 'SHOWBOX', 'SHOWTIME ENTERTAINMENT', 'SHOWTIME', 'SHRIEK SHOW', 'SHUDDER', 'SIDONIS', 'SIDONIS CALYSTA', 'SIGNAL ONE ENTERTAINMENT', 'SIGNAL ONE', 'SIGNATURE ENTERTAINMENT', 'SIGNATURE', 'SILVER VISION', 'SINISTER FILM', 'SINISTER', 'SIREN VISUAL ENTERTAINMENT', 'SIREN VISUAL', 'SIREN ENTERTAINMENT', 'SIREN', 'SKANI', 'SKY DIGI', + 'SLASHER // VIDEO', 'SLASHER / VIDEO', 'SLASHER VIDEO', 'SLASHER', 'SLOVAK FILM INSTITUTE', 'SLOVAK FILM', 'SFI', 'SM LIFE DESIGN GROUP', 'SMOOTH PICTURES', 'SMOOTH', 'SNAPPER MUSIC', 'SNAPPER', 'SODA PICTURES', 'SODA', 'SONO LUMINUS', 'SONY MUSIC', 'SONY PICTURES', 'SONY', 'SONY PICTURES CLASSICS', 'SONY CLASSICS', 'SOUL MEDIA', 'SOUL', 'SOULFOOD MUSIC DISTRIBUTION', 'SOULFOOD DISTRIBUTION', 'SOULFOOD MUSIC', 'SOULFOOD', 'SOYUZ', 'SPECTRUM', 'SPENTZOS FILM', 'SPENTZOS', 'SPIRIT ENTERTAINMENT', 'SPIRIT', 'SPIRIT MEDIA GMBH', 'SPIRIT MEDIA', 'SPLENDID ENTERTAINMENT', 'SPLENDID FILM', 'SPO', 'SQUARE ENIX', 'SRI BALAJI VIDEO', 'SRI BALAJI', 'SRI', 'SRI VIDEO', 'SRS CINEMA', 'SRS', 'SSO RECORDINGS', 'SSO', 'ST2 MUSIC', 'ST2', 'STAR MEDIA ENTERTAINMENT', 'STAR ENTERTAINMENT', 'STAR MEDIA', 'STAR', 'STARLIGHT', 'STARZ / ANCHOR BAY', 'STARZ ANCHOR BAY', 'STARZ', 'ANCHOR BAY', 'STER KINEKOR', 'STERLING ENTERTAINMENT', 'STERLING', 'STINGRAY', 'STOCKFISCH RECORDS', 'STOCKFISCH', 'STRAND RELEASING', 'STRAND', 'STUDIO 4K', 'STUDIO CANAL', 'STUDIO GHIBLI', 'GHIBLI', 'STUDIO HAMBURG ENTERPRISES', 'HAMBURG ENTERPRISES', 'STUDIO HAMBURG', 'HAMBURG', 'STUDIO S', 'SUBKULTUR ENTERTAINMENT', 'SUBKULTUR', 'SUEVIA FILMS', 'SUEVIA', 'SUMMIT ENTERTAINMENT', 'SUMMIT', 'SUNFILM ENTERTAINMENT', 'SUNFILM', 'SURROUND RECORDS', 'SURROUND', 'SVENSK FILMINDUSTRI', 'SVENSK', 'SWEN FILMES', 'SWEN FILMS', 'SWEN', 'SYNAPSE FILMS', 'SYNAPSE', 'SYNDICADO', 'SYNERGETIC', 'T- SERIES', 'T-SERIES', 'T SERIES', 'TSERIES', 'T.V.P.', 'TVP', 'TACET RECORDS', 'TACET', 'TAI SENG', 'TAI SHENG', 'TAKEONE', 'TAKESHOBO', 'TAMASA DIFFUSION', 'TC ENTERTAINMENT', 'TC', 'TDK', 'TEAM MARKETING', 'TEATRO REAL', 'TEMA DISTRIBUCIONES', 'TEMPE DIGITAL', 'TF1 VIDÉO', 'TF1 VIDEO', 'TF1', 'THE BLU', 'BLU', 'THE ECSTASY OF FILMS', 'THE FILM DETECTIVE', 'FILM DETECTIVE', 'THE JOKERS', 'JOKERS', 'THE ON', 'ON', 'THIMFILM', 'THIM FILM', 'THIM', 'THIRD WINDOW FILMS', 'THIRD WINDOW', '3RD WINDOW FILMS', '3RD WINDOW', 'THUNDERBEAN ANIMATION', 'THUNDERBEAN', 'THUNDERBIRD RELEASING', 'THUNDERBIRD', 'TIBERIUS FILM', 'TIME LIFE', 'TIMELESS MEDIA GROUP', 'TIMELESS MEDIA', 'TIMELESS GROUP', 'TIMELESS', 'TLA RELEASING', 'TLA', 'TOBIS FILM', 'TOBIS', 'TOEI', 'TOHO', 'TOKYO SHOCK', 'TOKYO', 'TONPOOL MEDIEN GMBH', 'TONPOOL MEDIEN', 'TOPICS ENTERTAINMENT', 'TOPICS', 'TOUCHSTONE PICTURES', 'TOUCHSTONE', 'TRANSMISSION FILMS', 'TRANSMISSION', 'TRAVEL VIDEO STORE', 'TRIART', 'TRIGON FILM', 'TRIGON', 'TRINITY HOME ENTERTAINMENT', 'TRINITY ENTERTAINMENT', 'TRINITY HOME', 'TRINITY', 'TRIPICTURES', 'TRI-PICTURES', 'TRI PICTURES', 'TROMA', 'TURBINE MEDIEN', 'TURTLE RECORDS', 'TURTLE', 'TVA FILMS', 'TVA', 'TWILIGHT TIME', 'TWILIGHT', 'TT', 'TWIN CO., LTD.', 'TWIN CO, LTD.', 'TWIN CO., LTD', 'TWIN CO, LTD', 'TWIN CO LTD', 'TWIN LTD', 'TWIN CO.', 'TWIN CO', 'TWIN', 'UCA', 'UDR', 'UEK', 'UFA/DVD', 'UFA DVD', 'UFADVD', 'UGC PH', 'ULTIMATE3DHEAVEN', 'ULTRA', 'UMBRELLA ENTERTAINMENT', 'UMBRELLA', 'UMC', "UNCORK'D ENTERTAINMENT", 'UNCORKD ENTERTAINMENT', 'UNCORK D ENTERTAINMENT', "UNCORK'D", 'UNCORK D', 'UNCORKD', 'UNEARTHED FILMS', 'UNEARTHED', 'UNI DISC', 'UNIMUNDOS', 'UNITEL', 'UNIVERSAL MUSIC', 'UNIVERSAL SONY PICTURES HOME 
ENTERTAINMENT', 'UNIVERSAL SONY PICTURES ENTERTAINMENT', 'UNIVERSAL SONY PICTURES HOME', 'UNIVERSAL SONY PICTURES', 'UNIVERSAL HOME ENTERTAINMENT', 'UNIVERSAL ENTERTAINMENT', + 'UNIVERSAL HOME', 'UNIVERSAL STUDIOS', 'UNIVERSAL', 'UNIVERSE LASER & VIDEO CO.', 'UNIVERSE LASER AND VIDEO CO.', 'UNIVERSE LASER & VIDEO CO', 'UNIVERSE LASER AND VIDEO CO', 'UNIVERSE LASER CO.', 'UNIVERSE LASER CO', 'UNIVERSE LASER', 'UNIVERSUM FILM', 'UNIVERSUM', 'UTV', 'VAP', 'VCI', 'VENDETTA FILMS', 'VENDETTA', 'VERSÁTIL HOME VIDEO', 'VERSÁTIL VIDEO', 'VERSÁTIL HOME', 'VERSÁTIL', 'VERSATIL HOME VIDEO', 'VERSATIL VIDEO', 'VERSATIL HOME', 'VERSATIL', 'VERTICAL ENTERTAINMENT', 'VERTICAL', 'VÉRTICE 360º', 'VÉRTICE 360', 'VERTICE 360o', 'VERTICE 360', 'VERTIGO BERLIN', 'VÉRTIGO FILMS', 'VÉRTIGO', 'VERTIGO FILMS', 'VERTIGO', 'VERVE PICTURES', 'VIA VISION ENTERTAINMENT', 'VIA VISION', 'VICOL ENTERTAINMENT', 'VICOL', 'VICOM', 'VICTOR ENTERTAINMENT', 'VICTOR', 'VIDEA CDE', 'VIDEO FILM EXPRESS', 'VIDEO FILM', 'VIDEO EXPRESS', 'VIDEO MUSIC, INC.', 'VIDEO MUSIC, INC', 'VIDEO MUSIC INC.', 'VIDEO MUSIC INC', 'VIDEO MUSIC', 'VIDEO SERVICE CORP.', 'VIDEO SERVICE CORP', 'VIDEO SERVICE', 'VIDEO TRAVEL', 'VIDEOMAX', 'VIDEO MAX', 'VII PILLARS ENTERTAINMENT', 'VII PILLARS', 'VILLAGE FILMS', 'VINEGAR SYNDROME', 'VINEGAR', 'VS', 'VINNY MOVIES', 'VINNY', 'VIRGIL FILMS & ENTERTAINMENT', 'VIRGIL FILMS AND ENTERTAINMENT', 'VIRGIL ENTERTAINMENT', 'VIRGIL FILMS', 'VIRGIL', 'VIRGIN RECORDS', 'VIRGIN', 'VISION FILMS', 'VISION', 'VISUAL ENTERTAINMENT GROUP', 'VISUAL GROUP', 'VISUAL ENTERTAINMENT', 'VISUAL', 'VIVENDI VISUAL ENTERTAINMENT', 'VIVENDI VISUAL', 'VIVENDI', 'VIZ PICTURES', 'VIZ', 'VLMEDIA', 'VL MEDIA', 'VL', 'VOLGA', 'VVS FILMS', 'VVS', 'VZ HANDELS GMBH', 'VZ HANDELS', 'WARD RECORDS', 'WARD', 'WARNER BROS.', 'WARNER BROS', 'WARNER ARCHIVE', 'WARNER ARCHIVE COLLECTION', 'WAC', 'WARNER', 'WARNER MUSIC', 'WEA', 'WEINSTEIN COMPANY', 'WEINSTEIN', 'WELL GO USA', 'WELL GO', 'WELTKINO FILMVERLEIH', 'WEST VIDEO', 'WEST', 'WHITE PEARL MOVIES', 'WHITE PEARL', 'WICKED-VISION MEDIA', 'WICKED VISION MEDIA', 'WICKEDVISION MEDIA', 'WICKED-VISION', 'WICKED VISION', 'WICKEDVISION', 'WIENERWORLD', 'WILD BUNCH', 'WILD EYE RELEASING', 'WILD EYE', 'WILD SIDE VIDEO', 'WILD SIDE', 'WME', 'WOLFE VIDEO', 'WOLFE', 'WORD ON FIRE', 'WORKS FILM GROUP', 'WORLD WRESTLING', 'WVG MEDIEN', 'WWE STUDIOS', 'WWE', 'X RATED KULT', 'X-RATED KULT', 'X RATED CULT', 'X-RATED CULT', 'X RATED', 'X-RATED', 'XCESS', 'XLRATOR', 'XT VIDEO', 'XT', 'YAMATO VIDEO', 'YAMATO', 'YASH RAJ FILMS', 'YASH RAJS', 'ZEITGEIST FILMS', 'ZEITGEIST', 'ZENITH PICTURES', 'ZENITH', 'ZIMA', 'ZYLO', 'ZYX MUSIC', 'ZYX', 'MASTERS OF CINEMA', 'MOC' ] @@ -1967,13 +2105,12 @@ def get_distributor(self, distributor_in): distributor_out = each return distributor_out - def get_video_codec(self, bdinfo): codecs = { - "MPEG-2 Video" : "MPEG-2", - "MPEG-4 AVC Video" : "AVC", - "MPEG-H HEVC Video" : "HEVC", - "VC-1 Video" : "VC-1" + "MPEG-2 Video": "MPEG-2", + "MPEG-4 AVC Video": "AVC", + "MPEG-H HEVC Video": "HEVC", + "VC-1 Video": "VC-1" } codec = codecs.get(bdinfo['video'][0]['codec'], "") return codec @@ -1989,21 +2126,21 @@ def get_video_encode(self, mi, type, bdinfo): if mi['media']['track'][1].get('Encoded_Library_Settings', None): has_encode_settings = True bit_depth = mi['media']['track'][1].get('BitDepth', '0') - except: + except Exception: format = bdinfo['video'][0]['codec'] format_profile = bdinfo['video'][0]['profile'] - if type in ("ENCODE", "WEBRIP"): #ENCODE or WEBRIP + if type in ("ENCODE", 
"WEBRIP"): # ENCODE or WEBRIP if format == 'AVC': codec = 'x264' elif format == 'HEVC': codec = 'x265' - elif type in ('WEBDL', 'HDTV'): #WEB-DL + elif type in ('WEBDL', 'HDTV'): # WEB-DL if format == 'AVC': codec = 'H.264' elif format == 'HEVC': codec = 'H.265' - - if type == 'HDTV' and has_encode_settings == True: + + if type == 'HDTV' and has_encode_settings is True: codec = codec.replace('H.', 'x') elif format == "VP9": codec = "VP9" @@ -2019,16 +2156,15 @@ def get_video_encode(self, mi, type, bdinfo): video_codec = f"MPEG-{mi['media']['track'][1].get('Format_Version')}" return video_encode, video_codec, has_encode_settings, bit_depth - def get_edition(self, video, bdinfo, filelist, manual_edition): if video.lower().startswith('dc'): video = video.replace('dc', '', 1) - + guess = guessit(video) tag = guess.get('release_group', 'NOGROUP') repack = "" edition = "" - + if bdinfo is not None: try: edition = guessit(bdinfo['label'])['edition'] @@ -2041,10 +2177,10 @@ def get_edition(self, video, bdinfo, filelist, manual_edition): except Exception as e: print(f"Video Edition Guess Error: {e}") edition = "" - + if isinstance(edition, list): edition = " ".join(edition) - + if len(filelist) == 1: video = os.path.basename(video) @@ -2055,9 +2191,9 @@ def get_edition(self, video, bdinfo, filelist, manual_edition): if manual_edition: edition = str(manual_edition) - + print(f"Edition After Manual Edition: {edition}") - + if "REPACK" in edition.upper() or "V2" in video: repack = "REPACK" if "REPACK2" in edition.upper() or "V3" in video: @@ -2068,16 +2204,16 @@ def get_edition(self, video, bdinfo, filelist, manual_edition): repack = "PROPER" if "RERIP" in edition.upper(): repack = "RERIP" - + print(f"Repack after Checks: {repack}") - + # Only remove REPACK, RERIP, or PROPER from edition if they're not part of manual_edition edition = re.sub(r"(\bREPACK\d?\b|\bRERIP\b|\bPROPER\b)", "", edition, flags=re.IGNORECASE).strip() bad = ['internal', 'limited', 'retail'] if edition.lower() in bad: edition = "" - + return edition, repack """ @@ -2171,7 +2307,7 @@ def create_torrent(self, meta, path, output_filename): if meta['is_disc']: include, exclude = "", "" else: - exclude = ["*.*", "*sample.mkv", "!sample*.*"] + exclude = ["*.*", "*sample.mkv", "!sample*.*"] include = ["*.mkv", "*.mp4", "*.ts"] # Create and write the new torrent using the CustomTorrent class @@ -2197,7 +2333,7 @@ def create_torrent(self, meta, path, output_filename): console.print("[bold green].torrent created", end="\r") return torrent - + def torf_cb(self, torrent, filepath, pieces_done, pieces_total): # print(f'{pieces_done/pieces_total*100:3.0f} % done') cli_ui.info_progress("Hashing...", pieces_done, pieces_total) @@ -2216,7 +2352,7 @@ def create_base_from_existing_torrent(self, torrentpath, base_dir, uuid): base_torrent.trackers = ['https://fake.tracker'] base_torrent.comment = "Created by L4G's Upload Assistant" base_torrent.created_by = "Created by L4G's Upload Assistant" - #Remove Un-whitelisted info from torrent + # Remove Un-whitelisted info from torrent for each in list(base_torrent.metainfo['info']): if each not in ('files', 'length', 'name', 'piece length', 'pieces', 'private', 'source'): base_torrent.metainfo['info'].pop(each, None) @@ -2227,7 +2363,6 @@ def create_base_from_existing_torrent(self, torrentpath, base_dir, uuid): base_torrent.private = True Torrent.copy(base_torrent).write(f"{base_dir}/tmp/{uuid}/BASE.torrent", overwrite=True) - """ Upload Screenshots """ @@ -2367,7 +2502,7 @@ def upload_screens(self, meta, 
screens, img_host_num, i, total_screens, custom_i # If we broke out of the loop due to a failure, switch to the next host and retry img_host_num += 1 if img_host_num > len(self.config['DEFAULT']) - 1: - console.print(f"[red]All image hosts failed. Unable to complete uploads.") + console.print("[red]All image hosts failed. Unable to complete uploads.") return image_list, i # Or you could raise an exception if preferred img_host = self.config['DEFAULT'][f'img_host_{img_host_num}'] @@ -2391,7 +2526,7 @@ async def imgbox_upload(self, chdir, image_glob): async def get_name(self, meta): type = meta.get('type', "") - title = meta.get('title',"") + title = meta.get('title', "") alt_title = meta.get('aka', "") year = meta.get('year', "") resolution = meta.get('resolution', "") @@ -2409,7 +2544,7 @@ async def get_name(self, meta): uhd = meta.get('uhd', "") hdr = meta.get('hdr', "") episode_title = meta.get('episode_title', '') - if meta.get('is_disc', "") == "BDMV": #Disk + if meta.get('is_disc', "") == "BDMV": # Disk video_codec = meta.get('video_codec', "") region = meta.get('region', "") elif meta.get('is_disc', "") == "DVD": @@ -2425,11 +2560,11 @@ async def get_name(self, meta): year = meta['year'] else: year = "" - if meta.get('no_season', False) == True: + if meta.get('no_season', False) is True: season = '' - if meta.get('no_year', False) == True: + if meta.get('no_year', False) is True: year = '' - if meta.get('no_aka', False) == True: + if meta.get('no_aka', False) is True: alt_title = '' if meta['debug']: console.log("[cyan]get_name cat/type") @@ -2438,38 +2573,38 @@ async def get_name(self, meta): console.log("[cyan]get_name meta:") console.log(meta) - #YAY NAMING FUN - if meta['category'] == "MOVIE": #MOVIE SPECIFIC - if type == "DISC": #Disk + # YAY NAMING FUN + if meta['category'] == "MOVIE": # MOVIE SPECIFIC + if type == "DISC": # Disk if meta['is_disc'] == 'BDMV': name = f"{title} {alt_title} {year} {three_d} {edition} {repack} {resolution} {region} {uhd} {source} {hdr} {video_codec} {audio}" potential_missing = ['edition', 'region', 'distributor'] - elif meta['is_disc'] == 'DVD': + elif meta['is_disc'] == 'DVD': name = f"{title} {alt_title} {year} {edition} {repack} {source} {dvd_size} {audio}" potential_missing = ['edition', 'distributor'] elif meta['is_disc'] == 'HDDVD': name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {source} {video_codec} {audio}" potential_missing = ['edition', 'region', 'distributor'] - elif type == "REMUX" and source in ("BluRay", "HDDVD"): #BluRay/HDDVD Remux - name = f"{title} {alt_title} {year} {three_d} {edition} {repack} {resolution} {uhd} {source} REMUX {hdr} {video_codec} {audio}" + elif type == "REMUX" and source in ("BluRay", "HDDVD"): # BluRay/HDDVD Remux + name = f"{title} {alt_title} {year} {three_d} {edition} {repack} {resolution} {uhd} {source} REMUX {hdr} {video_codec} {audio}" potential_missing = ['edition', 'description'] - elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD"): #DVD Remux - name = f"{title} {alt_title} {year} {edition} {repack} {source} REMUX {audio}" + elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD"): # DVD Remux + name = f"{title} {alt_title} {year} {edition} {repack} {source} REMUX {audio}" potential_missing = ['edition', 'description'] - elif type == "ENCODE": #Encode - name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {uhd} {source} {audio} {hdr} {video_encode}" + elif type == "ENCODE": # Encode + name = f"{title} {alt_title} {year} {edition} {repack} 
{resolution} {uhd} {source} {audio} {hdr} {video_encode}" potential_missing = ['edition', 'description'] - elif type == "WEBDL": #WEB-DL + elif type == "WEBDL": # WEB-DL name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {uhd} {service} WEB-DL {audio} {hdr} {video_encode}" potential_missing = ['edition', 'service'] - elif type == "WEBRIP": #WEBRip + elif type == "WEBRIP": # WEBRip name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {uhd} {service} WEBRip {audio} {hdr} {video_encode}" potential_missing = ['edition', 'service'] - elif type == "HDTV": #HDTV + elif type == "HDTV": # HDTV name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {source} {audio} {video_encode}" potential_missing = [] - elif meta['category'] == "TV": #TV SPECIFIC - if type == "DISC": #Disk + elif meta['category'] == "TV": # TV SPECIFIC + if type == "DISC": # Disk if meta['is_disc'] == 'BDMV': name = f"{title} {year} {alt_title} {season}{episode} {three_d} {edition} {repack} {resolution} {region} {uhd} {source} {hdr} {video_codec} {audio}" potential_missing = ['edition', 'region', 'distributor'] @@ -2479,29 +2614,28 @@ async def get_name(self, meta): elif meta['is_disc'] == 'HDDVD': name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {source} {video_codec} {audio}" potential_missing = ['edition', 'region', 'distributor'] - elif type == "REMUX" and source in ("BluRay", "HDDVD"): #BluRay Remux - name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {three_d} {edition} {repack} {resolution} {uhd} {source} REMUX {hdr} {video_codec} {audio}" #SOURCE + elif type == "REMUX" and source in ("BluRay", "HDDVD"): # BluRay Remux + name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {three_d} {edition} {repack} {resolution} {uhd} {source} REMUX {hdr} {video_codec} {audio}" # SOURCE potential_missing = ['edition', 'description'] - elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD"): #DVD Remux - name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {source} REMUX {audio}" #SOURCE + elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD"): # DVD Remux + name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {source} REMUX {audio}" # SOURCE potential_missing = ['edition', 'description'] - elif type == "ENCODE": #Encode - name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {resolution} {uhd} {source} {audio} {hdr} {video_encode}" #SOURCE + elif type == "ENCODE": # Encode + name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {resolution} {uhd} {source} {audio} {hdr} {video_encode}" # SOURCE potential_missing = ['edition', 'description'] - elif type == "WEBDL": #WEB-DL + elif type == "WEBDL": # WEB-DL name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {resolution} {uhd} {service} WEB-DL {audio} {hdr} {video_encode}" potential_missing = ['edition', 'service'] - elif type == "WEBRIP": #WEBRip + elif type == "WEBRIP": # WEBRip name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {resolution} {uhd} {service} WEBRip {audio} {hdr} {video_encode}" potential_missing = ['edition', 'service'] - elif type == "HDTV": #HDTV + elif type == "HDTV": # HDTV name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {resolution} {source} {audio} 
{video_encode}" potential_missing = [] - - try: + try: name = ' '.join(name.split()) - except: + except Exception: console.print("[bold red]Unable to generate name. Please re-run and correct any of the following args if needed.") console.print(f"--category [yellow]{meta['category']}") console.print(f"--type [yellow]{meta['type']}") @@ -2513,15 +2647,12 @@ async def get_name(self, meta): clean_name = self.clean_filename(name) return name_notag, name, clean_name, potential_missing - - - async def get_season_episode(self, video, meta): if meta['category'] == 'TV': filelist = meta['filelist'] meta['tv_pack'] = 0 is_daily = False - if meta['anime'] == False: + if meta['anime'] is False: try: if meta.get('manual_date'): raise ManualDateException @@ -2531,13 +2662,13 @@ async def get_season_episode(self, video, meta): guess_year = "" if guessit(video)["season"] == guess_year: if f"s{guessit(video)['season']}" in video.lower(): - season_int = str(guessit(video)["season"]) + season_int = str(guessit(video)["season"]) season = "S" + season_int.zfill(2) else: season_int = "1" season = "S01" else: - season_int = str(guessit(video)["season"]) + season_int = str(guessit(video)["season"]) season = "S" + season_int.zfill(2) except Exception: @@ -2554,11 +2685,11 @@ async def get_season_episode(self, video, meta): season_int = "1" season = "S01" try: - if is_daily != True: + if is_daily is not True: episodes = "" if len(filelist) == 1: episodes = guessit(video)['episode'] - if type(episodes) == list: + if isinstance(episodes, list): episode = "" for item in guessit(video)["episode"]: ep = (str(item).zfill(2)) @@ -2576,7 +2707,7 @@ async def get_season_episode(self, video, meta): episode_int = "0" meta['tv_pack'] = 1 else: - #If Anime + # If Anime parsed = anitopy.parse(Path(video).name) romaji, mal_id, eng_title, seasonYear, anilist_episodes = self.get_romaji(parsed['anime_title'], meta.get('mal', None)) if mal_id: @@ -2681,39 +2812,28 @@ async def get_season_episode(self, video, meta): console.print(f"[bold yellow]{meta['title']} does not exist on thexem, guessing {season}") console.print(f"[bold yellow]If [green]{season}[/green] is incorrect, use --season to correct") await asyncio.sleep(3) - # try: - # version = parsed['release_version'] - # if int(version) == 2: - # meta['repack'] = "REPACK" - # elif int(version) > 2: - # meta['repack'] = f"REPACK{int(version) - 1}" - # # version = f"v{version}" - # except Exception: - # # version = "" - # pass - - if meta.get('manual_season', None) == None: + + if meta.get('manual_season', None) is None: meta['season'] = season else: season_int = meta['manual_season'].lower().replace('s', '') meta['season'] = f"S{meta['manual_season'].lower().replace('s', '').zfill(2)}" - if meta.get('manual_episode', None) == None: + if meta.get('manual_episode', None) is None: meta['episode'] = episode else: episode_int = meta['manual_episode'].lower().replace('e', '') meta['episode'] = f"E{meta['manual_episode'].lower().replace('e', '').zfill(2)}" meta['tv_pack'] = 0 - + # if " COMPLETE " in Path(video).name.replace('.', ' '): # meta['season'] = "COMPLETE" meta['season_int'] = season_int meta['episode_int'] = episode_int - - meta['episode_title_storage'] = guessit(video,{"excludes" : "part"}).get('episode_title', '') + meta['episode_title_storage'] = guessit(video, {"excludes": "part"}).get('episode_title', '') if meta['season'] == "S00" or meta['episode'] == "E00": meta['episode_title'] = meta['episode_title_storage'] - + # Guess the part of the episode (if available) meta['part'] 
= "" if meta['tv_pack'] == 1: @@ -2722,62 +2842,60 @@ async def get_season_episode(self, video, meta): return meta - def get_service(self, video, tag, audio, guess_title): service = guessit(video).get('streaming_service', "") services = { - '9NOW': '9NOW', '9Now': '9NOW', 'AE': 'AE', 'A&E': 'AE', 'AJAZ': 'AJAZ', 'Al Jazeera English': 'AJAZ', - 'ALL4': 'ALL4', 'Channel 4': 'ALL4', 'AMBC': 'AMBC', 'ABC': 'AMBC', 'AMC': 'AMC', 'AMZN': 'AMZN', - 'Amazon Prime': 'AMZN', 'ANLB': 'ANLB', 'AnimeLab': 'ANLB', 'ANPL': 'ANPL', 'Animal Planet': 'ANPL', - 'AOL': 'AOL', 'ARD': 'ARD', 'AS': 'AS', 'Adult Swim': 'AS', 'ATK': 'ATK', "America's Test Kitchen": 'ATK', - 'ATVP': 'ATVP', 'AppleTV': 'ATVP', 'AUBC': 'AUBC', 'ABC Australia': 'AUBC', 'BCORE': 'BCORE', 'BKPL': 'BKPL', - 'Blackpills': 'BKPL', 'BluTV': 'BLU', 'Binge': 'BNGE', 'BOOM': 'BOOM', 'Boomerang': 'BOOM', 'BRAV': 'BRAV', - 'BravoTV': 'BRAV', 'CBC': 'CBC', 'CBS': 'CBS', 'CC': 'CC', 'Comedy Central': 'CC', 'CCGC': 'CCGC', - 'Comedians in Cars Getting Coffee': 'CCGC', 'CHGD': 'CHGD', 'CHRGD': 'CHGD', 'CMAX': 'CMAX', 'Cinemax': 'CMAX', - 'CMOR': 'CMOR', 'CMT': 'CMT', 'Country Music Television': 'CMT', 'CN': 'CN', 'Cartoon Network': 'CN', 'CNBC': 'CNBC', - 'CNLP': 'CNLP', 'Canal+': 'CNLP', 'COOK': 'COOK', 'CORE': 'CORE', 'CR': 'CR', 'Crunchy Roll': 'CR', 'Crave': 'CRAV', - 'CRIT': 'CRIT', 'Criterion' : 'CRIT', 'CRKL': 'CRKL', 'Crackle': 'CRKL', 'CSPN': 'CSPN', 'CSpan': 'CSPN', 'CTV': 'CTV', 'CUR': 'CUR', - 'CuriosityStream': 'CUR', 'CW': 'CW', 'The CW': 'CW', 'CWS': 'CWS', 'CWSeed': 'CWS', 'DAZN': 'DAZN', 'DCU': 'DCU', - 'DC Universe': 'DCU', 'DDY': 'DDY', 'Digiturk Diledigin Yerde': 'DDY', 'DEST': 'DEST', 'DramaFever': 'DF', 'DHF': 'DHF', - 'Deadhouse Films': 'DHF', 'DISC': 'DISC', 'Discovery': 'DISC', 'DIY': 'DIY', 'DIY Network': 'DIY', 'DOCC': 'DOCC', - 'Doc Club': 'DOCC', 'DPLY': 'DPLY', 'DPlay': 'DPLY', 'DRPO': 'DRPO', 'Discovery Plus': 'DSCP', 'DSKI': 'DSKI', - 'Daisuki': 'DSKI', 'DSNP': 'DSNP', 'Disney+': 'DSNP', 'DSNY': 'DSNY', 'Disney': 'DSNY', 'DTV': 'DTV', - 'EPIX': 'EPIX', 'ePix': 'EPIX', 'ESPN': 'ESPN', 'ESQ': 'ESQ', 'Esquire': 'ESQ', 'ETTV': 'ETTV', 'El Trece': 'ETTV', - 'ETV': 'ETV', 'E!': 'ETV', 'FAM': 'FAM', 'Fandor': 'FANDOR', 'Facebook Watch': 'FBWatch', 'FJR': 'FJR', - 'Family Jr': 'FJR', 'FOOD': 'FOOD', 'Food Network': 'FOOD', 'FOX': 'FOX', 'Fox': 'FOX', 'Fox Premium': 'FOXP', - 'UFC Fight Pass': 'FP', 'FPT': 'FPT', 'FREE': 'FREE', 'Freeform': 'FREE', 'FTV': 'FTV', 'FUNI': 'FUNI', 'FUNi' : 'FUNI', - 'Foxtel': 'FXTL', 'FYI': 'FYI', 'FYI Network': 'FYI', 'GC': 'GC', 'NHL GameCenter': 'GC', 'GLBL': 'GLBL', - 'Global': 'GLBL', 'GLOB': 'GLOB', 'GloboSat Play': 'GLOB', 'GO90': 'GO90', 'GagaOOLala': 'Gaga', 'HBO': 'HBO', - 'HBO Go': 'HBO', 'HGTV': 'HGTV', 'HIDI': 'HIDI', 'HIST': 'HIST', 'History': 'HIST', 'HLMK': 'HLMK', 'Hallmark': 'HLMK', - 'HMAX': 'HMAX', 'HBO Max': 'HMAX', 'HS': 'HTSR', 'HTSR' : 'HTSR', 'HSTR': 'Hotstar', 'HULU': 'HULU', 'Hulu': 'HULU', 'hoichoi': 'HoiChoi', 'ID': 'ID', - 'Investigation Discovery': 'ID', 'IFC': 'IFC', 'iflix': 'IFX', 'National Audiovisual Institute': 'INA', 'ITV': 'ITV', - 'KAYO': 'KAYO', 'KNOW': 'KNOW', 'Knowledge Network': 'KNOW', 'KNPY': 'KNPY', 'Kanopy' : 'KNPY', 'LIFE': 'LIFE', 'Lifetime': 'LIFE', 'LN': 'LN', - 'MA' : 'MA', 'Movies Anywhere' : 'MA', 'MAX' : 'MAX', 'MBC': 'MBC', 'MNBC': 'MNBC', 'MSNBC': 'MNBC', 'MTOD': 'MTOD', 'Motor Trend OnDemand': 'MTOD', 'MTV': 'MTV', 'MUBI': 'MUBI', - 'NATG': 'NATG', 'National Geographic': 'NATG', 'NBA': 'NBA', 'NBA TV': 'NBA', 'NBC': 'NBC', 'NF': 'NF', 
'Netflix': 'NF', - 'National Film Board': 'NFB', 'NFL': 'NFL', 'NFLN': 'NFLN', 'NFL Now': 'NFLN', 'NICK': 'NICK', 'Nickelodeon': 'NICK', 'NRK': 'NRK', - 'Norsk Rikskringkasting': 'NRK', 'OnDemandKorea': 'ODK', 'Opto': 'OPTO', 'Oprah Winfrey Network': 'OWN', 'PA': 'PA', 'PBS': 'PBS', - 'PBSK': 'PBSK', 'PBS Kids': 'PBSK', 'PCOK': 'PCOK', 'Peacock': 'PCOK', 'PLAY': 'PLAY', 'PLUZ': 'PLUZ', 'Pluzz': 'PLUZ', 'PMNP': 'PMNP', - 'PMNT': 'PMNT', 'PMTP' : 'PMTP', 'POGO': 'POGO', 'PokerGO': 'POGO', 'PSN': 'PSN', 'Playstation Network': 'PSN', 'PUHU': 'PUHU', 'QIBI': 'QIBI', - 'RED': 'RED', 'YouTube Red': 'RED', 'RKTN': 'RKTN', 'Rakuten TV': 'RKTN', 'The Roku Channel': 'ROKU', 'RSTR': 'RSTR', 'RTE': 'RTE', - 'RTE One': 'RTE', 'RUUTU': 'RUUTU', 'SBS': 'SBS', 'Science Channel': 'SCI', 'SESO': 'SESO', 'SeeSo': 'SESO', 'SHMI': 'SHMI', 'Shomi': 'SHMI', 'SKST' : 'SKST', 'SkyShowtime': 'SKST', - 'SHO': 'SHO', 'Showtime': 'SHO', 'SNET': 'SNET', 'Sportsnet': 'SNET', 'Sony': 'SONY', 'SPIK': 'SPIK', 'Spike': 'SPIK', 'Spike TV': 'SPKE', - 'SPRT': 'SPRT', 'Sprout': 'SPRT', 'STAN': 'STAN', 'Stan': 'STAN', 'STARZ': 'STARZ', 'STRP': 'STRP', 'Star+' : 'STRP', 'STZ': 'STZ', 'Starz': 'STZ', 'SVT': 'SVT', - 'Sveriges Television': 'SVT', 'SWER': 'SWER', 'SwearNet': 'SWER', 'SYFY': 'SYFY', 'Syfy': 'SYFY', 'TBS': 'TBS', 'TEN': 'TEN', - 'TFOU': 'TFOU', 'TFou': 'TFOU', 'TIMV': 'TIMV', 'TLC': 'TLC', 'TOU': 'TOU', 'TRVL': 'TRVL', 'TUBI': 'TUBI', 'TubiTV': 'TUBI', - 'TV3': 'TV3', 'TV3 Ireland': 'TV3', 'TV4': 'TV4', 'TV4 Sweeden': 'TV4', 'TVING': 'TVING', 'TVL': 'TVL', 'TV Land': 'TVL', - 'TVNZ': 'TVNZ', 'UFC': 'UFC', 'UKTV': 'UKTV', 'UNIV': 'UNIV', 'Univision': 'UNIV', 'USAN': 'USAN', 'USA Network': 'USAN', - 'VH1': 'VH1', 'VIAP': 'VIAP', 'VICE': 'VICE', 'Viceland': 'VICE', 'Viki': 'VIKI', 'VIMEO': 'VIMEO', 'VLCT': 'VLCT', - 'Velocity': 'VLCT', 'VMEO': 'VMEO', 'Vimeo': 'VMEO', 'VRV': 'VRV', 'VUDU': 'VUDU', 'WME': 'WME', 'WatchMe': 'WME', 'WNET': 'WNET', - 'W Network': 'WNET', 'WWEN': 'WWEN', 'WWE Network': 'WWEN', 'XBOX': 'XBOX', 'Xbox Video': 'XBOX', 'YHOO': 'YHOO', 'Yahoo': 'YHOO', + '9NOW': '9NOW', '9Now': '9NOW', 'AE': 'AE', 'A&E': 'AE', 'AJAZ': 'AJAZ', 'Al Jazeera English': 'AJAZ', + 'ALL4': 'ALL4', 'Channel 4': 'ALL4', 'AMBC': 'AMBC', 'ABC': 'AMBC', 'AMC': 'AMC', 'AMZN': 'AMZN', + 'Amazon Prime': 'AMZN', 'ANLB': 'ANLB', 'AnimeLab': 'ANLB', 'ANPL': 'ANPL', 'Animal Planet': 'ANPL', + 'AOL': 'AOL', 'ARD': 'ARD', 'AS': 'AS', 'Adult Swim': 'AS', 'ATK': 'ATK', "America's Test Kitchen": 'ATK', + 'ATVP': 'ATVP', 'AppleTV': 'ATVP', 'AUBC': 'AUBC', 'ABC Australia': 'AUBC', 'BCORE': 'BCORE', 'BKPL': 'BKPL', + 'Blackpills': 'BKPL', 'BluTV': 'BLU', 'Binge': 'BNGE', 'BOOM': 'BOOM', 'Boomerang': 'BOOM', 'BRAV': 'BRAV', + 'BravoTV': 'BRAV', 'CBC': 'CBC', 'CBS': 'CBS', 'CC': 'CC', 'Comedy Central': 'CC', 'CCGC': 'CCGC', + 'Comedians in Cars Getting Coffee': 'CCGC', 'CHGD': 'CHGD', 'CHRGD': 'CHGD', 'CMAX': 'CMAX', 'Cinemax': 'CMAX', + 'CMOR': 'CMOR', 'CMT': 'CMT', 'Country Music Television': 'CMT', 'CN': 'CN', 'Cartoon Network': 'CN', 'CNBC': 'CNBC', + 'CNLP': 'CNLP', 'Canal+': 'CNLP', 'COOK': 'COOK', 'CORE': 'CORE', 'CR': 'CR', 'Crunchy Roll': 'CR', 'Crave': 'CRAV', + 'CRIT': 'CRIT', 'Criterion': 'CRIT', 'CRKL': 'CRKL', 'Crackle': 'CRKL', 'CSPN': 'CSPN', 'CSpan': 'CSPN', 'CTV': 'CTV', 'CUR': 'CUR', + 'CuriosityStream': 'CUR', 'CW': 'CW', 'The CW': 'CW', 'CWS': 'CWS', 'CWSeed': 'CWS', 'DAZN': 'DAZN', 'DCU': 'DCU', + 'DC Universe': 'DCU', 'DDY': 'DDY', 'Digiturk Diledigin Yerde': 'DDY', 'DEST': 'DEST', 'DramaFever': 'DF', 'DHF': 'DHF', + 
'Deadhouse Films': 'DHF', 'DISC': 'DISC', 'Discovery': 'DISC', 'DIY': 'DIY', 'DIY Network': 'DIY', 'DOCC': 'DOCC', + 'Doc Club': 'DOCC', 'DPLY': 'DPLY', 'DPlay': 'DPLY', 'DRPO': 'DRPO', 'Discovery Plus': 'DSCP', 'DSKI': 'DSKI', + 'Daisuki': 'DSKI', 'DSNP': 'DSNP', 'Disney+': 'DSNP', 'DSNY': 'DSNY', 'Disney': 'DSNY', 'DTV': 'DTV', + 'EPIX': 'EPIX', 'ePix': 'EPIX', 'ESPN': 'ESPN', 'ESQ': 'ESQ', 'Esquire': 'ESQ', 'ETTV': 'ETTV', 'El Trece': 'ETTV', + 'ETV': 'ETV', 'E!': 'ETV', 'FAM': 'FAM', 'Fandor': 'FANDOR', 'Facebook Watch': 'FBWatch', 'FJR': 'FJR', + 'Family Jr': 'FJR', 'FOOD': 'FOOD', 'Food Network': 'FOOD', 'FOX': 'FOX', 'Fox': 'FOX', 'Fox Premium': 'FOXP', + 'UFC Fight Pass': 'FP', 'FPT': 'FPT', 'FREE': 'FREE', 'Freeform': 'FREE', 'FTV': 'FTV', 'FUNI': 'FUNI', 'FUNi': 'FUNI', + 'Foxtel': 'FXTL', 'FYI': 'FYI', 'FYI Network': 'FYI', 'GC': 'GC', 'NHL GameCenter': 'GC', 'GLBL': 'GLBL', + 'Global': 'GLBL', 'GLOB': 'GLOB', 'GloboSat Play': 'GLOB', 'GO90': 'GO90', 'GagaOOLala': 'Gaga', 'HBO': 'HBO', + 'HBO Go': 'HBO', 'HGTV': 'HGTV', 'HIDI': 'HIDI', 'HIST': 'HIST', 'History': 'HIST', 'HLMK': 'HLMK', 'Hallmark': 'HLMK', + 'HMAX': 'HMAX', 'HBO Max': 'HMAX', 'HS': 'HTSR', 'HTSR': 'HTSR', 'HSTR': 'Hotstar', 'HULU': 'HULU', 'Hulu': 'HULU', 'hoichoi': 'HoiChoi', 'ID': 'ID', + 'Investigation Discovery': 'ID', 'IFC': 'IFC', 'iflix': 'IFX', 'National Audiovisual Institute': 'INA', 'ITV': 'ITV', + 'KAYO': 'KAYO', 'KNOW': 'KNOW', 'Knowledge Network': 'KNOW', 'KNPY': 'KNPY', 'Kanopy': 'KNPY', 'LIFE': 'LIFE', 'Lifetime': 'LIFE', 'LN': 'LN', + 'MA': 'MA', 'Movies Anywhere': 'MA', 'MAX': 'MAX', 'MBC': 'MBC', 'MNBC': 'MNBC', 'MSNBC': 'MNBC', 'MTOD': 'MTOD', 'Motor Trend OnDemand': 'MTOD', 'MTV': 'MTV', 'MUBI': 'MUBI', + 'NATG': 'NATG', 'National Geographic': 'NATG', 'NBA': 'NBA', 'NBA TV': 'NBA', 'NBC': 'NBC', 'NF': 'NF', 'Netflix': 'NF', + 'National Film Board': 'NFB', 'NFL': 'NFL', 'NFLN': 'NFLN', 'NFL Now': 'NFLN', 'NICK': 'NICK', 'Nickelodeon': 'NICK', 'NRK': 'NRK', + 'Norsk Rikskringkasting': 'NRK', 'OnDemandKorea': 'ODK', 'Opto': 'OPTO', 'Oprah Winfrey Network': 'OWN', 'PA': 'PA', 'PBS': 'PBS', + 'PBSK': 'PBSK', 'PBS Kids': 'PBSK', 'PCOK': 'PCOK', 'Peacock': 'PCOK', 'PLAY': 'PLAY', 'PLUZ': 'PLUZ', 'Pluzz': 'PLUZ', 'PMNP': 'PMNP', + 'PMNT': 'PMNT', 'PMTP': 'PMTP', 'POGO': 'POGO', 'PokerGO': 'POGO', 'PSN': 'PSN', 'Playstation Network': 'PSN', 'PUHU': 'PUHU', 'QIBI': 'QIBI', + 'RED': 'RED', 'YouTube Red': 'RED', 'RKTN': 'RKTN', 'Rakuten TV': 'RKTN', 'The Roku Channel': 'ROKU', 'RSTR': 'RSTR', 'RTE': 'RTE', + 'RTE One': 'RTE', 'RUUTU': 'RUUTU', 'SBS': 'SBS', 'Science Channel': 'SCI', 'SESO': 'SESO', 'SeeSo': 'SESO', 'SHMI': 'SHMI', 'Shomi': 'SHMI', 'SKST': 'SKST', 'SkyShowtime': 'SKST', + 'SHO': 'SHO', 'Showtime': 'SHO', 'SNET': 'SNET', 'Sportsnet': 'SNET', 'Sony': 'SONY', 'SPIK': 'SPIK', 'Spike': 'SPIK', 'Spike TV': 'SPKE', + 'SPRT': 'SPRT', 'Sprout': 'SPRT', 'STAN': 'STAN', 'Stan': 'STAN', 'STARZ': 'STARZ', 'STRP': 'STRP', 'Star+': 'STRP', 'STZ': 'STZ', 'Starz': 'STZ', 'SVT': 'SVT', + 'Sveriges Television': 'SVT', 'SWER': 'SWER', 'SwearNet': 'SWER', 'SYFY': 'SYFY', 'Syfy': 'SYFY', 'TBS': 'TBS', 'TEN': 'TEN', + 'TFOU': 'TFOU', 'TFou': 'TFOU', 'TIMV': 'TIMV', 'TLC': 'TLC', 'TOU': 'TOU', 'TRVL': 'TRVL', 'TUBI': 'TUBI', 'TubiTV': 'TUBI', + 'TV3': 'TV3', 'TV3 Ireland': 'TV3', 'TV4': 'TV4', 'TV4 Sweeden': 'TV4', 'TVING': 'TVING', 'TVL': 'TVL', 'TV Land': 'TVL', + 'TVNZ': 'TVNZ', 'UFC': 'UFC', 'UKTV': 'UKTV', 'UNIV': 'UNIV', 'Univision': 'UNIV', 'USAN': 'USAN', 'USA Network': 'USAN', + 'VH1': 'VH1', 'VIAP': 
'VIAP', 'VICE': 'VICE', 'Viceland': 'VICE', 'Viki': 'VIKI', 'VIMEO': 'VIMEO', 'VLCT': 'VLCT', + 'Velocity': 'VLCT', 'VMEO': 'VMEO', 'Vimeo': 'VMEO', 'VRV': 'VRV', 'VUDU': 'VUDU', 'WME': 'WME', 'WatchMe': 'WME', 'WNET': 'WNET', + 'W Network': 'WNET', 'WWEN': 'WWEN', 'WWE Network': 'WWEN', 'XBOX': 'XBOX', 'Xbox Video': 'XBOX', 'YHOO': 'YHOO', 'Yahoo': 'YHOO', 'YT': 'YT', 'ZDF': 'ZDF', 'iP': 'iP', 'BBC iPlayer': 'iP', 'iQIYI': 'iQIYI', 'iT': 'iT', 'iTunes': 'iT' } - - + video_name = re.sub(r"[.()]", " ", video.replace(tag, '').replace(guess_title, '')) if "DTS-HD MA" in audio: video_name = video_name.replace("DTS-HD.MA.", "").replace("DTS-HD MA ", "") for key, value in services.items(): - if (' ' + key + ' ') in video_name and key not in guessit(video, {"excludes" : ["country", "language"]}).get('title', ''): + if (' ' + key + ' ') in video_name and key not in guessit(video, {"excludes": ["country", "language"]}).get('title', ''): service = value elif key == service: service = value @@ -2789,10 +2907,8 @@ def get_service(self, video, tag, audio, guess_title): service_longname = "Amazon" return service, service_longname - - def stream_optimized(self, stream_opt): - if stream_opt == True: + if stream_opt is True: stream = 1 else: stream = 0 @@ -2803,22 +2919,22 @@ def is_anon(self, anon_in): if anon.lower() == "true": console.print("[bold red]Global ANON has been removed in favor of per-tracker settings. Please update your config accordingly.") time.sleep(10) - if anon_in == True: + if anon_in is True: anon_out = 1 else: anon_out = 0 return anon_out async def upload_image(self, session, url, data, headers, files): - if headers == None and files == None: + if headers is None and files is None: async with session.post(url=url, data=data) as resp: response = await resp.json() return response - elif headers == None and files != None: + elif headers is None and files is not None: async with session.post(url=url, data=data, files=files) as resp: response = await resp.json() return response - elif headers != None and files == None: + elif headers is not None and files is None: async with session.post(url=url, data=data, headers=headers) as resp: response = await resp.json() return response @@ -2826,46 +2942,50 @@ async def upload_image(self, session, url, data, headers, files): async with session.post(url=url, data=data, headers=headers, files=files) as resp: response = await resp.json() return response - - + def clean_filename(self, name): - invalid = '<>:"/\|?*' + invalid = '<>:"/\\|?*' for char in invalid: name = name.replace(char, '-') return name - async def gen_desc(self, meta): + desclink = meta.get('desclink', None) descfile = meta.get('descfile', None) ptp_desc = blu_desc = "" desc_source = [] + imagelist = [] with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: description.seek(0) if (desclink, descfile, meta['desc']) == (None, None, None): - if meta.get('ptp_manual') != None: + if meta.get('ptp_manual') is not None: desc_source.append('PTP') - if meta.get('blu_manual') != None: + if meta.get('blu_manual') is not None: desc_source.append('BLU') if len(desc_source) != 1: desc_source = None else: desc_source = desc_source[0] - if meta.get('ptp', None) != None and str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true" and desc_source in ['PTP', None]: + if meta.get('ptp', None) is not None and str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true" and desc_source in ['PTP', None]: + if 
meta.get('skip_gen_desc', False): + console.print("[cyan]Skipping description generation as PTP description was retained.") + return meta ptp = PTP(config=self.config) - ptp_desc = await ptp.get_ptp_description(meta['ptp'], meta['is_disc']) + ptp_desc, imagelist = await ptp.get_ptp_description(meta['ptp'], meta['is_disc']) if ptp_desc.replace('\r\n', '').replace('\n', '').strip() != "": description.write(ptp_desc) description.write("\n") meta['description'] = 'PTP' + meta['imagelist'] = imagelist # Save the imagelist to meta if needed if ptp_desc == "" and meta.get('blu_desc', '').rstrip() not in [None, ''] and desc_source in ['BLU', None]: if meta.get('blu_desc', '').strip().replace('\r\n', '').replace('\n', '') != '': description.write(meta['blu_desc']) meta['description'] = 'BLU' - if meta.get('desc_template', None) != None: + if meta.get('desc_template', None) is not None: from jinja2 import Template with open(f"{meta['base_dir']}/data/templates/{meta['desc_template']}.txt", 'r') as f: desc_templater = Template(f.read()) @@ -2874,14 +2994,15 @@ async def gen_desc(self, meta): description.write(template_desc) description.write("\n") - if meta['nfo'] != False: + if meta['nfo'] is not False: description.write("[code]") nfo = glob.glob("*.nfo")[0] description.write(open(nfo, 'r', encoding="utf-8").read()) description.write("[/code]") description.write("\n") meta['description'] = "CUSTOM" - if desclink != None: + + if desclink is not None: parsed = urllib.parse.urlparse(desclink.replace('/raw/', '/')) split = os.path.split(parsed.path) if split[0] != '/': @@ -2892,24 +3013,26 @@ async def gen_desc(self, meta): description.write(requests.get(raw).text) description.write("\n") meta['description'] = "CUSTOM" - - if descfile != None: - if os.path.isfile(descfile) == True: + + if descfile is not None: + if os.path.isfile(descfile): text = open(descfile, 'r').read() description.write(text) - meta['description'] = "CUSTOM" - if meta['desc'] != None: + meta['description'] = "CUSTOM" + + if meta['desc'] is not None: description.write(meta['desc']) description.write("\n") meta['description'] = "CUSTOM" + description.write("\n") return meta - + async def tag_override(self, meta): with open(f"{meta['base_dir']}/data/tags.json", 'r', encoding="utf-8") as f: tags = json.load(f) f.close() - + for tag in tags: value = tags.get(tag) if value.get('in_name', "") == tag and tag in meta['path']: @@ -2928,7 +3051,6 @@ async def tag_override(self, meta): else: meta[key] = value.get(key) return meta - async def package(self, meta): if meta['tag'] == "": @@ -2952,7 +3074,7 @@ async def package(self, meta): generic.write(f"TVDB: https://www.thetvdb.com/?id={meta['tvdb_id']}&tab=series\n") poster_img = f"{meta['base_dir']}/tmp/{meta['uuid']}/POSTER.png" if meta.get('poster', None) not in ['', None] and not os.path.exists(poster_img): - if meta.get('rehosted_poster', None) == None: + if meta.get('rehosted_poster', None) is None: r = requests.get(meta['poster'], stream=True) if r.status_code == 200: console.print("[bold yellow]Rehosting Poster") @@ -2963,23 +3085,23 @@ async def package(self, meta): poster = poster[0] generic.write(f"TMDB Poster: {poster.get('raw_url', poster.get('img_url'))}\n") meta['rehosted_poster'] = poster.get('raw_url', poster.get('img_url')) - with open (f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as metafile: + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as metafile: json.dump(meta, metafile, indent=4) metafile.close() else: console.print("[bold 
yellow]Poster could not be retrieved") - elif os.path.exists(poster_img) and meta.get('rehosted_poster') != None: + elif os.path.exists(poster_img) and meta.get('rehosted_poster') is not None: generic.write(f"TMDB Poster: {meta.get('rehosted_poster')}\n") if len(meta['image_list']) > 0: - generic.write(f"\nImage Webpage:\n") + generic.write("\nImage Webpage:\n") for each in meta['image_list']: generic.write(f"{each['web_url']}\n") - generic.write(f"\nThumbnail Image:\n") + generic.write("\nThumbnail Image:\n") for each in meta['image_list']: generic.write(f"{each['img_url']}\n") title = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", meta['title']) archive = f"{meta['base_dir']}/tmp/{meta['uuid']}/{title}" - torrent_files = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}","*.torrent") + torrent_files = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", "*.torrent") if isinstance(torrent_files, list) and len(torrent_files) > 1: for each in torrent_files: if not each.startswith(('BASE', '[RAND')): @@ -2992,12 +3114,12 @@ async def package(self, meta): # shutil.copy(os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent"), os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['name'].replace(' ', '.')}.torrent").replace(' ', '.')) filebrowser = self.config['TRACKERS'].get('MANUAL', {}).get('filebrowser', None) shutil.make_archive(archive, 'tar', f"{meta['base_dir']}/tmp/{meta['uuid']}") - if filebrowser != None: + if filebrowser is not None: url = '/'.join(s.strip('/') for s in (filebrowser, f"/tmp/{meta['uuid']}")) url = urllib.parse.quote(url, safe="https://") else: files = { - "files[]" : (f"{meta['title']}.tar", open(f"{archive}.tar", 'rb')) + "files[]": (f"{meta['title']}.tar", open(f"{archive}.tar", 'rb')) } response = requests.post("https://uguu.se/upload.php", files=files).json() if meta['debug']: @@ -3006,14 +3128,14 @@ async def package(self, meta): return url except Exception: return False - return + return async def get_imdb_aka(self, imdb_id): if imdb_id == "0": return "", None ia = Cinemagoer() result = ia.get_movie(imdb_id.replace('tt', '')) - + original_language = result.get('language codes') if isinstance(original_language, list): if len(original_language) > 1: @@ -3039,7 +3161,6 @@ async def get_dvd_size(self, discs): dvd_sizes.sort() compact = " ".join(dvd_sizes) return compact - def get_tmdb_imdb_from_mediainfo(self, mediainfo, category, is_disc, tmdbid, imdbid): if not is_disc: @@ -3048,7 +3169,7 @@ def get_tmdb_imdb_from_mediainfo(self, mediainfo, category, is_disc, tmdbid, imd for each in extra: if each.lower().startswith('tmdb'): parser = Args(config=self.config) - category, tmdbid = parser.parse_tmdb_id(id = extra[each], category=category) + category, tmdbid = parser.parse_tmdb_id(id=extra[each], category=category) if each.lower().startswith('imdb'): try: imdbid = str(int(extra[each].replace('tt', ''))).zfill(7) @@ -3056,7 +3177,6 @@ def get_tmdb_imdb_from_mediainfo(self, mediainfo, category, is_disc, tmdbid, imd pass return category, tmdbid, imdbid - def daily_to_tmdb_season_episode(self, tmdbid, date): show = tmdb.TV(tmdbid) seasons = show.info().get('seasons') @@ -3076,9 +3196,6 @@ def daily_to_tmdb_season_episode(self, tmdbid, date): console.print(f"[yellow]Unable to map the date ([bold yellow]{str(date)}[/bold yellow]) to a Season/Episode number") return season, episode - - - async def get_imdb_info(self, imdbID, meta): imdb_info = {} if int(str(imdbID).replace('tt', '')) != 0: @@ -3107,18 +3224,17 @@ async def get_imdb_info(self, imdbID, meta): 
imdb_info['directors'].append(f"nm{director.getID()}") else: imdb_info = { - 'title' : meta['title'], - 'year' : meta['year'], - 'aka' : '', - 'type' : None, - 'runtime' : meta.get('runtime', '60'), - 'cover' : meta.get('poster'), + 'title': meta['title'], + 'year': meta['year'], + 'aka': '', + 'type': None, + 'runtime': meta.get('runtime', '60'), + 'cover': meta.get('poster'), } if len(meta.get('tmdb_directors', [])) >= 1: imdb_info['directors'] = meta['tmdb_directors'] return imdb_info - async def search_imdb(self, filename, search_year): imdbID = '0' @@ -3130,7 +3246,6 @@ async def search_imdb(self, filename, search_year): imdbID = str(movie.movieID).replace('tt', '') return imdbID - async def imdb_other_meta(self, meta): imdb_info = meta['imdb_info'] = await self.get_imdb_info(meta['imdb_id'], meta) meta['title'] = imdb_info['title'] @@ -3152,49 +3267,49 @@ async def search_tvmaze(self, filename, year, imdbID, tvdbID): tvmazeID = 0 lookup = False show = None - if imdbID == None: + if imdbID is None: imdbID = '0' - if tvdbID == None: + if tvdbID is None: tvdbID = 0 if int(tvdbID) != 0: params = { - "thetvdb" : tvdbID + "thetvdb": tvdbID } url = "https://api.tvmaze.com/lookup/shows" lookup = True elif int(imdbID) != 0: params = { - "imdb" : f"tt{imdbID}" + "imdb": f"tt{imdbID}" } url = "https://api.tvmaze.com/lookup/shows" lookup = True else: params = { - "q" : filename + "q": filename } - url = f"https://api.tvmaze.com/search/shows" + url = "https://api.tvmaze.com/search/shows" resp = requests.get(url=url, params=params) if resp.ok: resp = resp.json() - if resp == None: + if resp is None: return tvmazeID, imdbID, tvdbID - if lookup == True: + if lookup is True: show = resp else: if year not in (None, ''): for each in resp: premier_date = each['show'].get('premiered', '') - if premier_date != None: + if premier_date is not None: if premier_date.startswith(str(year)): show = each['show'] elif len(resp) >= 1: show = resp[0]['show'] - if show != None: + if show is not None: tvmazeID = show.get('id') if int(imdbID) == 0: - if show.get('externals', {}).get('imdb', '0') != None: + if show.get('externals', {}).get('imdb', '0') is not None: imdbID = str(show.get('externals', {}).get('imdb', '0')).replace('tt', '') if int(tvdbID) == 0: - if show.get('externals', {}).get('tvdb', '0') != None: + if show.get('externals', {}).get('tvdb', '0') is not None: tvdbID = show.get('externals', {}).get('tvdb', '0') - return tvmazeID, imdbID, tvdbID \ No newline at end of file + return tvmazeID, imdbID, tvdbID diff --git a/src/search.py b/src/search.py index 8e782ee7e..f8d7324c9 100644 --- a/src/search.py +++ b/src/search.py @@ -1,8 +1,8 @@ import platform -import asyncio import os from src.console import console + class Search(): """ Logic for searching @@ -11,7 +11,6 @@ def __init__(self, config): self.config = config pass - async def searchFile(self, filename): os_info = platform.platform() filename = filename.lower() @@ -21,6 +20,7 @@ async def searchFile(self, filename): return file_found = False words = filename.split() + async def search_file(search_dir): files_total_search = [] console.print(f"Searching {search_dir}") @@ -31,7 +31,7 @@ async def search_file(search_dir): os_info = platform.platform() if await self.file_search(l_name, words): file_found = True - if('Windows' in os_info): + if ('Windows' in os_info): files_total_search.append(root+'\\'+name) else: files_total_search.append(root+'/'+name) @@ -54,6 +54,7 @@ async def searchFolder(self, foldername): return folders_found = False words = 
foldername.split() + async def search_dir(search_dir): console.print(f"Searching {search_dir}") folders_total_search = [] @@ -66,27 +67,28 @@ async def search_dir(search_dir): if await self.file_search(l_name, words): folder_found = True - if('Windows' in os_info): + if ('Windows' in os_info): folders_total_search.append(root+'\\'+name) else: folders_total_search.append(root+'/'+name) - + return folders_total_search config_dir = self.config['DISCORD']['search_dir'] if isinstance(config_dir, list): for each in config_dir: folders = await search_dir(each) - + folders_total = folders_total + folders else: folders_total = await search_dir(config_dir) return folders_total return folders_total + async def file_search(self, name, name_words): check = True for word in name_words: if word not in name: check = False break - return check \ No newline at end of file + return check diff --git a/src/trackers/ACM.py b/src/trackers/ACM.py index 194a2d0a2..18a5fc7df 100644 --- a/src/trackers/ACM.py +++ b/src/trackers/ACM.py @@ -9,7 +9,6 @@ from src.console import console - class ACM(): """ Edit for Tracker: @@ -19,12 +18,6 @@ class ACM(): Upload """ - ############################################################### - ######## EDIT ME ######## - ############################################################### - - # ALSO EDIT CLASS NAME ABOVE - def __init__(self, config): self.config = config self.tracker = 'ACM' @@ -34,15 +27,15 @@ def __init__(self, config): self.signature = None self.banned_groups = [""] pass - + async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', + 'MOVIE': '1', + 'TV': '2', }.get(category_name, '0') return category_id - async def get_type (self, meta): + async def get_type(self, meta): if meta['is_disc'] == "BDMV": bdinfo = meta['bdinfo'] bd_sizes = [25, 50, 66, 100] @@ -60,7 +53,7 @@ async def get_type (self, meta): if "DVD5" in meta['dvd_size']: type_string = "DVD 5" elif "DVD9" in meta['dvd_size']: - type_string = "DVD 9" + type_string = "DVD 9" else: if meta['type'] == "REMUX": if meta['source'] == "BluRay": @@ -73,18 +66,18 @@ async def get_type (self, meta): # acceptable_res = ["2160p", "1080p", "1080i", "720p", "576p", "576i", "540p", "480p", "Other"] # if meta['resolution'] in acceptable_res: # type_id = meta['resolution'] - # else: + # else: # type_id = "Other" return type_string async def get_type_id(self, type): type_id = { - 'UHD 100': '1', + 'UHD 100': '1', 'UHD 66': '2', 'UHD 50': '3', 'UHD REMUX': '12', 'BD 50': '4', - 'BD 25': '5', + 'BD 25': '5', 'DVD 5': '14', 'REMUX': '7', 'WEBDL': '9', @@ -96,68 +89,68 @@ async def get_type_id(self, type): async def get_res_id(self, resolution): resolution_id = { - '2160p': '1', + '2160p': '1', '1080p': '2', - '1080i':'2', - '720p': '3', - '576p': '4', + '1080i': '2', + '720p': '3', + '576p': '4', '576i': '4', - '480p': '5', + '480p': '5', '480i': '5' }.get(resolution, '10') - return resolution_id + return resolution_id - #ACM rejects uploads with more that 4 keywords + # ACM rejects uploads with more than 4 keywords async def get_keywords(self, keywords): - if keywords !='': - keywords_list = keywords.split(',') + if keywords != '': + keywords_list = keywords.split(',') keywords_list = [keyword for keyword in keywords_list if " " not in keyword][:4] - keywords = ', '.join( keywords_list) + keywords = ', '.join(keywords_list) return keywords def get_subtitles(self, meta): sub_lang_map = { - ("Arabic", "ara", "ar") : 'Ara', - ("Brazilian Portuguese", "Brazilian", "Portuguese-BR", 'pt-br') : 'Por-BR', 
- ("Bulgarian", "bul", "bg") : 'Bul', - ("Chinese", "chi", "zh", "Chinese (Simplified)", "Chinese (Traditional)") : 'Chi', - ("Croatian", "hrv", "hr", "scr") : 'Cro', - ("Czech", "cze", "cz", "cs") : 'Cze', - ("Danish", "dan", "da") : 'Dan', - ("Dutch", "dut", "nl") : 'Dut', - ("English", "eng", "en", "English (CC)", "English - SDH") : 'Eng', - ("English - Forced", "English (Forced)", "en (Forced)") : 'Eng', - ("English Intertitles", "English (Intertitles)", "English - Intertitles", "en (Intertitles)") : 'Eng', - ("Estonian", "est", "et") : 'Est', - ("Finnish", "fin", "fi") : 'Fin', - ("French", "fre", "fr") : 'Fre', - ("German", "ger", "de") : 'Ger', - ("Greek", "gre", "el") : 'Gre', - ("Hebrew", "heb", "he") : 'Heb', - ("Hindi" "hin", "hi") : 'Hin', - ("Hungarian", "hun", "hu") : 'Hun', - ("Icelandic", "ice", "is") : 'Ice', - ("Indonesian", "ind", "id") : 'Ind', - ("Italian", "ita", "it") : 'Ita', - ("Japanese", "jpn", "ja") : 'Jpn', - ("Korean", "kor", "ko") : 'Kor', - ("Latvian", "lav", "lv") : 'Lav', - ("Lithuanian", "lit", "lt") : 'Lit', - ("Norwegian", "nor", "no") : 'Nor', - ("Persian", "fa", "far") : 'Per', - ("Polish", "pol", "pl") : 'Pol', - ("Portuguese", "por", "pt") : 'Por', - ("Romanian", "rum", "ro") : 'Rom', - ("Russian", "rus", "ru") : 'Rus', - ("Serbian", "srp", "sr", "scc") : 'Ser', - ("Slovak", "slo", "sk") : 'Slo', - ("Slovenian", "slv", "sl") : 'Slv', - ("Spanish", "spa", "es") : 'Spa', - ("Swedish", "swe", "sv") : 'Swe', - ("Thai", "tha", "th") : 'Tha', - ("Turkish", "tur", "tr") : 'Tur', - ("Ukrainian", "ukr", "uk") : 'Ukr', - ("Vietnamese", "vie", "vi") : 'Vie', + ("Arabic", "ara", "ar"): 'Ara', + ("Brazilian Portuguese", "Brazilian", "Portuguese-BR", 'pt-br'): 'Por-BR', + ("Bulgarian", "bul", "bg"): 'Bul', + ("Chinese", "chi", "zh", "Chinese (Simplified)", "Chinese (Traditional)"): 'Chi', + ("Croatian", "hrv", "hr", "scr"): 'Cro', + ("Czech", "cze", "cz", "cs"): 'Cze', + ("Danish", "dan", "da"): 'Dan', + ("Dutch", "dut", "nl"): 'Dut', + ("English", "eng", "en", "English (CC)", "English - SDH"): 'Eng', + ("English - Forced", "English (Forced)", "en (Forced)"): 'Eng', + ("English Intertitles", "English (Intertitles)", "English - Intertitles", "en (Intertitles)"): 'Eng', + ("Estonian", "est", "et"): 'Est', + ("Finnish", "fin", "fi"): 'Fin', + ("French", "fre", "fr"): 'Fre', + ("German", "ger", "de"): 'Ger', + ("Greek", "gre", "el"): 'Gre', + ("Hebrew", "heb", "he"): 'Heb', + ("Hindi", "hin", "hi"): 'Hin', + ("Hungarian", "hun", "hu"): 'Hun', + ("Icelandic", "ice", "is"): 'Ice', + ("Indonesian", "ind", "id"): 'Ind', + ("Italian", "ita", "it"): 'Ita', + ("Japanese", "jpn", "ja"): 'Jpn', + ("Korean", "kor", "ko"): 'Kor', + ("Latvian", "lav", "lv"): 'Lav', + ("Lithuanian", "lit", "lt"): 'Lit', + ("Norwegian", "nor", "no"): 'Nor', + ("Persian", "fa", "far"): 'Per', + ("Polish", "pol", "pl"): 'Pol', + ("Portuguese", "por", "pt"): 'Por', + ("Romanian", "rum", "ro"): 'Rom', + ("Russian", "rus", "ru"): 'Rus', + ("Serbian", "srp", "sr", "scc"): 'Ser', + ("Slovak", "slo", "sk"): 'Slo', + ("Slovenian", "slv", "sl"): 'Slv', + ("Spanish", "spa", "es"): 'Spa', + ("Swedish", "swe", "sv"): 'Swe', + ("Thai", "tha", "th"): 'Tha', + ("Turkish", "tur", "tr"): 'Tur', + ("Ukrainian", "ukr", "uk"): 'Ukr', + ("Vietnamese", "vie", "vi"): 'Vie', } sub_langs = [] @@ -179,12 +172,12 @@ def get_subtitles(self, meta): for lang, subID in sub_lang_map.items(): if language in lang and subID not in sub_langs: sub_langs.append(subID) - - # if sub_langs == []: + + # if sub_langs == []: # sub_langs = 
[44] # No Subtitle return sub_langs - def get_subs_tag(self, subs): + def get_subs_tag(self, subs): if subs == []: return ' [No subs]' elif 'Eng' in subs: @@ -193,10 +186,6 @@ def get_subs_tag(self, subs): return ' [No Eng subs]' return f" [{subs[0]} subs only]" - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### - async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) @@ -207,48 +196,48 @@ async def upload(self, meta): region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) acm_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: # bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() mi_dump = None bd_dump = "" for each in meta['discs']: bd_dump = bd_dump + each['summary'].strip() + "\n\n" - else: + else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : acm_name, - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : await self.get_keywords(meta['keywords']), - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': acm_name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': await self.get_keywords(meta['keywords']), + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 if region_id != 0: @@ -262,33 +251,29 @@ async def upload(self, meta): 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - - if meta['debug'] == False: + + if meta['debug'] 
is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdb' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(await self.get_type(meta)), + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdb': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(await self.get_type(meta)), # A majority of the ACM library doesn't contain resolution information # 'resolutions[]' : await self.get_res_id(meta['resolution']), # 'name' : "" @@ -302,7 +287,7 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) @@ -328,7 +313,7 @@ async def edit_name(self, meta): if aka != '': # ugly fix to remove the extra space in the title aka = aka + ' ' - name = name.replace (aka, f' / {original_title} {chr(int("202A", 16))}') + name = name.replace(aka, f' / {original_title} {chr(int("202A", 16))}') elif aka == '': if meta.get('title') != original_title: # name = f'{name[:name.find(year)]}/ {original_title} {chr(int("202A", 16))}{name[name.find(year):]}' @@ -336,20 +321,18 @@ async def edit_name(self, meta): if 'AAC' in audio: name = name.replace(audio.strip().replace(" ", " "), audio.replace("AAC ", "AAC")) name = name.replace("DD+ ", "DD+") - name = name.replace ("UHD BluRay REMUX", "Remux") - name = name.replace ("BluRay REMUX", "Remux") - name = name.replace ("H.265", "HEVC") + name = name.replace("UHD BluRay REMUX", "Remux") + name = name.replace("BluRay REMUX", "Remux") + name = name.replace("H.265", "HEVC") if is_disc == 'DVD': - name = name.replace (f'{source} DVD5', f'{resolution} DVD {source}') - name = name.replace (f'{source} DVD9', f'{resolution} DVD {source}') + name = name.replace(f'{source} DVD5', f'{resolution} DVD {source}') + name = name.replace(f'{source} DVD9', f'{resolution} DVD {source}') if audio == meta.get('channels'): - name = name.replace (f'{audio}', f'MPEG {audio}') + name = name.replace(f'{audio}', f'MPEG {audio}') name = name + self.get_subs_tag(subs) return name - - async def edit_desc(self, meta): base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as descfile: @@ -380,14 +363,14 @@ async def edit_desc(self, meta): desc = desc.replace('[img]', '[img=300]') descfile.write(desc) images = meta['image_list'] - if len(images) > 0: + if len(images) > 0: descfile.write("[center]") for each in range(len(images[:int(meta['screens'])])): web_url = images[each]['web_url'] img_url = images[each]['img_url'] descfile.write(f"[url={web_url}][img=350]{img_url}[/img][/url]") descfile.write("[/center]") - if self.signature != None: + if 
self.signature is not None: descfile.write(self.signature) descfile.close() return diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 382ae92d2..a9d485699 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -2,15 +2,14 @@ # import discord import asyncio import requests -from difflib import SequenceMatcher from str2bool import str2bool import json -import os import platform from src.trackers.COMMON import COMMON from src.console import console + class AITHER(): """ Edit for Tracker: @@ -25,12 +24,12 @@ def __init__(self, config): self.source_flag = 'Aither' self.search_url = 'https://aither.cc/api/torrents/filter' self.upload_url = 'https://aither.cc/api/torrents/upload' - self.signature = f"\n[center][url=https://aither.cc/forums/topics/1349/posts/24958]Created by L4G's Upload Assistant[/url][/center]" + self.signature = "\n[center][url=https://aither.cc/forums/topics/1349/posts/24958]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = ['4K4U', 'AROMA', 'd3g', 'edge2020', 'EMBER', 'EVO', 'FGT', 'FreetheFish', 'Hi10', 'HiQVE', 'ION10', 'iVy', 'Judas', 'LAMA', 'MeGusta', 'nikt0', 'OEPlus', 'OFT', 'OsC', 'PYC', 'QxR', 'Ralphy', 'RARBG', 'RetroPeeps', 'SAMPA', 'Sicario', 'Silence', 'SkipTT', 'SPDVD', 'STUTTERSHIT', 'SWTYBLZ', 'TAoE', 'TGx', 'Tigole', 'TSP', 'TSPxL', 'VXT', 'Weasley[HONE]', 'Will1869', 'x0r', 'YIFY'] pass - + async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) @@ -39,11 +38,11 @@ async def upload(self, meta): type_id = await self.get_type_id(meta['type']) resolution_id = await self.get_res_id(meta['resolution']) name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -53,28 +52,28 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : name, - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } headers = { 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} 
{platform.release()})' @@ -84,34 +83,32 @@ async def upload(self, meta): } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - + if meta.get('category') == "TV": data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - async def edit_name(self, meta): aither_name = meta['name'] has_eng_audio = False if meta['is_disc'] != "BDMV": with open(f"{meta.get('base_dir')}/tmp/{meta.get('uuid')}/MediaInfo.json", 'r', encoding='utf-8') as f: mi = json.load(f) - + for track in mi['media']['track']: if track['@type'] == "Audio": if track.get('Language', 'None').startswith('en'): @@ -134,17 +131,17 @@ async def edit_name(self, meta): async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', + 'MOVIE': '1', + 'TV': '2', }.get(category_name, '0') return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', + 'DISC': '1', 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', + 'WEBDL': '4', + 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '3' }.get(type, '0') @@ -152,40 +149,36 @@ async def get_type_id(self, type): async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', - '4320p': '1', - '2160p': '2', - '1440p' : '3', + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', '1080p': '3', - '1080i':'4', - '720p': '5', - '576p': '6', + '1080i': '4', + '720p': '5', + '576p': '6', '576i': '7', - '480p': '8', + '480p': '8', '480i': '9' }.get(resolution, '10') return resolution_id - - - - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta['category'] == 'TV': params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" if meta.get('edition', "") != "": params['name'] = params['name'] + f" {meta['edition']}" - + try: response = requests.get(url=self.search_url, params=params) response = response.json() @@ -194,8 +187,8 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes \ No newline at end of file + return dupes diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index 4aeb241e6..2fcbdd603 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -11,7 +11,7 @@ from pathlib import Path from src.trackers.COMMON import COMMON from src.console import console -from datetime import datetime, date + class ANT(): """ @@ -22,25 +22,21 @@ class ANT(): Upload """ - ############################################################### - # ####### EDIT ME ##### # - ############################################################### - - # ALSO EDIT CLASS NAME ABOVE - def __init__(self, config): self.config = config self.tracker = 'ANT' self.source_flag = 'ANT' self.search_url = 'https://anthelion.me/api.php' self.upload_url = 'https://anthelion.me/api.php' - self.banned_groups = ['3LTON', '4yEo', 'ADE', 'AFG', 'AniHLS', 'AnimeRG', 'AniURL', 'AROMA', 'aXXo', 'Brrip', 'CHD', 'CM8', - 'CrEwSaDe', 'd3g', 'DDR', 'DNL', 'DeadFish', 'ELiTE', 'eSc', 'FaNGDiNG0', 'FGT', 'Flights', 'FRDS', - 'FUM', 'HAiKU', 'HD2DVD', 'HDS', 'HDTime', 'Hi10', 'ION10', 'iPlanet', 'JIVE', 'KiNGDOM', 'Leffe', - 'LiGaS', 'LOAD', 'MeGusta', 'MkvCage', 'mHD', 'mSD', 'NhaNc3', 'nHD', 'NOIVTC', 'nSD', 'Oj', 'Ozlem', - 'PiRaTeS', 'PRoDJi', 'RAPiDCOWS', 'RARBG', 'RetroPeeps', 'RDN', 'REsuRRecTioN', 'RMTeam', 'SANTi', - 'SicFoI', 'SPASM', 'SPDVD', 'STUTTERSHIT', 'TBS', 'Telly', 'TM', 'UPiNSMOKE', 'URANiME', 'WAF', 'xRed', - 'XS', 'YIFY', 'YTS', 'Zeus', 'ZKBL', 'ZmN', 'ZMNT'] + self.banned_groups = [ + '3LTON', '4yEo', 'ADE', 'AFG', 'AniHLS', 'AnimeRG', 'AniURL', 'AROMA', 'aXXo', 'Brrip', 'CHD', 'CM8', + 'CrEwSaDe', 'd3g', 'DDR', 'DNL', 'DeadFish', 'ELiTE', 'eSc', 'FaNGDiNG0', 'FGT', 'Flights', 'FRDS', + 'FUM', 'HAiKU', 'HD2DVD', 'HDS', 'HDTime', 'Hi10', 'ION10', 'iPlanet', 'JIVE', 'KiNGDOM', 'Leffe', + 'LiGaS', 'LOAD', 'MeGusta', 'MkvCage', 'mHD', 'mSD', 'NhaNc3', 'nHD', 'NOIVTC', 'nSD', 'Oj', 'Ozlem', + 'PiRaTeS', 'PRoDJi', 'RAPiDCOWS', 'RARBG', 'RetroPeeps', 'RDN', 'REsuRRecTioN', 'RMTeam', 'SANTi', + 'SicFoI', 'SPASM', 'SPDVD', 'STUTTERSHIT', 'TBS', 'Telly', 'TM', 'UPiNSMOKE', 'URANiME', 'WAF', 'xRed', + 'XS', 'YIFY', 'YTS', 'Zeus', 'ZKBL', 'ZmN', 'ZMNT' + ] self.signature = None pass @@ -66,10 +62,6 @@ async def get_flags(self, meta): flags.append('Remux') return flags - ############################################################### - # #### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ### # - ############################################################### - async def upload(self, meta): common = COMMON(config=self.config) torrent_filename = "BASE" @@ -90,7 +82,7 @@ def calculate_pieces_and_file_size(total_size, piece_size): console.print("[yellow]Regenerating torrent to fit within 1000-2000 pieces and 100 KiB .torrent size limit needed for ANT.") from src.prep import Prep prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) - + # Call create_torrent with the default piece size calculation prep.create_torrent(meta, Path(meta['path']), "ANT") torrent_filename = "ANT" @@ -137,7 +129,7 @@ def calculate_pieces_and_file_size(total_size, piece_size): headers = { 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } - + try: if not meta['debug']: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers) @@ -184,4 +176,4 @@ async def search_existing(self, meta): console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes \ No newline at end of file + return dupes diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index d9e73acdf..66dfbaa61 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -4,13 +4,13 @@ import requests from difflib import SequenceMatcher from str2bool import str2bool -import urllib import os import platform from src.trackers.COMMON import COMMON from src.console import console + class BHD(): """ Edit for Tracker: @@ -24,10 +24,10 @@ def __init__(self, config): self.tracker = 'BHD' self.source_flag = 'BHD' self.upload_url = 'https://beyond-hd.me/api/upload/' - self.signature = f"\n[center][url=https://beyond-hd.me/forums/topic/toolpython-l4gs-upload-assistant.5456]Created by L4G's Upload Assistant[/url][/center]" + self.signature = "\n[center][url=https://beyond-hd.me/forums/topic/toolpython-l4gs-upload-assistant.5456/post/138087#post-138087]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = ['Sicario', 'TOMMY', 'x0r', 'nikt0', 'FGT', 'd3g', 'MeGusta', 'YIFY', 'tigole', 'TEKNO3D', 'C4K', 'RARBG', '4K4U', 'EASports', 'ReaLHD'] pass - + async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) @@ -39,37 +39,37 @@ async def upload(self, meta): tags = await self.get_tags(meta) custom, edition = await self.get_edition(meta, tags) bhd_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - - if meta['bdinfo'] != None: + + if meta['bdinfo'] is not None: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8') else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8') - + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() torrent_file = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" files = { - 'mediainfo' : mi_dump, + 'mediainfo': mi_dump, } if os.path.exists(torrent_file): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files['file'] = open_torrent.read() open_torrent.close() - + data = { - 'name' : bhd_name, - 'category_id' : cat_id, - 'type' : type_id, + 'name': bhd_name, + 'category_id': cat_id, + 'type': type_id, 'source': source_id, - 'imdb_id' : meta['imdb_id'].replace('tt', ''), - 'tmdb_id' : meta['tmdb'], - 'description' : desc, - 'anon' : anon, - 'sd' : meta.get('sd', 0), - 'live' : draft + 'imdb_id': meta['imdb_id'].replace('tt', ''), + 'tmdb_id': meta['tmdb'], + 'description': desc, + 'anon': anon, + 'sd': meta.get('sd', 0), + 'live': draft # 'internal' : 0, # 'featured' : 0, # 'free' : 0, @@ -77,17 +77,17 @@ async def upload(self, meta): # 'sticky' : 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - + if meta.get('tv_pack', 0) == 1: data['pack'] = 1 if meta.get('season', None) == "S00": data['special'] = 1 if meta.get('region', "") != "": data['region'] = meta['region'] - if 
custom == True: + if custom is True: data['custom_edition'] = edition elif edition != "": data['edition'] = edition @@ -96,9 +96,9 @@ async def upload(self, meta): headers = { 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } - + url = self.upload_url + self.config['TRACKERS'][self.tracker]['api_key'].strip() - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=url, files=files, data=data, headers=headers) try: response = response.json() @@ -112,39 +112,33 @@ async def upload(self, meta): elif response['satus_message'].startswith('Invalid name value'): console.print(f"[bold yellow]Submitted Name: {bhd_name}") console.print(response) - except: + except Exception: console.print("It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) - - - - - - async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', + 'MOVIE': '1', + 'TV': '2', }.get(category_name, '1') return category_id async def get_source(self, source): sources = { - "Blu-ray" : "Blu-ray", - "BluRay" : "Blu-ray", - "HDDVD" : "HD-DVD", - "HD DVD" : "HD-DVD", - "Web" : "WEB", - "HDTV" : "HDTV", - "UHDTV" : "HDTV", - "NTSC" : "DVD", "NTSC DVD" : "DVD", - "PAL" : "DVD", "PAL DVD": "DVD", + "Blu-ray": "Blu-ray", + "BluRay": "Blu-ray", + "HDDVD": "HD-DVD", + "HD DVD": "HD-DVD", + "Web": "WEB", + "HDTV": "HDTV", + "UHDTV": "HDTV", + "NTSC": "DVD", "NTSC DVD": "DVD", + "PAL": "DVD", "PAL DVD": "DVD", } - + source_id = sources.get(source) return source_id @@ -166,7 +160,7 @@ async def get_type(self, meta): if "DVD5" in meta['dvd_size']: type_id = "DVD 5" elif "DVD9" in meta['dvd_size']: - type_id = "DVD 9" + type_id = "DVD 9" else: if meta['type'] == "REMUX": if meta['source'] == "BluRay": @@ -185,8 +179,6 @@ async def get_type(self, meta): type_id = "Other" return type_id - - async def edit_desc(self, meta): base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as desc: @@ -210,7 +202,7 @@ async def edit_desc(self, meta): desc.write("\n") desc.write(base.replace("[img]", "[img width=300]")) images = meta['image_list'] - if len(images) > 0: + if len(images) > 0: desc.write("[center]") for each in range(len(images[:int(meta['screens'])])): web_url = images[each]['web_url'] @@ -220,8 +212,6 @@ async def edit_desc(self, meta): desc.write(self.signature) desc.close() return - - async def search_existing(self, meta): dupes = [] @@ -230,9 +220,9 @@ async def search_existing(self, meta): if category == 'MOVIE': category = "Movies" data = { - 'tmdb_id' : meta['tmdb'], - 'categories' : category, - 'types' : await self.get_type(meta), + 'tmdb_id': meta['tmdb'], + 'categories': category, + 'types': await self.get_type(meta), } # Search all releases if SD if meta['sd'] == 1: @@ -254,16 +244,16 @@ async def search_existing(self, meta): dupes.append(result) else: console.print(f"[yellow]{response.get('status_message')}") - await asyncio.sleep(5) - except: + await asyncio.sleep(5) + except Exception: console.print('[bold red]Unable to search for existing torrents on site. 
Most likely the site is down.') await asyncio.sleep(5) return dupes - async def get_live(self, meta): + async def get_live(self, meta): draft = self.config['TRACKERS'][self.tracker]['draft_default'].strip() - draft = bool(str2bool(str(draft))) #0 for send to draft, 1 for live + draft = bool(str2bool(str(draft))) # 0 for send to draft, 1 for live if draft: draft_int = 0 else: @@ -284,7 +274,7 @@ async def get_edition(self, meta, tags): elif edition == "": edition = "" else: - custom = True + custom = True return custom, edition async def get_tags(self, meta): @@ -301,13 +291,13 @@ async def get_tags(self, meta): tags.append('EnglishDub') if "Open Matte" in meta.get('edition', ""): tags.append("OpenMatte") - if meta.get('scene', False) == True: + if meta.get('scene', False) is True: tags.append("Scene") - if meta.get('personalrelease', False) == True: + if meta.get('personalrelease', False) is True: tags.append('Personal') if "hybrid" in meta.get('edition', "").lower(): tags.append('Hybrid') - if meta.get('has_commentary', False) == True: + if meta.get('has_commentary', False) is True: tags.append('Commentary') if "DV" in meta.get('hdr', ''): tags.append('DV') @@ -331,4 +321,4 @@ async def edit_name(self, meta): # name = name.replace('H.264', 'x264') if meta['category'] == "TV" and meta.get('tv_pack', 0) == 0 and meta.get('episode_title_storage', '').strip() != '' and meta['episode'].strip() != '': name = name.replace(meta['episode'], f"{meta['episode']} {meta['episode_title_storage']}", 1) - return name \ No newline at end of file + return name diff --git a/src/trackers/BHDTV.py b/src/trackers/BHDTV.py index ea6f911c1..7d7969067 100644 --- a/src/trackers/BHDTV.py +++ b/src/trackers/BHDTV.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- # import discord -import asyncio -from torf import Torrent import requests from src.console import console from str2bool import str2bool @@ -12,8 +10,6 @@ from pymediainfo import MediaInfo -# from pprint import pprint - class BHDTV(): """ Edit for Tracker: @@ -27,10 +23,10 @@ def __init__(self, config): self.config = config self.tracker = 'BHDTV' self.source_flag = 'BIT-HDTV' - #search not implemented - #self.search_url = 'https://api.bit-hdtv.com/torrent/search/advanced' + # search not implemented + # self.search_url = 'https://api.bit-hdtv.com/torrent/search/advanced' self.upload_url = 'https://www.bit-hdtv.com/takeupload.php' - #self.forum_link = 'https://www.bit-hdtv.com/rules.php' + # self.forum_link = 'https://www.bit-hdtv.com/rules.php' self.banned_groups = [] pass @@ -48,18 +44,16 @@ async def upload(self, meta): # must be TV pack sub_cat_id = await self.get_type_tv_pack_id(meta['type']) - - resolution_id = await self.get_res_id(meta['resolution']) # region_id = await common.unit3d_region_ids(meta.get('region')) # distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) if meta['anon'] == 0 and bool( - str2bool(self.config['TRACKERS'][self.tracker].get('anon', "False"))) == False: + str2bool(self.config['TRACKERS'][self.tracker].get('anon', "False"))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -80,32 +74,31 @@ async def upload(self, meta): data = { 'api_key': self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'name': meta['name'].replace(' ', '.').replace(':.', '.').replace(':', '.').replace('DD+', 'DDP'), - 'mediainfo': mi_dump if bd_dump == None 
else bd_dump, + 'mediainfo': mi_dump if bd_dump is None else bd_dump, 'cat': cat_id, 'subcat': sub_cat_id, 'resolution': resolution_id, - #'anon': anon, + # 'anon': anon, # admins asked to remove short description. 'sdescr': " ", - 'descr': media_info if bd_dump == None else "Disc so Check Mediainfo dump ", + 'descr': media_info if bd_dump is None else "Disc so Check Mediainfo dump ", 'screen': desc, 'url': f"https://www.tvmaze.com/shows/{meta['tvmaze_id']}" if meta['category'] == 'TV' else f"https://www.imdb.com/title/tt{meta['imdb_id']}", 'format': 'json' } - - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, data=data, files=files) try: # pprint(data) console.print(response.json()) - except: - console.print(f"[cyan]It may have uploaded, go check") + except Exception: + console.print("[cyan]It may have uploaded, go check") # cprint(f"Request Data:", 'cyan') pprint(data) console.print(traceback.print_exc()) else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") pprint(data) # # adding my anounce url to torrent. if 'view' in response.json()['data']: @@ -116,7 +109,6 @@ async def upload(self, meta): "Torrent Did not upload") open_torrent.close() - async def get_cat_id(self, meta): category_id = '0' if meta['category'] == 'MOVIE': @@ -128,7 +120,6 @@ async def get_cat_id(self, meta): category_id = '10' return category_id - async def get_type_movie_id(self, meta): type_id = '0' test = meta['type'] @@ -138,7 +129,7 @@ async def get_type_movie_id(self, meta): else: type_id = '2' elif meta['type'] == 'REMUX': - if str(meta['name']).__contains__('265') : + if str(meta['name']).__contains__('265'): type_id = '48' elif meta['3D']: type_id = '45' @@ -147,51 +138,48 @@ async def get_type_movie_id(self, meta): elif meta['type'] == 'HDTV': type_id = '6' elif meta['type'] == 'ENCODE': - if str(meta['name']).__contains__('265') : + if str(meta['name']).__contains__('265'): type_id = '43' elif meta['3D']: type_id = '44' else: type_id = '1' elif meta['type'] == 'WEBDL' or meta['type'] == 'WEBRIP': - type_id = '5' + type_id = '5' return type_id - async def get_type_tv_id(self, type): type_id = { 'HDTV': '7', 'WEBDL': '8', 'WEBRIP': '8', - #'WEBRIP': '55', - #'SD': '59', + # 'WEBRIP': '55', + # 'SD': '59', 'ENCODE': '10', 'REMUX': '11', 'DISC': '12', }.get(type, '0') return type_id - async def get_type_tv_pack_id(self, type): type_id = { 'HDTV': '13', 'WEBDL': '14', 'WEBRIP': '8', - #'WEBRIP': '55', - #'SD': '59', + # 'WEBRIP': '55', + # 'SD': '59', 'ENCODE': '16', 'REMUX': '17', 'DISC': '18', }.get(type, '0') return type_id - async def get_res_id(self, resolution): resolution_id = { '2160p': '4', '1080p': '3', - '1080i':'2', + '1080i': '2', '720p': '1' }.get(resolution, '10') return resolution_id @@ -211,6 +199,6 @@ async def edit_desc(self, meta): return async def search_existing(self, meta): - console.print(f"[red]Dupes must be checked Manually") + console.print("[red]Dupes must be checked Manually") return ['Dupes must be checked Manually'] - ### hopefully someone else has the time to implement this. + # hopefully someone else has the time to implement this. 
diff --git a/src/trackers/BLU.py b/src/trackers/BLU.py index 9f2757327..571e8451d 100644 --- a/src/trackers/BLU.py +++ b/src/trackers/BLU.py @@ -2,13 +2,13 @@ # import discord import asyncio import requests -import os import platform from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console + class BLU(): """ Edit for Tracker: @@ -23,21 +23,20 @@ def __init__(self, config): self.source_flag = 'BLU' self.search_url = 'https://blutopia.cc/api/torrents/filter' self.torrent_url = 'https://blutopia.cc/api/torrents/' - self.upload_url = 'https://blutopia.cc/api/torrents/upload' - self.signature = f"\n[center][url=https://blutopia.cc/forums/topics/3087]Created by L4G's Upload Assistant[/url][/center]" + self.upload_url = 'https://blutopia.cc/api/torrents/upload' + self.signature = "\n[center][url=https://blutopia.cc/forums/topics/3087/posts/42941]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = [ - '[Oj]', '3LTON', '4yEo', 'ADE', 'AFG', 'AniHLS', 'AnimeRG', 'AniURL', 'AROMA', 'aXXo', 'Brrip', 'CHD', 'CM8', 'CrEwSaDe', 'd3g', 'DeadFish', 'DNL', 'ELiTE', 'eSc', 'FaNGDiNG0', 'FGT', 'Flights', + '[Oj]', '3LTON', '4yEo', 'ADE', 'AFG', 'AniHLS', 'AnimeRG', 'AniURL', 'AROMA', 'aXXo', 'Brrip', 'CHD', 'CM8', 'CrEwSaDe', 'd3g', 'DeadFish', 'DNL', 'ELiTE', 'eSc', 'FaNGDiNG0', 'FGT', 'Flights', 'FRDS', 'FUM', 'HAiKU', 'HD2DVD', 'HDS', 'HDTime', 'Hi10', 'ION10', 'iPlanet', 'JIVE', 'KiNGDOM', 'Leffe', 'LEGi0N', 'LOAD', 'MeGusta', 'mHD', 'mSD', 'NhaNc3', 'nHD', 'nikt0', 'NOIVTC', 'OFT', 'nSD', 'PiRaTeS', 'playBD', 'PlaySD', 'playXD', 'PRODJi', 'RAPiDCOWS', 'RARBG', 'RetroPeeps', 'RDN', 'REsuRRecTioN', 'RMTeam', 'SANTi', 'SicFoI', 'SPASM', 'SPDVD', 'STUTTERSHIT', 'Telly', 'TM', 'TRiToN', 'UPiNSMOKE', 'URANiME', 'WAF', 'x0r', 'xRed', 'XS', 'YIFY', 'ZKBL', 'ZmN', 'ZMNT', 'AOC', ['EVO', 'Raw Content Only'], ['TERMiNAL', 'Raw Content Only'], ['ViSION', 'Note the capitalization and characters used'], ['CMRG', 'Raw Content Only'] ] - + pass - + async def upload(self, meta): common = COMMON(config=self.config) - blu_name = meta['name'] desc_header = "" if meta.get('webdv', False): @@ -49,12 +48,12 @@ async def upload(self, meta): resolution_id = await self.get_res_id(meta['resolution']) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -64,34 +63,34 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[BLU]{meta['clean_name']}.torrent", 'rb') files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")} data = { - 'name' : blu_name, - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : 
int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': blu_name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - + if region_id != 0: data['region_id'] = region_id if distributor_id != 0: @@ -105,28 +104,24 @@ async def upload(self, meta): params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - - if meta['debug'] == False: + + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") - - return + + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - async def get_cat_id(self, category_name, edition): category_id = { - 'MOVIE': '1', - 'TV': '2', + 'MOVIE': '1', + 'TV': '2', 'FANRES': '3' }.get(category_name, '0') if category_name == 'MOVIE' and 'FANRES' in edition: @@ -135,10 +130,10 @@ async def get_cat_id(self, category_name, edition): async def get_type_id(self, type): type_id = { - 'DISC': '1', + 'DISC': '1', 'REMUX': '3', - 'WEBDL': '4', - 'WEBRIP': '5', + 'WEBDL': '4', + 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '12' }.get(type, '0') @@ -146,16 +141,16 @@ async def get_type_id(self, type): async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', - '4320p': '11', - '2160p': '1', - '1440p' : '2', + '8640p': '10', + '4320p': '11', + '2160p': '1', + '1440p': '2', '1080p': '2', - '1080i':'3', - '720p': '5', - '576p': '6', + '1080i': '3', + '720p': '5', + '576p': '6', '576i': '7', - '480p': '8', + '480p': '8', '480i': '9' }.get(resolution, '10') return resolution_id @@ -173,12 +168,12 @@ async def derived_dv_layer(self, meta): if cli_ui.ask_yes_no("Is the DV Layer sourced from the same service as the video?"): ask_comp = False desc_header = "[code]This release contains a derived Dolby Vision profile 8 layer. Comparisons not required as DV and HDR are from same provider.[/code]" - + if ask_comp: while desc_header == "": desc_input = cli_ui.ask_string("Please provide comparisons between HDR masters. (link or bbcode)", default="") desc_header = f"[code]This release contains a derived Dolby Vision profile 8 layer. 
Comparisons between HDR masters: {desc_input}[/code]" - + if "hybrid" not in name.lower(): if "REPACK" in name: name = name.replace('REPACK', 'Hybrid REPACK') @@ -186,17 +181,16 @@ async def derived_dv_layer(self, meta): name = name.replace(meta['resolution'], f"Hybrid {meta['resolution']}") return name, desc_header - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category'], meta.get('edition', '')), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category'], meta.get('edition', '')), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta['category'] == 'TV': params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" @@ -210,7 +204,7 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) diff --git a/src/trackers/CBR.py b/src/trackers/CBR.py index 445f4e9d5..2c26c0897 100644 --- a/src/trackers/CBR.py +++ b/src/trackers/CBR.py @@ -3,14 +3,12 @@ import asyncio import requests from str2bool import str2bool -import os import platform from src.trackers.COMMON import COMMON from src.console import console - class CBR(): """ Edit for Tracker: @@ -25,11 +23,11 @@ def __init__(self, config): self.source_flag = 'CapybaraBR' self.search_url = 'https://capybarabr.com/api/torrents/filter' self.torrent_url = 'https://capybarabr.com/api/torrents/' - self.upload_url = 'https://capybarabr.com/api/torrents/upload' - self.signature = f"\n[center][img]https://i.ibb.co/tYNzwgd/thanks-cbr.png[/img][/center]" + self.upload_url = 'https://capybarabr.com/api/torrents/upload' + self.signature = "\n[center][img]https://i.ibb.co/tYNzwgd/thanks-cbr.png[/img][/center]" self.banned_groups = [""] pass - + async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) @@ -40,12 +38,12 @@ async def upload(self, meta): region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2obool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -55,31 +53,31 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[CBR]{meta['clean_name']}.torrent", 'rb') files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")} data = { - 'name' : name, - 'description' : desc, - 
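The BLU and CBR upload() methods both come down to one requests.post() against the tracker's /api/torrents/upload endpoint; the hunks in this patch only reformat that call, so its shape is worth spelling out once. A reduced sketch assuming the field names shown in the diff (the headers dict and most data fields are omitted; upload_torrent is this note's own wrapper, not the project's API):

import requests

def upload_torrent(upload_url: str, api_key: str, torrent_path: str, data: dict) -> dict:
    # The torrent file is sent under a constant placeholder name, as in the diff.
    with open(torrent_path, "rb") as fh:
        files = {"torrent": ("placeholder.torrent", fh, "application/x-bittorrent")}
        response = requests.post(
            url=upload_url,                        # e.g. https://blutopia.cc/api/torrents/upload
            files=files,
            data=data,                             # name/description/category_id/... as built above
            params={"api_token": api_key.strip()},
        )
    try:
        return response.json()
    except ValueError:                             # mirrors the "It may have uploaded" fallback
        return {"raw": response.text}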
'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 @@ -96,72 +94,65 @@ async def upload(self, meta): params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - - if meta['debug'] == False: + + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - async def get_cat_id(self, category_name, edition, meta): category_id = { - 'MOVIE': '1', + 'MOVIE': '1', 'TV': '2', 'ANIMES': '4' }.get(category_name, '0') - if meta['anime'] == True and category_id == '2': + if meta['anime'] is True and category_id == '2': category_id = '4' return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', + 'DISC': '1', 'REMUX': '2', 'ENCODE': '3', - 'WEBDL': '4', - 'WEBRIP': '5', + 'WEBDL': '4', + 'WEBRIP': '5', 'HDTV': '6' }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { - '4320p': '1', - '2160p': '2', + '4320p': '1', + '2160p': '2', '1080p': '3', - '1080i':'4', - '720p': '5', - '576p': '6', + '1080i': '4', + '720p': '5', + '576p': '6', '576i': '7', - '480p': '8', + '480p': '8', '480i': '9', 'Other': '10', }.get(resolution, '10') return resolution_id - - - async def search_existing(self, meta): dupes = [] console.print("[yellow]Buscando por duplicatas no tracker...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category'], meta.get('edition', ''), meta), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category'], meta.get('edition', ''), meta), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if 
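The get_cat_id/get_type_id/get_res_id helpers touched above are all the same pattern: a literal mapping consulted with dict.get() and a site-specific fallback id. Sketched here with the CBR resolution table from this hunk (the module-level constant is this note's own arrangement of those ids):

# Resolution ids copied from the CBR hunk; unknown values fall back to 'Other' (id 10).
CBR_RESOLUTION_IDS = {
    '4320p': '1', '2160p': '2', '1080p': '3', '1080i': '4', '720p': '5',
    '576p': '6', '576i': '7', '480p': '8', '480i': '9', 'Other': '10',
}

def get_res_id(resolution: str) -> str:
    return CBR_RESOLUTION_IDS.get(resolution, '10')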
meta['category'] == 'TV': params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" @@ -175,15 +166,14 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Não foi possivel buscar no tracker torrents duplicados. O tracker está offline ou sua api está incorreta') await asyncio.sleep(5) return dupes async def edit_name(self, meta): - - - name = meta['uuid'].replace('.mkv','').replace('.mp4','').replace(".", " ").replace("DDP2 0","DDP2.0").replace("DDP5 1","DDP5.1").replace("H 264","H.264").replace("H 265","H.265").replace("DD+7 1","DDP7.1").replace("AAC2 0","AAC2.0").replace('DD5 1','DD5.1').replace('DD2 0','DD2.0').replace('TrueHD 7 1','TrueHD 7.1').replace('DTS-HD MA 7 1','DTS-HD MA 7.1').replace('DTS-HD MA 5 1','DTS-HD MA 5.1').replace("TrueHD 5 1","TrueHD 5.1").replace("DTS-X 7 1","DTS-X 7.1").replace("DTS-X 5 1","DTS-X 5.1").replace("FLAC 2 0","FLAC 2.0").replace("FLAC 2 0","FLAC 2.0").replace("FLAC 5 1","FLAC 5.1").replace("DD1 0","DD1.0").replace("DTS ES 5 1","DTS ES 5.1").replace("DTS5 1","DTS 5.1").replace("AAC1 0","AAC1.0").replace("DD+5 1","DDP5.1").replace("DD+2 0","DDP2.0").replace("DD+1 0","DDP1.0") - + + name = meta['uuid'].replace('.mkv', '').replace('.mp4', '').replace(".", " ").replace("DDP2 0", "DDP2.0").replace("DDP5 1", "DDP5.1").replace("H 264", "H.264").replace("H 265", "H.265").replace("DD+7 1", "DDP7.1").replace("AAC2 0", "AAC2.0").replace('DD5 1', 'DD5.1').replace('DD2 0', 'DD2.0').replace('TrueHD 7 1', 'TrueHD 7.1').replace('DTS-HD MA 7 1', 'DTS-HD MA 7.1').replace('DTS-HD MA 5 1', 'DTS-HD MA 5.1').replace("TrueHD 5 1", "TrueHD 5.1").replace("DTS-X 7 1", "DTS-X 7.1").replace("DTS-X 5 1", "DTS-X 5.1").replace("FLAC 2 0", "FLAC 2.0").replace("FLAC 5 1", "FLAC 5.1").replace("DD1 0", "DD1.0").replace("DTS ES 5 1", "DTS ES 5.1").replace("DTS5 1", "DTS 5.1").replace("AAC1 0", "AAC1.0").replace("DD+5 1", "DDP5.1").replace("DD+2 0", "DDP2.0").replace("DD+1 0", "DDP1.0") + return name diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 87fc0ccfb..1fbcd8767 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -1,13 +1,14 @@ from torf import Torrent import os -import traceback import requests import re import json +import click from src.bbcode import BBCODE from src.console import console + class COMMON(): def __init__(self, config): self.config = config @@ -31,14 +32,13 @@ async def add_tracker_torrent(self, meta, tracker, source_flag, new_tracker, com new_torrent.metainfo['comment'] = comment new_torrent.metainfo['info']['source'] = source_flag Torrent.copy(new_torrent).write(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]{meta['clean_name']}.torrent", overwrite=True) - - + async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, desc_header=""): base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf8').read() with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]DESCRIPTION.txt", 'w', encoding='utf8') as descfile: if desc_header != "": descfile.write(desc_header) - + bbcode = BBCODE() if meta.get('discs', []) != []: discs = meta['discs'] @@ -61,13 +61,13 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des desc = base desc = bbcode.convert_pre_to_code(desc) desc = bbcode.convert_hide_to_spoiler(desc) - if comparison == False: + if comparison is False: desc = 
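CBR's edit_name() above is a single chain of roughly twenty-five str.replace() calls that restores the dots the uuid-to-name conversion strips out of audio and codec tokens (the new hunk also drops a duplicated "FLAC 2 0" replacement). An equivalent table-driven form, shown purely as an illustration with a subset of the tokens:

# Tokens broken by the '.' -> ' ' pass, mapped back to their dotted forms.
AUDIO_TOKEN_FIXES = {
    "DDP2 0": "DDP2.0", "DDP5 1": "DDP5.1", "H 264": "H.264", "H 265": "H.265",
    "DD5 1": "DD5.1", "DD2 0": "DD2.0", "TrueHD 7 1": "TrueHD 7.1",
    "DTS-HD MA 7 1": "DTS-HD MA 7.1", "DTS-HD MA 5 1": "DTS-HD MA 5.1",
    "FLAC 2 0": "FLAC 2.0", "FLAC 5 1": "FLAC 5.1", "DD+5 1": "DDP5.1",
}

def edit_name(uuid: str) -> str:
    # Same order as the original: strip extensions, dots to spaces, then repair tokens.
    name = uuid.replace(".mkv", "").replace(".mp4", "").replace(".", " ")
    for broken, fixed in AUDIO_TOKEN_FIXES.items():
        name = name.replace(broken, fixed)
    return name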
bbcode.convert_comparison_to_collapse(desc, 1000) - + desc = desc.replace('[img]', '[img=300]') descfile.write(desc) images = meta['image_list'] - if len(images) > 0: + if len(images) > 0: descfile.write("[center]") for each in range(len(images[:int(meta['screens'])])): web_url = images[each]['web_url'] @@ -75,109 +75,150 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des descfile.write(f"[url={web_url}][img=350]{raw_url}[/img][/url]") descfile.write("[/center]") - if signature != None: + if signature is not None: descfile.write(signature) descfile.close() - return - + return - - async def unit3d_region_ids(self, region): region_id = { - 'AFG': 1, 'AIA': 2, 'ALA': 3, 'ALG': 4, 'AND': 5, 'ANG': 6, 'ARG': 7, 'ARM': 8, 'ARU': 9, - 'ASA': 10, 'ATA': 11, 'ATF': 12, 'ATG': 13, 'AUS': 14, 'AUT': 15, 'AZE': 16, 'BAH': 17, - 'BAN': 18, 'BDI': 19, 'BEL': 20, 'BEN': 21, 'BER': 22, 'BES': 23, 'BFA': 24, 'BHR': 25, - 'BHU': 26, 'BIH': 27, 'BLM': 28, 'BLR': 29, 'BLZ': 30, 'BOL': 31, 'BOT': 32, 'BRA': 33, - 'BRB': 34, 'BRU': 35, 'BVT': 36, 'CAM': 37, 'CAN': 38, 'CAY': 39, 'CCK': 40, 'CEE': 41, - 'CGO': 42, 'CHA': 43, 'CHI': 44, 'CHN': 45, 'CIV': 46, 'CMR': 47, 'COD': 48, 'COK': 49, - 'COL': 50, 'COM': 51, 'CPV': 52, 'CRC': 53, 'CRO': 54, 'CTA': 55, 'CUB': 56, 'CUW': 57, - 'CXR': 58, 'CYP': 59, 'DJI': 60, 'DMA': 61, 'DOM': 62, 'ECU': 63, 'EGY': 64, 'ENG': 65, - 'EQG': 66, 'ERI': 67, 'ESH': 68, 'ESP': 69, 'ETH': 70, 'FIJ': 71, 'FLK': 72, 'FRA': 73, - 'FRO': 74, 'FSM': 75, 'GAB': 76, 'GAM': 77, 'GBR': 78, 'GEO': 79, 'GER': 80, 'GGY': 81, - 'GHA': 82, 'GIB': 83, 'GLP': 84, 'GNB': 85, 'GRE': 86, 'GRL': 87, 'GRN': 88, 'GUA': 89, - 'GUF': 90, 'GUI': 91, 'GUM': 92, 'GUY': 93, 'HAI': 94, 'HKG': 95, 'HMD': 96, 'HON': 97, - 'HUN': 98, 'IDN': 99, 'IMN': 100, 'IND': 101, 'IOT': 102, 'IRL': 103, 'IRN': 104, 'IRQ': 105, - 'ISL': 106, 'ISR': 107, 'ITA': 108, 'JAM': 109, 'JEY': 110, 'JOR': 111, 'JPN': 112, 'KAZ': 113, - 'KEN': 114, 'KGZ': 115, 'KIR': 116, 'KNA': 117, 'KOR': 118, 'KSA': 119, 'KUW': 120, 'KVX': 121, - 'LAO': 122, 'LBN': 123, 'LBR': 124, 'LBY': 125, 'LCA': 126, 'LES': 127, 'LIE': 128, 'LKA': 129, - 'LUX': 130, 'MAC': 131, 'MAD': 132, 'MAF': 133, 'MAR': 134, 'MAS': 135, 'MDA': 136, 'MDV': 137, - 'MEX': 138, 'MHL': 139, 'MKD': 140, 'MLI': 141, 'MLT': 142, 'MNG': 143, 'MNP': 144, 'MON': 145, - 'MOZ': 146, 'MRI': 147, 'MSR': 148, 'MTN': 149, 'MTQ': 150, 'MWI': 151, 'MYA': 152, 'MYT': 153, - 'NAM': 154, 'NCA': 155, 'NCL': 156, 'NEP': 157, 'NFK': 158, 'NIG': 159, 'NIR': 160, 'NIU': 161, - 'NLD': 162, 'NOR': 163, 'NRU': 164, 'NZL': 165, 'OMA': 166, 'PAK': 167, 'PAN': 168, 'PAR': 169, - 'PCN': 170, 'PER': 171, 'PHI': 172, 'PLE': 173, 'PLW': 174, 'PNG': 175, 'POL': 176, 'POR': 177, - 'PRK': 178, 'PUR': 179, 'QAT': 180, 'REU': 181, 'ROU': 182, 'RSA': 183, 'RUS': 184, 'RWA': 185, - 'SAM': 186, 'SCO': 187, 'SDN': 188, 'SEN': 189, 'SEY': 190, 'SGS': 191, 'SHN': 192, 'SIN': 193, - 'SJM': 194, 'SLE': 195, 'SLV': 196, 'SMR': 197, 'SOL': 198, 'SOM': 199, 'SPM': 200, 'SRB': 201, - 'SSD': 202, 'STP': 203, 'SUI': 204, 'SUR': 205, 'SWZ': 206, 'SXM': 207, 'SYR': 208, 'TAH': 209, - 'TAN': 210, 'TCA': 211, 'TGA': 212, 'THA': 213, 'TJK': 214, 'TKL': 215, 'TKM': 216, 'TLS': 217, - 'TOG': 218, 'TRI': 219, 'TUN': 220, 'TUR': 221, 'TUV': 222, 'TWN': 223, 'UAE': 224, 'UGA': 225, - 'UKR': 226, 'UMI': 227, 'URU': 228, 'USA': 229, 'UZB': 230, 'VAN': 231, 'VAT': 232, 'VEN': 233, - 'VGB': 234, 'VIE': 235, 'VIN': 236, 'VIR': 237, 'WAL': 238, 'WLF': 239, 'YEM': 240, 'ZAM': 241, - 'ZIM': 242, 'EUR' : 243 + 
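The unit3d_edit_desc() hunk above ends by appending up to meta['screens'] screenshot thumbnails inside one [center] block, one [url=web_url][img=350]raw_url[/img][/url] entry per image. The same block as a standalone function (screenshot_bbcode is this note's own name; the bbcode shape and dict keys come from the diff):

def screenshot_bbcode(image_list: list, screens: int) -> str:
    # Builds the [center] gallery that unit3d_edit_desc() writes before the signature.
    parts = ["[center]"]
    for img in image_list[:screens]:
        parts.append(f"[url={img['web_url']}][img=350]{img['raw_url']}[/img][/url]")
    parts.append("[/center]")
    return "".join(parts)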
'AFG': 1, 'AIA': 2, 'ALA': 3, 'ALG': 4, 'AND': 5, 'ANG': 6, 'ARG': 7, 'ARM': 8, 'ARU': 9, + 'ASA': 10, 'ATA': 11, 'ATF': 12, 'ATG': 13, 'AUS': 14, 'AUT': 15, 'AZE': 16, 'BAH': 17, + 'BAN': 18, 'BDI': 19, 'BEL': 20, 'BEN': 21, 'BER': 22, 'BES': 23, 'BFA': 24, 'BHR': 25, + 'BHU': 26, 'BIH': 27, 'BLM': 28, 'BLR': 29, 'BLZ': 30, 'BOL': 31, 'BOT': 32, 'BRA': 33, + 'BRB': 34, 'BRU': 35, 'BVT': 36, 'CAM': 37, 'CAN': 38, 'CAY': 39, 'CCK': 40, 'CEE': 41, + 'CGO': 42, 'CHA': 43, 'CHI': 44, 'CHN': 45, 'CIV': 46, 'CMR': 47, 'COD': 48, 'COK': 49, + 'COL': 50, 'COM': 51, 'CPV': 52, 'CRC': 53, 'CRO': 54, 'CTA': 55, 'CUB': 56, 'CUW': 57, + 'CXR': 58, 'CYP': 59, 'DJI': 60, 'DMA': 61, 'DOM': 62, 'ECU': 63, 'EGY': 64, 'ENG': 65, + 'EQG': 66, 'ERI': 67, 'ESH': 68, 'ESP': 69, 'ETH': 70, 'FIJ': 71, 'FLK': 72, 'FRA': 73, + 'FRO': 74, 'FSM': 75, 'GAB': 76, 'GAM': 77, 'GBR': 78, 'GEO': 79, 'GER': 80, 'GGY': 81, + 'GHA': 82, 'GIB': 83, 'GLP': 84, 'GNB': 85, 'GRE': 86, 'GRL': 87, 'GRN': 88, 'GUA': 89, + 'GUF': 90, 'GUI': 91, 'GUM': 92, 'GUY': 93, 'HAI': 94, 'HKG': 95, 'HMD': 96, 'HON': 97, + 'HUN': 98, 'IDN': 99, 'IMN': 100, 'IND': 101, 'IOT': 102, 'IRL': 103, 'IRN': 104, 'IRQ': 105, + 'ISL': 106, 'ISR': 107, 'ITA': 108, 'JAM': 109, 'JEY': 110, 'JOR': 111, 'JPN': 112, 'KAZ': 113, + 'KEN': 114, 'KGZ': 115, 'KIR': 116, 'KNA': 117, 'KOR': 118, 'KSA': 119, 'KUW': 120, 'KVX': 121, + 'LAO': 122, 'LBN': 123, 'LBR': 124, 'LBY': 125, 'LCA': 126, 'LES': 127, 'LIE': 128, 'LKA': 129, + 'LUX': 130, 'MAC': 131, 'MAD': 132, 'MAF': 133, 'MAR': 134, 'MAS': 135, 'MDA': 136, 'MDV': 137, + 'MEX': 138, 'MHL': 139, 'MKD': 140, 'MLI': 141, 'MLT': 142, 'MNG': 143, 'MNP': 144, 'MON': 145, + 'MOZ': 146, 'MRI': 147, 'MSR': 148, 'MTN': 149, 'MTQ': 150, 'MWI': 151, 'MYA': 152, 'MYT': 153, + 'NAM': 154, 'NCA': 155, 'NCL': 156, 'NEP': 157, 'NFK': 158, 'NIG': 159, 'NIR': 160, 'NIU': 161, + 'NLD': 162, 'NOR': 163, 'NRU': 164, 'NZL': 165, 'OMA': 166, 'PAK': 167, 'PAN': 168, 'PAR': 169, + 'PCN': 170, 'PER': 171, 'PHI': 172, 'PLE': 173, 'PLW': 174, 'PNG': 175, 'POL': 176, 'POR': 177, + 'PRK': 178, 'PUR': 179, 'QAT': 180, 'REU': 181, 'ROU': 182, 'RSA': 183, 'RUS': 184, 'RWA': 185, + 'SAM': 186, 'SCO': 187, 'SDN': 188, 'SEN': 189, 'SEY': 190, 'SGS': 191, 'SHN': 192, 'SIN': 193, + 'SJM': 194, 'SLE': 195, 'SLV': 196, 'SMR': 197, 'SOL': 198, 'SOM': 199, 'SPM': 200, 'SRB': 201, + 'SSD': 202, 'STP': 203, 'SUI': 204, 'SUR': 205, 'SWZ': 206, 'SXM': 207, 'SYR': 208, 'TAH': 209, + 'TAN': 210, 'TCA': 211, 'TGA': 212, 'THA': 213, 'TJK': 214, 'TKL': 215, 'TKM': 216, 'TLS': 217, + 'TOG': 218, 'TRI': 219, 'TUN': 220, 'TUR': 221, 'TUV': 222, 'TWN': 223, 'UAE': 224, 'UGA': 225, + 'UKR': 226, 'UMI': 227, 'URU': 228, 'USA': 229, 'UZB': 230, 'VAN': 231, 'VAT': 232, 'VEN': 233, + 'VGB': 234, 'VIE': 235, 'VIN': 236, 'VIR': 237, 'WAL': 238, 'WLF': 239, 'YEM': 240, 'ZAM': 241, + 'ZIM': 242, 'EUR': 243 }.get(region, 0) return region_id async def unit3d_distributor_ids(self, distributor): distributor_id = { - '01 DISTRIBUTION': 1, '100 DESTINATIONS TRAVEL FILM': 2, '101 FILMS': 3, '1FILMS': 4, '2 ENTERTAIN VIDEO': 5, '20TH CENTURY FOX': 6, '2L': 7, '3D CONTENT HUB': 8, '3D MEDIA': 9, '3L FILM': 10, '4DIGITAL': 11, '4DVD': 12, '4K ULTRA HD MOVIES': 13, '4K UHD': 13, '8-FILMS': 14, '84 ENTERTAINMENT': 15, '88 FILMS': 16, '@ANIME': 17, 'ANIME': 17, 'A CONTRACORRIENTE': 18, 'A CONTRACORRIENTE FILMS': 19, 'A&E HOME VIDEO': 20, 'A&E': 20, 'A&M RECORDS': 21, 'A+E NETWORKS': 22, 'A+R': 23, 'A-FILM': 24, 'AAA': 25, 'AB VIDÉO': 26, 'AB VIDEO': 26, 'ABC - (AUSTRALIAN BROADCASTING 
CORPORATION)': 27, 'ABC': 27, 'ABKCO': 28, 'ABSOLUT MEDIEN': 29, 'ABSOLUTE': 30, 'ACCENT FILM ENTERTAINMENT': 31, 'ACCENTUS': 32, 'ACORN MEDIA': 33, 'AD VITAM': 34, 'ADA': 35, 'ADITYA VIDEOS': 36, 'ADSO FILMS': 37, 'AFM RECORDS': 38, 'AGFA': 39, 'AIX RECORDS': 40, 'ALAMODE FILM': 41, 'ALBA RECORDS': 42, 'ALBANY RECORDS': 43, 'ALBATROS': 44, 'ALCHEMY': 45, 'ALIVE': 46, 'ALL ANIME': 47, 'ALL INTERACTIVE ENTERTAINMENT': 48, 'ALLEGRO': 49, 'ALLIANCE': 50, 'ALPHA MUSIC': 51, 'ALTERDYSTRYBUCJA': 52, 'ALTERED INNOCENCE': 53, 'ALTITUDE FILM DISTRIBUTION': 54, 'ALUCARD RECORDS': 55, 'AMAZING D.C.': 56, 'AMAZING DC': 56, 'AMMO CONTENT': 57, 'AMUSE SOFT ENTERTAINMENT': 58, 'ANCONNECT': 59, 'ANEC': 60, 'ANIMATSU': 61, 'ANIME HOUSE': 62, 'ANIME LTD': 63, 'ANIME WORKS': 64, 'ANIMEIGO': 65, 'ANIPLEX': 66, 'ANOLIS ENTERTAINMENT': 67, 'ANOTHER WORLD ENTERTAINMENT': 68, 'AP INTERNATIONAL': 69, 'APPLE': 70, 'ARA MEDIA': 71, 'ARBELOS': 72, 'ARC ENTERTAINMENT': 73, 'ARP SÉLECTION': 74, 'ARP SELECTION': 74, 'ARROW': 75, 'ART SERVICE': 76, 'ART VISION': 77, 'ARTE ÉDITIONS': 78, 'ARTE EDITIONS': 78, 'ARTE VIDÉO': 79, 'ARTE VIDEO': 79, 'ARTHAUS MUSIK': 80, 'ARTIFICIAL EYE': 81, 'ARTSPLOITATION FILMS': 82, 'ARTUS FILMS': 83, 'ASCOT ELITE HOME ENTERTAINMENT': 84, 'ASIA VIDEO': 85, 'ASMIK ACE': 86, 'ASTRO RECORDS & FILMWORKS': 87, 'ASYLUM': 88, 'ATLANTIC FILM': 89, 'ATLANTIC RECORDS': 90, 'ATLAS FILM': 91, 'AUDIO VISUAL ENTERTAINMENT': 92, 'AURO-3D CREATIVE LABEL': 93, 'AURUM': 94, 'AV VISIONEN': 95, 'AV-JET': 96, 'AVALON': 97, 'AVENTI': 98, 'AVEX TRAX': 99, 'AXIOM': 100, 'AXIS RECORDS': 101, 'AYNGARAN': 102, 'BAC FILMS': 103, 'BACH FILMS': 104, 'BANDAI VISUAL': 105, 'BARCLAY': 106, 'BBC': 107, 'BRITISH BROADCASTING CORPORATION': 107, 'BBI FILMS': 108, 'BBI': 108, 'BCI HOME ENTERTAINMENT': 109, 'BEGGARS BANQUET': 110, 'BEL AIR CLASSIQUES': 111, 'BELGA FILMS': 112, 'BELVEDERE': 113, 'BENELUX FILM DISTRIBUTORS': 114, 'BENNETT-WATT MEDIA': 115, 'BERLIN CLASSICS': 116, 'BERLINER PHILHARMONIKER RECORDINGS': 117, 'BEST ENTERTAINMENT': 118, 'BEYOND HOME ENTERTAINMENT': 119, 'BFI VIDEO': 120, 'BFI': 120, 'BRITISH FILM INSTITUTE': 120, 'BFS ENTERTAINMENT': 121, 'BFS': 121, 'BHAVANI': 122, 'BIBER RECORDS': 123, 'BIG HOME VIDEO': 124, 'BILDSTÖRUNG': 125, 'BILDSTORUNG': 125, 'BILL ZEBUB': 126, 'BIRNENBLATT': 127, 'BIT WEL': 128, 'BLACK BOX': 129, 'BLACK HILL PICTURES': 130, 'BLACK HILL': 130, 'BLACK HOLE RECORDINGS': 131, 'BLACK HOLE': 131, 'BLAQOUT': 132, 'BLAUFIELD MUSIC': 133, 'BLAUFIELD': 133, 'BLOCKBUSTER ENTERTAINMENT': 134, 'BLOCKBUSTER': 134, 'BLU PHASE MEDIA': 135, 'BLU-RAY ONLY': 136, 'BLU-RAY': 136, 'BLURAY ONLY': 136, 'BLURAY': 136, 'BLUE GENTIAN RECORDS': 137, 'BLUE KINO': 138, 'BLUE UNDERGROUND': 139, 'BMG/ARISTA': 140, 'BMG': 140, 'BMGARISTA': 140, 'BMG ARISTA': 140, 'ARISTA': - 140, 'ARISTA/BMG': 140, 'ARISTABMG': 140, 'ARISTA BMG': 140, 'BONTON FILM': 141, 'BONTON': 141, 'BOOMERANG PICTURES': 142, 'BOOMERANG': 142, 'BQHL ÉDITIONS': 143, 'BQHL EDITIONS': 143, 'BQHL': 143, 'BREAKING GLASS': 144, 'BRIDGESTONE': 145, 'BRINK': 146, 'BROAD GREEN PICTURES': 147, 'BROAD GREEN': 147, 'BUSCH MEDIA GROUP': 148, 'BUSCH': 148, 'C MAJOR': 149, 'C.B.S.': 150, 'CAICHANG': 151, 'CALIFÓRNIA FILMES': 152, 'CALIFORNIA FILMES': 152, 'CALIFORNIA': 152, 'CAMEO': 153, 'CAMERA OBSCURA': 154, 'CAMERATA': 155, 'CAMP MOTION PICTURES': 156, 'CAMP MOTION': 156, 'CAPELIGHT PICTURES': 157, 'CAPELIGHT': 157, 'CAPITOL': 159, 'CAPITOL RECORDS': 159, 'CAPRICCI': 160, 'CARGO RECORDS': 161, 'CARLOTTA FILMS': 162, 'CARLOTTA': 162, 'CARLOTA': 162, 
'CARMEN FILM': 163, 'CASCADE': 164, 'CATCHPLAY': 165, 'CAULDRON FILMS': 166, 'CAULDRON': 166, 'CBS TELEVISION STUDIOS': 167, 'CBS': 167, 'CCTV': 168, 'CCV ENTERTAINMENT': 169, 'CCV': 169, 'CD BABY': 170, 'CD LAND': 171, 'CECCHI GORI': 172, 'CENTURY MEDIA': 173, 'CHUAN XUN SHI DAI MULTIMEDIA': 174, 'CINE-ASIA': 175, 'CINÉART': 176, 'CINEART': 176, 'CINEDIGM': 177, 'CINEFIL IMAGICA': 178, 'CINEMA EPOCH': 179, 'CINEMA GUILD': 180, 'CINEMA LIBRE STUDIOS': 181, 'CINEMA MONDO': 182, 'CINEMATIC VISION': 183, 'CINEPLOIT RECORDS': 184, 'CINESTRANGE EXTREME': 185, 'CITEL VIDEO': 186, 'CITEL': 186, 'CJ ENTERTAINMENT': 187, 'CJ': 187, 'CLASSIC MEDIA': 188, 'CLASSICFLIX': 189, 'CLASSICLINE': 190, 'CLAUDIO RECORDS': 191, 'CLEAR VISION': 192, 'CLEOPATRA': 193, 'CLOSE UP': 194, 'CMS MEDIA LIMITED': 195, 'CMV LASERVISION': 196, 'CN ENTERTAINMENT': 197, 'CODE RED': 198, 'COHEN MEDIA GROUP': 199, 'COHEN': 199, 'COIN DE MIRE CINÉMA': 200, 'COIN DE MIRE CINEMA': 200, 'COLOSSEO FILM': 201, 'COLUMBIA': 203, 'COLUMBIA PICTURES': 203, 'COLUMBIA/TRI-STAR': 204, 'TRI-STAR': 204, 'COMMERCIAL MARKETING': 205, 'CONCORD MUSIC GROUP': 206, 'CONCORDE VIDEO': 207, 'CONDOR': 208, 'CONSTANTIN FILM': 209, 'CONSTANTIN': 209, 'CONSTANTINO FILMES': 210, 'CONSTANTINO': 210, 'CONSTRUCTIVE MEDIA SERVICE': 211, 'CONSTRUCTIVE': 211, 'CONTENT ZONE': 212, 'CONTENTS GATE': 213, 'COQUEIRO VERDE': 214, 'CORNERSTONE MEDIA': 215, 'CORNERSTONE': 215, 'CP DIGITAL': 216, 'CREST MOVIES': 217, 'CRITERION': 218, 'CRITERION COLLECTION': - 218, 'CC': 218, 'CRYSTAL CLASSICS': 219, 'CULT EPICS': 220, 'CULT FILMS': 221, 'CULT VIDEO': 222, 'CURZON FILM WORLD': 223, 'D FILMS': 224, "D'AILLY COMPANY": 225, 'DAILLY COMPANY': 225, 'D AILLY COMPANY': 225, "D'AILLY": 225, 'DAILLY': 225, 'D AILLY': 225, 'DA CAPO': 226, 'DA MUSIC': 227, "DALL'ANGELO PICTURES": 228, 'DALLANGELO PICTURES': 228, "DALL'ANGELO": 228, 'DALL ANGELO PICTURES': 228, 'DALL ANGELO': 228, 'DAREDO': 229, 'DARK FORCE ENTERTAINMENT': 230, 'DARK FORCE': 230, 'DARK SIDE RELEASING': 231, 'DARK SIDE': 231, 'DAZZLER MEDIA': 232, 'DAZZLER': 232, 'DCM PICTURES': 233, 'DCM': 233, 'DEAPLANETA': 234, 'DECCA': 235, 'DEEPJOY': 236, 'DEFIANT SCREEN ENTERTAINMENT': 237, 'DEFIANT SCREEN': 237, 'DEFIANT': 237, 'DELOS': 238, 'DELPHIAN RECORDS': 239, 'DELPHIAN': 239, 'DELTA MUSIC & ENTERTAINMENT': 240, 'DELTA MUSIC AND ENTERTAINMENT': 240, 'DELTA MUSIC ENTERTAINMENT': 240, 'DELTA MUSIC': 240, 'DELTAMAC CO. 
LTD.': 241, 'DELTAMAC CO LTD': 241, 'DELTAMAC CO': 241, 'DELTAMAC': 241, 'DEMAND MEDIA': 242, 'DEMAND': 242, 'DEP': 243, 'DEUTSCHE GRAMMOPHON': 244, 'DFW': 245, 'DGM': 246, 'DIAPHANA': 247, 'DIGIDREAMS STUDIOS': 248, 'DIGIDREAMS': 248, 'DIGITAL ENVIRONMENTS': 249, 'DIGITAL': 249, 'DISCOTEK MEDIA': 250, 'DISCOVERY CHANNEL': 251, 'DISCOVERY': 251, 'DISK KINO': 252, 'DISNEY / BUENA VISTA': 253, 'DISNEY': 253, 'BUENA VISTA': 253, 'DISNEY BUENA VISTA': 253, 'DISTRIBUTION SELECT': 254, 'DIVISA': 255, 'DNC ENTERTAINMENT': 256, 'DNC': 256, 'DOGWOOF': 257, 'DOLMEN HOME VIDEO': 258, 'DOLMEN': 258, 'DONAU FILM': 259, 'DONAU': 259, 'DORADO FILMS': 260, 'DORADO': 260, 'DRAFTHOUSE FILMS': 261, 'DRAFTHOUSE': 261, 'DRAGON FILM ENTERTAINMENT': 262, 'DRAGON ENTERTAINMENT': 262, 'DRAGON FILM': 262, 'DRAGON': 262, 'DREAMWORKS': 263, 'DRIVE ON RECORDS': 264, 'DRIVE ON': 264, 'DRIVE-ON': 264, 'DRIVEON': 264, 'DS MEDIA': 265, 'DTP ENTERTAINMENT AG': 266, 'DTP ENTERTAINMENT': 266, 'DTP AG': 266, 'DTP': 266, 'DTS ENTERTAINMENT': 267, 'DTS': 267, 'DUKE MARKETING': 268, 'DUKE VIDEO DISTRIBUTION': 269, 'DUKE': 269, 'DUTCH FILMWORKS': 270, 'DUTCH': 270, 'DVD INTERNATIONAL': 271, 'DVD': 271, 'DYBEX': 272, 'DYNAMIC': 273, 'DYNIT': 274, 'E1 ENTERTAINMENT': 275, 'E1': 275, 'EAGLE ENTERTAINMENT': 276, 'EAGLE HOME ENTERTAINMENT PVT.LTD.': - 277, 'EAGLE HOME ENTERTAINMENT PVTLTD': 277, 'EAGLE HOME ENTERTAINMENT PVT LTD': 277, 'EAGLE HOME ENTERTAINMENT': 277, 'EAGLE PICTURES': 278, 'EAGLE ROCK ENTERTAINMENT': 279, 'EAGLE ROCK': 279, 'EAGLE VISION MEDIA': 280, 'EAGLE VISION': 280, 'EARMUSIC': 281, 'EARTH ENTERTAINMENT': 282, 'EARTH': 282, 'ECHO BRIDGE ENTERTAINMENT': 283, 'ECHO BRIDGE': 283, 'EDEL GERMANY GMBH': 284, 'EDEL GERMANY': 284, 'EDEL RECORDS': 285, 'EDITION TONFILM': 286, 'EDITIONS MONTPARNASSE': 287, 'EDKO FILMS LTD.': 288, 'EDKO FILMS LTD': 288, 'EDKO FILMS': 288, 'EDKO': 288, "EIN'S M&M CO": 289, 'EINS M&M CO': 289, "EIN'S M&M": 289, 'EINS M&M': 289, 'ELEA-MEDIA': 290, 'ELEA MEDIA': 290, 'ELEA': 290, 'ELECTRIC PICTURE': 291, 'ELECTRIC': 291, 'ELEPHANT FILMS': 292, 'ELEPHANT': 292, 'ELEVATION': 293, 'EMI': 294, 'EMON': 295, 'EMS': 296, 'EMYLIA': 297, 'ENE MEDIA': 298, 'ENE': 298, 'ENTERTAINMENT IN VIDEO': 299, 'ENTERTAINMENT IN': 299, 'ENTERTAINMENT ONE': 300, 'ENTERTAINMENT ONE FILMS CANADA INC.': 301, 'ENTERTAINMENT ONE FILMS CANADA INC': 301, 'ENTERTAINMENT ONE FILMS CANADA': 301, 'ENTERTAINMENT ONE CANADA INC': 301, - 'ENTERTAINMENT ONE CANADA': 301, 'ENTERTAINMENTONE': 302, 'EONE': 303, 'EOS': 304, 'EPIC PICTURES': 305, 'EPIC': 305, 'EPIC RECORDS': 306, 'ERATO': 307, 'EROS': 308, 'ESC EDITIONS': 309, 'ESCAPI MEDIA BV': 310, 'ESOTERIC RECORDINGS': 311, 'ESPN FILMS': 312, 'EUREKA ENTERTAINMENT': 313, 'EUREKA': 313, 'EURO PICTURES': 314, 'EURO VIDEO': 315, 'EUROARTS': 316, 'EUROPA FILMES': 317, 'EUROPA': 317, 'EUROPACORP': 318, 'EUROZOOM': 319, 'EXCEL': 320, 'EXPLOSIVE MEDIA': 321, 'EXPLOSIVE': 321, 'EXTRALUCID FILMS': 322, 'EXTRALUCID': 322, 'EYE SEE MOVIES': 323, 'EYE SEE': 323, 'EYK MEDIA': 324, 'EYK': 324, 'FABULOUS FILMS': 325, 'FABULOUS': 325, 'FACTORIS FILMS': 326, 'FACTORIS': 326, 'FARAO RECORDS': 327, 'FARBFILM HOME ENTERTAINMENT': 328, 'FARBFILM ENTERTAINMENT': 328, 'FARBFILM HOME': 328, 'FARBFILM': 328, 'FEELGOOD ENTERTAINMENT': 329, 'FEELGOOD': 329, 'FERNSEHJUWELEN': 330, 'FILM CHEST': 331, 'FILM MEDIA': 332, 'FILM MOVEMENT': 333, 'FILM4': 334, 'FILMART': 335, 'FILMAURO': 336, 'FILMAX': 337, 'FILMCONFECT HOME ENTERTAINMENT': 338, 'FILMCONFECT ENTERTAINMENT': 338, 'FILMCONFECT HOME': 338, 
'FILMCONFECT': 338, 'FILMEDIA': 339, 'FILMJUWELEN': 340, 'FILMOTEKA NARODAWA': 341, 'FILMRISE': 342, 'FINAL CUT ENTERTAINMENT': 343, 'FINAL CUT': 343, 'FIREHOUSE 12 RECORDS': 344, 'FIREHOUSE 12': 344, 'FIRST INTERNATIONAL PRODUCTION': 345, 'FIRST INTERNATIONAL': 345, 'FIRST LOOK STUDIOS': 346, 'FIRST LOOK': 346, 'FLAGMAN TRADE': 347, 'FLASHSTAR FILMES': 348, 'FLASHSTAR': 348, 'FLICKER ALLEY': 349, 'FNC ADD CULTURE': 350, 'FOCUS FILMES': 351, 'FOCUS': 351, 'FOKUS MEDIA': 352, 'FOKUSA': 352, 'FOX PATHE EUROPA': 353, 'FOX PATHE': 353, 'FOX EUROPA': 353, 'FOX/MGM': 354, 'FOX MGM': 354, 'MGM': 354, 'MGM/FOX': 354, 'FOX': 354, 'FPE': 355, 'FRANCE TÉLÉVISIONS DISTRIBUTION': 356, 'FRANCE TELEVISIONS DISTRIBUTION': 356, 'FRANCE TELEVISIONS': 356, 'FRANCE': 356, 'FREE DOLPHIN ENTERTAINMENT': 357, 'FREE DOLPHIN': 357, 'FREESTYLE DIGITAL MEDIA': 358, 'FREESTYLE DIGITAL': 358, 'FREESTYLE': 358, 'FREMANTLE HOME ENTERTAINMENT': 359, 'FREMANTLE ENTERTAINMENT': 359, 'FREMANTLE HOME': 359, 'FREMANTL': 359, 'FRENETIC FILMS': 360, 'FRENETIC': 360, 'FRONTIER WORKS': 361, 'FRONTIER': 361, 'FRONTIERS MUSIC': 362, 'FRONTIERS RECORDS': 363, 'FS FILM OY': 364, 'FS FILM': - 364, 'FULL MOON FEATURES': 365, 'FULL MOON': 365, 'FUN CITY EDITIONS': 366, 'FUN CITY': 366, 'FUNIMATION ENTERTAINMENT': 367, 'FUNIMATION': 367, 'FUSION': 368, 'FUTUREFILM': 369, 'G2 PICTURES': 370, 'G2': 370, 'GAGA COMMUNICATIONS': 371, 'GAGA': 371, 'GAIAM': 372, 'GALAPAGOS': 373, 'GAMMA HOME ENTERTAINMENT': 374, 'GAMMA ENTERTAINMENT': 374, 'GAMMA HOME': 374, 'GAMMA': 374, 'GARAGEHOUSE PICTURES': 375, 'GARAGEHOUSE': 375, 'GARAGEPLAY (車庫娛樂)': 376, '車庫娛樂': 376, 'GARAGEPLAY (Che Ku Yu Le )': 376, 'GARAGEPLAY': 376, 'Che Ku Yu Le': 376, 'GAUMONT': 377, 'GEFFEN': 378, 'GENEON ENTERTAINMENT': 379, 'GENEON': 379, 'GENEON UNIVERSAL ENTERTAINMENT': 380, 'GENERAL VIDEO RECORDING': 381, 'GLASS DOLL FILMS': 382, 'GLASS DOLL': 382, 'GLOBE MUSIC MEDIA': 383, 'GLOBE MUSIC': 383, 'GLOBE MEDIA': 383, 'GLOBE': 383, 'GO ENTERTAIN': 384, 'GO': 384, 'GOLDEN HARVEST': 385, 'GOOD!MOVIES': 386, - 'GOOD! 
MOVIES': 386, 'GOOD MOVIES': 386, 'GRAPEVINE VIDEO': 387, 'GRAPEVINE': 387, 'GRASSHOPPER FILM': 388, 'GRASSHOPPER FILMS': 388, 'GRASSHOPPER': 388, 'GRAVITAS VENTURES': 389, 'GRAVITAS': 389, 'GREAT MOVIES': 390, 'GREAT': 390, - 'GREEN APPLE ENTERTAINMENT': 391, 'GREEN ENTERTAINMENT': 391, 'GREEN APPLE': 391, 'GREEN': 391, 'GREENNARAE MEDIA': 392, 'GREENNARAE': 392, 'GRINDHOUSE RELEASING': 393, 'GRINDHOUSE': 393, 'GRIND HOUSE': 393, 'GRYPHON ENTERTAINMENT': 394, 'GRYPHON': 394, 'GUNPOWDER & SKY': 395, 'GUNPOWDER AND SKY': 395, 'GUNPOWDER SKY': 395, 'GUNPOWDER + SKY': 395, 'GUNPOWDER': 395, 'HANABEE ENTERTAINMENT': 396, 'HANABEE': 396, 'HANNOVER HOUSE': 397, 'HANNOVER': 397, 'HANSESOUND': 398, 'HANSE SOUND': 398, 'HANSE': 398, 'HAPPINET': 399, 'HARMONIA MUNDI': 400, 'HARMONIA': 400, 'HBO': 401, 'HDC': 402, 'HEC': 403, 'HELL & BACK RECORDINGS': 404, 'HELL AND BACK RECORDINGS': 404, 'HELL & BACK': 404, 'HELL AND BACK': 404, "HEN'S TOOTH VIDEO": 405, 'HENS TOOTH VIDEO': 405, "HEN'S TOOTH": 405, 'HENS TOOTH': 405, 'HIGH FLIERS': 406, 'HIGHLIGHT': 407, 'HILLSONG': 408, 'HISTORY CHANNEL': 409, 'HISTORY': 409, 'HK VIDÉO': 410, 'HK VIDEO': 410, 'HK': 410, 'HMH HAMBURGER MEDIEN HAUS': 411, 'HAMBURGER MEDIEN HAUS': 411, 'HMH HAMBURGER MEDIEN': 411, 'HMH HAMBURGER': 411, 'HMH': 411, 'HOLLYWOOD CLASSIC ENTERTAINMENT': 412, 'HOLLYWOOD CLASSIC': 412, 'HOLLYWOOD PICTURES': 413, 'HOLLYWOOD': 413, 'HOPSCOTCH ENTERTAINMENT': 414, 'HOPSCOTCH': 414, 'HPM': 415, 'HÄNNSLER CLASSIC': 416, 'HANNSLER CLASSIC': 416, 'HANNSLER': 416, 'I-CATCHER': 417, 'I CATCHER': 417, 'ICATCHER': 417, 'I-ON NEW MEDIA': 418, 'I ON NEW MEDIA': 418, 'ION NEW MEDIA': 418, 'ION MEDIA': 418, 'I-ON': 418, 'ION': 418, 'IAN PRODUCTIONS': 419, 'IAN': 419, 'ICESTORM': 420, 'ICON FILM DISTRIBUTION': 421, 'ICON DISTRIBUTION': 421, 'ICON FILM': 421, 'ICON': 421, 'IDEALE AUDIENCE': 422, 'IDEALE': 422, 'IFC FILMS': 423, 'IFC': 423, 'IFILM': 424, 'ILLUSIONS UNLTD.': 425, 'ILLUSIONS UNLTD': 425, 'ILLUSIONS': 425, 'IMAGE ENTERTAINMENT': 426, 'IMAGE': 426, - 'IMAGEM FILMES': 427, 'IMAGEM': 427, 'IMOVISION': 428, 'IMPERIAL CINEPIX': 429, 'IMPRINT': 430, 'IMPULS HOME ENTERTAINMENT': 431, 'IMPULS ENTERTAINMENT': 431, 'IMPULS HOME': 431, 'IMPULS': 431, 'IN-AKUSTIK': 432, 'IN AKUSTIK': 432, 'INAKUSTIK': 432, 'INCEPTION MEDIA GROUP': 433, 'INCEPTION MEDIA': 433, 'INCEPTION GROUP': 433, 'INCEPTION': 433, 'INDEPENDENT': 434, 'INDICAN': 435, 'INDIE RIGHTS': 436, 'INDIE': 436, 'INDIGO': 437, 'INFO': 438, 'INJOINGAN': 439, 'INKED PICTURES': 440, 'INKED': 440, 'INSIDE OUT MUSIC': 441, 'INSIDE MUSIC': 441, 'INSIDE OUT': 441, 'INSIDE': 441, 'INTERCOM': 442, 'INTERCONTINENTAL VIDEO': 443, 'INTERCONTINENTAL': 443, 'INTERGROOVE': 444, - 'INTERSCOPE': 445, 'INVINCIBLE PICTURES': 446, 'INVINCIBLE': 446, 'ISLAND/MERCURY': 447, 'ISLAND MERCURY': 447, 'ISLANDMERCURY': 447, 'ISLAND & MERCURY': 447, 'ISLAND AND MERCURY': 447, 'ISLAND': 447, 'ITN': 448, 'ITV DVD': 449, 'ITV': 449, 'IVC': 450, 'IVE ENTERTAINMENT': 451, 'IVE': 451, 'J&R ADVENTURES': 452, 'J&R': 452, 'JR': 452, 'JAKOB': 453, 'JONU MEDIA': 454, 'JONU': 454, 'JRB PRODUCTIONS': 455, 'JRB': 455, 'JUST BRIDGE ENTERTAINMENT': 456, 'JUST BRIDGE': 456, 'JUST ENTERTAINMENT': 456, 'JUST': 456, 'KABOOM ENTERTAINMENT': 457, 'KABOOM': 457, 'KADOKAWA ENTERTAINMENT': 458, 'KADOKAWA': 458, 'KAIROS': 459, 'KALEIDOSCOPE ENTERTAINMENT': 460, 'KALEIDOSCOPE': 460, 'KAM & RONSON ENTERPRISES': 461, 'KAM & RONSON': 461, 'KAM&RONSON ENTERPRISES': 461, 'KAM&RONSON': 461, 'KAM AND RONSON ENTERPRISES': 461, 'KAM AND RONSON': 461, 'KANA HOME 
VIDEO': 462, 'KARMA FILMS': 463, 'KARMA': 463, 'KATZENBERGER': 464, 'KAZE': 465, 'KBS MEDIA': 466, 'KBS': 466, 'KD MEDIA': 467, 'KD': 467, 'KING MEDIA': 468, 'KING': 468, 'KING RECORDS': 469, 'KINO LORBER': 470, 'KINO': 470, 'KINO SWIAT': 471, 'KINOKUNIYA': 472, 'KINOWELT HOME ENTERTAINMENT/DVD': 473, 'KINOWELT HOME ENTERTAINMENT': 473, 'KINOWELT ENTERTAINMENT': 473, 'KINOWELT HOME DVD': 473, 'KINOWELT ENTERTAINMENT/DVD': 473, 'KINOWELT DVD': 473, 'KINOWELT': 473, 'KIT PARKER FILMS': 474, 'KIT PARKER': 474, 'KITTY MEDIA': 475, 'KNM HOME ENTERTAINMENT': 476, 'KNM ENTERTAINMENT': 476, 'KNM HOME': 476, 'KNM': 476, 'KOBA FILMS': 477, 'KOBA': 477, 'KOCH ENTERTAINMENT': 478, 'KOCH MEDIA': 479, 'KOCH': 479, 'KRAKEN RELEASING': 480, 'KRAKEN': 480, 'KSCOPE': 481, 'KSM': 482, 'KULTUR': 483, "L'ATELIER D'IMAGES": 484, "LATELIER D'IMAGES": 484, "L'ATELIER DIMAGES": 484, 'LATELIER DIMAGES': 484, "L ATELIER D'IMAGES": 484, "L'ATELIER D IMAGES": 484, - 'L ATELIER D IMAGES': 484, "L'ATELIER": 484, 'L ATELIER': 484, 'LATELIER': 484, 'LA AVENTURA AUDIOVISUAL': 485, 'LA AVENTURA': 485, 'LACE GROUP': 486, 'LACE': 486, 'LASER PARADISE': 487, 'LAYONS': 488, 'LCJ EDITIONS': 489, 'LCJ': 489, 'LE CHAT QUI FUME': 490, 'LE PACTE': 491, 'LEDICK FILMHANDEL': 492, 'LEGEND': 493, 'LEOMARK STUDIOS': 494, 'LEOMARK': 494, 'LEONINE FILMS': 495, 'LEONINE': 495, 'LICHTUNG MEDIA LTD': 496, 'LICHTUNG LTD': 496, 'LICHTUNG MEDIA LTD.': 496, 'LICHTUNG LTD.': 496, 'LICHTUNG MEDIA': 496, 'LICHTUNG': 496, 'LIGHTHOUSE HOME ENTERTAINMENT': 497, 'LIGHTHOUSE ENTERTAINMENT': 497, 'LIGHTHOUSE HOME': 497, 'LIGHTHOUSE': 497, 'LIGHTYEAR': 498, 'LIONSGATE FILMS': 499, 'LIONSGATE': 499, 'LIZARD CINEMA TRADE': 500, 'LLAMENTOL': 501, 'LOBSTER FILMS': 502, 'LOBSTER': 502, 'LOGON': 503, 'LORBER FILMS': 504, 'LORBER': 504, 'LOS BANDITOS FILMS': 505, 'LOS BANDITOS': 505, 'LOUD & PROUD RECORDS': 506, 'LOUD AND PROUD RECORDS': 506, 'LOUD & PROUD': 506, 'LOUD AND PROUD': 506, 'LSO LIVE': 507, 'LUCASFILM': 508, 'LUCKY RED': 509, 'LUMIÈRE HOME ENTERTAINMENT': 510, 'LUMIERE HOME ENTERTAINMENT': 510, 'LUMIERE ENTERTAINMENT': 510, 'LUMIERE HOME': 510, 'LUMIERE': 510, 'M6 VIDEO': 511, 'M6': 511, 'MAD DIMENSION': 512, 'MADMAN ENTERTAINMENT': 513, 'MADMAN': 513, 'MAGIC BOX': 514, 'MAGIC PLAY': 515, 'MAGNA HOME ENTERTAINMENT': 516, 'MAGNA ENTERTAINMENT': 516, 'MAGNA HOME': 516, 'MAGNA': 516, 'MAGNOLIA PICTURES': 517, 'MAGNOLIA': 517, 'MAIDEN JAPAN': 518, 'MAIDEN': 518, 'MAJENG MEDIA': 519, 'MAJENG': 519, 'MAJESTIC HOME ENTERTAINMENT': 520, 'MAJESTIC ENTERTAINMENT': 520, 'MAJESTIC HOME': 520, 'MAJESTIC': 520, 'MANGA HOME ENTERTAINMENT': 521, 'MANGA ENTERTAINMENT': 521, 'MANGA HOME': 521, 'MANGA': 521, 'MANTA LAB': 522, 'MAPLE STUDIOS': 523, 'MAPLE': 523, 'MARCO POLO PRODUCTION': - 524, 'MARCO POLO': 524, 'MARIINSKY': 525, 'MARVEL STUDIOS': 526, 'MARVEL': 526, 'MASCOT RECORDS': 527, 'MASCOT': 527, 'MASSACRE VIDEO': 528, 'MASSACRE': 528, 'MATCHBOX': 529, 'MATRIX D': 530, 'MAXAM': 531, 'MAYA HOME ENTERTAINMENT': 532, 'MAYA ENTERTAINMENT': 532, 'MAYA HOME': 532, 'MAYAT': 532, 'MDG': 533, 'MEDIA BLASTERS': 534, 'MEDIA FACTORY': 535, 'MEDIA TARGET DISTRIBUTION': 536, 'MEDIA TARGET': 536, 'MEDIAINVISION': 537, 'MEDIATOON': 538, 'MEDIATRES ESTUDIO': 539, 'MEDIATRES STUDIO': 539, 'MEDIATRES': 539, 'MEDICI ARTS': 540, 'MEDICI CLASSICS': 541, 'MEDIUMRARE ENTERTAINMENT': 542, 'MEDIUMRARE': 542, 'MEDUSA': 543, 'MEGASTAR': 544, 'MEI AH': 545, 'MELI MÉDIAS': 546, 'MELI MEDIAS': 546, 'MEMENTO FILMS': 547, 'MEMENTO': 547, 'MENEMSHA FILMS': 548, 'MENEMSHA': 548, 'MERCURY': 549, 
'MERCURY STUDIOS': 550, 'MERGE SOFT PRODUCTIONS': 551, 'MERGE PRODUCTIONS': 551, 'MERGE SOFT': 551, 'MERGE': 551, 'METAL BLADE RECORDS': 552, 'METAL BLADE': 552, 'METEOR': 553, 'METRO-GOLDWYN-MAYER': 554, 'METRO GOLDWYN MAYER': 554, 'METROGOLDWYNMAYER': 554, 'METRODOME VIDEO': 555, 'METRODOME': 555, 'METROPOLITAN': 556, 'MFA+': - 557, 'MFA': 557, 'MIG FILMGROUP': 558, 'MIG': 558, 'MILESTONE': 559, 'MILL CREEK ENTERTAINMENT': 560, 'MILL CREEK': 560, 'MILLENNIUM MEDIA': 561, 'MILLENNIUM': 561, 'MIRAGE ENTERTAINMENT': 562, 'MIRAGE': 562, 'MIRAMAX': 563, - 'MISTERIYA ZVUKA': 564, 'MK2': 565, 'MODE RECORDS': 566, 'MODE': 566, 'MOMENTUM PICTURES': 567, 'MONDO HOME ENTERTAINMENT': 568, 'MONDO ENTERTAINMENT': 568, 'MONDO HOME': 568, 'MONDO MACABRO': 569, 'MONGREL MEDIA': 570, 'MONOLIT': 571, 'MONOLITH VIDEO': 572, 'MONOLITH': 572, 'MONSTER PICTURES': 573, 'MONSTER': 573, 'MONTEREY VIDEO': 574, 'MONTEREY': 574, 'MONUMENT RELEASING': 575, 'MONUMENT': 575, 'MORNINGSTAR': 576, 'MORNING STAR': 576, 'MOSERBAER': 577, 'MOVIEMAX': 578, 'MOVINSIDE': 579, 'MPI MEDIA GROUP': 580, 'MPI MEDIA': 580, 'MPI': 580, 'MR. BONGO FILMS': 581, 'MR BONGO FILMS': 581, 'MR BONGO': 581, 'MRG (MERIDIAN)': 582, 'MRG MERIDIAN': 582, 'MRG': 582, 'MERIDIAN': 582, 'MUBI': 583, 'MUG SHOT PRODUCTIONS': 584, 'MUG SHOT': 584, 'MULTIMUSIC': 585, 'MULTI-MUSIC': 585, 'MULTI MUSIC': 585, 'MUSE': 586, 'MUSIC BOX FILMS': 587, 'MUSIC BOX': 587, 'MUSICBOX': 587, 'MUSIC BROKERS': 588, 'MUSIC THEORIES': 589, 'MUSIC VIDEO DISTRIBUTORS': 590, 'MUSIC VIDEO': 590, 'MUSTANG ENTERTAINMENT': 591, 'MUSTANG': 591, 'MVD VISUAL': 592, 'MVD': 592, 'MVD/VSC': 593, 'MVL': 594, 'MVM ENTERTAINMENT': 595, 'MVM': 595, 'MYNDFORM': 596, 'MYSTIC NIGHT PICTURES': 597, 'MYSTIC NIGHT': 597, 'NAMELESS MEDIA': 598, 'NAMELESS': 598, 'NAPALM RECORDS': 599, 'NAPALM': 599, 'NATIONAL ENTERTAINMENT MEDIA': 600, 'NATIONAL ENTERTAINMENT': 600, 'NATIONAL MEDIA': 600, 'NATIONAL FILM ARCHIVE': 601, 'NATIONAL ARCHIVE': 601, 'NATIONAL FILM': 601, 'NATIONAL GEOGRAPHIC': 602, 'NAT GEO TV': 602, 'NAT GEO': 602, 'NGO': 602, 'NAXOS': 603, 'NBCUNIVERSAL ENTERTAINMENT JAPAN': 604, 'NBC UNIVERSAL ENTERTAINMENT JAPAN': 604, 'NBCUNIVERSAL JAPAN': 604, 'NBC UNIVERSAL JAPAN': 604, 'NBC JAPAN': 604, 'NBO ENTERTAINMENT': 605, 'NBO': 605, 'NEOS': 606, 'NETFLIX': 607, 'NETWORK': 608, 'NEW BLOOD': 609, 'NEW DISC': 610, 'NEW KSM': 611, 'NEW LINE CINEMA': 612, 'NEW LINE': 612, 'NEW MOVIE TRADING CO. 
LTD': 613, 'NEW MOVIE TRADING CO LTD': 613, 'NEW MOVIE TRADING CO': 613, 'NEW MOVIE TRADING': 613, 'NEW WAVE FILMS': 614, 'NEW WAVE': 614, 'NFI': 615, - 'NHK': 616, 'NIPPONART': 617, 'NIS AMERICA': 618, 'NJUTAFILMS': 619, 'NOBLE ENTERTAINMENT': 620, 'NOBLE': 620, 'NORDISK FILM': 621, 'NORDISK': 621, 'NORSK FILM': 622, 'NORSK': 622, 'NORTH AMERICAN MOTION PICTURES': 623, 'NOS AUDIOVISUAIS': 624, 'NOTORIOUS PICTURES': 625, 'NOTORIOUS': 625, 'NOVA MEDIA': 626, 'NOVA': 626, 'NOVA SALES AND DISTRIBUTION': 627, 'NOVA SALES & DISTRIBUTION': 627, 'NSM': 628, 'NSM RECORDS': 629, 'NUCLEAR BLAST': 630, 'NUCLEUS FILMS': 631, 'NUCLEUS': 631, 'OBERLIN MUSIC': 632, 'OBERLIN': 632, 'OBRAS-PRIMAS DO CINEMA': 633, 'OBRAS PRIMAS DO CINEMA': 633, 'OBRASPRIMAS DO CINEMA': 633, 'OBRAS-PRIMAS CINEMA': 633, 'OBRAS PRIMAS CINEMA': 633, 'OBRASPRIMAS CINEMA': 633, 'OBRAS-PRIMAS': 633, 'OBRAS PRIMAS': 633, 'OBRASPRIMAS': 633, 'ODEON': 634, 'OFDB FILMWORKS': 635, 'OFDB': 635, 'OLIVE FILMS': 636, 'OLIVE': 636, 'ONDINE': 637, 'ONSCREEN FILMS': 638, 'ONSCREEN': 638, 'OPENING DISTRIBUTION': 639, 'OPERA AUSTRALIA': 640, 'OPTIMUM HOME ENTERTAINMENT': 641, 'OPTIMUM ENTERTAINMENT': 641, 'OPTIMUM HOME': 641, 'OPTIMUM': 641, 'OPUS ARTE': 642, 'ORANGE STUDIO': 643, 'ORANGE': 643, 'ORLANDO EASTWOOD FILMS': 644, 'ORLANDO FILMS': 644, 'ORLANDO EASTWOOD': 644, 'ORLANDO': 644, 'ORUSTAK PICTURES': 645, 'ORUSTAK': 645, 'OSCILLOSCOPE PICTURES': 646, 'OSCILLOSCOPE': 646, 'OUTPLAY': 647, 'PALISADES TARTAN': 648, 'PAN VISION': 649, 'PANVISION': 649, 'PANAMINT CINEMA': 650, 'PANAMINT': 650, 'PANDASTORM ENTERTAINMENT': 651, 'PANDA STORM ENTERTAINMENT': 651, 'PANDASTORM': 651, 'PANDA STORM': 651, 'PANDORA FILM': 652, 'PANDORA': 652, 'PANEGYRIC': 653, 'PANORAMA': 654, 'PARADE DECK FILMS': 655, 'PARADE DECK': 655, 'PARADISE': 656, 'PARADISO FILMS': 657, 'PARADOX': 658, 'PARAMOUNT PICTURES': 659, 'PARAMOUNT': 659, 'PARIS FILMES': 660, 'PARIS FILMS': 660, 'PARIS': 660, 'PARK CIRCUS': 661, 'PARLOPHONE': 662, 'PASSION RIVER': 663, 'PATHE DISTRIBUTION': 664, 'PATHE': 664, 'PBS': 665, 'PEACE ARCH TRINITY': 666, 'PECCADILLO PICTURES': 667, 'PEPPERMINT': 668, 'PHASE 4 FILMS': 669, 'PHASE 4': 669, 'PHILHARMONIA BAROQUE': 670, 'PICTURE HOUSE ENTERTAINMENT': 671, 'PICTURE ENTERTAINMENT': 671, 'PICTURE HOUSE': 671, 'PICTURE': 671, 'PIDAX': 672, 'PINK FLOYD RECORDS': 673, 'PINK FLOYD': 673, 'PINNACLE FILMS': 674, 'PINNACLE': 674, 'PLAIN': 675, 'PLATFORM ENTERTAINMENT LIMITED': 676, 'PLATFORM ENTERTAINMENT LTD': 676, 'PLATFORM ENTERTAINMENT LTD.': 676, 'PLATFORM ENTERTAINMENT': 676, 'PLATFORM': 676, 'PLAYARTE': 677, 'PLG UK CLASSICS': 678, 'PLG UK': - 678, 'PLG': 678, 'POLYBAND & TOPPIC VIDEO/WVG': 679, 'POLYBAND AND TOPPIC VIDEO/WVG': 679, 'POLYBAND & TOPPIC VIDEO WVG': 679, 'POLYBAND & TOPPIC VIDEO AND WVG': 679, 'POLYBAND & TOPPIC VIDEO & WVG': 679, 'POLYBAND AND TOPPIC VIDEO WVG': 679, 'POLYBAND AND TOPPIC VIDEO AND WVG': 679, 'POLYBAND AND TOPPIC VIDEO & WVG': 679, 'POLYBAND & TOPPIC VIDEO': 679, 'POLYBAND AND TOPPIC VIDEO': 679, 'POLYBAND & TOPPIC': 679, 'POLYBAND AND TOPPIC': 679, 'POLYBAND': 679, 'WVG': 679, 'POLYDOR': 680, 'PONY': 681, 'PONY CANYON': 682, 'POTEMKINE': 683, 'POWERHOUSE FILMS': 684, 'POWERHOUSE': 684, 'POWERSTATIOM': 685, 'PRIDE & JOY': 686, 'PRIDE AND JOY': 686, 'PRINZ MEDIA': 687, 'PRINZ': 687, 'PRIS AUDIOVISUAIS': 688, 'PRO VIDEO': 689, 'PRO-VIDEO': 689, 'PRO-MOTION': 690, 'PRO MOTION': 690, 'PROD. 
JRB': 691, 'PROD JRB': 691, 'PRODISC': 692, 'PROKINO': 693, 'PROVOGUE RECORDS': 694, 'PROVOGUE': 694, 'PROWARE': 695, 'PULP VIDEO': 696, 'PULP': 696, 'PULSE VIDEO': 697, 'PULSE': 697, 'PURE AUDIO RECORDINGS': 698, 'PURE AUDIO': 698, 'PURE FLIX ENTERTAINMENT': 699, 'PURE FLIX': 699, 'PURE ENTERTAINMENT': 699, 'PYRAMIDE VIDEO': 700, 'PYRAMIDE': 700, 'QUALITY FILMS': 701, 'QUALITY': 701, 'QUARTO VALLEY RECORDS': 702, 'QUARTO VALLEY': 702, 'QUESTAR': 703, 'R SQUARED FILMS': 704, 'R SQUARED': 704, 'RAPID EYE MOVIES': 705, 'RAPID EYE': 705, 'RARO VIDEO': 706, 'RARO': 706, 'RAROVIDEO U.S.': 707, 'RAROVIDEO US': 707, 'RARO VIDEO US': 707, 'RARO VIDEO U.S.': 707, 'RARO U.S.': 707, 'RARO US': 707, 'RAVEN BANNER RELEASING': 708, 'RAVEN BANNER': 708, 'RAVEN': 708, 'RAZOR DIGITAL ENTERTAINMENT': 709, 'RAZOR DIGITAL': 709, 'RCA': 710, 'RCO LIVE': 711, 'RCO': 711, 'RCV': 712, 'REAL GONE MUSIC': 713, 'REAL GONE': 713, 'REANIMEDIA': 714, 'REANI MEDIA': 714, 'REDEMPTION': 715, 'REEL': 716, 'RELIANCE HOME VIDEO & GAMES': 717, 'RELIANCE HOME VIDEO AND GAMES': 717, 'RELIANCE HOME VIDEO': 717, 'RELIANCE VIDEO': 717, 'RELIANCE HOME': 717, 'RELIANCE': 717, 'REM CULTURE': 718, 'REMAIN IN LIGHT': 719, 'REPRISE': 720, 'RESEN': 721, 'RETROMEDIA': 722, 'REVELATION FILMS LTD.': 723, 'REVELATION FILMS LTD': 723, 'REVELATION FILMS': 723, 'REVELATION LTD.': 723, 'REVELATION LTD': 723, 'REVELATION': 723, 'REVOLVER ENTERTAINMENT': 724, 'REVOLVER': 724, 'RHINO MUSIC': 725, 'RHINO': 725, 'RHV': 726, 'RIGHT STUF': 727, 'RIMINI EDITIONS': 728, 'RISING SUN MEDIA': 729, 'RLJ ENTERTAINMENT': 730, 'RLJ': 730, 'ROADRUNNER RECORDS': 731, 'ROADSHOW ENTERTAINMENT': 732, 'ROADSHOW': 732, 'RONE': 733, 'RONIN FLIX': 734, 'ROTANA HOME ENTERTAINMENT': 735, 'ROTANA ENTERTAINMENT': 735, 'ROTANA HOME': 735, 'ROTANA': 735, 'ROUGH TRADE': 736, 'ROUNDER': 737, 'SAFFRON HILL FILMS': 738, 'SAFFRON HILL': 738, 'SAFFRON': 738, 'SAMUEL GOLDWYN FILMS': 739, 'SAMUEL GOLDWYN': 739, 'SAN FRANCISCO SYMPHONY': 740, 'SANDREW METRONOME': 741, 'SAPHRANE': 742, 'SAVOR': 743, 'SCANBOX ENTERTAINMENT': 744, 'SCANBOX': 744, 'SCENIC LABS': 745, 'SCHRÖDERMEDIA': 746, 'SCHRODERMEDIA': 746, 'SCHRODER MEDIA': 746, 'SCORPION RELEASING': 747, 'SCORPION': 747, 'SCREAM TEAM RELEASING': 748, 'SCREAM TEAM': 748, 'SCREEN MEDIA': 749, 'SCREEN': 749, 'SCREENBOUND PICTURES': 750, 'SCREENBOUND': 750, 'SCREENWAVE MEDIA': 751, 'SCREENWAVE': 751, 'SECOND RUN': 752, 'SECOND SIGHT': 753, 'SEEDSMAN GROUP': 754, 'SELECT VIDEO': 755, 'SELECTA VISION': 756, 'SENATOR': 757, 'SENTAI FILMWORKS': 758, 'SENTAI': 758, 'SEVEN7': 759, 'SEVERIN FILMS': 760, 'SEVERIN': 760, 'SEVILLE': 761, 'SEYONS ENTERTAINMENT': 762, 'SEYONS': 762, 'SF STUDIOS': 763, 'SGL ENTERTAINMENT': 764, 'SGL': 764, 'SHAMELESS': 765, 'SHAMROCK MEDIA': 766, 'SHAMROCK': 766, 'SHANGHAI EPIC MUSIC ENTERTAINMENT': 767, 'SHANGHAI EPIC ENTERTAINMENT': 767, 'SHANGHAI EPIC MUSIC': 767, 'SHANGHAI MUSIC ENTERTAINMENT': 767, 'SHANGHAI ENTERTAINMENT': 767, 'SHANGHAI MUSIC': 767, 'SHANGHAI': 767, 'SHEMAROO': 768, 'SHOCHIKU': 769, 'SHOCK': 770, 'SHOGAKU KAN': 771, 'SHOUT FACTORY': 772, 'SHOUT! 
FACTORY': 772, 'SHOUT': 772, 'SHOUT!': 772, 'SHOWBOX': 773, 'SHOWTIME ENTERTAINMENT': 774, 'SHOWTIME': 774, 'SHRIEK SHOW': 775, 'SHUDDER': 776, 'SIDONIS': 777, 'SIDONIS CALYSTA': 778, 'SIGNAL ONE ENTERTAINMENT': 779, 'SIGNAL ONE': 779, 'SIGNATURE ENTERTAINMENT': 780, 'SIGNATURE': 780, 'SILVER VISION': 781, 'SINISTER FILM': 782, 'SINISTER': 782, 'SIREN VISUAL ENTERTAINMENT': 783, 'SIREN VISUAL': 783, 'SIREN ENTERTAINMENT': 783, 'SIREN': 783, 'SKANI': 784, 'SKY DIGI': 785, 'SLASHER // VIDEO': 786, 'SLASHER / VIDEO': 786, 'SLASHER VIDEO': 786, 'SLASHER': 786, 'SLOVAK FILM INSTITUTE': 787, 'SLOVAK FILM': 787, - 'SFI': 787, 'SM LIFE DESIGN GROUP': 788, 'SMOOTH PICTURES': 789, 'SMOOTH': 789, 'SNAPPER MUSIC': 790, 'SNAPPER': 790, 'SODA PICTURES': 791, 'SODA': 791, 'SONO LUMINUS': 792, 'SONY MUSIC': 793, 'SONY PICTURES': 794, 'SONY': 794, 'SONY PICTURES CLASSICS': 795, 'SONY CLASSICS': 795, 'SOUL MEDIA': 796, 'SOUL': 796, 'SOULFOOD MUSIC DISTRIBUTION': 797, 'SOULFOOD DISTRIBUTION': 797, 'SOULFOOD MUSIC': 797, 'SOULFOOD': 797, 'SOYUZ': 798, 'SPECTRUM': 799, - 'SPENTZOS FILM': 800, 'SPENTZOS': 800, 'SPIRIT ENTERTAINMENT': 801, 'SPIRIT': 801, 'SPIRIT MEDIA GMBH': 802, 'SPIRIT MEDIA': 802, 'SPLENDID ENTERTAINMENT': 803, 'SPLENDID FILM': 804, 'SPO': 805, 'SQUARE ENIX': 806, 'SRI BALAJI VIDEO': 807, 'SRI BALAJI': 807, 'SRI': 807, 'SRI VIDEO': 807, 'SRS CINEMA': 808, 'SRS': 808, 'SSO RECORDINGS': 809, 'SSO': 809, 'ST2 MUSIC': 810, 'ST2': 810, 'STAR MEDIA ENTERTAINMENT': 811, 'STAR ENTERTAINMENT': 811, 'STAR MEDIA': 811, 'STAR': 811, 'STARLIGHT': 812, 'STARZ / ANCHOR BAY': 813, 'STARZ ANCHOR BAY': 813, 'STARZ': 813, 'ANCHOR BAY': 813, 'STER KINEKOR': 814, 'STERLING ENTERTAINMENT': 815, 'STERLING': 815, 'STINGRAY': 816, 'STOCKFISCH RECORDS': 817, 'STOCKFISCH': 817, 'STRAND RELEASING': 818, 'STRAND': 818, 'STUDIO 4K': 819, 'STUDIO CANAL': 820, 'STUDIO GHIBLI': 821, 'GHIBLI': 821, 'STUDIO HAMBURG ENTERPRISES': 822, 'HAMBURG ENTERPRISES': 822, 'STUDIO HAMBURG': 822, 'HAMBURG': 822, 'STUDIO S': 823, 'SUBKULTUR ENTERTAINMENT': 824, 'SUBKULTUR': 824, 'SUEVIA FILMS': 825, 'SUEVIA': 825, 'SUMMIT ENTERTAINMENT': 826, 'SUMMIT': 826, 'SUNFILM ENTERTAINMENT': 827, 'SUNFILM': 827, 'SURROUND RECORDS': 828, 'SURROUND': 828, 'SVENSK FILMINDUSTRI': 829, 'SVENSK': 829, 'SWEN FILMES': 830, 'SWEN FILMS': 830, 'SWEN': 830, 'SYNAPSE FILMS': 831, 'SYNAPSE': 831, 'SYNDICADO': 832, 'SYNERGETIC': 833, 'T- SERIES': 834, 'T-SERIES': 834, 'T SERIES': 834, 'TSERIES': 834, 'T.V.P.': 835, 'TVP': 835, 'TACET RECORDS': 836, 'TACET': 836, 'TAI SENG': 837, 'TAI SHENG': 838, 'TAKEONE': 839, 'TAKESHOBO': 840, 'TAMASA DIFFUSION': 841, 'TC ENTERTAINMENT': 842, 'TC': 842, 'TDK': 843, 'TEAM MARKETING': 844, 'TEATRO REAL': 845, 'TEMA DISTRIBUCIONES': 846, 'TEMPE DIGITAL': 847, 'TF1 VIDÉO': 848, 'TF1 VIDEO': 848, 'TF1': 848, 'THE BLU': 849, 'BLU': 849, 'THE ECSTASY OF FILMS': 850, 'THE FILM DETECTIVE': 851, 'FILM DETECTIVE': 851, 'THE JOKERS': 852, 'JOKERS': 852, 'THE ON': 853, 'ON': 853, 'THIMFILM': 854, 'THIM FILM': 854, 'THIM': 854, 'THIRD WINDOW FILMS': 855, 'THIRD WINDOW': 855, '3RD WINDOW FILMS': 855, '3RD WINDOW': 855, 'THUNDERBEAN ANIMATION': 856, 'THUNDERBEAN': 856, 'THUNDERBIRD RELEASING': 857, 'THUNDERBIRD': 857, 'TIBERIUS FILM': 858, 'TIME LIFE': 859, 'TIMELESS MEDIA GROUP': 860, 'TIMELESS MEDIA': 860, 'TIMELESS GROUP': 860, 'TIMELESS': 860, 'TLA RELEASING': 861, 'TLA': 861, 'TOBIS FILM': 862, 'TOBIS': 862, 'TOEI': 863, 'TOHO': 864, 'TOKYO SHOCK': 865, 'TOKYO': 865, 'TONPOOL MEDIEN GMBH': 866, 'TONPOOL MEDIEN': 866, 'TOPICS 
ENTERTAINMENT': 867, 'TOPICS': 867, 'TOUCHSTONE PICTURES': 868, 'TOUCHSTONE': 868, 'TRANSMISSION FILMS': 869, 'TRANSMISSION': 869, 'TRAVEL VIDEO STORE': 870, 'TRIART': 871, 'TRIGON FILM': 872, 'TRIGON': 872, 'TRINITY HOME ENTERTAINMENT': 873, 'TRINITY ENTERTAINMENT': 873, 'TRINITY HOME': 873, 'TRINITY': 873, 'TRIPICTURES': 874, 'TRI-PICTURES': 874, 'TRI PICTURES': 874, 'TROMA': 875, 'TURBINE MEDIEN': 876, 'TURTLE RECORDS': 877, 'TURTLE': 877, 'TVA FILMS': 878, 'TVA': 878, 'TWILIGHT TIME': 879, 'TWILIGHT': 879, 'TT': 879, 'TWIN CO., LTD.': 880, 'TWIN CO, LTD.': 880, 'TWIN CO., LTD': 880, 'TWIN CO, LTD': 880, 'TWIN CO LTD': 880, 'TWIN LTD': 880, 'TWIN CO.': 880, 'TWIN CO': 880, 'TWIN': 880, 'UCA': 881, 'UDR': 882, 'UEK': 883, 'UFA/DVD': 884, 'UFA DVD': 884, 'UFADVD': 884, 'UGC PH': 885, 'ULTIMATE3DHEAVEN': 886, 'ULTRA': 887, 'UMBRELLA ENTERTAINMENT': 888, 'UMBRELLA': 888, 'UMC': 889, "UNCORK'D ENTERTAINMENT": 890, 'UNCORKD ENTERTAINMENT': 890, 'UNCORK D ENTERTAINMENT': 890, "UNCORK'D": 890, 'UNCORK D': 890, 'UNCORKD': 890, 'UNEARTHED FILMS': 891, 'UNEARTHED': 891, 'UNI DISC': 892, 'UNIMUNDOS': 893, 'UNITEL': 894, 'UNIVERSAL MUSIC': 895, 'UNIVERSAL SONY PICTURES HOME ENTERTAINMENT': 896, 'UNIVERSAL SONY PICTURES ENTERTAINMENT': 896, 'UNIVERSAL SONY PICTURES HOME': 896, 'UNIVERSAL SONY PICTURES': 896, 'UNIVERSAL HOME ENTERTAINMENT': - 896, 'UNIVERSAL ENTERTAINMENT': 896, 'UNIVERSAL HOME': 896, 'UNIVERSAL STUDIOS': 897, 'UNIVERSAL': 897, 'UNIVERSE LASER & VIDEO CO.': 898, 'UNIVERSE LASER AND VIDEO CO.': 898, 'UNIVERSE LASER & VIDEO CO': 898, 'UNIVERSE LASER AND VIDEO CO': 898, 'UNIVERSE LASER CO.': 898, 'UNIVERSE LASER CO': 898, 'UNIVERSE LASER': 898, 'UNIVERSUM FILM': 899, 'UNIVERSUM': 899, 'UTV': 900, 'VAP': 901, 'VCI': 902, 'VENDETTA FILMS': 903, 'VENDETTA': 903, 'VERSÁTIL HOME VIDEO': 904, 'VERSÁTIL VIDEO': 904, 'VERSÁTIL HOME': 904, 'VERSÁTIL': 904, 'VERSATIL HOME VIDEO': 904, 'VERSATIL VIDEO': 904, 'VERSATIL HOME': 904, 'VERSATIL': 904, 'VERTICAL ENTERTAINMENT': 905, 'VERTICAL': 905, 'VÉRTICE 360º': 906, 'VÉRTICE 360': 906, 'VERTICE 360o': 906, 'VERTICE 360': 906, 'VERTIGO BERLIN': 907, 'VÉRTIGO FILMS': 908, 'VÉRTIGO': 908, 'VERTIGO FILMS': 908, 'VERTIGO': 908, 'VERVE PICTURES': 909, 'VIA VISION ENTERTAINMENT': 910, 'VIA VISION': 910, 'VICOL ENTERTAINMENT': 911, 'VICOL': 911, 'VICOM': 912, 'VICTOR ENTERTAINMENT': 913, 'VICTOR': 913, 'VIDEA CDE': 914, 'VIDEO FILM EXPRESS': 915, 'VIDEO FILM': 915, 'VIDEO EXPRESS': 915, 'VIDEO MUSIC, INC.': 916, 'VIDEO MUSIC, INC': 916, 'VIDEO MUSIC INC.': 916, 'VIDEO MUSIC INC': 916, 'VIDEO MUSIC': 916, 'VIDEO SERVICE CORP.': 917, 'VIDEO SERVICE CORP': 917, 'VIDEO SERVICE': 917, 'VIDEO TRAVEL': 918, 'VIDEOMAX': 919, 'VIDEO MAX': 919, 'VII PILLARS ENTERTAINMENT': 920, 'VII PILLARS': 920, 'VILLAGE FILMS': 921, 'VINEGAR SYNDROME': 922, 'VINEGAR': 922, 'VS': 922, 'VINNY MOVIES': 923, 'VINNY': 923, 'VIRGIL FILMS & ENTERTAINMENT': 924, 'VIRGIL FILMS AND ENTERTAINMENT': 924, 'VIRGIL ENTERTAINMENT': 924, 'VIRGIL FILMS': 924, 'VIRGIL': 924, 'VIRGIN RECORDS': 925, 'VIRGIN': 925, 'VISION FILMS': 926, 'VISION': 926, 'VISUAL ENTERTAINMENT GROUP': 927, 'VISUAL GROUP': 927, 'VISUAL ENTERTAINMENT': 927, 'VISUAL': 927, 'VIVENDI VISUAL ENTERTAINMENT': 928, 'VIVENDI VISUAL': 928, 'VIVENDI': 928, 'VIZ PICTURES': 929, 'VIZ': 929, 'VLMEDIA': 930, 'VL MEDIA': 930, 'VL': 930, 'VOLGA': 931, 'VVS FILMS': 932, - 'VVS': 932, 'VZ HANDELS GMBH': 933, 'VZ HANDELS': 933, 'WARD RECORDS': 934, 'WARD': 934, 'WARNER BROS.': 935, 'WARNER BROS': 935, 'WARNER ARCHIVE': 935, 'WARNER ARCHIVE 
COLLECTION': 935, 'WAC': 935, 'WARNER': 935, 'WARNER MUSIC': 936, 'WEA': 937, 'WEINSTEIN COMPANY': 938, 'WEINSTEIN': 938, 'WELL GO USA': 939, 'WELL GO': 939, 'WELTKINO FILMVERLEIH': 940, 'WEST VIDEO': 941, 'WEST': 941, 'WHITE PEARL MOVIES': 942, 'WHITE PEARL': 942, + '01 DISTRIBUTION': 1, '100 DESTINATIONS TRAVEL FILM': 2, '101 FILMS': 3, '1FILMS': 4, '2 ENTERTAIN VIDEO': 5, '20TH CENTURY FOX': 6, '2L': 7, '3D CONTENT HUB': 8, '3D MEDIA': 9, '3L FILM': 10, '4DIGITAL': 11, '4DVD': 12, '4K ULTRA HD MOVIES': 13, '4K UHD': 13, '8-FILMS': 14, '84 ENTERTAINMENT': 15, '88 FILMS': 16, '@ANIME': 17, 'ANIME': 17, 'A CONTRACORRIENTE': 18, 'A CONTRACORRIENTE FILMS': 19, 'A&E HOME VIDEO': 20, 'A&E': 20, 'A&M RECORDS': 21, 'A+E NETWORKS': 22, 'A+R': 23, 'A-FILM': 24, 'AAA': 25, 'AB VIDÉO': 26, 'AB VIDEO': 26, 'ABC - (AUSTRALIAN BROADCASTING CORPORATION)': 27, 'ABC': 27, 'ABKCO': 28, 'ABSOLUT MEDIEN': 29, 'ABSOLUTE': 30, 'ACCENT FILM ENTERTAINMENT': 31, 'ACCENTUS': 32, 'ACORN MEDIA': 33, 'AD VITAM': 34, 'ADA': 35, 'ADITYA VIDEOS': 36, 'ADSO FILMS': 37, 'AFM RECORDS': 38, 'AGFA': 39, 'AIX RECORDS': 40, 'ALAMODE FILM': 41, 'ALBA RECORDS': 42, 'ALBANY RECORDS': 43, 'ALBATROS': 44, 'ALCHEMY': 45, 'ALIVE': 46, 'ALL ANIME': 47, 'ALL INTERACTIVE ENTERTAINMENT': 48, 'ALLEGRO': 49, 'ALLIANCE': 50, 'ALPHA MUSIC': 51, 'ALTERDYSTRYBUCJA': 52, 'ALTERED INNOCENCE': 53, 'ALTITUDE FILM DISTRIBUTION': 54, 'ALUCARD RECORDS': 55, 'AMAZING D.C.': 56, 'AMAZING DC': 56, 'AMMO CONTENT': 57, 'AMUSE SOFT ENTERTAINMENT': 58, 'ANCONNECT': 59, 'ANEC': 60, 'ANIMATSU': 61, 'ANIME HOUSE': 62, 'ANIME LTD': 63, 'ANIME WORKS': 64, 'ANIMEIGO': 65, 'ANIPLEX': 66, 'ANOLIS ENTERTAINMENT': 67, 'ANOTHER WORLD ENTERTAINMENT': 68, 'AP INTERNATIONAL': 69, 'APPLE': 70, 'ARA MEDIA': 71, 'ARBELOS': 72, 'ARC ENTERTAINMENT': 73, 'ARP SÉLECTION': 74, 'ARP SELECTION': 74, 'ARROW': 75, 'ART SERVICE': 76, 'ART VISION': 77, 'ARTE ÉDITIONS': 78, 'ARTE EDITIONS': 78, 'ARTE VIDÉO': 79, 'ARTE VIDEO': 79, 'ARTHAUS MUSIK': 80, 'ARTIFICIAL EYE': 81, 'ARTSPLOITATION FILMS': 82, 'ARTUS FILMS': 83, 'ASCOT ELITE HOME ENTERTAINMENT': 84, 'ASIA VIDEO': 85, 'ASMIK ACE': 86, 'ASTRO RECORDS & FILMWORKS': 87, 'ASYLUM': 88, 'ATLANTIC FILM': 89, 'ATLANTIC RECORDS': 90, 'ATLAS FILM': 91, 'AUDIO VISUAL ENTERTAINMENT': 92, 'AURO-3D CREATIVE LABEL': 93, 'AURUM': 94, 'AV VISIONEN': 95, 'AV-JET': 96, 'AVALON': 97, 'AVENTI': 98, 'AVEX TRAX': 99, 'AXIOM': 100, 'AXIS RECORDS': 101, 'AYNGARAN': 102, 'BAC FILMS': 103, 'BACH FILMS': 104, 'BANDAI VISUAL': 105, 'BARCLAY': 106, 'BBC': 107, 'BRITISH BROADCASTING CORPORATION': 107, 'BBI FILMS': 108, 'BBI': 108, 'BCI HOME ENTERTAINMENT': 109, 'BEGGARS BANQUET': 110, 'BEL AIR CLASSIQUES': 111, 'BELGA FILMS': 112, 'BELVEDERE': 113, 'BENELUX FILM DISTRIBUTORS': 114, 'BENNETT-WATT MEDIA': 115, 'BERLIN CLASSICS': 116, 'BERLINER PHILHARMONIKER RECORDINGS': 117, 'BEST ENTERTAINMENT': 118, 'BEYOND HOME ENTERTAINMENT': 119, 'BFI VIDEO': 120, 'BFI': 120, 'BRITISH FILM INSTITUTE': 120, 'BFS ENTERTAINMENT': 121, 'BFS': 121, 'BHAVANI': 122, 'BIBER RECORDS': 123, 'BIG HOME VIDEO': 124, 'BILDSTÖRUNG': 125, 'BILDSTORUNG': 125, 'BILL ZEBUB': 126, 'BIRNENBLATT': 127, 'BIT WEL': 128, 'BLACK BOX': 129, 'BLACK HILL PICTURES': 130, 'BLACK HILL': 130, 'BLACK HOLE RECORDINGS': 131, 'BLACK HOLE': 131, 'BLAQOUT': 132, 'BLAUFIELD MUSIC': 133, 'BLAUFIELD': 133, 'BLOCKBUSTER ENTERTAINMENT': 134, 'BLOCKBUSTER': 134, 'BLU PHASE MEDIA': 135, 'BLU-RAY ONLY': 136, 'BLU-RAY': 136, 'BLURAY ONLY': 136, 'BLURAY': 136, 'BLUE GENTIAN RECORDS': 137, 'BLUE KINO': 138, 'BLUE 
UNDERGROUND': 139, 'BMG/ARISTA': 140, 'BMG': 140, 'BMGARISTA': 140, 'BMG ARISTA': 140, 'ARISTA': + 140, 'ARISTA/BMG': 140, 'ARISTABMG': 140, 'ARISTA BMG': 140, 'BONTON FILM': 141, 'BONTON': 141, 'BOOMERANG PICTURES': 142, 'BOOMERANG': 142, 'BQHL ÉDITIONS': 143, 'BQHL EDITIONS': 143, 'BQHL': 143, 'BREAKING GLASS': 144, 'BRIDGESTONE': 145, 'BRINK': 146, 'BROAD GREEN PICTURES': 147, 'BROAD GREEN': 147, 'BUSCH MEDIA GROUP': 148, 'BUSCH': 148, 'C MAJOR': 149, 'C.B.S.': 150, 'CAICHANG': 151, 'CALIFÓRNIA FILMES': 152, 'CALIFORNIA FILMES': 152, 'CALIFORNIA': 152, 'CAMEO': 153, 'CAMERA OBSCURA': 154, 'CAMERATA': 155, 'CAMP MOTION PICTURES': 156, 'CAMP MOTION': 156, 'CAPELIGHT PICTURES': 157, 'CAPELIGHT': 157, 'CAPITOL': 159, 'CAPITOL RECORDS': 159, 'CAPRICCI': 160, 'CARGO RECORDS': 161, 'CARLOTTA FILMS': 162, 'CARLOTTA': 162, 'CARLOTA': 162, 'CARMEN FILM': 163, 'CASCADE': 164, 'CATCHPLAY': 165, 'CAULDRON FILMS': 166, 'CAULDRON': 166, 'CBS TELEVISION STUDIOS': 167, 'CBS': 167, 'CCTV': 168, 'CCV ENTERTAINMENT': 169, 'CCV': 169, 'CD BABY': 170, 'CD LAND': 171, 'CECCHI GORI': 172, 'CENTURY MEDIA': 173, 'CHUAN XUN SHI DAI MULTIMEDIA': 174, 'CINE-ASIA': 175, 'CINÉART': 176, 'CINEART': 176, 'CINEDIGM': 177, 'CINEFIL IMAGICA': 178, 'CINEMA EPOCH': 179, 'CINEMA GUILD': 180, 'CINEMA LIBRE STUDIOS': 181, 'CINEMA MONDO': 182, 'CINEMATIC VISION': 183, 'CINEPLOIT RECORDS': 184, 'CINESTRANGE EXTREME': 185, 'CITEL VIDEO': 186, 'CITEL': 186, 'CJ ENTERTAINMENT': 187, 'CJ': 187, 'CLASSIC MEDIA': 188, 'CLASSICFLIX': 189, 'CLASSICLINE': 190, 'CLAUDIO RECORDS': 191, 'CLEAR VISION': 192, 'CLEOPATRA': 193, 'CLOSE UP': 194, 'CMS MEDIA LIMITED': 195, 'CMV LASERVISION': 196, 'CN ENTERTAINMENT': 197, 'CODE RED': 198, 'COHEN MEDIA GROUP': 199, 'COHEN': 199, 'COIN DE MIRE CINÉMA': 200, 'COIN DE MIRE CINEMA': 200, 'COLOSSEO FILM': 201, 'COLUMBIA': 203, 'COLUMBIA PICTURES': 203, 'COLUMBIA/TRI-STAR': 204, 'TRI-STAR': 204, 'COMMERCIAL MARKETING': 205, 'CONCORD MUSIC GROUP': 206, 'CONCORDE VIDEO': 207, 'CONDOR': 208, 'CONSTANTIN FILM': 209, 'CONSTANTIN': 209, 'CONSTANTINO FILMES': 210, 'CONSTANTINO': 210, 'CONSTRUCTIVE MEDIA SERVICE': 211, 'CONSTRUCTIVE': 211, 'CONTENT ZONE': 212, 'CONTENTS GATE': 213, 'COQUEIRO VERDE': 214, 'CORNERSTONE MEDIA': 215, 'CORNERSTONE': 215, 'CP DIGITAL': 216, 'CREST MOVIES': 217, 'CRITERION': 218, 'CRITERION COLLECTION': + 218, 'CC': 218, 'CRYSTAL CLASSICS': 219, 'CULT EPICS': 220, 'CULT FILMS': 221, 'CULT VIDEO': 222, 'CURZON FILM WORLD': 223, 'D FILMS': 224, "D'AILLY COMPANY": 225, 'DAILLY COMPANY': 225, 'D AILLY COMPANY': 225, "D'AILLY": 225, 'DAILLY': 225, 'D AILLY': 225, 'DA CAPO': 226, 'DA MUSIC': 227, "DALL'ANGELO PICTURES": 228, 'DALLANGELO PICTURES': 228, "DALL'ANGELO": 228, 'DALL ANGELO PICTURES': 228, 'DALL ANGELO': 228, 'DAREDO': 229, 'DARK FORCE ENTERTAINMENT': 230, 'DARK FORCE': 230, 'DARK SIDE RELEASING': 231, 'DARK SIDE': 231, 'DAZZLER MEDIA': 232, 'DAZZLER': 232, 'DCM PICTURES': 233, 'DCM': 233, 'DEAPLANETA': 234, 'DECCA': 235, 'DEEPJOY': 236, 'DEFIANT SCREEN ENTERTAINMENT': 237, 'DEFIANT SCREEN': 237, 'DEFIANT': 237, 'DELOS': 238, 'DELPHIAN RECORDS': 239, 'DELPHIAN': 239, 'DELTA MUSIC & ENTERTAINMENT': 240, 'DELTA MUSIC AND ENTERTAINMENT': 240, 'DELTA MUSIC ENTERTAINMENT': 240, 'DELTA MUSIC': 240, 'DELTAMAC CO. 
LTD.': 241, 'DELTAMAC CO LTD': 241, 'DELTAMAC CO': 241, 'DELTAMAC': 241, 'DEMAND MEDIA': 242, 'DEMAND': 242, 'DEP': 243, 'DEUTSCHE GRAMMOPHON': 244, 'DFW': 245, 'DGM': 246, 'DIAPHANA': 247, 'DIGIDREAMS STUDIOS': 248, 'DIGIDREAMS': 248, 'DIGITAL ENVIRONMENTS': 249, 'DIGITAL': 249, 'DISCOTEK MEDIA': 250, 'DISCOVERY CHANNEL': 251, 'DISCOVERY': 251, 'DISK KINO': 252, 'DISNEY / BUENA VISTA': 253, 'DISNEY': 253, 'BUENA VISTA': 253, 'DISNEY BUENA VISTA': 253, 'DISTRIBUTION SELECT': 254, 'DIVISA': 255, 'DNC ENTERTAINMENT': 256, 'DNC': 256, 'DOGWOOF': 257, 'DOLMEN HOME VIDEO': 258, 'DOLMEN': 258, 'DONAU FILM': 259, 'DONAU': 259, 'DORADO FILMS': 260, 'DORADO': 260, 'DRAFTHOUSE FILMS': 261, 'DRAFTHOUSE': 261, 'DRAGON FILM ENTERTAINMENT': 262, 'DRAGON ENTERTAINMENT': 262, 'DRAGON FILM': 262, 'DRAGON': 262, 'DREAMWORKS': 263, 'DRIVE ON RECORDS': 264, 'DRIVE ON': 264, 'DRIVE-ON': 264, 'DRIVEON': 264, 'DS MEDIA': 265, 'DTP ENTERTAINMENT AG': 266, 'DTP ENTERTAINMENT': 266, 'DTP AG': 266, 'DTP': 266, 'DTS ENTERTAINMENT': 267, 'DTS': 267, 'DUKE MARKETING': 268, 'DUKE VIDEO DISTRIBUTION': 269, 'DUKE': 269, 'DUTCH FILMWORKS': 270, 'DUTCH': 270, 'DVD INTERNATIONAL': 271, 'DVD': 271, 'DYBEX': 272, 'DYNAMIC': 273, 'DYNIT': 274, 'E1 ENTERTAINMENT': 275, 'E1': 275, 'EAGLE ENTERTAINMENT': 276, 'EAGLE HOME ENTERTAINMENT PVT.LTD.': + 277, 'EAGLE HOME ENTERTAINMENT PVTLTD': 277, 'EAGLE HOME ENTERTAINMENT PVT LTD': 277, 'EAGLE HOME ENTERTAINMENT': 277, 'EAGLE PICTURES': 278, 'EAGLE ROCK ENTERTAINMENT': 279, 'EAGLE ROCK': 279, 'EAGLE VISION MEDIA': 280, 'EAGLE VISION': 280, 'EARMUSIC': 281, 'EARTH ENTERTAINMENT': 282, 'EARTH': 282, 'ECHO BRIDGE ENTERTAINMENT': 283, 'ECHO BRIDGE': 283, 'EDEL GERMANY GMBH': 284, 'EDEL GERMANY': 284, 'EDEL RECORDS': 285, 'EDITION TONFILM': 286, 'EDITIONS MONTPARNASSE': 287, 'EDKO FILMS LTD.': 288, 'EDKO FILMS LTD': 288, 'EDKO FILMS': 288, 'EDKO': 288, "EIN'S M&M CO": 289, 'EINS M&M CO': 289, "EIN'S M&M": 289, 'EINS M&M': 289, 'ELEA-MEDIA': 290, 'ELEA MEDIA': 290, 'ELEA': 290, 'ELECTRIC PICTURE': 291, 'ELECTRIC': 291, 'ELEPHANT FILMS': 292, 'ELEPHANT': 292, 'ELEVATION': 293, 'EMI': 294, 'EMON': 295, 'EMS': 296, 'EMYLIA': 297, 'ENE MEDIA': 298, 'ENE': 298, 'ENTERTAINMENT IN VIDEO': 299, 'ENTERTAINMENT IN': 299, 'ENTERTAINMENT ONE': 300, 'ENTERTAINMENT ONE FILMS CANADA INC.': 301, 'ENTERTAINMENT ONE FILMS CANADA INC': 301, 'ENTERTAINMENT ONE FILMS CANADA': 301, 'ENTERTAINMENT ONE CANADA INC': 301, + 'ENTERTAINMENT ONE CANADA': 301, 'ENTERTAINMENTONE': 302, 'EONE': 303, 'EOS': 304, 'EPIC PICTURES': 305, 'EPIC': 305, 'EPIC RECORDS': 306, 'ERATO': 307, 'EROS': 308, 'ESC EDITIONS': 309, 'ESCAPI MEDIA BV': 310, 'ESOTERIC RECORDINGS': 311, 'ESPN FILMS': 312, 'EUREKA ENTERTAINMENT': 313, 'EUREKA': 313, 'EURO PICTURES': 314, 'EURO VIDEO': 315, 'EUROARTS': 316, 'EUROPA FILMES': 317, 'EUROPA': 317, 'EUROPACORP': 318, 'EUROZOOM': 319, 'EXCEL': 320, 'EXPLOSIVE MEDIA': 321, 'EXPLOSIVE': 321, 'EXTRALUCID FILMS': 322, 'EXTRALUCID': 322, 'EYE SEE MOVIES': 323, 'EYE SEE': 323, 'EYK MEDIA': 324, 'EYK': 324, 'FABULOUS FILMS': 325, 'FABULOUS': 325, 'FACTORIS FILMS': 326, 'FACTORIS': 326, 'FARAO RECORDS': 327, 'FARBFILM HOME ENTERTAINMENT': 328, 'FARBFILM ENTERTAINMENT': 328, 'FARBFILM HOME': 328, 'FARBFILM': 328, 'FEELGOOD ENTERTAINMENT': 329, 'FEELGOOD': 329, 'FERNSEHJUWELEN': 330, 'FILM CHEST': 331, 'FILM MEDIA': 332, 'FILM MOVEMENT': 333, 'FILM4': 334, 'FILMART': 335, 'FILMAURO': 336, 'FILMAX': 337, 'FILMCONFECT HOME ENTERTAINMENT': 338, 'FILMCONFECT ENTERTAINMENT': 338, 'FILMCONFECT HOME': 338, 
'FILMCONFECT': 338, 'FILMEDIA': 339, 'FILMJUWELEN': 340, 'FILMOTEKA NARODAWA': 341, 'FILMRISE': 342, 'FINAL CUT ENTERTAINMENT': 343, 'FINAL CUT': 343, 'FIREHOUSE 12 RECORDS': 344, 'FIREHOUSE 12': 344, 'FIRST INTERNATIONAL PRODUCTION': 345, 'FIRST INTERNATIONAL': 345, 'FIRST LOOK STUDIOS': 346, 'FIRST LOOK': 346, 'FLAGMAN TRADE': 347, 'FLASHSTAR FILMES': 348, 'FLASHSTAR': 348, 'FLICKER ALLEY': 349, 'FNC ADD CULTURE': 350, 'FOCUS FILMES': 351, 'FOCUS': 351, 'FOKUS MEDIA': 352, 'FOKUSA': 352, 'FOX PATHE EUROPA': 353, 'FOX PATHE': 353, 'FOX EUROPA': 353, 'FOX/MGM': 354, 'FOX MGM': 354, 'MGM': 354, 'MGM/FOX': 354, 'FOX': 354, 'FPE': 355, 'FRANCE TÉLÉVISIONS DISTRIBUTION': 356, 'FRANCE TELEVISIONS DISTRIBUTION': 356, 'FRANCE TELEVISIONS': 356, 'FRANCE': 356, 'FREE DOLPHIN ENTERTAINMENT': 357, 'FREE DOLPHIN': 357, 'FREESTYLE DIGITAL MEDIA': 358, 'FREESTYLE DIGITAL': 358, 'FREESTYLE': 358, 'FREMANTLE HOME ENTERTAINMENT': 359, 'FREMANTLE ENTERTAINMENT': 359, 'FREMANTLE HOME': 359, 'FREMANTL': 359, 'FRENETIC FILMS': 360, 'FRENETIC': 360, 'FRONTIER WORKS': 361, 'FRONTIER': 361, 'FRONTIERS MUSIC': 362, 'FRONTIERS RECORDS': 363, 'FS FILM OY': 364, 'FS FILM': + 364, 'FULL MOON FEATURES': 365, 'FULL MOON': 365, 'FUN CITY EDITIONS': 366, 'FUN CITY': 366, 'FUNIMATION ENTERTAINMENT': 367, 'FUNIMATION': 367, 'FUSION': 368, 'FUTUREFILM': 369, 'G2 PICTURES': 370, 'G2': 370, 'GAGA COMMUNICATIONS': 371, 'GAGA': 371, 'GAIAM': 372, 'GALAPAGOS': 373, 'GAMMA HOME ENTERTAINMENT': 374, 'GAMMA ENTERTAINMENT': 374, 'GAMMA HOME': 374, 'GAMMA': 374, 'GARAGEHOUSE PICTURES': 375, 'GARAGEHOUSE': 375, 'GARAGEPLAY (車庫娛樂)': 376, '車庫娛樂': 376, 'GARAGEPLAY (Che Ku Yu Le )': 376, 'GARAGEPLAY': 376, 'Che Ku Yu Le': 376, 'GAUMONT': 377, 'GEFFEN': 378, 'GENEON ENTERTAINMENT': 379, 'GENEON': 379, 'GENEON UNIVERSAL ENTERTAINMENT': 380, 'GENERAL VIDEO RECORDING': 381, 'GLASS DOLL FILMS': 382, 'GLASS DOLL': 382, 'GLOBE MUSIC MEDIA': 383, 'GLOBE MUSIC': 383, 'GLOBE MEDIA': 383, 'GLOBE': 383, 'GO ENTERTAIN': 384, 'GO': 384, 'GOLDEN HARVEST': 385, 'GOOD!MOVIES': 386, + 'GOOD! 
MOVIES': 386, 'GOOD MOVIES': 386, 'GRAPEVINE VIDEO': 387, 'GRAPEVINE': 387, 'GRASSHOPPER FILM': 388, 'GRASSHOPPER FILMS': 388, 'GRASSHOPPER': 388, 'GRAVITAS VENTURES': 389, 'GRAVITAS': 389, 'GREAT MOVIES': 390, 'GREAT': 390, + 'GREEN APPLE ENTERTAINMENT': 391, 'GREEN ENTERTAINMENT': 391, 'GREEN APPLE': 391, 'GREEN': 391, 'GREENNARAE MEDIA': 392, 'GREENNARAE': 392, 'GRINDHOUSE RELEASING': 393, 'GRINDHOUSE': 393, 'GRIND HOUSE': 393, 'GRYPHON ENTERTAINMENT': 394, 'GRYPHON': 394, 'GUNPOWDER & SKY': 395, 'GUNPOWDER AND SKY': 395, 'GUNPOWDER SKY': 395, 'GUNPOWDER + SKY': 395, 'GUNPOWDER': 395, 'HANABEE ENTERTAINMENT': 396, 'HANABEE': 396, 'HANNOVER HOUSE': 397, 'HANNOVER': 397, 'HANSESOUND': 398, 'HANSE SOUND': 398, 'HANSE': 398, 'HAPPINET': 399, 'HARMONIA MUNDI': 400, 'HARMONIA': 400, 'HBO': 401, 'HDC': 402, 'HEC': 403, 'HELL & BACK RECORDINGS': 404, 'HELL AND BACK RECORDINGS': 404, 'HELL & BACK': 404, 'HELL AND BACK': 404, "HEN'S TOOTH VIDEO": 405, 'HENS TOOTH VIDEO': 405, "HEN'S TOOTH": 405, 'HENS TOOTH': 405, 'HIGH FLIERS': 406, 'HIGHLIGHT': 407, 'HILLSONG': 408, 'HISTORY CHANNEL': 409, 'HISTORY': 409, 'HK VIDÉO': 410, 'HK VIDEO': 410, 'HK': 410, 'HMH HAMBURGER MEDIEN HAUS': 411, 'HAMBURGER MEDIEN HAUS': 411, 'HMH HAMBURGER MEDIEN': 411, 'HMH HAMBURGER': 411, 'HMH': 411, 'HOLLYWOOD CLASSIC ENTERTAINMENT': 412, 'HOLLYWOOD CLASSIC': 412, 'HOLLYWOOD PICTURES': 413, 'HOLLYWOOD': 413, 'HOPSCOTCH ENTERTAINMENT': 414, 'HOPSCOTCH': 414, 'HPM': 415, 'HÄNNSLER CLASSIC': 416, 'HANNSLER CLASSIC': 416, 'HANNSLER': 416, 'I-CATCHER': 417, 'I CATCHER': 417, 'ICATCHER': 417, 'I-ON NEW MEDIA': 418, 'I ON NEW MEDIA': 418, 'ION NEW MEDIA': 418, 'ION MEDIA': 418, 'I-ON': 418, 'ION': 418, 'IAN PRODUCTIONS': 419, 'IAN': 419, 'ICESTORM': 420, 'ICON FILM DISTRIBUTION': 421, 'ICON DISTRIBUTION': 421, 'ICON FILM': 421, 'ICON': 421, 'IDEALE AUDIENCE': 422, 'IDEALE': 422, 'IFC FILMS': 423, 'IFC': 423, 'IFILM': 424, 'ILLUSIONS UNLTD.': 425, 'ILLUSIONS UNLTD': 425, 'ILLUSIONS': 425, 'IMAGE ENTERTAINMENT': 426, 'IMAGE': 426, + 'IMAGEM FILMES': 427, 'IMAGEM': 427, 'IMOVISION': 428, 'IMPERIAL CINEPIX': 429, 'IMPRINT': 430, 'IMPULS HOME ENTERTAINMENT': 431, 'IMPULS ENTERTAINMENT': 431, 'IMPULS HOME': 431, 'IMPULS': 431, 'IN-AKUSTIK': 432, 'IN AKUSTIK': 432, 'INAKUSTIK': 432, 'INCEPTION MEDIA GROUP': 433, 'INCEPTION MEDIA': 433, 'INCEPTION GROUP': 433, 'INCEPTION': 433, 'INDEPENDENT': 434, 'INDICAN': 435, 'INDIE RIGHTS': 436, 'INDIE': 436, 'INDIGO': 437, 'INFO': 438, 'INJOINGAN': 439, 'INKED PICTURES': 440, 'INKED': 440, 'INSIDE OUT MUSIC': 441, 'INSIDE MUSIC': 441, 'INSIDE OUT': 441, 'INSIDE': 441, 'INTERCOM': 442, 'INTERCONTINENTAL VIDEO': 443, 'INTERCONTINENTAL': 443, 'INTERGROOVE': 444, + 'INTERSCOPE': 445, 'INVINCIBLE PICTURES': 446, 'INVINCIBLE': 446, 'ISLAND/MERCURY': 447, 'ISLAND MERCURY': 447, 'ISLANDMERCURY': 447, 'ISLAND & MERCURY': 447, 'ISLAND AND MERCURY': 447, 'ISLAND': 447, 'ITN': 448, 'ITV DVD': 449, 'ITV': 449, 'IVC': 450, 'IVE ENTERTAINMENT': 451, 'IVE': 451, 'J&R ADVENTURES': 452, 'J&R': 452, 'JR': 452, 'JAKOB': 453, 'JONU MEDIA': 454, 'JONU': 454, 'JRB PRODUCTIONS': 455, 'JRB': 455, 'JUST BRIDGE ENTERTAINMENT': 456, 'JUST BRIDGE': 456, 'JUST ENTERTAINMENT': 456, 'JUST': 456, 'KABOOM ENTERTAINMENT': 457, 'KABOOM': 457, 'KADOKAWA ENTERTAINMENT': 458, 'KADOKAWA': 458, 'KAIROS': 459, 'KALEIDOSCOPE ENTERTAINMENT': 460, 'KALEIDOSCOPE': 460, 'KAM & RONSON ENTERPRISES': 461, 'KAM & RONSON': 461, 'KAM&RONSON ENTERPRISES': 461, 'KAM&RONSON': 461, 'KAM AND RONSON ENTERPRISES': 461, 'KAM AND RONSON': 461, 'KANA HOME 
VIDEO': 462, 'KARMA FILMS': 463, 'KARMA': 463, 'KATZENBERGER': 464, 'KAZE': 465, 'KBS MEDIA': 466, 'KBS': 466, 'KD MEDIA': 467, 'KD': 467, 'KING MEDIA': 468, 'KING': 468, 'KING RECORDS': 469, 'KINO LORBER': 470, 'KINO': 470, 'KINO SWIAT': 471, 'KINOKUNIYA': 472, 'KINOWELT HOME ENTERTAINMENT/DVD': 473, 'KINOWELT HOME ENTERTAINMENT': 473, 'KINOWELT ENTERTAINMENT': 473, 'KINOWELT HOME DVD': 473, 'KINOWELT ENTERTAINMENT/DVD': 473, 'KINOWELT DVD': 473, 'KINOWELT': 473, 'KIT PARKER FILMS': 474, 'KIT PARKER': 474, 'KITTY MEDIA': 475, 'KNM HOME ENTERTAINMENT': 476, 'KNM ENTERTAINMENT': 476, 'KNM HOME': 476, 'KNM': 476, 'KOBA FILMS': 477, 'KOBA': 477, 'KOCH ENTERTAINMENT': 478, 'KOCH MEDIA': 479, 'KOCH': 479, 'KRAKEN RELEASING': 480, 'KRAKEN': 480, 'KSCOPE': 481, 'KSM': 482, 'KULTUR': 483, "L'ATELIER D'IMAGES": 484, "LATELIER D'IMAGES": 484, "L'ATELIER DIMAGES": 484, 'LATELIER DIMAGES': 484, "L ATELIER D'IMAGES": 484, "L'ATELIER D IMAGES": 484, + 'L ATELIER D IMAGES': 484, "L'ATELIER": 484, 'L ATELIER': 484, 'LATELIER': 484, 'LA AVENTURA AUDIOVISUAL': 485, 'LA AVENTURA': 485, 'LACE GROUP': 486, 'LACE': 486, 'LASER PARADISE': 487, 'LAYONS': 488, 'LCJ EDITIONS': 489, 'LCJ': 489, 'LE CHAT QUI FUME': 490, 'LE PACTE': 491, 'LEDICK FILMHANDEL': 492, 'LEGEND': 493, 'LEOMARK STUDIOS': 494, 'LEOMARK': 494, 'LEONINE FILMS': 495, 'LEONINE': 495, 'LICHTUNG MEDIA LTD': 496, 'LICHTUNG LTD': 496, 'LICHTUNG MEDIA LTD.': 496, 'LICHTUNG LTD.': 496, 'LICHTUNG MEDIA': 496, 'LICHTUNG': 496, 'LIGHTHOUSE HOME ENTERTAINMENT': 497, 'LIGHTHOUSE ENTERTAINMENT': 497, 'LIGHTHOUSE HOME': 497, 'LIGHTHOUSE': 497, 'LIGHTYEAR': 498, 'LIONSGATE FILMS': 499, 'LIONSGATE': 499, 'LIZARD CINEMA TRADE': 500, 'LLAMENTOL': 501, 'LOBSTER FILMS': 502, 'LOBSTER': 502, 'LOGON': 503, 'LORBER FILMS': 504, 'LORBER': 504, 'LOS BANDITOS FILMS': 505, 'LOS BANDITOS': 505, 'LOUD & PROUD RECORDS': 506, 'LOUD AND PROUD RECORDS': 506, 'LOUD & PROUD': 506, 'LOUD AND PROUD': 506, 'LSO LIVE': 507, 'LUCASFILM': 508, 'LUCKY RED': 509, 'LUMIÈRE HOME ENTERTAINMENT': 510, 'LUMIERE HOME ENTERTAINMENT': 510, 'LUMIERE ENTERTAINMENT': 510, 'LUMIERE HOME': 510, 'LUMIERE': 510, 'M6 VIDEO': 511, 'M6': 511, 'MAD DIMENSION': 512, 'MADMAN ENTERTAINMENT': 513, 'MADMAN': 513, 'MAGIC BOX': 514, 'MAGIC PLAY': 515, 'MAGNA HOME ENTERTAINMENT': 516, 'MAGNA ENTERTAINMENT': 516, 'MAGNA HOME': 516, 'MAGNA': 516, 'MAGNOLIA PICTURES': 517, 'MAGNOLIA': 517, 'MAIDEN JAPAN': 518, 'MAIDEN': 518, 'MAJENG MEDIA': 519, 'MAJENG': 519, 'MAJESTIC HOME ENTERTAINMENT': 520, 'MAJESTIC ENTERTAINMENT': 520, 'MAJESTIC HOME': 520, 'MAJESTIC': 520, 'MANGA HOME ENTERTAINMENT': 521, 'MANGA ENTERTAINMENT': 521, 'MANGA HOME': 521, 'MANGA': 521, 'MANTA LAB': 522, 'MAPLE STUDIOS': 523, 'MAPLE': 523, 'MARCO POLO PRODUCTION': + 524, 'MARCO POLO': 524, 'MARIINSKY': 525, 'MARVEL STUDIOS': 526, 'MARVEL': 526, 'MASCOT RECORDS': 527, 'MASCOT': 527, 'MASSACRE VIDEO': 528, 'MASSACRE': 528, 'MATCHBOX': 529, 'MATRIX D': 530, 'MAXAM': 531, 'MAYA HOME ENTERTAINMENT': 532, 'MAYA ENTERTAINMENT': 532, 'MAYA HOME': 532, 'MAYAT': 532, 'MDG': 533, 'MEDIA BLASTERS': 534, 'MEDIA FACTORY': 535, 'MEDIA TARGET DISTRIBUTION': 536, 'MEDIA TARGET': 536, 'MEDIAINVISION': 537, 'MEDIATOON': 538, 'MEDIATRES ESTUDIO': 539, 'MEDIATRES STUDIO': 539, 'MEDIATRES': 539, 'MEDICI ARTS': 540, 'MEDICI CLASSICS': 541, 'MEDIUMRARE ENTERTAINMENT': 542, 'MEDIUMRARE': 542, 'MEDUSA': 543, 'MEGASTAR': 544, 'MEI AH': 545, 'MELI MÉDIAS': 546, 'MELI MEDIAS': 546, 'MEMENTO FILMS': 547, 'MEMENTO': 547, 'MENEMSHA FILMS': 548, 'MENEMSHA': 548, 'MERCURY': 549, 
'MERCURY STUDIOS': 550, 'MERGE SOFT PRODUCTIONS': 551, 'MERGE PRODUCTIONS': 551, 'MERGE SOFT': 551, 'MERGE': 551, 'METAL BLADE RECORDS': 552, 'METAL BLADE': 552, 'METEOR': 553, 'METRO-GOLDWYN-MAYER': 554, 'METRO GOLDWYN MAYER': 554, 'METROGOLDWYNMAYER': 554, 'METRODOME VIDEO': 555, 'METRODOME': 555, 'METROPOLITAN': 556, 'MFA+': + 557, 'MFA': 557, 'MIG FILMGROUP': 558, 'MIG': 558, 'MILESTONE': 559, 'MILL CREEK ENTERTAINMENT': 560, 'MILL CREEK': 560, 'MILLENNIUM MEDIA': 561, 'MILLENNIUM': 561, 'MIRAGE ENTERTAINMENT': 562, 'MIRAGE': 562, 'MIRAMAX': 563, + 'MISTERIYA ZVUKA': 564, 'MK2': 565, 'MODE RECORDS': 566, 'MODE': 566, 'MOMENTUM PICTURES': 567, 'MONDO HOME ENTERTAINMENT': 568, 'MONDO ENTERTAINMENT': 568, 'MONDO HOME': 568, 'MONDO MACABRO': 569, 'MONGREL MEDIA': 570, 'MONOLIT': 571, 'MONOLITH VIDEO': 572, 'MONOLITH': 572, 'MONSTER PICTURES': 573, 'MONSTER': 573, 'MONTEREY VIDEO': 574, 'MONTEREY': 574, 'MONUMENT RELEASING': 575, 'MONUMENT': 575, 'MORNINGSTAR': 576, 'MORNING STAR': 576, 'MOSERBAER': 577, 'MOVIEMAX': 578, 'MOVINSIDE': 579, 'MPI MEDIA GROUP': 580, 'MPI MEDIA': 580, 'MPI': 580, 'MR. BONGO FILMS': 581, 'MR BONGO FILMS': 581, 'MR BONGO': 581, 'MRG (MERIDIAN)': 582, 'MRG MERIDIAN': 582, 'MRG': 582, 'MERIDIAN': 582, 'MUBI': 583, 'MUG SHOT PRODUCTIONS': 584, 'MUG SHOT': 584, 'MULTIMUSIC': 585, 'MULTI-MUSIC': 585, 'MULTI MUSIC': 585, 'MUSE': 586, 'MUSIC BOX FILMS': 587, 'MUSIC BOX': 587, 'MUSICBOX': 587, 'MUSIC BROKERS': 588, 'MUSIC THEORIES': 589, 'MUSIC VIDEO DISTRIBUTORS': 590, 'MUSIC VIDEO': 590, 'MUSTANG ENTERTAINMENT': 591, 'MUSTANG': 591, 'MVD VISUAL': 592, 'MVD': 592, 'MVD/VSC': 593, 'MVL': 594, 'MVM ENTERTAINMENT': 595, 'MVM': 595, 'MYNDFORM': 596, 'MYSTIC NIGHT PICTURES': 597, 'MYSTIC NIGHT': 597, 'NAMELESS MEDIA': 598, 'NAMELESS': 598, 'NAPALM RECORDS': 599, 'NAPALM': 599, 'NATIONAL ENTERTAINMENT MEDIA': 600, 'NATIONAL ENTERTAINMENT': 600, 'NATIONAL MEDIA': 600, 'NATIONAL FILM ARCHIVE': 601, 'NATIONAL ARCHIVE': 601, 'NATIONAL FILM': 601, 'NATIONAL GEOGRAPHIC': 602, 'NAT GEO TV': 602, 'NAT GEO': 602, 'NGO': 602, 'NAXOS': 603, 'NBCUNIVERSAL ENTERTAINMENT JAPAN': 604, 'NBC UNIVERSAL ENTERTAINMENT JAPAN': 604, 'NBCUNIVERSAL JAPAN': 604, 'NBC UNIVERSAL JAPAN': 604, 'NBC JAPAN': 604, 'NBO ENTERTAINMENT': 605, 'NBO': 605, 'NEOS': 606, 'NETFLIX': 607, 'NETWORK': 608, 'NEW BLOOD': 609, 'NEW DISC': 610, 'NEW KSM': 611, 'NEW LINE CINEMA': 612, 'NEW LINE': 612, 'NEW MOVIE TRADING CO. 
LTD': 613, 'NEW MOVIE TRADING CO LTD': 613, 'NEW MOVIE TRADING CO': 613, 'NEW MOVIE TRADING': 613, 'NEW WAVE FILMS': 614, 'NEW WAVE': 614, 'NFI': 615, + 'NHK': 616, 'NIPPONART': 617, 'NIS AMERICA': 618, 'NJUTAFILMS': 619, 'NOBLE ENTERTAINMENT': 620, 'NOBLE': 620, 'NORDISK FILM': 621, 'NORDISK': 621, 'NORSK FILM': 622, 'NORSK': 622, 'NORTH AMERICAN MOTION PICTURES': 623, 'NOS AUDIOVISUAIS': 624, 'NOTORIOUS PICTURES': 625, 'NOTORIOUS': 625, 'NOVA MEDIA': 626, 'NOVA': 626, 'NOVA SALES AND DISTRIBUTION': 627, 'NOVA SALES & DISTRIBUTION': 627, 'NSM': 628, 'NSM RECORDS': 629, 'NUCLEAR BLAST': 630, 'NUCLEUS FILMS': 631, 'NUCLEUS': 631, 'OBERLIN MUSIC': 632, 'OBERLIN': 632, 'OBRAS-PRIMAS DO CINEMA': 633, 'OBRAS PRIMAS DO CINEMA': 633, 'OBRASPRIMAS DO CINEMA': 633, 'OBRAS-PRIMAS CINEMA': 633, 'OBRAS PRIMAS CINEMA': 633, 'OBRASPRIMAS CINEMA': 633, 'OBRAS-PRIMAS': 633, 'OBRAS PRIMAS': 633, 'OBRASPRIMAS': 633, 'ODEON': 634, 'OFDB FILMWORKS': 635, 'OFDB': 635, 'OLIVE FILMS': 636, 'OLIVE': 636, 'ONDINE': 637, 'ONSCREEN FILMS': 638, 'ONSCREEN': 638, 'OPENING DISTRIBUTION': 639, 'OPERA AUSTRALIA': 640, 'OPTIMUM HOME ENTERTAINMENT': 641, 'OPTIMUM ENTERTAINMENT': 641, 'OPTIMUM HOME': 641, 'OPTIMUM': 641, 'OPUS ARTE': 642, 'ORANGE STUDIO': 643, 'ORANGE': 643, 'ORLANDO EASTWOOD FILMS': 644, 'ORLANDO FILMS': 644, 'ORLANDO EASTWOOD': 644, 'ORLANDO': 644, 'ORUSTAK PICTURES': 645, 'ORUSTAK': 645, 'OSCILLOSCOPE PICTURES': 646, 'OSCILLOSCOPE': 646, 'OUTPLAY': 647, 'PALISADES TARTAN': 648, 'PAN VISION': 649, 'PANVISION': 649, 'PANAMINT CINEMA': 650, 'PANAMINT': 650, 'PANDASTORM ENTERTAINMENT': 651, 'PANDA STORM ENTERTAINMENT': 651, 'PANDASTORM': 651, 'PANDA STORM': 651, 'PANDORA FILM': 652, 'PANDORA': 652, 'PANEGYRIC': 653, 'PANORAMA': 654, 'PARADE DECK FILMS': 655, 'PARADE DECK': 655, 'PARADISE': 656, 'PARADISO FILMS': 657, 'PARADOX': 658, 'PARAMOUNT PICTURES': 659, 'PARAMOUNT': 659, 'PARIS FILMES': 660, 'PARIS FILMS': 660, 'PARIS': 660, 'PARK CIRCUS': 661, 'PARLOPHONE': 662, 'PASSION RIVER': 663, 'PATHE DISTRIBUTION': 664, 'PATHE': 664, 'PBS': 665, 'PEACE ARCH TRINITY': 666, 'PECCADILLO PICTURES': 667, 'PEPPERMINT': 668, 'PHASE 4 FILMS': 669, 'PHASE 4': 669, 'PHILHARMONIA BAROQUE': 670, 'PICTURE HOUSE ENTERTAINMENT': 671, 'PICTURE ENTERTAINMENT': 671, 'PICTURE HOUSE': 671, 'PICTURE': 671, 'PIDAX': 672, 'PINK FLOYD RECORDS': 673, 'PINK FLOYD': 673, 'PINNACLE FILMS': 674, 'PINNACLE': 674, 'PLAIN': 675, 'PLATFORM ENTERTAINMENT LIMITED': 676, 'PLATFORM ENTERTAINMENT LTD': 676, 'PLATFORM ENTERTAINMENT LTD.': 676, 'PLATFORM ENTERTAINMENT': 676, 'PLATFORM': 676, 'PLAYARTE': 677, 'PLG UK CLASSICS': 678, 'PLG UK': + 678, 'PLG': 678, 'POLYBAND & TOPPIC VIDEO/WVG': 679, 'POLYBAND AND TOPPIC VIDEO/WVG': 679, 'POLYBAND & TOPPIC VIDEO WVG': 679, 'POLYBAND & TOPPIC VIDEO AND WVG': 679, 'POLYBAND & TOPPIC VIDEO & WVG': 679, 'POLYBAND AND TOPPIC VIDEO WVG': 679, 'POLYBAND AND TOPPIC VIDEO AND WVG': 679, 'POLYBAND AND TOPPIC VIDEO & WVG': 679, 'POLYBAND & TOPPIC VIDEO': 679, 'POLYBAND AND TOPPIC VIDEO': 679, 'POLYBAND & TOPPIC': 679, 'POLYBAND AND TOPPIC': 679, 'POLYBAND': 679, 'WVG': 679, 'POLYDOR': 680, 'PONY': 681, 'PONY CANYON': 682, 'POTEMKINE': 683, 'POWERHOUSE FILMS': 684, 'POWERHOUSE': 684, 'POWERSTATIOM': 685, 'PRIDE & JOY': 686, 'PRIDE AND JOY': 686, 'PRINZ MEDIA': 687, 'PRINZ': 687, 'PRIS AUDIOVISUAIS': 688, 'PRO VIDEO': 689, 'PRO-VIDEO': 689, 'PRO-MOTION': 690, 'PRO MOTION': 690, 'PROD. 
JRB': 691, 'PROD JRB': 691, 'PRODISC': 692, 'PROKINO': 693, 'PROVOGUE RECORDS': 694, 'PROVOGUE': 694, 'PROWARE': 695, 'PULP VIDEO': 696, 'PULP': 696, 'PULSE VIDEO': 697, 'PULSE': 697, 'PURE AUDIO RECORDINGS': 698, 'PURE AUDIO': 698, 'PURE FLIX ENTERTAINMENT': 699, 'PURE FLIX': 699, 'PURE ENTERTAINMENT': 699, 'PYRAMIDE VIDEO': 700, 'PYRAMIDE': 700, 'QUALITY FILMS': 701, 'QUALITY': 701, 'QUARTO VALLEY RECORDS': 702, 'QUARTO VALLEY': 702, 'QUESTAR': 703, 'R SQUARED FILMS': 704, 'R SQUARED': 704, 'RAPID EYE MOVIES': 705, 'RAPID EYE': 705, 'RARO VIDEO': 706, 'RARO': 706, 'RAROVIDEO U.S.': 707, 'RAROVIDEO US': 707, 'RARO VIDEO US': 707, 'RARO VIDEO U.S.': 707, 'RARO U.S.': 707, 'RARO US': 707, 'RAVEN BANNER RELEASING': 708, 'RAVEN BANNER': 708, 'RAVEN': 708, 'RAZOR DIGITAL ENTERTAINMENT': 709, 'RAZOR DIGITAL': 709, 'RCA': 710, 'RCO LIVE': 711, 'RCO': 711, 'RCV': 712, 'REAL GONE MUSIC': 713, 'REAL GONE': 713, 'REANIMEDIA': 714, 'REANI MEDIA': 714, 'REDEMPTION': 715, 'REEL': 716, 'RELIANCE HOME VIDEO & GAMES': 717, 'RELIANCE HOME VIDEO AND GAMES': 717, 'RELIANCE HOME VIDEO': 717, 'RELIANCE VIDEO': 717, 'RELIANCE HOME': 717, 'RELIANCE': 717, 'REM CULTURE': 718, 'REMAIN IN LIGHT': 719, 'REPRISE': 720, 'RESEN': 721, 'RETROMEDIA': 722, 'REVELATION FILMS LTD.': 723, 'REVELATION FILMS LTD': 723, 'REVELATION FILMS': 723, 'REVELATION LTD.': 723, 'REVELATION LTD': 723, 'REVELATION': 723, 'REVOLVER ENTERTAINMENT': 724, 'REVOLVER': 724, 'RHINO MUSIC': 725, 'RHINO': 725, 'RHV': 726, 'RIGHT STUF': 727, 'RIMINI EDITIONS': 728, 'RISING SUN MEDIA': 729, 'RLJ ENTERTAINMENT': 730, 'RLJ': 730, 'ROADRUNNER RECORDS': 731, 'ROADSHOW ENTERTAINMENT': 732, 'ROADSHOW': 732, 'RONE': 733, 'RONIN FLIX': 734, 'ROTANA HOME ENTERTAINMENT': 735, 'ROTANA ENTERTAINMENT': 735, 'ROTANA HOME': 735, 'ROTANA': 735, 'ROUGH TRADE': 736, 'ROUNDER': 737, 'SAFFRON HILL FILMS': 738, 'SAFFRON HILL': 738, 'SAFFRON': 738, 'SAMUEL GOLDWYN FILMS': 739, 'SAMUEL GOLDWYN': 739, 'SAN FRANCISCO SYMPHONY': 740, 'SANDREW METRONOME': 741, 'SAPHRANE': 742, 'SAVOR': 743, 'SCANBOX ENTERTAINMENT': 744, 'SCANBOX': 744, 'SCENIC LABS': 745, 'SCHRÖDERMEDIA': 746, 'SCHRODERMEDIA': 746, 'SCHRODER MEDIA': 746, 'SCORPION RELEASING': 747, 'SCORPION': 747, 'SCREAM TEAM RELEASING': 748, 'SCREAM TEAM': 748, 'SCREEN MEDIA': 749, 'SCREEN': 749, 'SCREENBOUND PICTURES': 750, 'SCREENBOUND': 750, 'SCREENWAVE MEDIA': 751, 'SCREENWAVE': 751, 'SECOND RUN': 752, 'SECOND SIGHT': 753, 'SEEDSMAN GROUP': 754, 'SELECT VIDEO': 755, 'SELECTA VISION': 756, 'SENATOR': 757, 'SENTAI FILMWORKS': 758, 'SENTAI': 758, 'SEVEN7': 759, 'SEVERIN FILMS': 760, 'SEVERIN': 760, 'SEVILLE': 761, 'SEYONS ENTERTAINMENT': 762, 'SEYONS': 762, 'SF STUDIOS': 763, 'SGL ENTERTAINMENT': 764, 'SGL': 764, 'SHAMELESS': 765, 'SHAMROCK MEDIA': 766, 'SHAMROCK': 766, 'SHANGHAI EPIC MUSIC ENTERTAINMENT': 767, 'SHANGHAI EPIC ENTERTAINMENT': 767, 'SHANGHAI EPIC MUSIC': 767, 'SHANGHAI MUSIC ENTERTAINMENT': 767, 'SHANGHAI ENTERTAINMENT': 767, 'SHANGHAI MUSIC': 767, 'SHANGHAI': 767, 'SHEMAROO': 768, 'SHOCHIKU': 769, 'SHOCK': 770, 'SHOGAKU KAN': 771, 'SHOUT FACTORY': 772, 'SHOUT! 
FACTORY': 772, 'SHOUT': 772, 'SHOUT!': 772, 'SHOWBOX': 773, 'SHOWTIME ENTERTAINMENT': 774, 'SHOWTIME': 774, 'SHRIEK SHOW': 775, 'SHUDDER': 776, 'SIDONIS': 777, 'SIDONIS CALYSTA': 778, 'SIGNAL ONE ENTERTAINMENT': 779, 'SIGNAL ONE': 779, 'SIGNATURE ENTERTAINMENT': 780, 'SIGNATURE': 780, 'SILVER VISION': 781, 'SINISTER FILM': 782, 'SINISTER': 782, 'SIREN VISUAL ENTERTAINMENT': 783, 'SIREN VISUAL': 783, 'SIREN ENTERTAINMENT': 783, 'SIREN': 783, 'SKANI': 784, 'SKY DIGI': 785, 'SLASHER // VIDEO': 786, 'SLASHER / VIDEO': 786, 'SLASHER VIDEO': 786, 'SLASHER': 786, 'SLOVAK FILM INSTITUTE': 787, 'SLOVAK FILM': 787, + 'SFI': 787, 'SM LIFE DESIGN GROUP': 788, 'SMOOTH PICTURES': 789, 'SMOOTH': 789, 'SNAPPER MUSIC': 790, 'SNAPPER': 790, 'SODA PICTURES': 791, 'SODA': 791, 'SONO LUMINUS': 792, 'SONY MUSIC': 793, 'SONY PICTURES': 794, 'SONY': 794, 'SONY PICTURES CLASSICS': 795, 'SONY CLASSICS': 795, 'SOUL MEDIA': 796, 'SOUL': 796, 'SOULFOOD MUSIC DISTRIBUTION': 797, 'SOULFOOD DISTRIBUTION': 797, 'SOULFOOD MUSIC': 797, 'SOULFOOD': 797, 'SOYUZ': 798, 'SPECTRUM': 799, + 'SPENTZOS FILM': 800, 'SPENTZOS': 800, 'SPIRIT ENTERTAINMENT': 801, 'SPIRIT': 801, 'SPIRIT MEDIA GMBH': 802, 'SPIRIT MEDIA': 802, 'SPLENDID ENTERTAINMENT': 803, 'SPLENDID FILM': 804, 'SPO': 805, 'SQUARE ENIX': 806, 'SRI BALAJI VIDEO': 807, 'SRI BALAJI': 807, 'SRI': 807, 'SRI VIDEO': 807, 'SRS CINEMA': 808, 'SRS': 808, 'SSO RECORDINGS': 809, 'SSO': 809, 'ST2 MUSIC': 810, 'ST2': 810, 'STAR MEDIA ENTERTAINMENT': 811, 'STAR ENTERTAINMENT': 811, 'STAR MEDIA': 811, 'STAR': 811, 'STARLIGHT': 812, 'STARZ / ANCHOR BAY': 813, 'STARZ ANCHOR BAY': 813, 'STARZ': 813, 'ANCHOR BAY': 813, 'STER KINEKOR': 814, 'STERLING ENTERTAINMENT': 815, 'STERLING': 815, 'STINGRAY': 816, 'STOCKFISCH RECORDS': 817, 'STOCKFISCH': 817, 'STRAND RELEASING': 818, 'STRAND': 818, 'STUDIO 4K': 819, 'STUDIO CANAL': 820, 'STUDIO GHIBLI': 821, 'GHIBLI': 821, 'STUDIO HAMBURG ENTERPRISES': 822, 'HAMBURG ENTERPRISES': 822, 'STUDIO HAMBURG': 822, 'HAMBURG': 822, 'STUDIO S': 823, 'SUBKULTUR ENTERTAINMENT': 824, 'SUBKULTUR': 824, 'SUEVIA FILMS': 825, 'SUEVIA': 825, 'SUMMIT ENTERTAINMENT': 826, 'SUMMIT': 826, 'SUNFILM ENTERTAINMENT': 827, 'SUNFILM': 827, 'SURROUND RECORDS': 828, 'SURROUND': 828, 'SVENSK FILMINDUSTRI': 829, 'SVENSK': 829, 'SWEN FILMES': 830, 'SWEN FILMS': 830, 'SWEN': 830, 'SYNAPSE FILMS': 831, 'SYNAPSE': 831, 'SYNDICADO': 832, 'SYNERGETIC': 833, 'T- SERIES': 834, 'T-SERIES': 834, 'T SERIES': 834, 'TSERIES': 834, 'T.V.P.': 835, 'TVP': 835, 'TACET RECORDS': 836, 'TACET': 836, 'TAI SENG': 837, 'TAI SHENG': 838, 'TAKEONE': 839, 'TAKESHOBO': 840, 'TAMASA DIFFUSION': 841, 'TC ENTERTAINMENT': 842, 'TC': 842, 'TDK': 843, 'TEAM MARKETING': 844, 'TEATRO REAL': 845, 'TEMA DISTRIBUCIONES': 846, 'TEMPE DIGITAL': 847, 'TF1 VIDÉO': 848, 'TF1 VIDEO': 848, 'TF1': 848, 'THE BLU': 849, 'BLU': 849, 'THE ECSTASY OF FILMS': 850, 'THE FILM DETECTIVE': 851, 'FILM DETECTIVE': 851, 'THE JOKERS': 852, 'JOKERS': 852, 'THE ON': 853, 'ON': 853, 'THIMFILM': 854, 'THIM FILM': 854, 'THIM': 854, 'THIRD WINDOW FILMS': 855, 'THIRD WINDOW': 855, '3RD WINDOW FILMS': 855, '3RD WINDOW': 855, 'THUNDERBEAN ANIMATION': 856, 'THUNDERBEAN': 856, 'THUNDERBIRD RELEASING': 857, 'THUNDERBIRD': 857, 'TIBERIUS FILM': 858, 'TIME LIFE': 859, 'TIMELESS MEDIA GROUP': 860, 'TIMELESS MEDIA': 860, 'TIMELESS GROUP': 860, 'TIMELESS': 860, 'TLA RELEASING': 861, 'TLA': 861, 'TOBIS FILM': 862, 'TOBIS': 862, 'TOEI': 863, 'TOHO': 864, 'TOKYO SHOCK': 865, 'TOKYO': 865, 'TONPOOL MEDIEN GMBH': 866, 'TONPOOL MEDIEN': 866, 'TOPICS 
ENTERTAINMENT': 867, 'TOPICS': 867, 'TOUCHSTONE PICTURES': 868, 'TOUCHSTONE': 868, 'TRANSMISSION FILMS': 869, 'TRANSMISSION': 869, 'TRAVEL VIDEO STORE': 870, 'TRIART': 871, 'TRIGON FILM': 872, 'TRIGON': 872, 'TRINITY HOME ENTERTAINMENT': 873, 'TRINITY ENTERTAINMENT': 873, 'TRINITY HOME': 873, 'TRINITY': 873, 'TRIPICTURES': 874, 'TRI-PICTURES': 874, 'TRI PICTURES': 874, 'TROMA': 875, 'TURBINE MEDIEN': 876, 'TURTLE RECORDS': 877, 'TURTLE': 877, 'TVA FILMS': 878, 'TVA': 878, 'TWILIGHT TIME': 879, 'TWILIGHT': 879, 'TT': 879, 'TWIN CO., LTD.': 880, 'TWIN CO, LTD.': 880, 'TWIN CO., LTD': 880, 'TWIN CO, LTD': 880, 'TWIN CO LTD': 880, 'TWIN LTD': 880, 'TWIN CO.': 880, 'TWIN CO': 880, 'TWIN': 880, 'UCA': 881, 'UDR': 882, 'UEK': 883, 'UFA/DVD': 884, 'UFA DVD': 884, 'UFADVD': 884, 'UGC PH': 885, 'ULTIMATE3DHEAVEN': 886, 'ULTRA': 887, 'UMBRELLA ENTERTAINMENT': 888, 'UMBRELLA': 888, 'UMC': 889, "UNCORK'D ENTERTAINMENT": 890, 'UNCORKD ENTERTAINMENT': 890, 'UNCORK D ENTERTAINMENT': 890, "UNCORK'D": 890, 'UNCORK D': 890, 'UNCORKD': 890, 'UNEARTHED FILMS': 891, 'UNEARTHED': 891, 'UNI DISC': 892, 'UNIMUNDOS': 893, 'UNITEL': 894, 'UNIVERSAL MUSIC': 895, 'UNIVERSAL SONY PICTURES HOME ENTERTAINMENT': 896, 'UNIVERSAL SONY PICTURES ENTERTAINMENT': 896, 'UNIVERSAL SONY PICTURES HOME': 896, 'UNIVERSAL SONY PICTURES': 896, 'UNIVERSAL HOME ENTERTAINMENT': + 896, 'UNIVERSAL ENTERTAINMENT': 896, 'UNIVERSAL HOME': 896, 'UNIVERSAL STUDIOS': 897, 'UNIVERSAL': 897, 'UNIVERSE LASER & VIDEO CO.': 898, 'UNIVERSE LASER AND VIDEO CO.': 898, 'UNIVERSE LASER & VIDEO CO': 898, 'UNIVERSE LASER AND VIDEO CO': 898, 'UNIVERSE LASER CO.': 898, 'UNIVERSE LASER CO': 898, 'UNIVERSE LASER': 898, 'UNIVERSUM FILM': 899, 'UNIVERSUM': 899, 'UTV': 900, 'VAP': 901, 'VCI': 902, 'VENDETTA FILMS': 903, 'VENDETTA': 903, 'VERSÁTIL HOME VIDEO': 904, 'VERSÁTIL VIDEO': 904, 'VERSÁTIL HOME': 904, 'VERSÁTIL': 904, 'VERSATIL HOME VIDEO': 904, 'VERSATIL VIDEO': 904, 'VERSATIL HOME': 904, 'VERSATIL': 904, 'VERTICAL ENTERTAINMENT': 905, 'VERTICAL': 905, 'VÉRTICE 360º': 906, 'VÉRTICE 360': 906, 'VERTICE 360o': 906, 'VERTICE 360': 906, 'VERTIGO BERLIN': 907, 'VÉRTIGO FILMS': 908, 'VÉRTIGO': 908, 'VERTIGO FILMS': 908, 'VERTIGO': 908, 'VERVE PICTURES': 909, 'VIA VISION ENTERTAINMENT': 910, 'VIA VISION': 910, 'VICOL ENTERTAINMENT': 911, 'VICOL': 911, 'VICOM': 912, 'VICTOR ENTERTAINMENT': 913, 'VICTOR': 913, 'VIDEA CDE': 914, 'VIDEO FILM EXPRESS': 915, 'VIDEO FILM': 915, 'VIDEO EXPRESS': 915, 'VIDEO MUSIC, INC.': 916, 'VIDEO MUSIC, INC': 916, 'VIDEO MUSIC INC.': 916, 'VIDEO MUSIC INC': 916, 'VIDEO MUSIC': 916, 'VIDEO SERVICE CORP.': 917, 'VIDEO SERVICE CORP': 917, 'VIDEO SERVICE': 917, 'VIDEO TRAVEL': 918, 'VIDEOMAX': 919, 'VIDEO MAX': 919, 'VII PILLARS ENTERTAINMENT': 920, 'VII PILLARS': 920, 'VILLAGE FILMS': 921, 'VINEGAR SYNDROME': 922, 'VINEGAR': 922, 'VS': 922, 'VINNY MOVIES': 923, 'VINNY': 923, 'VIRGIL FILMS & ENTERTAINMENT': 924, 'VIRGIL FILMS AND ENTERTAINMENT': 924, 'VIRGIL ENTERTAINMENT': 924, 'VIRGIL FILMS': 924, 'VIRGIL': 924, 'VIRGIN RECORDS': 925, 'VIRGIN': 925, 'VISION FILMS': 926, 'VISION': 926, 'VISUAL ENTERTAINMENT GROUP': 927, 'VISUAL GROUP': 927, 'VISUAL ENTERTAINMENT': 927, 'VISUAL': 927, 'VIVENDI VISUAL ENTERTAINMENT': 928, 'VIVENDI VISUAL': 928, 'VIVENDI': 928, 'VIZ PICTURES': 929, 'VIZ': 929, 'VLMEDIA': 930, 'VL MEDIA': 930, 'VL': 930, 'VOLGA': 931, 'VVS FILMS': 932, + 'VVS': 932, 'VZ HANDELS GMBH': 933, 'VZ HANDELS': 933, 'WARD RECORDS': 934, 'WARD': 934, 'WARNER BROS.': 935, 'WARNER BROS': 935, 'WARNER ARCHIVE': 935, 'WARNER ARCHIVE 
COLLECTION': 935, 'WAC': 935, 'WARNER': 935, 'WARNER MUSIC': 936, 'WEA': 937, 'WEINSTEIN COMPANY': 938, 'WEINSTEIN': 938, 'WELL GO USA': 939, 'WELL GO': 939, 'WELTKINO FILMVERLEIH': 940, 'WEST VIDEO': 941, 'WEST': 941, 'WHITE PEARL MOVIES': 942, 'WHITE PEARL': 942, 'WICKED-VISION MEDIA': 943, 'WICKED VISION MEDIA': 943, 'WICKEDVISION MEDIA': 943, 'WICKED-VISION': 943, 'WICKED VISION': 943, 'WICKEDVISION': 943, 'WIENERWORLD': 944, 'WILD BUNCH': 945, 'WILD EYE RELEASING': 946, 'WILD EYE': 946, 'WILD SIDE VIDEO': 947, 'WILD SIDE': 947, 'WME': 948, 'WOLFE VIDEO': 949, 'WOLFE': 949, 'WORD ON FIRE': 950, 'WORKS FILM GROUP': 951, 'WORLD WRESTLING': 952, 'WVG MEDIEN': 953, 'WWE STUDIOS': 954, 'WWE': 954, 'X RATED KULT': 955, 'X-RATED KULT': 955, 'X RATED CULT': 955, 'X-RATED CULT': 955, 'X RATED': 955, 'X-RATED': 955, 'XCESS': 956, 'XLRATOR': 957, 'XT VIDEO': 958, 'XT': 958, 'YAMATO VIDEO': 959, 'YAMATO': 959, 'YASH RAJ FILMS': 960, 'YASH RAJS': 960, 'ZEITGEIST FILMS': 961, 'ZEITGEIST': 961, 'ZENITH PICTURES': 962, 'ZENITH': 962, 'ZIMA': 963, 'ZYLO': 964, 'ZYX MUSIC': 965, 'ZYX': 965 }.get(distributor, 0) return distributor_id - async def unit3d_torrent_info(self, tracker, torrent_url, id): - tmdb = imdb = tvdb = description = category = infohash = mal = None + async def unit3d_torrent_info(self, tracker, torrent_url, search_url, id=None, file_name=None): + tmdb = imdb = tvdb = description = category = infohash = mal = files = None imagelist = [] - params = {'api_token' : self.config['TRACKERS'][tracker].get('api_key', '')} - url = f"{torrent_url}{id}" + + # Build the params for the API request + params = {'api_token': self.config['TRACKERS'][tracker].get('api_key', '')} + + # Determine the URL based on whether we're searching by ID or file name + if id: + url = f"{torrent_url}{id}" + console.print(f"[green]Searching {tracker} by ID: [bold yellow]{id}[/bold yellow]") + elif file_name: + url = f"{search_url}?file_name={file_name}" + console.print(f"[green]Searching {tracker} by file name: [bold yellow]{file_name}[/bold yellow]") + else: + console.print("[red]No ID or file name provided for search.[/red]") + return None, None, None, None, None, None, None, None, None + response = requests.get(url=url, params=params) + try: + # console.print(f"[green]Raw response from {tracker}: {response.text}[/green]") response = response.json() - attributes = response['attributes'] - category = attributes.get('category') - description = attributes.get('description') - tmdb = attributes.get('tmdb_id') - tvdb = attributes.get('tvdb_id') - mal = attributes.get('mal_id') - imdb = attributes.get('imdb_id') - infohash = attributes.get('info_hash') - - bbcode = BBCODE() - description, imagelist = bbcode.clean_unit3d_description(description, torrent_url) - console.print(f"[green]Successfully grabbed description from {tracker}") - except Exception: - console.print(traceback.print_exc()) - console.print(f"[yellow]Invalid Response from {tracker} API.") - + data = response.get('data', []) + if data: + attributes = data[0].get('attributes', {}) - return tmdb, imdb, tvdb, mal, description, category, infohash, imagelist + # Extract data from the attributes + category = attributes.get('category') + description = attributes.get('description') + tmdb = attributes.get('tmdb_id') + tvdb = attributes.get('tvdb_id') + mal = attributes.get('mal_id') + imdb = attributes.get('imdb_id') + infohash = attributes.get('info_hash') + + # Process the description and imagelist if the description exists + if description: + bbcode = BBCODE() + 
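An aside on the distributor table above (a sketch, not part of the patch): every alias spelling collapses to a single UNIT3D distributor id via the .get(distributor, 0) default, so unknown labels fall back to 0. Assuming this table backs COMMON.unit3d_distributor_ids, as the upload flows further down suggest, sample labels taken from the table itself behave like this:

    import asyncio

    from src.trackers.COMMON import COMMON

    async def demo(config):
        common = COMMON(config=config)
        # Aliases share a single id (values taken from the table above).
        assert await common.unit3d_distributor_ids('CRITERION COLLECTION') == 218
        assert await common.unit3d_distributor_ids('CC') == 218
        # Labels missing from the table resolve to the 0 fallback.
        assert await common.unit3d_distributor_ids('NO SUCH LABEL') == 0

    # asyncio.run(demo(config)) inside the tool's usual config context.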
description, imagelist = bbcode.clean_unit3d_description(description, torrent_url)
+                        console.print(f"[green]Successfully grabbed description from {tracker}")
+                        console.print(f"[blue]Extracted description: [yellow]{description}")
+
+                        # Allow user to edit or discard the description
+                        console.print("[cyan]Do you want to edit or discard the description?[/cyan]")
+                        edit_choice = console.input("[cyan]Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: [/cyan]")
+
+                        if edit_choice.lower() == 'e':
+                            edited_description = click.edit(description)
+                            if edited_description:
+                                description = edited_description.strip()
+                            console.print(f"[green]Final description after editing:[/green] {description}")
+                        elif edit_choice.lower() == 'd':
+                            description = None
+                            console.print("[yellow]Description discarded.[/yellow]")
+                        else:
+                            console.print("[green]Keeping the original description.[/green]")
+                    else:
+                        console.print(f"[yellow]No description found for {tracker}.[/yellow]")
+                else:
+                    console.print(f"[yellow]No data found in the response for {tracker}.[/yellow]")
+
+        except Exception as e:
+            console.print_exception()
+            console.print(f"[yellow]Invalid Response from {tracker} API. Error: {str(e)}[/yellow]")
+
+        if description:  # Ensure description is only printed if it's not None
+            console.print(f"[green]Final description to be returned:[/green] {description}")
+
+        return tmdb, imdb, tvdb, mal, description, category, infohash, imagelist, file_name

     async def parseCookieFile(self, cookiefile):
         """Parse a cookies.txt file and return a dictionary of key value pairs compatible with requests."""
         cookies = {}
-        with open (cookiefile, 'r') as fp:
+        with open(cookiefile, 'r') as fp:
             for line in fp:
                 if not line.startswith(("# ", "\n", "#\n")):
                     lineFields = re.split(' |\t', line.strip())
@@ -185,45 +226,43 @@ async def parseCookieFile(self, cookiefile):
                     cookies[lineFields[5]] = lineFields[6]
         return cookies
-
-
     async def ptgen(self, meta, ptgen_site="", ptgen_retry=3):
         ptgen = ""
         url = 'https://ptgen.zhenzhen.workers.dev'
         if ptgen_site != '':
             url = ptgen_site
         params = {}
-        data={}
-        #get douban url
+        data = {}
+        # get douban url
         if int(meta.get('imdb_id', '0')) != 0:
             data['search'] = f"tt{meta['imdb_id']}"
             ptgen = requests.get(url, params=data)
-            if ptgen.json()["error"] != None:
+            if ptgen.json()["error"] is not None:
                 for retry in range(ptgen_retry):
                     try:
                         ptgen = requests.get(url, params=params)
-                        if ptgen.json()["error"] == None:
+                        if ptgen.json()["error"] is None:
                             break
                     except requests.exceptions.JSONDecodeError:
                         continue
             try:
-                params['url'] = ptgen.json()['data'][0]['link']
+                params['url'] = ptgen.json()['data'][0]['link']
             except Exception:
                 console.print("[red]Unable to get data from ptgen using IMDb")
-                params['url'] = console.input(f"[red]Please enter [yellow]Douban[/yellow] link: ")
+                params['url'] = console.input("[red]Please enter [yellow]Douban[/yellow] link: ")
         else:
             console.print("[red]No IMDb id was found.")
-            params['url'] = console.input(f"[red]Please enter [yellow]Douban[/yellow] link: ")
+            params['url'] = console.input("[red]Please enter [yellow]Douban[/yellow] link: ")
         try:
             ptgen = requests.get(url, params=params)
-            if ptgen.json()["error"] != None:
+            if ptgen.json()["error"] is not None:
                 for retry in range(ptgen_retry):
                     ptgen = requests.get(url, params=params)
-                    if ptgen.json()["error"] == None:
+                    if ptgen.json()["error"] is None:
                         break
             ptgen = ptgen.json()
             meta['ptgen'] = ptgen
-            with open (f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f:
+            with 
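To illustrate the reworked unit3d_torrent_info above (a sketch under assumptions, not part of the patch): with id=None the search_url is queried as ?file_name=..., and callers unpack the new nine-value return. The tracker key, URLs and release name below are placeholders:

    import asyncio

    from src.trackers.COMMON import COMMON

    async def lookup(config):
        common = COMMON(config=config)
        # id takes precedence; without it, the file-name search path is used.
        (tmdb, imdb, tvdb, mal, description, category,
         infohash, imagelist, file_name) = await common.unit3d_torrent_info(
            'OE',                                          # tracker key in config['TRACKERS']
            'https://onlyencodes.cc/api/torrents/',        # placeholder torrent_url for id lookups
            'https://onlyencodes.cc/api/torrents/filter',  # search_url used for file-name lookups
            file_name='Some.Show.S01E01.1080p.WEB.H264-GROUP'  # hypothetical release name
        )
        return tmdb, imdb

    # asyncio.run(lookup(config)) inside the tool's usual config context.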
open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: json.dump(meta, f, indent=4) f.close() ptgen = ptgen['format'] @@ -237,32 +276,6 @@ async def ptgen(self, meta, ptgen_site="", ptgen_retry=3): return "" return ptgen - - - # async def ptgen(self, meta): - # ptgen = "" - # url = "https://api.iyuu.cn/App.Movie.Ptgen" - # params = {} - # if int(meta.get('imdb_id', '0')) != 0: - # params['url'] = f"tt{meta['imdb_id']}" - # else: - # console.print("[red]No IMDb id was found.") - # params['url'] = console.input(f"[red]Please enter [yellow]Douban[/yellow] link: ") - # try: - # ptgen = requests.get(url, params=params) - # ptgen = ptgen.json() - # ptgen = ptgen['data']['format'] - # if "[/img]" in ptgen: - # ptgen = ptgen.split("[/img]")[1] - # ptgen = f"[img]{meta.get('imdb_info', {}).get('cover', meta.get('cover', ''))}[/img]{ptgen}" - # except: - # console.print_exception() - # console.print("[bold red]There was an error getting the ptgen") - # console.print(ptgen) - # return ptgen - - - async def filter_dupes(self, dupes, meta): if meta['debug']: console.log("[cyan]Pre-filtered dupes") @@ -275,35 +288,35 @@ async def filter_dupes(self, dupes, meta): remove_set = set({meta['resolution']}) search_combos = [ { - 'search' : meta['hdr'], - 'search_for' : {'HDR', 'PQ10'}, - 'update' : {'HDR|PQ10'} + 'search': meta['hdr'], + 'search_for': {'HDR', 'PQ10'}, + 'update': {'HDR|PQ10'} }, { - 'search' : meta['hdr'], - 'search_for' : {'DV'}, - 'update' : {'DV|DoVi'} + 'search': meta['hdr'], + 'search_for': {'DV'}, + 'update': {'DV|DoVi'} }, { - 'search' : meta['hdr'], - 'search_not' : {'DV', 'DoVi', 'HDR', 'PQ10'}, - 'update' : {'!(DV)|(DoVi)|(HDR)|(PQ10)'} + 'search': meta['hdr'], + 'search_not': {'DV', 'DoVi', 'HDR', 'PQ10'}, + 'update': {'!(DV)|(DoVi)|(HDR)|(PQ10)'} }, { - 'search' : str(meta.get('tv_pack', 0)), - 'search_for' : '1', - 'update' : {f"{meta['season']}(?!E\d+)"} + 'search': str(meta.get('tv_pack', 0)), + 'search_for': '1', + 'update': {rf"{meta['season']}(?!E\d+)"} }, { - 'search' : meta['episode'], - 'search_for' : meta['episode'], - 'update' : {meta['season'], meta['episode']} + 'search': meta['episode'], + 'search_for': meta['episode'], + 'update': {meta['season'], meta['episode']} } ] search_matches = [ { - 'if' : {'REMUX', 'WEBDL', 'WEBRip', 'HDTV'}, - 'in' : meta['type'] + 'if': {'REMUX', 'WEBDL', 'WEBRip', 'HDTV'}, + 'in': meta['type'] } ] for s in search_combos: diff --git a/src/trackers/FL.py b/src/trackers/FL.py index 5813f469a..d2cd9ae31 100644 --- a/src/trackers/FL.py +++ b/src/trackers/FL.py @@ -2,13 +2,11 @@ import asyncio import re import os -from pathlib import Path from str2bool import str2bool -import json import glob import pickle from unidecode import unidecode -from urllib.parse import urlparse, quote +from urllib.parse import urlparse import cli_ui from bs4 import BeautifulSoup @@ -16,6 +14,7 @@ from src.exceptions import * from src.console import console + class FL(): def __init__(self, config): @@ -28,7 +27,6 @@ def __init__(self, config): self.uploader_name = config['TRACKERS'][self.tracker].get('uploader_name') self.signature = None self.banned_groups = [""] - async def get_category_id(self, meta): has_ro_audio, has_ro_sub = await self.get_ro_tracks(meta) @@ -51,7 +49,7 @@ async def get_category_id(self, meta): if has_ro_sub and meta.get('sd', 0) == 0 and meta['resolution'] != '2160p': # 19 = Movie + RO cat_id = 19 - + if meta['category'] == 'TV': # 21 = TV HD cat_id = 21 @@ -61,7 +59,7 @@ async def get_category_id(self, meta): elif meta.get('sd', 
0) == 1: # 23 = TV SD cat_id = 23 - + if meta['is_disc'] == "DVD": # 2 = DVD cat_id = 2 @@ -102,7 +100,7 @@ async def edit_name(self, meta): fl_name = fl_name.replace(' ', '.').replace('..', '.') return fl_name - + ############################################################### ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### ############################################################### @@ -114,7 +112,7 @@ async def upload(self, meta): fl_name = await self.edit_name(meta) cat_id = await self.get_category_id(meta) has_ro_audio, has_ro_sub = await self.get_ro_tracks(meta) - + # Confirm the correct naming order for FL cli_ui.info(f"Filelist name: {fl_name}") if meta.get('unattended', False) == False: @@ -184,7 +182,7 @@ async def upload(self, meta): session.cookies.update(pickle.load(cf)) up = session.post(url=url, data=data, files=files) torrentFile.close() - + # Match url to verify successful upload match = re.match(r".*?filelist\.io/details\.php\?id=(\d+)&uploaded=(\d+)", up.url) if match: @@ -197,7 +195,6 @@ async def upload(self, meta): raise UploadException(f"Upload to FL Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') return - async def search_existing(self, meta): dupes = [] with requests.Session() as session: @@ -208,15 +205,15 @@ async def search_existing(self, meta): search_url = f"https://filelist.io/browse.php" if int(meta['imdb_id'].replace('tt', '')) != 0: params = { - 'search' : meta['imdb_id'], - 'cat' : await self.get_category_id(meta), - 'searchin' : '3' + 'search': meta['imdb_id'], + 'cat': await self.get_category_id(meta), + 'searchin': '3' } else: params = { - 'search' : meta['title'], - 'cat' : await self.get_category_id(meta), - 'searchin' : '0' + 'search': meta['title'], + 'cat': await self.get_category_id(meta), + 'searchin': '0' } r = session.get(search_url, params=params) @@ -229,9 +226,6 @@ async def search_existing(self, meta): return dupes - - - async def validate_credentials(self, meta): cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/FL.pkl") if not os.path.exists(cookiefile): @@ -249,8 +243,7 @@ async def validate_credentials(self, meta): else: return False return True - - + async def validate_cookies(self, meta, cookiefile): url = "https://filelist.io/index.php" if os.path.exists(cookiefile): @@ -268,7 +261,7 @@ async def validate_cookies(self, meta, cookiefile): return False else: return False - + async def login(self, cookiefile): with requests.Session() as session: r = session.get("https://filelist.io/login.php") @@ -276,10 +269,10 @@ async def login(self, cookiefile): soup = BeautifulSoup(r.text, 'html.parser') validator = soup.find('input', {'name' : 'validator'}).get('value') data = { - 'validator' : validator, - 'username' : self.username, - 'password' : self.password, - 'unlock' : '1', + 'validator': validator, + 'username': self.username, + 'password': self.password, + 'unlock': '1', } response = session.post('https://filelist.io/takelogin.php', data=data) await asyncio.sleep(0.5) @@ -306,14 +299,12 @@ async def download_new_torrent(self, session, id, torrent_path): console.print(r.text) return - - async def edit_desc(self, meta): base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', newline='') as descfile: from src.bbcode import BBCODE bbcode = BBCODE() - + desc = base desc = bbcode.remove_spoiler(desc) desc = bbcode.convert_code_to_quote(desc) @@ -353,7 +344,6 @@ async def 
edit_desc(self, meta): if self.signature != None: descfile.write(self.signature) descfile.close() - async def get_ro_tracks(self, meta): has_ro_audio = has_ro_sub = False diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py index f1682b71f..4a3ef86ff 100644 --- a/src/trackers/HDB.py +++ b/src/trackers/HDB.py @@ -3,13 +3,11 @@ import re import os from pathlib import Path -import traceback import json import glob from unidecode import unidecode from urllib.parse import urlparse, quote from src.trackers.COMMON import COMMON -from src.bbcode import BBCODE from src.exceptions import * from src.console import console from datetime import datetime, date @@ -26,7 +24,6 @@ def __init__(self, config): self.rehost_images = config['TRACKERS']['HDB'].get('img_rehost', False) self.signature = None self.banned_groups = [""] - async def get_type_category_id(self, meta): cat_id = "EXIT" @@ -48,12 +45,12 @@ async def get_type_category_id(self, meta): async def get_type_codec_id(self, meta): codecmap = { - "AVC" : 1, "H.264" : 1, - "HEVC" : 5, "H.265" : 5, - "MPEG-2" : 2, - "VC-1" : 3, - "XviD" : 4, - "VP9" : 6 + "AVC": 1, "H.264": 1, + "HEVC": 5, "H.265": 5, + "MPEG-2": 2, + "VC-1": 3, + "XviD": 4, + "VP9": 6 } searchcodec = meta.get('video_codec', meta.get('video_encode')) codec_id = codecmap.get(searchcodec, "EXIT") @@ -67,8 +64,8 @@ async def get_type_medium_id(self, meta): # 4 = Capture if meta.get('type', '') == "HDTV": medium_id = 4 - if meta.get('has_encode_settings', False) == True: - medium_id = 3 + if meta.get('has_encode_settings', False) is True: + medium_id = 3 # 3 = Encode if meta.get('type', '') in ("ENCODE", "WEBRIP"): medium_id = 3 @@ -82,16 +79,16 @@ async def get_type_medium_id(self, meta): async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', - '4320p': '1', - '2160p': '2', - '1440p' : '3', + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', '1080p': '3', - '1080i':'4', - '720p': '5', - '576p': '6', + '1080i': '4', + '720p': '5', + '576p': '6', '576i': '7', - '480p': '8', + '480p': '8', '480i': '9' }.get(resolution, '10') return resolution_id @@ -101,27 +98,27 @@ async def get_tags(self, meta): # Web Services: service_dict = { - "AMZN" : 28, - "NF" : 29, - "HULU" : 34, - "DSNP" : 33, - "HMAX" : 30, - "ATVP" : 27, - "iT" : 38, - "iP" : 56, - "STAN" : 32, - "PCOK" : 31, - "CR" : 72, - "PMTP" : 69, - "MA" : 77, - "SHO" : 76, - "BCORE" : 66, "CORE" : 66, - "CRKL" : 73, - "FUNI" : 74, - "HLMK" : 71, - "HTSR" : 79, - "CRAV" : 80, - 'MAX' : 88 + "AMZN": 28, + "NF": 29, + "HULU": 34, + "DSNP": 33, + "HMAX": 30, + "ATVP": 27, + "iT": 38, + "iP": 56, + "STAN": 32, + "PCOK": 31, + "CR": 72, + "PMTP": 69, + "MA": 77, + "SHO": 76, + "BCORE": 66, "CORE": 66, + "CRKL": 73, + "FUNI": 74, + "HLMK": 71, + "HTSR": 79, + "CRAV": 80, + 'MAX': 88 } if meta.get('service') in service_dict.keys(): tags.append(service_dict.get(meta['service'])) @@ -129,19 +126,18 @@ async def get_tags(self, meta): # Collections # Masters of Cinema, The Criterion Collection, Warner Archive Collection distributor_dict = { - "WARNER ARCHIVE" : 68, "WARNER ARCHIVE COLLECTION" : 68, "WAC" : 68, - "CRITERION" : 18, "CRITERION COLLECTION" : 18, "CC" : 18, - "MASTERS OF CINEMA" : 19, "MOC" : 19, - "KINO LORBER" : 55, "KINO" : 55, - "BFI VIDEO" : 63, "BFI" : 63, "BRITISH FILM INSTITUTE" : 63, - "STUDIO CANAL" : 65, - "ARROW" : 64 + "WARNER ARCHIVE": 68, "WARNER ARCHIVE COLLECTION": 68, "WAC": 68, + "CRITERION": 18, "CRITERION COLLECTION": 18, "CC": 18, + "MASTERS OF CINEMA": 19, "MOC": 19, + "KINO 
LORBER": 55, "KINO": 55, + "BFI VIDEO": 63, "BFI": 63, "BRITISH FILM INSTITUTE": 63, + "STUDIO CANAL": 65, + "ARROW": 64 } if meta.get('distributor') in distributor_dict.keys(): tags.append(distributor_dict.get(meta['distributor'])) - - # 4K Remaster, + # 4K Remaster, if "IMAX" in meta.get('edition', ''): tags.append(14) if "OPEN MATTE" in meta.get('edition', '').upper(): @@ -153,20 +149,20 @@ async def get_tags(self, meta): tags.append(7) if "Atmos" in meta['audio']: tags.append(5) - if meta.get('silent', False) == True: - console.print('[yellow]zxx audio track found, suggesting you tag as silent') #57 + if meta.get('silent', False) is True: + console.print('[yellow]zxx audio track found, suggesting you tag as silent') # 57 # Video Metadata - # HDR10, HDR10+, Dolby Vision, 10-bit, + # HDR10, HDR10+, Dolby Vision, 10-bit, if "HDR" in meta.get('hdr', ''): if "HDR10+" in meta['hdr']: - tags.append(25) #HDR10+ + tags.append(25) # HDR10+ else: - tags.append(9) #HDR10 + tags.append(9) # HDR10 if "DV" in meta.get('hdr', ''): - tags.append(6) #DV + tags.append(6) # DV if "HLG" in meta.get('hdr', ''): - tags.append(10) #HLG + tags.append(10) # HLG return tags @@ -197,7 +193,7 @@ async def edit_name(self, meta): hdb_name = re.sub(r"[^0-9a-zA-ZÀ-ÿ. :&+'\-\[\]]+", "", hdb_name) hdb_name = hdb_name.replace(' .', '.').replace('..', '.') - return hdb_name + return hdb_name async def upload(self, meta): common = COMMON(config=self.config) @@ -249,7 +245,7 @@ async def upload(self, meta): comment="Created by L4G's Upload Assistant", created_by="L4G's Upload Assistant" ) - + # Explicitly set the piece size and update metainfo new_torrent.piece_size = 16777216 # 16 MiB in bytes new_torrent.metainfo['info']['piece length'] = 16777216 # Ensure 'piece length' is set @@ -266,7 +262,7 @@ async def upload(self, meta): else: torrentFileName = unidecode(os.path.basename(meta['path']).replace(' ', '.')) files = { - 'file': (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent") + 'file': (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorrent") } data = { 'name': hdb_name, @@ -280,7 +276,7 @@ async def upload(self, meta): } # If internal, set 1 - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 # If not BDMV fill mediainfo @@ -325,54 +321,54 @@ async def search_existing(self, meta): console.print("[yellow]Searching for existing torrents on site...") url = "https://hdbits.org/api/torrents" data = { - 'username' : self.username, - 'passkey' : self.passkey, - 'category' : await self.get_type_category_id(meta), - 'codec' : await self.get_type_codec_id(meta), - 'medium' : await self.get_type_medium_id(meta), - 'search' : meta['resolution'] + 'username': self.username, + 'passkey': self.passkey, + 'category': await self.get_type_category_id(meta), + 'codec': await self.get_type_codec_id(meta), + 'medium': await self.get_type_medium_id(meta), + 'search': meta['resolution'] } if int(meta.get('imdb_id', '0').replace('tt', '0')) != 0: - data['imdb'] = {'id' : meta['imdb_id']} + data['imdb'] = {'id': meta['imdb_id']} if int(meta.get('tvdb_id', '0')) != 0: - data['tvdb'] = {'id' : meta['tvdb_id']} + data['tvdb'] = {'id': meta['tvdb_id']} try: response = requests.get(url=url, data=json.dumps(data)) response = response.json() for each in response['data']: result = 
each['name'] dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your passkey is incorrect') await asyncio.sleep(5) return dupes async def validate_credentials(self, meta): - vapi = await self.validate_api() + vapi = await self.validate_api() vcookie = await self.validate_cookies(meta) - if vapi != True: + if vapi is not True: console.print('[red]Failed to validate API. Please confirm that the site is up and your passkey is valid.') return False - if vcookie != True: + if vcookie is not True: console.print('[red]Failed to validate cookies. Please confirm that the site is up and your passkey is valid.') return False return True - + async def validate_api(self): url = "https://hdbits.org/api/test" data = { - 'username' : self.username, - 'passkey' : self.passkey + 'username': self.username, + 'passkey': self.passkey } try: r = requests.post(url, data=json.dumps(data)).json() if r.get('status', 5) == 0: return True return False - except: + except Exception: return False - + async def validate_cookies(self, meta): common = COMMON(config=self.config) url = "https://hdbits.org" @@ -398,9 +394,9 @@ async def download_new_torrent(self, id, torrent_path): # Get HDB .torrent filename api_url = "https://hdbits.org/api/torrents" data = { - 'username' : self.username, - 'passkey' : self.passkey, - 'id' : id + 'username': self.username, + 'passkey': self.passkey, + 'id': id } r = requests.get(url=api_url, data=json.dumps(data)) filename = r.json()['data'][0]['filename'] @@ -408,8 +404,8 @@ async def download_new_torrent(self, id, torrent_path): # Download new .torrent download_url = f"https://hdbits.org/download.php/{quote(filename)}" params = { - 'passkey' : self.passkey, - 'id' : id + 'passkey': self.passkey, + 'id': id } r = requests.get(url=download_url, params=params) @@ -422,7 +418,7 @@ async def edit_desc(self, meta): with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as descfile: from src.bbcode import BBCODE # Add This line for all web-dls - if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '' and meta.get('description', None) == None: + if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '' and meta.get('description', None) is None: descfile.write(f"[center][quote]This release is sourced from {meta['service_longname']}[/quote][/center]") bbcode = BBCODE() if meta.get('discs', []) != []: @@ -449,40 +445,39 @@ async def edit_desc(self, meta): desc = bbcode.convert_comparison_to_centered(desc, 1000) desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) descfile.write(desc) - if self.rehost_images == True: + if self.rehost_images is True: console.print("[green]Rehosting Images...") hdbimg_bbcode = await self.hdbimg_upload(meta) descfile.write(f"{hdbimg_bbcode}") else: images = meta['image_list'] - if len(images) > 0: + if len(images) > 0: descfile.write("[center]") for each in range(len(images[:int(meta['screens'])])): img_url = images[each]['img_url'] web_url = images[each]['web_url'] descfile.write(f"[url={web_url}][img]{img_url}[/img][/url]") descfile.write("[/center]") - if self.signature != None: + if self.signature is not None: descfile.write(self.signature) descfile.close() - async def hdbimg_upload(self, meta): images = glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['filename']}-*.png") url = "https://img.hdbits.org/upload_api.php" data = { - 'username' : self.username, - 'passkey' : self.passkey, - 
'galleryoption' : 1,
-            'galleryname' : meta['name'],
-            'thumbsize' : 'w300'
+            'username': self.username,
+            'passkey': self.passkey,
+            'galleryoption': 1,
+            'galleryname': meta['name'],
+            'thumbsize': 'w300'
         }
         files = {}
         # Set maximum screenshots to 3 for TV singles and 6 for everything else
-        hdbimg_screen_count = 3 if meta['category'] == "TV" and meta.get('tv_pack', 0) == 0 else 6
+        hdbimg_screen_count = 3 if meta['category'] == "TV" and meta.get('tv_pack', 0) == 0 else 6
         if len(images) < hdbimg_screen_count:
-            hdbimg_screen_count = len(images)
+            hdbimg_screen_count = len(images)
         for i in range(hdbimg_screen_count):
             files[f'images_files[{i}]'] = open(images[i], 'rb')
         r = requests.post(url=url, data=data, files=files)
@@ -493,53 +488,74 @@ async def get_info_from_torrent_id(self, hdb_id):
         hdb_imdb = hdb_name = hdb_torrenthash = None
         url = "https://hdbits.org/api/torrents"
         data = {
-            "username" : self.username,
-            "passkey" : self.passkey,
-            "id" : hdb_id
+            "username": self.username,
+            "passkey": self.passkey,
+            "id": hdb_id
         }
         response = requests.get(url, json=data)
         if response.ok:
             try:
                 response = response.json()
                 if response['data'] != []:
-                    hdb_imdb = response['data'][0].get('imdb', {'id' : None}).get('id')
-                    hdb_tvdb = response['data'][0].get('tvdb', {'id' : None}).get('id')
+                    hdb_imdb = response['data'][0].get('imdb', {'id': None}).get('id')
+                    hdb_tvdb = response['data'][0].get('tvdb', {'id': None}).get('id')
                     hdb_name = response['data'][0]['name']
                     hdb_torrenthash = response['data'][0]['hash']
-            except:
+            except Exception:
                 console.print_exception()
         else:
             console.print("Failed to get info from HDB ID. Either the site is down or your credentials are invalid")
         return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash

-    async def search_filename(self, filelist):
+    async def search_filename(self, search_term, search_file_folder):
         hdb_imdb = hdb_tvdb = hdb_name = hdb_torrenthash = hdb_id = None
         url = "https://hdbits.org/api/torrents"
-        data = {
-            "username" : self.username,
-            "passkey" : self.passkey,
-            "limit" : 100,
-            "file_in_torrent" : os.path.basename(filelist[0])
-        }
+
+        if search_file_folder == 'folder':  # Handling disc case
+            data = {
+                "username": self.username,
+                "passkey": self.passkey,
+                "limit": 100,
+                "folder_in_torrent": os.path.basename(search_term)  # Using folder name for search
+            }
+            console.print(f"[green]Searching HDB for folder: [bold yellow]{os.path.basename(search_term)}[/bold yellow]")
+        else:  # Handling non-disc case
+            data = {
+                "username": self.username,
+                "passkey": self.passkey,
+                "limit": 100,
+                "file_in_torrent": os.path.basename(search_term)  # Using filename for search
+            }
+            console.print(f"[green]Searching HDB for file: [bold yellow]{os.path.basename(search_term)}[/bold yellow]")
+
+        response = requests.get(url, json=data)
-        console.print(f"[green]Searching HDB for: [bold yellow]{os.path.basename(filelist[0])}[/bold yellow]")
+
         if response.ok:
             try:
-                response = response.json()
-                if response['data'] != []:
-                    for each in response['data']:
-                        if each['numfiles'] == len(filelist):
-                            hdb_imdb = each.get('imdb', {'id' : None}).get('id')
-                            hdb_tvdb = each.get('tvdb', {'id' : None}).get('id')
+                response_json = response.json()
+                # console.print(f"[green]HDB API response: {response_json}[/green]")  # Log the entire response for debugging
+
+                # Check if 'data' key is present
+                if 'data' not in response_json:
+                    console.print(f"[red]Error: 'data' key not found in HDB API response. 
Full response: {response_json}[/red]") + return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash, hdb_id + + if response_json['data'] != []: + for each in response_json['data']: + if search_file_folder == 'folder' or each['numfiles'] == len(search_term): # Handle folder or filelist match + hdb_imdb = each.get('imdb', {'id': None}).get('id') + hdb_tvdb = each.get('tvdb', {'id': None}).get('id') hdb_name = each['name'] hdb_torrenthash = each['hash'] hdb_id = each['id'] - console.print(f'[bold green]Matched release with HDB ID: [yellow]{hdb_id}[/yellow][/bold green]') + console.print(f'[bold green]Matched release with HDB ID: [yellow]https://hdbits.org/details.php?id={hdb_id}[/yellow][/bold green]') return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash, hdb_id - except: + except Exception as e: console.print_exception() + console.print(f"[red]Failed to parse HDB API response. Error: {str(e)}[/red]") else: - console.print("Failed to get info from HDB ID. Either the site is down or your credentials are invalid") - console.print(f'[yellow]Could not find a matching release on HDB') - return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash, hdb_id \ No newline at end of file + console.print(f"[red]Failed to get info from HDB. Status code: {response.status_code}, Reason: {response.reason}[/red]") + + console.print('[yellow]Could not find a matching release on HDB[/yellow]') + return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash, hdb_id diff --git a/src/trackers/LST.py b/src/trackers/LST.py index 4fe525b09..bf25df687 100644 --- a/src/trackers/LST.py +++ b/src/trackers/LST.py @@ -2,7 +2,6 @@ # import discord import asyncio import requests -import os import platform from str2bool import str2bool @@ -19,29 +18,23 @@ class LST(): Upload """ - ############################################################### - ######## EDIT ME ######## - ############################################################### - - # ALSO EDIT CLASS NAME ABOVE - def __init__(self, config): self.config = config self.tracker = 'LST' self.source_flag = 'LST.GG' self.upload_url = 'https://lst.gg/api/torrents/upload' self.search_url = 'https://lst.gg/api/torrents/filter' - self.signature = f"\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = ['aXXo', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'DNL', 'FaNGDiNG0', 'HD2DVD', 'HDTime', 'ION10', 'iPlanet', 'KiNGDOM', 'mHD', 'mSD', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'OFT', 'PRODJi', 'SANTi', 'STUTTERSHIT', 'ViSION', 'VXT', 'WAF', 'x0r', 'YIFY', 'Sicario', 'RARBG', 'MeGusta', 'TSP', 'TSPxL', 'GalaxyTV', 'TGALAXY', 'TORRENTGALAXY'] pass - + async def get_cat_id(self, category_name, keywords, service): category_id = { - 'MOVIE': '1', + 'MOVIE': '1', 'TV': '2', - 'Anime': '6', + 'Anime': '6', }.get(category_name, '0') if category_name == 'TV' and 'anime' in keywords: category_id = '6' @@ -51,10 +44,10 @@ async def get_cat_id(self, category_name, keywords, service): async def get_type_id(self, type): type_id = { - 'DISC': '1', + 'DISC': '1', 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', + 'WEBDL': '4', + 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '3' }.get(type, '0') @@ -62,24 +55,20 @@ async def get_type_id(self, type): async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', - '4320p': '1', - '2160p': '2', - '1440p' : '3', + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', 
'1080p': '3', - '1080i':'4', - '720p': '5', - '576p': '6', + '1080i': '4', + '720p': '5', + '576p': '6', '576i': '7', - '480p': '8', + '480p': '8', '480i': '9' }.get(resolution, '10') return resolution_id - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### - async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) @@ -89,55 +78,54 @@ async def upload(self, meta): await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() if meta.get('service') == "hentai": - desc = "[center]" + "[img]" + str(meta['poster']) + "[/img][/center]" + f"\n[center]" + "https://www.themoviedb.org/tv/" + str(meta['tmdb']) + f"\nhttps://myanimelist.net/anime/" + str(meta['mal']) + "[/center]" + desc - + desc = "[center]" + "[img]" + str(meta['poster']) + "[/img][/center]" + "\n[center]" + "https://www.themoviedb.org/tv/" + str(meta['tmdb']) + "\nhttps://myanimelist.net/anime/" + str(meta['mal']) + "[/center]" + desc + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : meta['name'], - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': meta['name'], + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } - - + # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - + if region_id != 0: data['region_id'] = 
region_id if distributor_id != 0: @@ -149,35 +137,31 @@ async def upload(self, meta): 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - - if meta['debug'] == False: + + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category'], meta.get('keywords', ''), meta.get('service', '')), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category'], meta.get('keywords', ''), meta.get('service', '')), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta['category'] == 'TV': params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" @@ -191,7 +175,7 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') await asyncio.sleep(5) diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 56c071f11..33f9ee89f 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -8,11 +8,12 @@ import cli_ui import pickle import re import traceback from pathlib import Path from str2bool import str2bool from src.trackers.COMMON import COMMON -from datetime import datetime, date +from datetime import datetime + class MTV(): """ @@ -43,7 +43,7 @@ async def upload(self, meta): # Initiate the upload with retry logic await self.upload_with_retry(meta, cookiefile, common) - + async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): approved_image_hosts = ['ptpimg', 'imgbox'] @@ -102,16 +102,16 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): comment="Created by L4G's Upload Assistant", created_by="L4G's Upload Assistant" ) - + # Explicitly set the piece size and update metainfo new_torrent.piece_size = 8388608 # 8 MiB in bytes new_torrent.metainfo['info']['piece length'] = 8388608 # Ensure 'piece length' is set - + # Validate and write the new torrent new_torrent.validate_piece_size() new_torrent.generate(callback=prep.torf_cb, interval=5) new_torrent.write(f"{meta['base_dir']}/tmp/{meta['uuid']}/MTV.torrent", overwrite=True) - + torrent_filename = "MTV" await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename) @@ -170,17 +170,17 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): console.print(response.url) else: if "authkey.php" in response.url: - console.print(f"[red]No DL link in response, It may have uploaded, check manually.") + console.print("[red]No DL link in response, It may have uploaded, check manually.") else: - console.print(f"[red]Upload Failed. It doesn't look like you are logged in.") - except: - console.print(f"[red]It may have uploaded, check manually.") + console.print("[red]Upload Failed. 
It doesn't look like you are logged in.") + except Exception: + console.print("[red]It may have uploaded, check manually.") print(traceback.print_exc()) else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) return - + async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts=None): if approved_image_hosts is None: approved_image_hosts = ['ptpimg', 'imgbox'] @@ -229,7 +229,7 @@ async def edit_desc(self, meta): base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as desc: # adding bd_dump to description if it exits and adding empty string to mediainfo - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -241,19 +241,19 @@ async def edit_desc(self, meta): desc.write("[mediainfo]" + mi_dump + "[/mediainfo]\n\n") images = meta['image_list'] if len(images) > 0: - desc.write(f"[spoiler=Screenshots]") + desc.write("[spoiler=Screenshots]") for each in range(len(images)): raw_url = images[each]['raw_url'] img_url = images[each]['img_url'] desc.write(f"[url={raw_url}][img=250]{img_url}[/img][/url]") - desc.write(f"[/spoiler]") + desc.write("[/spoiler]") desc.write(f"\n\n{base}") desc.close() return async def edit_group_desc(self, meta): description = "" - if meta['imdb_id'] not in ("0", "", None): + if meta['imdb_id'] not in ("0", "", None): description += f"https://www.imdb.com/title/tt{meta['imdb_id']}" if meta['tmdb'] != 0: description += f"\nhttps://www.themoviedb.org/{str(meta['category'].lower())}/{str(meta['tmdb'])}" @@ -289,15 +289,15 @@ async def edit_name(self, meta): mtv_name = re.sub(r"[^0-9a-zA-ZÀ-ÿ. &+'\-\[\]]+", "", mtv_name) mtv_name = mtv_name.replace(' ', '.').replace('..', '.') return mtv_name - + async def get_res_id(self, resolution): resolution_id = { - '8640p':'0', + '8640p': '0', '4320p': '4000', '2160p': '2160', - '1440p' : '1440', + '1440p': '1440', '1080p': '1080', - '1080i':'1080', + '1080i': '1080', '720p': '720', '576p': '0', '576i': '0', @@ -355,6 +355,7 @@ async def get_origin_id(self, meta): # returning P2P else: return '3' + async def get_tags(self, meta): tags = [] # Genres @@ -369,7 +370,7 @@ async def get_tags(self, meta): tags.append('hd') # Streaming Service if str(meta['service_longname']) != "": - tags.append(f"{meta['service_longname'].lower().replace(' ', '.')}.source") + tags.append(f"{meta['service_longname'].lower().replace(' ', '.')}.source") # Release Type/Source for each in ['remux', 'WEB.DL', 'WEBRip', 'HDTV', 'BluRay', 'DVD', 'HDDVD']: if (each.lower().replace('.', '') in meta['type'].lower()) or (each.lower().replace('-', '') in meta['source']): @@ -388,14 +389,14 @@ async def get_tags(self, meta): tags.append('sd.season') else: tags.append('hd.season') - + # movie tags if meta['category'] == 'MOVIE': if meta['sd'] == 1: tags.append('sd.movie') else: tags.append('hd.movie') - + # Audio tags audio_tag = "" for each in ['dd', 'ddp', 'aac', 'truehd', 'mp3', 'mp2', 'dts', 'dts.hd', 'dts.x']: @@ -436,10 +437,10 @@ async def validate_credentials(self, meta): if not os.path.exists(cookiefile): await self.login(cookiefile) vcookie = await self.validate_cookies(meta, cookiefile) - if vcookie != True: + if vcookie is not True: console.print('[red]Failed to validate cookies. 
Please confirm that the site is up and your username and password is valid.') recreate = cli_ui.ask_yes_no("Log in again and create new session?") - if recreate == True: + if recreate is True: if os.path.exists(cookiefile): os.remove(cookiefile) await self.login(cookiefile) @@ -448,14 +449,14 @@ async def validate_credentials(self, meta): else: return False vapi = await self.validate_api() - if vapi != True: + if vapi is not True: console.print('[red]Failed to validate API. Please confirm that the site is up and your API key is valid.') return True async def validate_api(self): url = self.search_url params = { - 'apikey' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'apikey': self.config['TRACKERS'][self.tracker]['api_key'].strip(), } try: r = requests.get(url, params=params) @@ -464,7 +465,7 @@ async def validate_api(self): console.print("[red]Invalid API Key") return False return True - except: + except Exception: return False async def validate_cookies(self, meta, cookiefile): @@ -499,12 +500,12 @@ async def login(self, cookiefile): with requests.Session() as session: url = 'https://www.morethantv.me/login' payload = { - 'username' : self.config['TRACKERS'][self.tracker].get('username'), - 'password' : self.config['TRACKERS'][self.tracker].get('password'), - 'keeploggedin' : 1, - 'cinfo' : '1920|1080|24|0', - 'submit' : 'login', - 'iplocked' : 1, + 'username': self.config['TRACKERS'][self.tracker].get('username'), + 'password': self.config['TRACKERS'][self.tracker].get('password'), + 'keeploggedin': 1, + 'cinfo': '1920|1080|24|0', + 'submit': 'login', + 'iplocked': 1, # 'ssl' : 'yes' } res = session.get(url="https://www.morethantv.me/login") @@ -521,11 +522,11 @@ async def login(self, cookiefile): mfa_code = pyotp.parse_uri(otp_uri).now() else: mfa_code = console.input('[yellow]MTV 2FA Code: ') - + two_factor_payload = { - 'token' : resp.text.rsplit('name="token" value="', 1)[1][:48], - 'code' : mfa_code, - 'submit' : 'login' + 'token': resp.text.rsplit('name="token" value="', 1)[1][:48], + 'code': mfa_code, + 'submit': 'login' } resp = session.post(url="https://www.morethantv.me/twofactor/login", data=two_factor_payload) # checking if logged in @@ -543,9 +544,9 @@ async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 't' : 'search', - 'apikey' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'q' : "" + 't': 'search', + 'apikey': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'q': "" } if meta['imdb_id'] not in ("0", "", None): params['imdbid'] = "tt" + meta['imdb_id'] @@ -569,11 +570,11 @@ async def search_existing(self, meta): console.print(f"[yellow]{rr.get('status_message')}") await asyncio.sleep(5) else: - console.print(f"[red]Site Seems to be down or not responding to API") - except: - console.print(f"[red]Unable to search for existing torrents on site. Most likely the site is down.") + console.print("[red]Site Seems to be down or not responding to API") + except Exception: + console.print("[red]Unable to search for existing torrents on site. 
Most likely the site is down.") dupes.append("FAILED SEARCH") print(traceback.print_exc()) await asyncio.sleep(5) - return dupes \ No newline at end of file + return dupes diff --git a/src/trackers/NBL.py b/src/trackers/NBL.py index 56e01a671..35dd0fc50 100644 --- a/src/trackers/NBL.py +++ b/src/trackers/NBL.py @@ -2,9 +2,7 @@ # import discord import asyncio import requests -import os -from guessit import guessit -from str2bool import str2bool +from guessit import guessit from src.trackers.COMMON import COMMON from src.console import console @@ -18,13 +16,6 @@ class NBL(): Set type/category IDs Upload """ - - ############################################################### - ######## EDIT ME ######## - ############################################################### - - # ALSO EDIT CLASS NAME ABOVE - def __init__(self, config): self.config = config self.tracker = 'NBL' @@ -38,9 +29,8 @@ def __init__(self, config): 'PlaySD', 'playXD', 'project-gxs', 'PSA', 'QaS', 'Ranger', 'RAPiDCOWS', 'Raze', 'Reaktor', 'REsuRRecTioN', 'RMTeam', 'ROBOTS', 'SpaceFish', 'SPASM', 'SSA', 'Telly', 'Tenrai-Sensei', 'TM', 'Trix', 'URANiME', 'VipapkStudios', 'ViSiON', 'Wardevil', 'xRed', 'XS', 'YakuboEncodes', 'YuiSubs', 'ZKBL', 'ZmN', 'ZMNT'] - + pass - async def get_cat_id(self, meta): if meta.get('tv_pack', 0) == 1: @@ -49,9 +39,6 @@ async def get_cat_id(self, meta): cat_id = 1 return cat_id - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### async def edit_desc(self, meta): # Leave this in so manual works return @@ -63,21 +50,21 @@ async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read()[:-65].strip() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'file_input': open_torrent} data = { - 'api_key' : self.api_key, - 'tvmazeid' : int(meta.get('tvmaze_id', 0)), - 'mediainfo' : mi_dump, - 'category' : await self.get_cat_id(meta), - 'ignoredupes' : 'on' + 'api_key': self.api_key, + 'tvmazeid': int(meta.get('tvmaze_id', 0)), + 'mediainfo': mi_dump, + 'category': await self.get_cat_id(meta), + 'ignoredupes': 'on' } - - if meta['debug'] == False: + + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data) try: if response.ok: @@ -86,34 +73,30 @@ async def upload(self, meta): else: console.print(response) console.print(response.text) - except: + except Exception: console.print_exception() console.print("[bold yellow]It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") if int(meta.get('tvmaze_id', 0)) != 0: - search_term = {'tvmaze' : int(meta['tvmaze_id'])} + search_term = {'tvmaze': int(meta['tvmaze_id'])} elif int(meta.get('imdb_id', '0').replace('tt', '')) == 0: - search_term = {'imdb' : meta.get('imdb_id', '0').replace('tt', '')} + search_term = {'imdb': meta.get('imdb_id', 
'0').replace('tt', '')} else: - search_term = {'series' : meta['title']} + search_term = {'series': meta['title']} json = { - 'jsonrpc' : '2.0', - 'id' : 1, - 'method' : 'getTorrents', - 'params' : [ - self.api_key, + 'jsonrpc': '2.0', + 'id': 1, + 'method': 'getTorrents', + 'params': [ + self.api_key, search_term ] } @@ -143,4 +126,4 @@ async def search_existing(self, meta): except Exception: console.print_exception() - return dupes \ No newline at end of file + return dupes diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index c35a082e0..f74a54c91 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -5,8 +5,6 @@ import os from pathlib import Path from str2bool import str2bool -import time -import traceback import json import glob import multiprocessing @@ -18,7 +16,8 @@ from src.exceptions import * from src.console import console from torf import Torrent -from datetime import datetime, date +from datetime import datetime + class PTP(): @@ -28,69 +27,69 @@ def __init__(self, config): self.source_flag = 'PTP' self.api_user = config['TRACKERS']['PTP'].get('ApiUser', '').strip() self.api_key = config['TRACKERS']['PTP'].get('ApiKey', '').strip() - self.announce_url = config['TRACKERS']['PTP'].get('announce_url', '').strip() - self.username = config['TRACKERS']['PTP'].get('username', '').strip() + self.announce_url = config['TRACKERS']['PTP'].get('announce_url', '').strip() + self.username = config['TRACKERS']['PTP'].get('username', '').strip() self.password = config['TRACKERS']['PTP'].get('password', '').strip() self.web_source = str2bool(str(config['TRACKERS']['PTP'].get('add_web_source_to_desc', True))) self.user_agent = f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' self.banned_groups = ['aXXo', 'BMDru', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'd3g', 'DNL', 'FaNGDiNG0', 'HD2DVD', 'HDTime', 'ION10', 'iPlanet', 'KiNGDOM', 'mHD', 'mSD', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'OFT', 'PRODJi', 'SANTi', 'SPiRiT', 'STUTTERSHIT', 'ViSION', 'VXT', 'WAF', 'x0r', 'YIFY',] - + self.sub_lang_map = { - ("Arabic", "ara", "ar") : 22, - ("Brazilian Portuguese", "Brazilian", "Portuguese-BR", 'pt-br') : 49, - ("Bulgarian", "bul", "bg") : 29, - ("Chinese", "chi", "zh", "Chinese (Simplified)", "Chinese (Traditional)") : 14, - ("Croatian", "hrv", "hr", "scr") : 23, - ("Czech", "cze", "cz", "cs") : 30, - ("Danish", "dan", "da") : 10, - ("Dutch", "dut", "nl") : 9, - ("English", "eng", "en", "English (CC)", "English - SDH") : 3, - ("English - Forced", "English (Forced)", "en (Forced)") : 50, - ("English Intertitles", "English (Intertitles)", "English - Intertitles", "en (Intertitles)") : 51, - ("Estonian", "est", "et") : 38, - ("Finnish", "fin", "fi") : 15, - ("French", "fre", "fr") : 5, - ("German", "ger", "de") : 6, - ("Greek", "gre", "el") : 26, - ("Hebrew", "heb", "he") : 40, - ("Hindi" "hin", "hi") : 41, - ("Hungarian", "hun", "hu") : 24, - ("Icelandic", "ice", "is") : 28, - ("Indonesian", "ind", "id") : 47, - ("Italian", "ita", "it") : 16, - ("Japanese", "jpn", "ja") : 8, - ("Korean", "kor", "ko") : 19, - ("Latvian", "lav", "lv") : 37, - ("Lithuanian", "lit", "lt") : 39, - ("Norwegian", "nor", "no") : 12, - ("Persian", "fa", "far") : 52, - ("Polish", "pol", "pl") : 17, - ("Portuguese", "por", "pt") : 21, - ("Romanian", "rum", "ro") : 13, - ("Russian", "rus", "ru") : 7, - ("Serbian", "srp", "sr", "scc") : 31, - ("Slovak", "slo", "sk") : 42, - ("Slovenian", "slv", "sl") : 43, - ("Spanish", "spa", "es") : 4, - ("Swedish", "swe", "sv") : 11, - ("Thai", "tha", "th") : 20, - ("Turkish", 
"tur", "tr") : 18, - ("Ukrainian", "ukr", "uk") : 34, - ("Vietnamese", "vie", "vi") : 25, + ("Arabic", "ara", "ar"): 22, + ("Brazilian Portuguese", "Brazilian", "Portuguese-BR", 'pt-br'): 49, + ("Bulgarian", "bul", "bg"): 29, + ("Chinese", "chi", "zh", "Chinese (Simplified)", "Chinese (Traditional)"): 14, + ("Croatian", "hrv", "hr", "scr"): 23, + ("Czech", "cze", "cz", "cs"): 30, + ("Danish", "dan", "da"): 10, + ("Dutch", "dut", "nl"): 9, + ("English", "eng", "en", "English (CC)", "English - SDH"): 3, + ("English - Forced", "English (Forced)", "en (Forced)"): 50, + ("English Intertitles", "English (Intertitles)", "English - Intertitles", "en (Intertitles)"): 51, + ("Estonian", "est", "et"): 38, + ("Finnish", "fin", "fi"): 15, + ("French", "fre", "fr"): 5, + ("German", "ger", "de"): 6, + ("Greek", "gre", "el"): 26, + ("Hebrew", "heb", "he"): 40, + ("Hindi" "hin", "hi"): 41, + ("Hungarian", "hun", "hu"): 24, + ("Icelandic", "ice", "is"): 28, + ("Indonesian", "ind", "id"): 47, + ("Italian", "ita", "it"): 16, + ("Japanese", "jpn", "ja"): 8, + ("Korean", "kor", "ko"): 19, + ("Latvian", "lav", "lv"): 37, + ("Lithuanian", "lit", "lt"): 39, + ("Norwegian", "nor", "no"): 12, + ("Persian", "fa", "far"): 52, + ("Polish", "pol", "pl"): 17, + ("Portuguese", "por", "pt"): 21, + ("Romanian", "rum", "ro"): 13, + ("Russian", "rus", "ru"): 7, + ("Serbian", "srp", "sr", "scc"): 31, + ("Slovak", "slo", "sk"): 42, + ("Slovenian", "slv", "sl"): 43, + ("Spanish", "spa", "es"): 4, + ("Swedish", "swe", "sv"): 11, + ("Thai", "tha", "th"): 20, + ("Turkish", "tur", "tr"): 18, + ("Ukrainian", "ukr", "uk"): 34, + ("Vietnamese", "vie", "vi"): 25, } async def get_ptp_id_imdb(self, search_term, search_file_folder): imdb_id = ptp_torrent_id = None filename = str(os.path.basename(search_term)) params = { - 'filelist' : filename + 'filelist': filename } headers = { - 'ApiUser' : self.api_user, - 'ApiKey' : self.api_key, - 'User-Agent' : self.user_agent + 'ApiUser': self.api_user, + 'ApiKey': self.api_key, + 'User-Agent': self.user_agent } url = 'https://passthepopcorn.me/torrents.php' response = requests.get(url, params=params, headers=headers) @@ -133,15 +132,15 @@ async def get_ptp_id_imdb(self, search_term, search_file_folder): pass console.print(f'[yellow]Could not find any release matching [bold yellow]{filename}[/bold yellow] on PTP') return None, None, None - + async def get_imdb_from_torrent_id(self, ptp_torrent_id): params = { - 'torrentid' : ptp_torrent_id + 'torrentid': ptp_torrent_id } headers = { - 'ApiUser' : self.api_user, - 'ApiKey' : self.api_key, - 'User-Agent' : self.user_agent + 'ApiUser': self.api_user, + 'ApiKey': self.api_key, + 'User-Agent': self.user_agent } url = 'https://passthepopcorn.me/torrents.php' response = requests.get(url, params=params, headers=headers) @@ -164,43 +163,50 @@ async def get_imdb_from_torrent_id(self, ptp_torrent_id): return None, None except Exception: return None, None - + async def get_ptp_description(self, ptp_torrent_id, is_disc): params = { - 'id' : ptp_torrent_id, - 'action' : 'get_description' + 'id': ptp_torrent_id, + 'action': 'get_description' } headers = { - 'ApiUser' : self.api_user, - 'ApiKey' : self.api_key, - 'User-Agent' : self.user_agent + 'ApiUser': self.api_user, + 'ApiKey': self.api_key, + 'User-Agent': self.user_agent } url = 'https://passthepopcorn.me/torrents.php' + console.print(f"[yellow]Requesting description from {url} with ID {ptp_torrent_id}") response = requests.get(url, params=params, headers=headers) await asyncio.sleep(1) + ptp_desc = 
response.text + # console.print(f"[yellow]Raw description received:\n{ptp_desc[:500]}...") # Show first 500 characters for brevity + bbcode = BBCODE() - desc = bbcode.clean_ptp_description(ptp_desc, is_disc) + desc, imagelist = bbcode.clean_ptp_description(ptp_desc, is_disc) + console.print(f"[bold green]Successfully grabbed description from PTP") - return desc - + console.print(f"[cyan]Description after cleaning:[yellow]\n{desc[:500]}...") # Show first 500 characters for brevity + + return desc, imagelist + async def get_group_by_imdb(self, imdb): params = { - 'imdb' : imdb, + 'imdb': imdb, } headers = { - 'ApiUser' : self.api_user, - 'ApiKey' : self.api_key, - 'User-Agent' : self.user_agent + 'ApiUser': self.api_user, + 'ApiKey': self.api_key, + 'User-Agent': self.user_agent } url = 'https://passthepopcorn.me/torrents.php' response = requests.get(url=url, headers=headers, params=params) await asyncio.sleep(1) try: response = response.json() - if response.get("Page") == "Browse": # No Releases on Site with ID + if response.get("Page") == "Browse": # No Releases on Site with ID return None - elif response.get('Page') == "Details": # Group Found + elif response.get('Page') == "Details": # Group Found groupID = response.get('GroupId') console.print(f"[green]Matched IMDb: [yellow]tt{imdb}[/yellow] to Group ID: [yellow]{groupID}[/yellow][/green]") console.print(f"[green]Title: [yellow]{response.get('Name')}[/yellow] ([yellow]{response.get('Year')}[/yellow])") @@ -212,14 +218,14 @@ async def get_group_by_imdb(self, imdb): async def get_torrent_info(self, imdb, meta): params = { - 'imdb' : imdb, - 'action' : 'torrent_info', - 'fast' : 1 + 'imdb': imdb, + 'action': 'torrent_info', + 'fast': 1 } headers = { - 'ApiUser' : self.api_user, - 'ApiKey' : self.api_key, - 'User-Agent' : self.user_agent + 'ApiUser': self.api_user, + 'ApiKey': self.api_key, + 'User-Agent': self.user_agent } url = "https://passthepopcorn.me/ajax.php" response = requests.get(url=url, params=params, headers=headers) @@ -240,9 +246,9 @@ async def get_torrent_info(self, imdb, meta): async def get_torrent_info_tmdb(self, meta): tinfo = { - "title" : meta.get("title", ""), - "year" : meta.get("year", ""), - "album_desc" : meta.get("overview", ""), + "title": meta.get("title", ""), + "year": meta.get("year", ""), + "album_desc": meta.get("overview", ""), } tags = await self.get_tags([meta.get("genres", ""), meta.get("keywords", "")]) tinfo['tags'] = ", ".join(tags) @@ -266,21 +272,20 @@ async def get_tags(self, check_against): async def search_existing(self, groupID, meta): # Map resolutions to SD / HD / UHD quality = None - if meta.get('sd', 0) == 1: # 1 is SD + if meta.get('sd', 0) == 1: # 1 is SD quality = "Standard Definition" elif meta['resolution'] in ["1440p", "1080p", "1080i", "720p"]: quality = "High Definition" elif meta['resolution'] in ["2160p", "4320p", "8640p"]: quality = "Ultra High Definition" - params = { - 'id' : groupID, + 'id': groupID, } headers = { - 'ApiUser' : self.api_user, - 'ApiKey' : self.api_key, - 'User-Agent' : self.user_agent + 'ApiUser': self.api_user, + 'ApiKey': self.api_key, + 'User-Agent': self.user_agent } url = 'https://passthepopcorn.me/torrents.php' response = requests.get(url=url, headers=headers, params=params) @@ -291,7 +296,7 @@ async def search_existing(self, groupID, meta): torrents = response.get('Torrents', []) if len(torrents) != 0: for torrent in torrents: - if torrent.get('Quality') == quality and quality != None: + if torrent.get('Quality') == quality and quality is not None: 
existing.append(f"[{torrent.get('Resolution')}] {torrent.get('ReleaseName', 'RELEASE NAME NOT FOUND')}") except Exception: console.print("[red]An error has occurred trying to find existing releases") @@ -299,11 +304,11 @@ async def ptpimg_url_rehost(self, image_url): payload = { - 'format' : 'json', - 'api_key' : self.config["DEFAULT"]["ptpimg_api"], - 'link-upload' : image_url + 'format': 'json', + 'api_key': self.config["DEFAULT"]["ptpimg_api"], + 'link-upload': image_url } - headers = { 'referer': 'https://ptpimg.me/index.php'} + headers = {'referer': 'https://ptpimg.me/index.php'} url = "https://ptpimg.me/upload.php" response = requests.post(url, headers=headers, data=payload) @@ -312,7 +317,7 @@ async def ptpimg_url_rehost(self, image_url): ptpimg_code = response[0]['code'] ptpimg_ext = response[0]['ext'] img_url = f"https://ptpimg.me/{ptpimg_code}.{ptpimg_ext}" - except: + except Exception: console.print("[red]PTPIMG image rehost failed") img_url = image_url # img_url = ptpimg_upload(image_url, ptpimg_api) @@ -351,7 +356,7 @@ def get_type(self, imdb_info, meta): ptpType = "Stand-up Comedy" elif "concert" in keywords: ptpType = "Concert" - if ptpType == None: + if ptpType is None: if meta.get('mode', 'discord') == 'cli': ptpTypeList = ["Feature Film", "Short Film", "Miniseries", "Stand-up Comedy", "Concert", "Movie Collection"] ptpType = cli_ui.ask_choice("Select the proper type", choices=ptpTypeList) @@ -372,14 +377,14 @@ def get_codec(self, meta): codec = "DVD9" else: codecmap = { - "AVC" : "H.264", - "H.264" : "H.264", - "HEVC" : "H.265", - "H.265" : "H.265", + "AVC": "H.264", + "H.264": "H.264", + "HEVC": "H.265", + "H.265": "H.265", } searchcodec = meta.get('video_codec', meta.get('video_encode')) codec = codecmap.get(searchcodec, searchcodec) - if meta.get('has_encode_settings') == True: + if meta.get('has_encode_settings') is True: codec = codec.replace("H.", "x") return codec @@ -403,23 +408,23 @@ def get_container(self, meta): else: ext = os.path.splitext(meta['filelist'][0])[1] containermap = { - '.mkv' : "MKV", - '.mp4' : 'MP4' + '.mkv': "MKV", + '.mp4': 'MP4' } container = containermap.get(ext, 'Other') return container def get_source(self, source): sources = { - "Blu-ray" : "Blu-ray", - "BluRay" : "Blu-ray", - "HD DVD" : "HD-DVD", - "HDDVD" : "HD-DVD", - "Web" : "WEB", - "HDTV" : "HDTV", - 'UHDTV' : 'HDTV', - "NTSC" : "DVD", - "PAL" : "DVD" + "Blu-ray": "Blu-ray", + "BluRay": "Blu-ray", + "HD DVD": "HD-DVD", + "HDDVD": "HD-DVD", + "Web": "WEB", + "HDTV": "HDTV", + 'UHDTV': 'HDTV', + "NTSC": "DVD", + "PAL": "DVD" } source_id = sources.get(source, "OtherR") return source_id @@ -438,7 +443,8 @@ def get_subtitles(self, meta): if language == "en": if track.get('Forced', "") == "Yes": language = "en (Forced)" - if "intertitles" in track.get('Title', "").lower(): + title = track.get('Title', "") + if isinstance(title, str) and "intertitles" in title.lower(): language = "en (Intertitles)" for lang, subID in sub_lang_map.items(): if language in lang and subID not in sub_langs: sub_langs.append(subID) else: for lang, subID in sub_lang_map.items(): if language in lang and subID not in sub_langs: sub_langs.append(subID) - + if sub_langs == []: - sub_langs = [44] # No Subtitle + sub_langs = [44] # No Subtitle return sub_langs def get_trumpable(self, sub_langs): trumpable_values = { - "English Hardcoded Subs (Full)" : 4, - "English Hardcoded Subs (Forced)" : 50, - "No English Subs" : 14, - "English Softsubs Exist 
(Mislabeled)" : None, - "Hardcoded Subs (Non-English)" : "OTHER" + "English Hardcoded Subs (Full)": 4, + "English Hardcoded Subs (Forced)": 50, + "No English Subs": 14, + "English Softsubs Exist (Mislabeled)": None, + "Hardcoded Subs (Non-English)": "OTHER" } opts = cli_ui.select_choices("English subtitles not found. Please select any/all applicable options:", choices=list(trumpable_values.keys())) trumpable = [] if opts: for t, v in trumpable_values.items(): if t in ''.join(opts): - if v == None: + if v is None: break - elif v != 50: # Hardcoded, Forced + elif v != 50: # Hardcoded, Forced trumpable.append(v) - elif v == "OTHER": #Hardcoded, Non-English + elif v == "OTHER": # Hardcoded, Non-English trumpable.append(14) hc_sub_langs = cli_ui.ask_string("Enter language code for HC Subtitle languages") for lang, subID in self.sub_lang_map.items(): @@ -479,7 +485,7 @@ def get_trumpable(self, sub_langs): else: sub_langs.append(v) trumpable.append(4) - + sub_langs = list(set(sub_langs)) trumpable = list(set(trumpable)) if trumpable == []: @@ -496,7 +502,7 @@ def get_remaster_title(self, meta): remaster_title.append('The Criterion Collection') elif meta.get('distributor') in ('MASTERS OF CINEMA', 'MOC'): remaster_title.append('Masters of Cinema') - + # Editions # Director's Cut, Extended Edition, Rifftrax, Theatrical Cut, Uncut, Unrated if "director's cut" in meta.get('edition', '').lower(): @@ -517,7 +523,7 @@ def get_remaster_title(self, meta): # Features # 2-Disc Set, 2in1, 2D/3D Edition, 3D Anaglyph, 3D Full SBS, 3D Half OU, 3D Half SBS, - # 4K Restoration, 4K Remaster, + # 4K Restoration, 4K Remaster, # Extras, Remux, if meta.get('type') == "REMUX": remaster_title.append("Remux") @@ -531,10 +537,10 @@ def get_remaster_title(self, meta): remaster_title.append('Dual Audio') if "Dubbed" in meta['audio']: remaster_title.append('English Dub') - if meta.get('has_commentary', False) == True: + if meta.get('has_commentary', False) is True: remaster_title.append('With Commentary') - # HDR10, HDR10+, Dolby Vision, 10-bit, + # HDR10, HDR10+, Dolby Vision, 10-bit, # if "Hi10P" in meta.get('video_encode', ''): # remaster_title.append('10-bit') if meta.get('hdr', '').strip() == '' and meta.get('bit_depth') == '10': @@ -584,16 +590,16 @@ async def edit_desc(self, meta): mi_dump = each['summary'] else: mi_dump = each['summary'] - if meta.get('vapoursynth', False) == True: + if meta.get('vapoursynth', False) is True: use_vs = True else: use_vs = False ds = multiprocessing.Process(target=prep.disc_screenshots, args=(f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), 2)) ds.start() - while ds.is_alive() == True: + while ds.is_alive() is True: await asyncio.sleep(1) - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}",f"FILE_{i}-*.png") - images, dummy = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, {}) + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + images, dummy = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, {}) if each['type'] == "DVD": desc.write(f"[b][size=3]{each['name']}:[/size][/b]\n") @@ -608,12 +614,12 @@ async def edit_desc(self, meta): else: ds = multiprocessing.Process(target=prep.dvd_screenshots, args=(meta, i, 2)) ds.start() - while ds.is_alive() == True: + while ds.is_alive() is True: await asyncio.sleep(1) new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['discs'][i]['name']}-*.png") - images, dummy = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, 
{}) - - if len(images) > 0: + images, dummy = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, {}) + + if len(images) > 0: for each in range(len(images[:int(meta['screens'])])): raw_url = images[each]['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") @@ -624,12 +630,12 @@ async def edit_desc(self, meta): file = meta['filelist'][i] if i == 0: # Add This line for all web-dls - if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '' and meta.get('description', None) == None and self.web_source == True: + if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '' and meta.get('description', None) is None and self.web_source is True: desc.write(f"[quote][align=center]This release is sourced from {meta['service_longname']}[/align][/quote]") mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() else: # Export Mediainfo - mi_dump = MediaInfo.parse(file, output="STRING", full=False, mediainfo_options={'inform_version' : '1'}) + mi_dump = MediaInfo.parse(file, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) # mi_dump = mi_dump.replace(file, os.path.basename(file)) with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/TEMP_PTP_MEDIAINFO.txt", "w", newline="", encoding="utf-8") as f: f.write(mi_dump) @@ -637,9 +643,9 @@ async def edit_desc(self, meta): # Generate and upload screens for other files s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, 2)) s.start() - while s.is_alive() == True: + while s.is_alive() is True: await asyncio.sleep(3) - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}",f"FILE_{i}-*.png") + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") images, dummy = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, {}) desc.write(f"[mediainfo]{mi_dump}[/mediainfo]\n") @@ -647,8 +653,8 @@ async def edit_desc(self, meta): base2ptp = self.convert_bbcode(base) if base2ptp.strip() != "": desc.write(base2ptp) - desc.write("\n\n") - if len(images) > 0: + desc.write("\n\n") + if len(images) > 0: for each in range(len(images[:int(meta['screens'])])): raw_url = images[each]['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") @@ -667,17 +673,17 @@ async def get_AntiCsrfToken(self, meta): loggedIn = await self.validate_login(uploadresponse) else: console.print("[yellow]PTP Cookies not found. Creating new session.") - if loggedIn == True: + if loggedIn is True: AntiCsrfToken = re.search(r'data-AntiCsrfToken="(.*)"', uploadresponse.text).group(1) else: - passKey = re.match(r"https?://please\.passthepopcorn\.me:?\d*/(.+)/announce",self.announce_url).group(1) + passKey = re.match(r"https?://please\.passthepopcorn\.me:?\d*/(.+)/announce", self.announce_url).group(1) data = { "username": self.username, "password": self.password, "passkey": passKey, "keeplogged": "1", } - headers = {"User-Agent" : self.user_agent} + headers = {"User-Agent": self.user_agent} loginresponse = session.post("https://passthepopcorn.me/ajax.php?action=login", data=data, headers=headers) await asyncio.sleep(2) try: @@ -723,26 +729,26 @@ async def fill_upload_form(self, groupID, meta): data = { "submit": "true", "remaster_year": "", - "remaster_title": self.get_remaster_title(meta), #Eg.: Hardcoded English + "remaster_title": self.get_remaster_title(meta), # Eg.: Hardcoded English "type": self.get_type(meta['imdb_info'], meta), - "codec": "Other", # Sending the codec as custom. + "codec": "Other", # Sending the codec as custom. 
"other_codec": self.get_codec(meta), "container": "Other", "other_container": self.get_container(meta), "resolution": resolution, - "source": "Other", # Sending the source as custom. + "source": "Other", # Sending the source as custom. "other_source": self.get_source(meta['source']), "release_desc": desc, "nfo_text": "", - "subtitles[]" : ptp_subtitles, - "trumpable[]" : ptp_trumpable, - "AntiCsrfToken" : await self.get_AntiCsrfToken(meta) + "subtitles[]": ptp_subtitles, + "trumpable[]": ptp_trumpable, + "AntiCsrfToken": await self.get_AntiCsrfToken(meta) } if data["remaster_year"] != "" or data["remaster_title"] != "": data["remaster"] = "on" if resolution == "Other": data["other_resolution"] = other_resolution - if meta.get('personalrelease', False) == True: + if meta.get('personalrelease', False) is True: data["internalrip"] = "on" # IF SPECIAL (idk how to check for this automatically) # data["special"] = "on" @@ -751,18 +757,18 @@ async def fill_upload_form(self, groupID, meta): else: data["imdb"] = meta["imdb_id"] - if groupID == None: # If need to make new group + if groupID is None: # If need to make new group url = "https://passthepopcorn.me/upload.php" if data["imdb"] == "0": tinfo = await self.get_torrent_info_tmdb(meta) else: tinfo = await self.get_torrent_info(meta.get("imdb_id", "0"), meta) cover = meta["imdb_info"].get("cover") - if cover == None: + if cover is None: cover = meta.get('poster') - if cover != None and "ptpimg" not in cover: + if cover is not None and "ptpimg" not in cover: cover = await self.ptpimg_url_rehost(cover) - while cover == None: + while cover is None: cover = cli_ui.ask_string("No Poster was found. Please input a link to a poster: \n", default="") if "ptpimg" not in str(cover) and str(cover).endswith(('.jpg', '.png')): cover = await self.ptpimg_url_rehost(cover) @@ -777,15 +783,15 @@ async def fill_upload_form(self, groupID, meta): if new_data['year'] in ['', '0', 0, None] and meta.get('manual_year') not in [0, '', None]: new_data['year'] = meta['manual_year'] while new_data["tags"] == "": - if meta.get('mode', 'discord') == 'cli': + if meta.get('mode', 'discord') == 'cli': console.print('[yellow]Unable to match any tags') console.print("Valid tags can be found on the PTP upload form") new_data["tags"] = console.input("Please enter at least one tag. 
Comma separated (action, animation, short):") data.update(new_data) - if meta["imdb_info"].get("directors", None) != None: + if meta["imdb_info"].get("directors", None) is not None: data["artist[]"] = tuple(meta['imdb_info'].get('directors')) data["importance[]"] = "1" - else: # Upload on existing group + else: # Upload on existing group url = f"https://passthepopcorn.me/upload.php?groupid={groupID}" data["groupid"] = groupID @@ -825,7 +831,7 @@ async def upload(self, meta, url, data): comment="Created by L4G's Upload Assistant", created_by="L4G's Upload Assistant" ) - + # Explicitly set the piece size and update metainfo new_torrent.piece_size = 16777216 # 16 MiB in bytes new_torrent.metainfo['info']['piece length'] = 16777216 # Ensure 'piece length' is set @@ -871,4 +877,4 @@ async def upload(self, meta, url, data): if match is None: console.print(url) console.print(data) - raise UploadException(f"Upload to PTP failed: result URL {response.url} ({response.status_code}) is not the expected one.") \ No newline at end of file + raise UploadException(f"Upload to PTP failed: result URL {response.url} ({response.status_code}) is not the expected one.") diff --git a/src/trackers/RTF.py b/src/trackers/RTF.py index 28ce55924..232cbda2c 100644 --- a/src/trackers/RTF.py +++ b/src/trackers/RTF.py @@ -5,11 +5,11 @@ import base64 import re import datetime -import json from src.trackers.COMMON import COMMON from src.console import console + class RTF(): """ Edit for Tracker: @@ -18,10 +18,6 @@ class RTF(): Set type/category IDs Upload """ - - ############################################################### - ######## EDIT ME ######## - ############################################################### def __init__(self, config): self.config = config self.tracker = 'RTF' @@ -36,7 +32,7 @@ async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.forum_link) - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -45,21 +41,21 @@ async def upload(self, meta): screenshots = [] for image in meta['image_list']: - if image['raw_url'] != None: + if image['raw_url'] is not None: screenshots.append(image['raw_url']) json_data = { - 'name' : meta['name'], + 'name': meta['name'], # description does not work for some reason # 'description' : meta['overview'] + "\n\n" + desc + "\n\n" + "Uploaded by L4G Upload Assistant", 'description': "this is a description", # editing mediainfo so that instead of 1 080p it's 1,080p as site mediainfo parser won't work otherwise. 
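(For clarity, a standalone, runnable sketch of the digit-regrouping substitution applied on the next line; the sample MediaInfo string is made up:)
import re
# MediaInfo writes thousands groups with a space ("1 920"); the site parser wants "1,920"
print(re.sub(r"(\d+)\s+(\d+)", r"\1,\2", "Width : 1 920 pixels"))  # -> Width : 1,920 pixels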
- 'mediaInfo': re.sub(r"(\d+)\s+(\d+)", r"\1,\2", mi_dump) if bd_dump == None else f"{bd_dump}", + 'mediaInfo': re.sub(r"(\d+)\s+(\d+)", r"\1,\2", mi_dump) if bd_dump is None else f"{bd_dump}", "nfo": "", "url": "https://www.imdb.com/title/" + (meta['imdb_id'] if str(meta['imdb_id']).startswith("tt") else "tt" + meta['imdb_id']) + "/", # auto pulled from IMDB "descr": "This is short description", - "poster": meta["poster"] if meta["poster"] != None else "", + "poster": meta["poster"] if meta["poster"] is not None else "", "type": "401" if meta['category'] == 'MOVIE'else "402", "screenshots": screenshots, 'isAnonymous': self.config['TRACKERS'][self.tracker]["anon"], @@ -77,13 +73,11 @@ async def upload(self, meta): 'Authorization': self.config['TRACKERS'][self.tracker]['api_key'].strip(), } - if datetime.date.today().year - meta['year'] <= 9: - console.print(f"[red]ERROR: Not uploading!\nMust be older than 10 Years as per rules") + console.print("[red]ERROR: Not uploading!\nMust be older than 10 Years as per rules") return - - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, json=json_data, headers=headers) try: console.print(response.json()) @@ -91,14 +85,13 @@ async def upload(self, meta): t_id = response.json()['torrent']['id'] await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://retroflix.club/browse/t/" + str(t_id)) - except: + except Exception: console.print("It may have uploaded, go check") return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(json_data) - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") @@ -108,7 +101,7 @@ async def search_existing(self, meta): } params = { - 'includingDead' : '1' + 'includingDead': '1' } if meta['imdb_id'] != "0": @@ -122,7 +115,7 @@ async def search_existing(self, meta): for each in response: result = [each][0]['name'] dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') await asyncio.sleep(5) @@ -157,7 +150,7 @@ async def generate_new_api(self, meta): if response.status_code == 201: console.print('[bold green]Using New API key generated for this upload') - console.print(f'[bold green]Please update your L4G config with the below RTF API Key for future uploads') + console.print('[bold green]Please update your L4G config with the below RTF API Key for future uploads') console.print(f'[bold yellow]{response.json()["token"]}') self.config['TRACKERS'][self.tracker]['api_key'] = response.json()["token"] else: diff --git a/src/trackers/SN.py b/src/trackers/SN.py index 54f13d64d..b987d3f37 100644 --- a/src/trackers/SN.py +++ b/src/trackers/SN.py @@ -15,7 +15,6 @@ class SN(): Set type/category IDs Upload """ - def __init__(self, config): self.config = config self.tracker = 'SN' @@ -31,7 +30,7 @@ async def get_type_id(self, type): 'BluRay': '3', 'Web': '1', # boxset is 4 - #'NA': '4', + # 'NA': '4', 'DVD': '2' }.get(type, '0') return type_id @@ -39,11 +38,11 @@ async def get_type_id(self, type): async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - #await common.unit3d_edit_desc(meta, self.tracker, self.forum_link) + # await common.unit3d_edit_desc(meta, self.tracker, self.forum_link) await self.edit_desc(meta) cat_id = "" sub_cat_id = "" - #cat_id = await self.get_cat_id(meta) + # cat_id = await self.get_cat_id(meta) if meta['category'] == 'MOVIE': cat_id = 1 # sub cat is source so using source to get @@ -56,8 +55,7 @@ async def upload(self, meta): sub_cat_id = 5 # todo need to do a check for docs and add as subcat - - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -90,7 +88,7 @@ async def upload(self, meta): } - if meta['debug'] == False: + if meta['debug'] is False: response = requests.request("POST", url=self.upload_url, data=data, files=files) try: @@ -99,16 +97,15 @@ async def upload(self, meta): else: console.print("[red]Did not upload successfully") console.print(response.json()) - except: + except Exception: console.print("[red]Error! It may have uploaded, go check") console.print(data) console.print_exception() return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) - async def edit_desc(self, meta): base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as desc: @@ -125,13 +122,12 @@ async def edit_desc(self, meta): desc.close() return - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_key' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'api_key': self.config['TRACKERS'][self.tracker]['api_key'].strip() } # using title if IMDB id does not exist to search @@ -141,7 +137,7 @@ async def search_existing(self, meta): else: params['filter'] = meta['title'] else: - #using IMDB_id to search if it exists. + # using IMDB_id to search if it exists. 
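# Illustration only (hypothetical values; the real keys are assembled just below):
#   no IMDb id  -> params = {'api_key': 'xxxx', 'filter': 'Some Title'}
#   IMDb id set -> params = {'api_key': 'xxxx', 'media_ref': 'tt0903747', 'filter': 'S01E01 1080p'}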
if meta['category'] == 'TV': params['media_ref'] = f"tt{meta['imdb_id']}" params['filter'] = f"{meta.get('season', '')}{meta.get('episode', '')}" + " " + meta['resolution'] @@ -155,7 +151,7 @@ async def search_existing(self, meta): for i in response['data']: result = i['name'] dupes.append(result) - except: + except Exception: console.print('[red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) diff --git a/src/trackers/TL.py b/src/trackers/TL.py index 9b98f602f..15d6935b3 100644 --- a/src/trackers/TL.py +++ b/src/trackers/TL.py @@ -35,13 +35,13 @@ def __init__(self, config): self.upload_url = 'https://www.torrentleech.org/torrents/upload/apiupload' self.signature = None self.banned_groups = [""] - + self.announce_key = self.config['TRACKERS'][self.tracker]['announce_key'] self.config['TRACKERS'][self.tracker]['announce_url'] = f"https://tracker.torrentleech.org/a/{self.announce_key}/announce" pass - + async def get_cat_id(self, common, meta): - if meta.get('anime', 0): + if meta.get('anime', 0): return self.CATEGORIES['Anime'] if meta['category'] == 'MOVIE': @@ -64,7 +64,7 @@ async def get_cat_id(self, common, meta): elif meta['type'] == 'HDTV': return self.CATEGORIES['MovieHdRip'] elif meta['category'] == 'TV': - if meta['original_language'] != 'en': + if meta['original_language'] != 'en': return self.CATEGORIES['TvForeign'] elif meta.get('tv_pack', 0): return self.CATEGORIES['TvBoxsets'] @@ -82,13 +82,13 @@ async def upload(self, meta): await common.unit3d_edit_desc(meta, self.tracker, self.signature) open_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'a+') - - info_filename = 'BD_SUMMARY_00' if meta['bdinfo'] != None else 'MEDIAINFO_CLEANPATH' + + info_filename = 'BD_SUMMARY_00' if meta['bdinfo'] is not None else 'MEDIAINFO_CLEANPATH' open_info = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/{info_filename}.txt", 'r', encoding='utf-8') open_desc.write('\n\n') open_desc.write(open_info.read()) open_info.close() - + open_desc.seek(0) open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = { @@ -96,19 +96,19 @@ async def upload(self, meta): 'torrent': (self.get_name(meta) + '.torrent', open_torrent) } data = { - 'announcekey' : self.announce_key, - 'category' : cat_id + 'announcekey': self.announce_key, + 'category': cat_id } headers = { 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } - - if meta['debug'] == False: + + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers) if not response.text.isnumeric(): console.print(f'[red]{response.text}') else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() open_desc.close() diff --git a/src/trackers/UNIT3D_TEMPLATE.py b/src/trackers/UNIT3D_TEMPLATE.py index c77e758a7..996bab254 100644 --- a/src/trackers/UNIT3D_TEMPLATE.py +++ b/src/trackers/UNIT3D_TEMPLATE.py @@ -2,7 +2,6 @@ # import discord import asyncio import requests -import os import platform from str2bool import str2bool @@ -34,20 +33,20 @@ def __init__(self, config): self.signature = None self.banned_groups = [""] pass - + async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', + 'MOVIE': '1', + 'TV': '2', }.get(category_name, '0') return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', + 
'DISC': '1', 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', + 'WEBDL': '4', + 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '3' }.get(type, '0') @@ -55,16 +54,16 @@ async def get_type_id(self, type): async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', - '4320p': '1', - '2160p': '2', - '1440p' : '3', + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', '1080p': '3', - '1080i':'4', - '720p': '5', - '576p': '6', + '1080i': '4', + '720p': '5', + '576p': '6', '576i': '7', - '480p': '8', + '480p': '8', '480i': '9' }.get(resolution, '10') return resolution_id @@ -82,12 +81,12 @@ async def upload(self, meta): await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -97,34 +96,34 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : meta['name'], - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': meta['name'], + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - + if region_id != 0: data['region_id'] = region_id if distributor_id != 0: @@ -136,35 +135,31 @@ async def upload(self, meta): 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - - if meta['debug'] == False: + + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go 
check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta.get('edition', "") != "": params['name'] = params['name'] + f" {meta['edition']}" @@ -176,8 +171,8 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes \ No newline at end of file + return dupes diff --git a/src/vs.py b/src/vs.py index 6684ca6ea..7fb918cfe 100644 --- a/src/vs.py +++ b/src/vs.py @@ -1,15 +1,12 @@ import vapoursynth as vs -core = vs.core -from awsmfunc import ScreenGen, DynamicTonemap, FrameInfo, zresize +from awsmfunc import ScreenGen, DynamicTonemap, zresize import random -import argparse -from typing import Union, List -from pathlib import Path -import os, sys -import platform -import multiprocessing +import os from functools import partial +core = vs.core + + def CustomFrameInfo(clip, text): def FrameProps(n, f, clip): # Modify the frame properties extraction here to avoid the decode issue @@ -20,6 +17,7 @@ def FrameProps(n, f, clip): # Apply FrameProps to each frame return core.std.FrameEval(clip, partial(FrameProps, clip=clip), prop_src=clip) + def optimize_images(image, config): import platform # Ensure platform is imported here if config.get('optimize_images', True): @@ -27,7 +25,7 @@ def optimize_images(image, config): try: pyver = platform.python_version_tuple() if int(pyver[0]) == 3 and int(pyver[1]) >= 7: - import oxipng + import oxipng if os.path.getsize(image) >= 16000000: oxipng.optimize(image, level=6) else: @@ -36,6 +34,7 @@ def optimize_images(image, config): print(f"Image optimization failed: {e}") return + def vs_screengn(source, encode=None, filter_b_frames=False, num=5, dir=".", config=None): if config is None: config = {'optimize_images': True} # Default configuration @@ -130,4 +129,4 @@ def vs_screengn(source, encode=None, filter_b_frames=False, num=5, dir=".", conf # Optimize images for i in range(1, num + 1): image_path = os.path.join(dir, f"{str(i).zfill(2)}a.png") - optimize_images(image_path, config) \ No newline at end of file + optimize_images(image_path, config) diff --git a/upload.py b/upload.py index 9a2f8f254..5b333af6c 100644 --- a/upload.py +++ b/upload.py @@ -44,26 +44,23 @@ import os import sys import platform -import multiprocessing -import logging import shutil import glob import cli_ui +import traceback from src.console import console from rich.markdown import Markdown from rich.style import Style - cli_ui.setup(color='always', title="L4G's Upload Assistant") -import traceback 
base_dir = os.path.abspath(os.path.dirname(__file__)) try: from data.config import config -except: +except Exception: if not os.path.exists(os.path.abspath(f"{base_dir}/data/config.py")): try: if os.path.exists(os.path.abspath(f"{base_dir}/data/config.json")): @@ -73,16 +70,16 @@ with open(f"{base_dir}/data/config.py", 'w') as f: f.write(f"config = {json.dumps(json_config, indent=4)}") f.close() - cli_ui.info(cli_ui.green, "Successfully updated config from .json to .py") - cli_ui.info(cli_ui.green, "It is now safe for you to delete", cli_ui.yellow, "data/config.json", "if you wish") + cli_ui.info(cli_ui.green, "Successfully updated config from .json to .py") + cli_ui.info(cli_ui.green, "It is now safe for you to delete", cli_ui.yellow, "data/config.json", "if you wish") from data.config import config else: raise NotImplementedError - except: + except Exception: cli_ui.info(cli_ui.red, "We have switched from .json to .py for config to have a much more lenient experience") cli_ui.info(cli_ui.red, "Looks like the auto updater didnt work though") cli_ui.info(cli_ui.red, "Updating is just 2 easy steps:") - cli_ui.info(cli_ui.red, "1: Rename", cli_ui.yellow, os.path.abspath(f"{base_dir}/data/config.json"), cli_ui.red, "to", cli_ui.green, os.path.abspath(f"{base_dir}/data/config.py") ) + cli_ui.info(cli_ui.red, "1: Rename", cli_ui.yellow, os.path.abspath(f"{base_dir}/data/config.json"), cli_ui.red, "to", cli_ui.green, os.path.abspath(f"{base_dir}/data/config.py")) cli_ui.info(cli_ui.red, "2: Add", cli_ui.green, "config = ", cli_ui.red, "to the beginning of", cli_ui.green, os.path.abspath(f"{base_dir}/data/config.py")) exit() else: @@ -90,6 +87,7 @@ client = Clients(config=config) parser = Args(config) + async def do_the_thing(base_dir): meta = dict() meta['base_dir'] = base_dir @@ -111,8 +109,8 @@ async def do_the_thing(base_dir): path = path[:-1] queue = [] if os.path.exists(path): - meta, help, before_args = parser.parse(tuple(' '.join(sys.argv[1:]).split(' ')), meta) - queue = [path] + meta, help, before_args = parser.parse(tuple(' '.join(sys.argv[1:]).split(' ')), meta) + queue = [path] else: # Search glob if dirname exists if os.path.exists(os.path.dirname(path)) and len(paths) <= 1: @@ -126,7 +124,7 @@ async def do_the_thing(base_dir): console.print("\n\n") else: console.print(f"[red]Path: [bold red]{path}[/bold red] does not exist") - + elif os.path.exists(os.path.dirname(path)) and len(paths) != 1: queue = paths md_text = "\n - ".join(queue) @@ -153,10 +151,10 @@ async def do_the_thing(base_dir): console.print("\n[bold green]Queuing these files:[/bold green]", end='') console.print(Markdown(f"- {md_text.rstrip()}\n\n", style=Style(color='cyan'))) console.print("\n\n") - + else: # Add Search Here - console.print(f"[red]There was an issue with your input. If you think this was not an issue, please make a report that includes the full command used.") + console.print("[red]There was an issue with your input. 
If you think this was not an issue, please make a report that includes the full command used.") exit() base_meta = {k: v for k, v in meta.items()} @@ -169,8 +167,8 @@ async def do_the_thing(base_dir): saved_meta = json.load(f) for key, value in saved_meta.items(): overwrite_list = [ - 'trackers', 'dupe', 'debug', 'anon', 'category', 'type', 'screens', 'nohash', 'manual_edition', 'imdb', 'tmdb_manual', 'mal', 'manual', - 'hdb', 'ptp', 'blu', 'no_season', 'no_aka', 'no_year', 'no_dub', 'no_tag', 'no_seed', 'client', 'desclink', 'descfile', 'desc', 'draft', 'region', 'freeleech', + 'trackers', 'dupe', 'debug', 'anon', 'category', 'type', 'screens', 'nohash', 'manual_edition', 'imdb', 'tmdb_manual', 'mal', 'manual', + 'hdb', 'ptp', 'blu', 'no_season', 'no_aka', 'no_year', 'no_dub', 'no_tag', 'no_seed', 'client', 'desclink', 'descfile', 'desc', 'draft', 'region', 'freeleech', 'personalrelease', 'unattended', 'season', 'episode', 'torrent_creation', 'qbit_tag', 'qbit_cat', 'skip_imghost_upload', 'imghost', 'manual_source', 'webdv', 'hardcoded-subs' ] if meta.get(key, None) != value and key in overwrite_list: @@ -181,7 +179,7 @@ async def do_the_thing(base_dir): pass console.print("[red]Click package will be required in a future update, install with requirements.txt now to be prepared") console.print(f"[green]Gathering info for {os.path.basename(path)}") - if meta['imghost'] == None: + if meta['imghost'] is None: meta['imghost'] = config['DEFAULT']['img_host_1'] if not meta['unattended']: ua = config['DEFAULT'].get('auto_mode', False) @@ -189,44 +187,44 @@ async def do_the_thing(base_dir): meta['unattended'] = True console.print("[yellow]Running in Auto Mode") prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config) - meta = await prep.gather_prep(meta=meta, mode='cli') + meta = await prep.gather_prep(meta=meta, mode='cli') meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await prep.get_name(meta) - if meta.get('image_list', False) in (False, []) and meta.get('skip_imghost_upload', False) == False: + if meta.get('image_list', False) in (False, []) and meta.get('skip_imghost_upload', False) is False: return_dict = {} - meta['image_list'], dummy_var = prep.upload_screens(meta, meta['screens'], 1, 0, meta['screens'],[], return_dict) + meta['image_list'], dummy_var = prep.upload_screens(meta, meta['screens'], 1, 0, meta['screens'], [], return_dict) if meta['debug']: console.print(meta['image_list']) # meta['uploaded_screens'] = True - elif meta.get('skip_imghost_upload', False) == True and meta.get('image_list', False) == False: + elif meta.get('skip_imghost_upload', False) is True and meta.get('image_list', False) is False: meta['image_list'] = [] if not os.path.exists(os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent")): reuse_torrent = None - if meta.get('rehash', False) == False: + if meta.get('rehash', False) is False: reuse_torrent = await client.find_existing_torrent(meta) - if reuse_torrent != None: + if reuse_torrent is not None: prep.create_base_from_existing_torrent(reuse_torrent, meta['base_dir'], meta['uuid']) - if meta['nohash'] == False and reuse_torrent == None: + if meta['nohash'] is False and reuse_torrent is None: prep.create_torrent(meta, Path(meta['path']), "BASE") if meta['nohash']: meta['client'] = "none" - elif os.path.exists(os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent")) and meta.get('rehash', False) == True and meta['nohash'] == False: + elif 
os.path.exists(os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent")) and meta.get('rehash', False) is True and meta['nohash'] is False: prep.create_torrent(meta, Path(meta['path']), "BASE") if int(meta.get('randomized', 0)) >= 1: prep.create_random_torrents(meta['base_dir'], meta['uuid'], meta['randomized'], meta['path']) - - if meta.get('trackers', None) != None: + + if meta.get('trackers', None) is not None: trackers = meta['trackers'] else: trackers = config['TRACKERS']['default_trackers'] if "," in trackers: trackers = trackers.split(',') - with open (f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: json.dump(meta, f, indent=4) f.close() - confirm = get_confirmation(meta) - while confirm == False: + confirm = get_confirmation(meta) + while confirm is False: # help.print_help() editargs = cli_ui.ask_string("Input args that need correction e.g.(--tag NTb --category tv --tmdb 12345)") editargs = (meta['path'],) + tuple(editargs.split()) @@ -235,26 +233,24 @@ async def do_the_thing(base_dir): meta, help, before_args = parser.parse(editargs, meta) # meta = await prep.tmdb_other_meta(meta) meta['edit'] = True - meta = await prep.gather_prep(meta=meta, mode='cli') + meta = await prep.gather_prep(meta=meta, mode='cli') meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await prep.get_name(meta) confirm = get_confirmation(meta) - - if isinstance(trackers, list) == False: + + if isinstance(trackers, list) is False: trackers = [trackers] trackers = [s.strip().upper() for s in trackers] if meta.get('manual', False): trackers.insert(0, "MANUAL") - - #################################### - ####### Upload to Trackers ####### - #################################### + + # Upload to Trackers common = COMMON(config=config) - api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM','LCD','LST','HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'OTW', 'FNP', 'CBR', 'UTP', 'AL'] - http_trackers = ['HDB', 'TTG', 'FL', 'PTER', 'HDT', 'MTV'] + api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM', 'LCD', 'LST', 'HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'HDB'] + http_trackers = ['TTG', 'FL', 'PTER', 'HDT', 'MTV'] tracker_class_map = { - 'BLU' : BLU, 'BHD': BHD, 'AITHER' : AITHER, 'STC' : STC, 'R4E' : R4E, 'THR' : THR, 'STT' : STT, 'HP' : HP, 'PTP' : PTP, 'RF' : RF, 'SN' : SN, - 'ACM' : ACM, 'HDB' : HDB, 'LCD': LCD, 'TTG' : TTG, 'LST' : LST, 'HUNO': HUNO, 'FL' : FL, 'LT' : LT, 'NBL' : NBL, 'ANT' : ANT, 'PTER': PTER, 'JPTV' : JPTV, - 'TL' : TL, 'TDC' : TDC, 'HDT' : HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF':RTF, 'OTW': OTW, 'FNP': FNP, 'CBR': CBR, 'UTP': UTP, 'AL':AL} + 'BLU': BLU, 'BHD': BHD, 'AITHER': AITHER, 'STC': STC, 'R4E': R4E, 'THR': THR, 'STT': STT, 'HP': HP, 'PTP': PTP, 'RF': RF, 'SN': SN, + 'ACM': ACM, 'HDB': HDB, 'LCD': LCD, 'TTG': TTG, 'LST': LST, 'HUNO': HUNO, 'FL': FL, 'LT': LT, 'NBL': NBL, 'ANT': ANT, 'PTER': PTER, 'JPTV': JPTV, + 'TL': TL, 'TDC': TDC, 'HDT': HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF': RTF, 'OTW': OTW, 'FNP': FNP, 'CBR': CBR, 'UTP': UTP, 'AL': AL} for tracker in trackers: if meta['name'].endswith('DUPE?'): @@ -264,7 +260,7 @@ async def do_the_thing(base_dir): debug = "(DEBUG)" else: debug = "" - + if tracker in api_trackers: tracker_class = tracker_class_map[tracker](config=config) if meta['unattended']: @@ -281,12 +277,12 @@ async 
def do_the_thing(base_dir): dupes = await common.filter_dupes(dupes, meta) # note BHDTV does not have search implemented. meta = dupe_check(dupes, meta) - if meta['upload'] == True: + if meta['upload'] is True: await tracker_class.upload(meta) if tracker == 'SN': await asyncio.sleep(16) await client.add_to_client(meta, tracker_class.tracker) - + if tracker in http_trackers: tracker_class = tracker_class_map[tracker](config=config) if meta['unattended']: @@ -297,19 +293,19 @@ async def do_the_thing(base_dir): console.print(f"Uploading to {tracker}") if check_banned_group(tracker_class.tracker, tracker_class.banned_groups, meta): continue - if await tracker_class.validate_credentials(meta) == True: + if await tracker_class.validate_credentials(meta) is True: dupes = await tracker_class.search_existing(meta) dupes = await common.filter_dupes(dupes, meta) meta = dupe_check(dupes, meta) - if meta['upload'] == True: + if meta['upload'] is True: await tracker_class.upload(meta) await client.add_to_client(meta, tracker_class.tracker) if tracker == "MANUAL": - if meta['unattended']: + if meta['unattended']: do_manual = True else: - do_manual = cli_ui.ask_yes_no(f"Get files for manual upload?", default=True) + do_manual = cli_ui.ask_yes_no("Get files for manual upload?", default=True) if do_manual: for manual_tracker in trackers: if manual_tracker != 'MANUAL': @@ -320,11 +316,11 @@ async def do_the_thing(base_dir): else: await tracker_class.edit_desc(meta) url = await prep.package(meta) - if url == False: + if url is False: console.print(f"[yellow]Unable to upload prep files, they can be found at `tmp/{meta['uuid']}") else: console.print(f"[green]{meta['name']}") - console.print(f"[green]Files can be found at: [yellow]{url}[/yellow]") + console.print(f"[green]Files can be found at: [yellow]{url}[/yellow]") if tracker == "BHD": bhd = BHD(config=config) @@ -344,10 +340,10 @@ async def do_the_thing(base_dir): dupes = await bhd.search_existing(meta) dupes = await common.filter_dupes(dupes, meta) meta = dupe_check(dupes, meta) - if meta['upload'] == True: + if meta['upload'] is True: await bhd.upload(meta) await client.add_to_client(meta, "BHD") - + if tracker == "THR": if meta['unattended']: upload_to_thr = True @@ -355,11 +351,11 @@ async def do_the_thing(base_dir): upload_to_thr = cli_ui.ask_yes_no(f"Upload to THR? 
{debug}", default=meta['unattended']) if upload_to_thr: console.print("Uploading to THR") - #Unable to get IMDB id/Youtube Link + # nable to get IMDB id/Youtube Link if meta.get('imdb_id', '0') == '0': imdb_id = cli_ui.ask_string("Unable to find IMDB id, please enter e.g.(tt1234567)") meta['imdb_id'] = imdb_id.replace('tt', '').zfill(7) - if meta.get('youtube', None) == None: + if meta.get('youtube', None) is None: youtube = cli_ui.ask_string("Unable to find youtube trailer, please link one e.g.(https://www.youtube.com/watch?v=dQw4w9WgXcQ)") meta['youtube'] = youtube thr = THR(config=config) @@ -371,10 +367,10 @@ async def do_the_thing(base_dir): dupes = thr.search_existing(session, meta.get('imdb_id')) dupes = await common.filter_dupes(dupes, meta) meta = dupe_check(dupes, meta) - if meta['upload'] == True: + if meta['upload'] is True: await thr.upload(session, meta) await client.add_to_client(meta, "THR") - except: + except Exception: console.print(traceback.print_exc()) if tracker == "PTP": @@ -393,9 +389,9 @@ async def do_the_thing(base_dir): try: console.print("[yellow]Searching for Group ID") groupID = await ptp.get_group_by_imdb(meta['imdb_id']) - if groupID == None: + if groupID is None: console.print("[yellow]No Existing Group found") - if meta.get('youtube', None) == None or "youtube" not in str(meta.get('youtube', '')): + if meta.get('youtube', None) is None or "youtube" not in str(meta.get('youtube', '')): youtube = cli_ui.ask_string("Unable to find youtube trailer, please link one e.g.(https://www.youtube.com/watch?v=dQw4w9WgXcQ)", default="") meta['youtube'] = youtube meta['upload'] = True @@ -406,12 +402,12 @@ async def do_the_thing(base_dir): meta = dupe_check(dupes, meta) if meta.get('imdb_info', {}) == {}: meta['imdb_info'] = await prep.get_imdb_info(meta['imdb_id'], meta) - if meta['upload'] == True: + if meta['upload'] is True: ptpUrl, ptpData = await ptp.fill_upload_form(groupID, meta) await ptp.upload(meta, ptpUrl, ptpData) await asyncio.sleep(5) await client.add_to_client(meta, "PTP") - except: + except Exception: console.print(traceback.print_exc()) if tracker == "TL": @@ -425,11 +421,11 @@ async def do_the_thing(base_dir): if check_banned_group(tracker_class.tracker, tracker_class.banned_groups, meta): continue await tracker_class.upload(meta) - await client.add_to_client(meta, tracker_class.tracker) + await client.add_to_client(meta, tracker_class.tracker) def get_confirmation(meta): - if meta['debug'] == True: + if meta['debug'] is True: console.print("[bold red]DEBUG: True") console.print(f"Prep material saved to {meta['base_dir']}/tmp/{meta['uuid']}") console.print() @@ -451,7 +447,7 @@ def get_confirmation(meta): if int(meta.get('freeleech', '0')) != 0: cli_ui.info(f"Freeleech: {meta['freeleech']}") if meta['tag'] == "": - tag = "" + tag = "" else: tag = f" / {meta['tag'][1:]}" if meta['is_disc'] == "DVD": @@ -460,28 +456,28 @@ def get_confirmation(meta): res = meta['resolution'] cli_ui.info(f"{res} / {meta['type']}{tag}") - if meta.get('personalrelease', False) == True: + if meta.get('personalrelease', False) is True: cli_ui.info("Personal Release!") console.print() - if meta.get('unattended', False) == False: + if meta.get('unattended', False) is False: get_missing(meta) - ring_the_bell = "\a" if config['DEFAULT'].get("sfx_on_prompt", True) == True else "" # \a rings the bell + ring_the_bell = "\a" if config['DEFAULT'].get("sfx_on_prompt", True) is True else "" # \a rings the bell cli_ui.info(ring_the_bell) # Handle the 'keep_folder' logic based on 'is disc' 
and 'isdir' if meta.get('is disc', False): meta['keep_folder'] = False # Ensure 'keep_folder' is False if 'is disc' is True - + if meta['isdir']: if 'keep_folder' in meta: if meta['keep_folder']: - cli_ui.info_section(cli_ui.yellow, f"Uploading with --keep-folder") + cli_ui.info_section(cli_ui.yellow, "Uploading with --keep-folder") kf_confirm = cli_ui.ask_yes_no("You specified --keep-folder. Uploading in folders might not be allowed. Are you sure you want to proceed?", default=False) if not kf_confirm: cli_ui.info('Aborting...') exit() - console.print("[red]Click package will be required in a future update, install with requirements.txt now to be prepared") - cli_ui.info_section(cli_ui.yellow, f"Is this correct?") + + cli_ui.info_section(cli_ui.yellow, "Is this correct?") cli_ui.info(f"Name: {meta['name']}") confirm = cli_ui.ask_yes_no("Correct?", default=False) else: @@ -490,19 +486,20 @@ def get_confirmation(meta): return confirm + def dupe_check(dupes, meta): if not dupes: - console.print("[green]No dupes found") - meta['upload'] = True - return meta + console.print("[green]No dupes found") + meta['upload'] = True + return meta else: - console.print() + console.print() dupe_text = "\n".join(dupes) console.print() cli_ui.info_section(cli_ui.bold, "Check if these are actually dupes!") cli_ui.info(dupe_text) if meta['unattended']: - if meta.get('dupe', False) == False: + if meta.get('dupe', False) is False: console.print("[red]Found potential dupes. Aborting. If this is not a dupe, or you would like to upload anyways, pass --skip-dupe-check") upload = False else: @@ -510,11 +507,11 @@ def dupe_check(dupes, meta): upload = True console.print() if not meta['unattended']: - if meta.get('dupe', False) == False: + if meta.get('dupe', False) is False: upload = cli_ui.ask_yes_no("Upload Anyways?", default=False) else: upload = True - if upload == False: + if upload is False: meta['upload'] = False else: meta['upload'] = True @@ -546,14 +543,15 @@ def check_banned_group(tracker, banned_group_list, meta): return True return False + def get_missing(meta): info_notes = { - 'edition' : 'Special Edition/Release', - 'description' : "Please include Remux/Encode Notes if possible (either here or edit your upload)", - 'service' : "WEB Service e.g.(AMZN, NF)", - 'region' : "Disc Region", - 'imdb' : 'IMDb ID (tt1234567)', - 'distributor' : "Disc Distributor e.g.(BFI, Criterion, etc)" + 'edition': 'Special Edition/Release', + 'description': "Please include Remux/Encode Notes if possible (either here or edit your upload)", + 'service': "WEB Service e.g.(AMZN, NF)", + 'region': "Disc Region", + 'imdb': 'IMDb ID (tt1234567)', + 'distributor': "Disc Distributor e.g.(BFI, Criterion, etc)" } missing = [] if meta.get('imdb_id', '0') == '0': @@ -563,7 +561,7 @@ def get_missing(meta): for each in meta['potential_missing']: if str(meta.get(each, '')).replace(' ', '') in ["", "None", "0"]: if each == "imdb_id": - each = 'imdb' + each = 'imdb' missing.append(f"--{each} | {info_notes.get(each)}") if missing != []: cli_ui.info_section(cli_ui.yellow, "Potentially missing information:") @@ -576,6 +574,7 @@ def get_missing(meta): console.print() return + if __name__ == '__main__': pyver = platform.python_version_tuple() if int(pyver[0]) != 3: @@ -587,4 +586,4 @@ def get_missing(meta): loop = asyncio.get_event_loop() loop.run_until_complete(do_the_thing(base_dir)) else: - asyncio.run(do_the_thing(base_dir)) \ No newline at end of file + asyncio.run(do_the_thing(base_dir)) From 626f86c345ac90e649935a9f9ece32e5a0caff84 Mon Sep 
17 00:00:00 2001 From: Audionut Date: Sun, 1 Sep 2024 06:10:43 +1000 Subject: [PATCH 122/741] Allow editing the PTP description --- src/trackers/PTP.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index f74a54c91..7e4be9878 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -10,6 +10,7 @@ import multiprocessing import platform import pickle +import click from pymediainfo import MediaInfo from src.trackers.COMMON import COMMON from src.bbcode import BBCODE @@ -184,10 +185,25 @@ async def get_ptp_description(self, ptp_torrent_id, is_disc): bbcode = BBCODE() desc, imagelist = bbcode.clean_ptp_description(ptp_desc, is_disc) - + console.print(f"[bold green]Successfully grabbed description from PTP") console.print(f"[cyan]Description after cleaning:[yellow]\n{desc[:500]}...") # Show first 500 characters for brevity + # Allow user to edit or discard the description + console.print("[cyan]Do you want to edit or discard the description?[/cyan]") + edit_choice = input("[cyan]Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: [/cyan]") + + if edit_choice.lower() == 'e': + edited_description = click.edit(desc) + if edited_description: + desc = edited_description.strip() + console.print(f"[green]Final description after editing:[/green] {desc}") + elif edit_choice.lower() == 'd': + desc = None + console.print("[yellow]Description discarded.[/yellow]") + else: + console.print(f"[green]Keeping the original description.[/green]") + return desc, imagelist async def get_group_by_imdb(self, imdb): From f22322cc03b5221499118c1cfaf2882faef7e317 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 1 Sep 2024 07:13:43 +1000 Subject: [PATCH 123/741] Allow choosing to regenerate the torrent with ANT --- src/trackers/ANT.py | 40 ++++++++++++++++++++++++++++++---------- upload.py | 1 - 2 files changed, 30 insertions(+), 11 deletions(-) diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index 2fcbdd603..5862b41bf 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -4,6 +4,7 @@ import asyncio import requests import platform +import cli_ui from str2bool import str2bool from pymediainfo import MediaInfo import math @@ -66,26 +67,45 @@ async def upload(self, meta): common = COMMON(config=self.config) torrent_filename = "BASE" torrent = Torrent.read(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") + + # Calculate the total size of all files in the torrent total_size = sum(file.size for file in torrent.files) + # Calculate the total bytes consumed by all the pathnames in the torrent + def calculate_pathname_bytes(files): + total_pathname_bytes = sum(len(str(file).encode('utf-8')) for file in files) + return total_pathname_bytes + + total_pathname_bytes = calculate_pathname_bytes(torrent.files) + # Calculate the number of pieces and the torrent file size based on the current piece size - def calculate_pieces_and_file_size(total_size, piece_size): + def calculate_pieces_and_file_size(total_size, pathname_bytes, piece_size): num_pieces = math.ceil(total_size / piece_size) - torrent_file_size = 20 + (num_pieces * 20) # Approximate size: 20 bytes header + 20 bytes per piece + # Approximate size: 20 bytes header + 20 bytes per piece + pathname bytes + torrent_file_size = 20 + (num_pieces * 20) + pathname_bytes return num_pieces, torrent_file_size # Check if the existing torrent fits within the constraints - num_pieces, 
torrent_file_size = calculate_pieces_and_file_size(total_size, total_pathname_bytes, torrent.piece_size) - # If the torrent doesn't meet the constraints, regenerate it + # Convert torrent file size to KiB for display + torrent_file_size_kib = torrent_file_size / 1024 + + # If the torrent doesn't meet the constraints, ask the user if they want to regenerate it if not (1000 <= num_pieces <= 2000) or torrent_file_size > 102400: - console.print("[yellow]Regenerating torrent to fit within 1000-2000 pieces and 100 KiB .torrent size limit needed for ANT.") - from src.prep import Prep - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) + console.print(f"[yellow]Existing .torrent is outside of ANT preferred constraints with {num_pieces} pieces and is approximately {torrent_file_size_kib:.2f} KiB.") + regenerate = cli_ui.ask_yes_no("Do you wish to regenerate the torrent?", default=True) + + if regenerate: + console.print("[yellow]Regenerating torrent to fit within 1000-2000 pieces and 100 KiB .torrent size limit needed for ANT.") + from src.prep import Prep + prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) - # Call create_torrent with the default piece size calculation - prep.create_torrent(meta, Path(meta['path']), "ANT") - torrent_filename = "ANT" + # Call create_torrent with the default piece size calculation + prep.create_torrent(meta, Path(meta['path']), "ANT") + torrent_filename = "ANT" + else: + console.print("[green]Using the existing torrent despite not meeting the preferred constraints.") else: console.print("[green]Existing torrent meets the constraints.") diff --git a/upload.py b/upload.py index 5b333af6c..33c83499c 100644 --- a/upload.py +++ b/upload.py @@ -177,7 +177,6 @@ async def do_the_thing(base_dir): f.close() except FileNotFoundError: pass - console.print("[red]Click package will be required in a future update, install with requirements.txt now to be prepared") console.print(f"[green]Gathering info for {os.path.basename(path)}") if meta['imghost'] is None: meta['imghost'] = config['DEFAULT']['img_host_1'] From b2e6802cd97321709e299c98dc819af630116f9e Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 1 Sep 2024 08:18:02 +1000 Subject: [PATCH 124/741] Allow setting max piece size --- src/args.py | 2 +- src/prep.py | 32 +++++++++++++++++++++----------- src/trackers/ANT.py | 5 ++++- src/trackers/MTV.py | 8 ++++---- 4 files changed, 30 insertions(+), 17 deletions(-) diff --git a/src/args.py b/src/args.py index 485e76235..ac8796f8b 100644 --- a/src/args.py +++ b/src/args.py @@ -68,7 +68,7 @@ def parse(self, args, meta): parser.add_argument('-rh', '--rehash', action='store_true', required=False, help="DO hash .torrent") parser.add_argument('-ps', '--piece-size-max', dest='piece_size_max', nargs='*', required=False, help="Maximum piece size in MiB", choices=[1, 2, 4, 8, 16], type=int) parser.add_argument('-dr', '--draft', action='store_true', required=False, help="Send to drafts (BHD)") - parser.add_argument('-tc', '--torrent-creation', dest='torrent_creation', nargs='*', required=False, help="What tool should be used to create the base .torrent", choices=['torf', 'torrenttools', 'mktorrent']) + parser.add_argument('-mps', '--max-piece-size', nargs='*', required=False, help="Set max piece size allowed in MiB for default torrent creation (default 64 MiB)", choices=['2', '4', '8', '16', '32', '64', '128']) parser.add_argument('-client', '--client', nargs='*', required=False, help="Use this torrent client instead of default") 
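As a rough standalone sketch of what the new -mps choice does downstream (resolve_piece_size_max is an illustrative helper with simplified names, not code from this patch; the authoritative logic is the CustomTorrent override in the prep.py hunk that follows):

    def resolve_piece_size_max(max_piece_size_mib, library_default=67108864):
        # Convert the CLI's MiB string to bytes and clamp it to torf's 64 MiB cap;
        # fall back to the library default when the value is missing or malformed.
        try:
            return min(int(max_piece_size_mib) * 1024 * 1024, library_default)
        except (TypeError, ValueError):
            return library_default

    resolve_piece_size_max('8')   # 8388608 bytes (8 MiB, e.g. for MTV's limit below)
    resolve_piece_size_max(None)  # 67108864 bytes (the 64 MiB default)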
parser.add_argument('-qbt', '--qbit-tag', dest='qbit_tag', nargs='*', required=False, help="Add to qbit with this tag") parser.add_argument('-qbc', '--qbit-cat', dest='qbit_cat', nargs='*', required=False, help="Add to qbit with this category") diff --git a/src/prep.py b/src/prep.py index b3c5bb1d8..1e9deb845 100644 --- a/src/prep.py +++ b/src/prep.py @@ -122,7 +122,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met meta['blu_filename'] = blu_filename # Store the filename in meta for later use found_match = True else: - console.print(f"[yellow]User skipped the found ID on {tracker_name}, moving to the next site.") + console.print(f"[yellow]Skipped {tracker_name}, moving to the next site.") await self.handle_image_list(meta, tracker_name) return meta, found_match else: @@ -156,7 +156,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met meta['blu_filename'] = blu_filename found_match = True else: - console.print(f"[yellow]User skipped the found ID on {tracker_name}, moving to the next site.") + console.print(f"[yellow]Skipped {tracker_name}, moving to the next site.") await self.handle_image_list(meta, tracker_name) return meta, found_match else: @@ -191,7 +191,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met meta['description'] = None return meta, found_match else: - console.print(f"[yellow]User skipped the found IMDb ID on {tracker_name}, moving to the next site.") + console.print(f"[yellow]Skipped {tracker_name}, moving to the next site.") meta['skip_gen_desc'] = True return meta, found_match @@ -254,7 +254,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met meta['description'] = None return meta, found_match else: - console.print(f"[yellow]User skipped the found IMDb ID on {tracker_name}, moving to the next site.") + console.print(f"[yellow]Skipped {tracker_name}, moving to the next site.") meta['skip_gen_desc'] = True return meta, found_match else: @@ -2220,12 +2220,23 @@ def get_edition(self, video, bdinfo, filelist, manual_edition): Create Torrent """ class CustomTorrent(torf.Torrent): - # Ensure the piece size is within the desired limits + # Default piece size limits torf.Torrent.piece_size_min = 16384 # 16 KiB torf.Torrent.piece_size_max = 67108864 # 64 MiB - def __init__(self, *args, **kwargs): + def __init__(self, meta, *args, **kwargs): super().__init__(*args, **kwargs) + + # Override piece_size_max if meta['max_piece_size'] is specified + if 'max_piece_size' in meta and meta['max_piece_size']: + try: + max_piece_size_mib = int(meta['max_piece_size']) * 1024 * 1024 # Convert MiB to bytes + self.piece_size_max = min(max_piece_size_mib, torf.Torrent.piece_size_max) + except ValueError: + self.piece_size_max = torf.Torrent.piece_size_max # Fallback to default if conversion fails + else: + self.piece_size_max = torf.Torrent.piece_size_max + # Calculate and set the piece size total_size = self._calculate_total_size() piece_size = self.calculate_piece_size(total_size, self.piece_size_min, self.piece_size_max, self.files) @@ -2246,9 +2257,8 @@ def piece_size(self, value): @classmethod def calculate_piece_size(cls, total_size, min_size, max_size, files): our_min_size = 16384 - our_max_size = 67108864 - # Start with a piece size of 8 MiB - piece_size = 8388608 + our_max_size = max_size if max_size else 67108864 # Default to 64 MiB if max_size is None + piece_size = 8388608 # Start with 8 MiB num_pieces = math.ceil(total_size / piece_size) 
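        # Worked example of the sizing loop below (illustrative numbers only): a
        # 40 GiB payload at the 8 MiB starting piece size gives
        # ceil(40960 / 8) = 5120 pieces, so the estimate works out to
        # 20 + 5120 * 20 + pathname bytes, just over 100 KiB; with more than
        # 2000 pieces the loop doubles the piece size twice, and at 32 MiB the
        # 1280 pieces land inside the 1000-2000 window with a roughly 25 KiB
        # .torrent.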
torrent_file_size = 20 + (num_pieces * 20) + cls._calculate_pathname_bytes(files) # Approximate .torrent size @@ -2262,7 +2272,7 @@ def calculate_piece_size(cls, total_size, min_size, max_size, files): elif num_pieces > 2000: piece_size *= 2 if piece_size > our_max_size: - cli_ui.warning(f"Warning: Piece size exceeded 2000 pieces! Using ({num_pieces}) pieces.") + cli_ui.warning(f"Warning: Piece size exceeded 2000 pieces and .torrent will be approximately {torrent_file_size / 1024:.2f} KiB! Using ({num_pieces}) pieces.") piece_size = our_max_size break elif torrent_file_size > 102400: @@ -2281,7 +2291,6 @@ def _calculate_total_size(self): @classmethod def _calculate_pathname_bytes(cls, files): - # Calculate the total bytes consumed by all the pathnames in the torrent total_pathname_bytes = sum(len(str(file).encode('utf-8')) for file in files) return total_pathname_bytes @@ -2312,6 +2321,7 @@ def create_torrent(self, meta, path, output_filename): # Create and write the new torrent using the CustomTorrent class torrent = self.CustomTorrent( + meta=meta, path=path, trackers=["https://fake.tracker"], source="L4G", diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index 5862b41bf..1929172a8 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -101,7 +101,10 @@ def calculate_pieces_and_file_size(total_size, pathname_bytes, piece_size): from src.prep import Prep prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) - # Call create_torrent with the default piece size calculation + # Override the max piece size before regenerating the torrent + meta['max_piece_size'] = '64' # 64 MiB, the maximum piece size allowed + + # Call create_torrent with the adjusted piece size prep.create_torrent(meta, Path(meta['path']), "ANT") torrent_filename = "ANT" else: diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 33f9ee89f..b630cc779 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -80,6 +80,9 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): if torrent.piece_size > 8388608: # 8 MiB in bytes console.print("[red]Piece size is OVER 8M and does not work on MTV. 
Generating a new .torrent") + # Override the max_piece_size to 8 MiB + meta['max_piece_size'] = '8' # 8 MiB, to ensure the new torrent adheres to this limit + # Determine include and exclude patterns based on whether it's a disc or not if meta['is_disc']: include = [] # Adjust as needed for disc-specific inclusions, make sure it's a list @@ -92,6 +95,7 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): from src.prep import Prep prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) new_torrent = prep.CustomTorrent( + meta=meta, path=Path(meta['path']), trackers=["https://fake.tracker"], source="L4G", @@ -103,10 +107,6 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): created_by="L4G's Upload Assistant" ) - # Explicitly set the piece size and update metainfo - new_torrent.piece_size = 8388608 # 8 MiB in bytes - new_torrent.metainfo['info']['piece length'] = 8388608 # Ensure 'piece length' is set - # Validate and write the new torrent new_torrent.validate_piece_size() new_torrent.generate(callback=prep.torf_cb, interval=5) From 8f6c84c2400418ced368481ecdccbbab3323441c Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 1 Sep 2024 10:03:49 +1000 Subject: [PATCH 125/741] Better handling of descriptions, especially images when already exist Also fix handling when ptp id is passed via argument --- src/prep.py | 138 +++++++++++++++++++++----------------------- src/trackers/PTP.py | 15 ++--- 2 files changed, 74 insertions(+), 79 deletions(-) diff --git a/src/prep.py b/src/prep.py index 1e9deb845..835e7675d 100644 --- a/src/prep.py +++ b/src/prep.py @@ -90,19 +90,22 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met manual_key = f"{tracker_key}_manual" found_match = False + # console.print(f"[cyan]Starting update_metadata_from_tracker for: {tracker_name}[/cyan]") + # Handle each tracker separately if tracker_name == "BLU": + # console.print(f"[blue]Handling BLU tracker[/blue]") if meta.get(tracker_key) is not None: - meta[manual_key] = meta[tracker_key] - console.print(f"[cyan]{tracker_name} ID found in meta, reusing existing ID: {meta[tracker_key]}") + console.print(f"[cyan]{tracker_name} ID found in meta, reusing existing ID: {meta[tracker_key]}[/cyan]") blu_tmdb, blu_imdb, blu_tvdb, blu_mal, blu_desc, blu_category, meta['ext_torrenthash'], blu_imagelist, blu_filename = await COMMON(self.config).unit3d_torrent_info( "BLU", tracker_instance.torrent_url, tracker_instance.search_url, id=meta[tracker_key] ) + # console.print(f"[blue]BLU search by ID complete[/blue]") if blu_tmdb not in [None, '0'] or blu_imdb not in [None, '0'] or blu_tvdb not in [None, '0']: - console.print(f"[green]Valid data found on {tracker_name}, setting meta values") + console.print(f"[green]Valid data found on {tracker_name}, setting meta values[/green]") if await self.prompt_user_for_id_selection(blu_tmdb, blu_imdb, blu_tvdb, blu_filename): if blu_tmdb not in [None, '0']: meta['tmdb_manual'] = blu_tmdb @@ -116,27 +119,29 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met meta['blu_desc'] = blu_desc if blu_category.upper() in ['MOVIE', 'TV SHOW', 'FANRES']: meta['category'] = 'TV' if blu_category.upper() == 'TV SHOW' else blu_category.upper() - if not meta.get('image_list'): + if not meta.get('image_list'): # Only handle images if image_list is not already populated meta['image_list'] = blu_imagelist + if meta.get('image_list'): + await self.handle_image_list(meta, 
tracker_name) if blu_filename: meta['blu_filename'] = blu_filename # Store the filename in meta for later use found_match = True + console.print(f"[green]BLU data successfully updated in meta[/green]") else: - console.print(f"[yellow]Skipped {tracker_name}, moving to the next site.") - await self.handle_image_list(meta, tracker_name) - return meta, found_match + console.print(f"[yellow]Skipped {tracker_name}, moving to the next site.[/yellow]") else: - console.print(f"[yellow]No valid data found on {tracker_name}") + console.print(f"[yellow]No valid data found on {tracker_name}[/yellow]") else: - # BLU tracker handling when tracker_key is not in meta + console.print(f"[yellow]No ID found in meta for BLU, searching by file name[/yellow]") blu_tmdb, blu_imdb, blu_tvdb, blu_mal, blu_desc, blu_category, meta['ext_torrenthash'], blu_imagelist, blu_filename = await COMMON(self.config).unit3d_torrent_info( "BLU", tracker_instance.torrent_url, tracker_instance.search_url, file_name=search_term ) + # console.print(f"[blue]BLU search by file name complete[/blue]") if blu_tmdb not in [None, '0'] or blu_imdb not in [None, '0'] or blu_tvdb not in [None, '0']: - console.print(f"[green]Valid data found on {tracker_name} using file name, setting meta values") + console.print(f"[green]Valid data found on {tracker_name} using file name, setting meta values[/green]") if await self.prompt_user_for_id_selection(blu_tmdb, blu_imdb, blu_tvdb, blu_filename): if blu_tmdb not in [None, '0']: meta['tmdb_manual'] = blu_tmdb @@ -155,50 +160,61 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met if blu_filename: meta['blu_filename'] = blu_filename found_match = True + console.print(f"[green]BLU data successfully updated in meta[/green]") else: - console.print(f"[yellow]Skipped {tracker_name}, moving to the next site.") - await self.handle_image_list(meta, tracker_name) - return meta, found_match + console.print(f"[yellow]Skipped {tracker_name}, moving to the next site.[/yellow]") else: - console.print(f"[yellow]No valid data found on {tracker_name}") + console.print(f"[yellow]No valid data found on {tracker_name}[/yellow]") elif tracker_name == "PTP": - # Handle PTP separately to avoid duplication + # console.print(f"[blue]Handling PTP tracker[/blue]") + if meta.get('ptp') is None: - # Only fetch if not already in meta + # console.print(f"[yellow]No PTP ID in meta, searching by search term[/yellow]") imdb, ptp_torrent_id, meta['ext_torrenthash'] = await tracker_instance.get_ptp_id_imdb(search_term, search_file_folder) if ptp_torrent_id: meta['ptp'] = ptp_torrent_id meta['imdb'] = str(imdb).zfill(7) if imdb else None + else: + ptp_torrent_id = meta['ptp'] + console.print(f"[cyan]PTP ID found in meta: {ptp_torrent_id}, using it to get IMDb ID[/cyan]") + imdb, _, meta['ext_torrenthash'] = await tracker_instance.get_imdb_from_torrent_id(ptp_torrent_id) + if imdb: + meta['imdb'] = str(imdb).zfill(7) + console.print(f"[green]IMDb ID found: tt{meta['imdb']}[/green]") + else: + console.print(f"[yellow]Could not find IMDb ID using PTP ID: {ptp_torrent_id}[/yellow]") if meta.get('imdb') and await self.prompt_user_for_id_selection(imdb=meta['imdb']): - console.print(f"[green]{tracker_name} IMDb ID found: {meta['imdb']}") + console.print(f"[green]{tracker_name} IMDb ID found: tt{meta['imdb']}[/green]") found_match = True ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(meta['ptp'], meta.get('is_disc', False)) if ptp_desc.strip(): meta['description'] = ptp_desc - 
meta['image_list'] = ptp_imagelist + if not meta.get('image_list'): # Only handle images if image_list is not already populated + meta['image_list'] = ptp_imagelist + if meta.get('image_list'): + await self.handle_image_list(meta, tracker_name) meta['skip_gen_desc'] = True - console.print(f"[green]PTP description and images added to metadata.") + console.print(f"[green]PTP description and images added to metadata.[/green]") if await self.prompt_user_for_confirmation("Do you want to keep the description from PTP?"): meta['skip_gen_desc'] = True found_match = True else: - console.print(f"[yellow]Description discarded from PTP") + console.print(f"[yellow]Description discarded from PTP[/yellow]") meta['skip_gen_desc'] = True meta['description'] = None - return meta, found_match else: - console.print(f"[yellow]Skipped {tracker_name}, moving to the next site.") + console.print(f"[yellow]Skipped {tracker_name}, moving to the next site.[/yellow]") meta['skip_gen_desc'] = True - return meta, found_match elif tracker_name == "HDB": + # console.print(f"[blue]Handling HDB tracker[/blue]") if meta.get(tracker_key) is not None: meta[manual_key] = meta[tracker_key] - console.print(f"[cyan]{tracker_name} ID found in meta, reusing existing ID: {meta[tracker_key]}") + console.print(f"[cyan]{tracker_name} ID found in meta, reusing existing ID: {meta[tracker_key]}[/cyan]") imdb, tvdb_id, hdb_name, meta['ext_torrenthash'], tracker_id = await tracker_instance.search_filename(search_term, search_file_folder) meta['tvdb_id'] = str(tvdb_id) if tvdb_id else meta.get('tvdb_id') meta['hdb_name'] = hdb_name @@ -206,7 +222,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met meta[tracker_key] = tracker_id found_match = True else: - # Handle HDB when tracker_key is not in meta + console.print(f"[yellow]No ID found in meta for HDB, searching by file name[/yellow]") imdb, tvdb_id, hdb_name, meta['ext_torrenthash'], tracker_id = await tracker_instance.search_filename(search_term, search_file_folder) meta['tvdb_id'] = str(tvdb_id) if tvdb_id else meta.get('tvdb_id') meta['hdb_name'] = hdb_name @@ -216,52 +232,20 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met if found_match: if imdb or tvdb_id or hdb_name: - console.print(f"[green]{tracker_name} data found: IMDb ID: {imdb}, TVDb ID: {meta['tvdb_id']}, HDB Name: {meta['hdb_name']}") + console.print(f"[green]{tracker_name} data found: IMDb ID: {imdb}, TVDb ID: {meta['tvdb_id']}, HDB Name: {meta['hdb_name']}[/green]") if await self.prompt_user_for_confirmation(f"Do you want to keep the data found on {tracker_name}?"): - console.print(f"[green]{tracker_name} data retained.") + console.print(f"[green]{tracker_name} data retained.[/green]") else: - console.print(f"[yellow]{tracker_name} data discarded.") + console.print(f"[yellow]{tracker_name} data discarded.[/yellow]") meta[tracker_key] = None meta['tvdb_id'] = None meta['hdb_name'] = None found_match = False else: - console.print(f"[yellow]Could not find a matching release on {tracker_name}.") + console.print(f"[yellow]Could not find a matching release on {tracker_name}.[/yellow]") found_match = False - else: - # Handle other trackers if any - meta['imdb'], meta['ext_torrenthash'] = await tracker_instance.get_imdb_from_torrent_id(meta.get(tracker_key)) - if meta['imdb']: - meta['imdb'] = str(meta['imdb']).zfill(7) - if await self.prompt_user_for_id_selection(imdb=meta['imdb']): - console.print(f"[green]{tracker_name} IMDb ID found: {meta['imdb']}") - 
found_match = True - - # Additional PTP handling if needed - if tracker_name == "PTP": - ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(meta['ptp'], meta.get('is_disc', False)) - if ptp_desc.strip(): - meta['description'] = ptp_desc - meta['image_list'] = ptp_imagelist - console.print(f"[green]PTP description and images added to metadata.") - - if await self.prompt_user_for_confirmation("Do you want to keep the description from PTP?"): - meta['skip_gen_desc'] = True - found_match = True - else: - console.print(f"[yellow]Description discarded from PTP") - meta['skip_gen_desc'] = True - meta['description'] = None - return meta, found_match - else: - console.print(f"[yellow]Skipped {tracker_name}, moving to the next site.") - meta['skip_gen_desc'] = True - return meta, found_match - else: - console.print(f"[yellow]No IMDb ID found on {tracker_name}") - # Handle image list at the end - await self.handle_image_list(meta, tracker_name) + console.print(f"[cyan]Finished processing tracker: {tracker_name} with found_match: {found_match}[/cyan]") return meta, found_match async def handle_image_list(self, meta, tracker_name): @@ -401,26 +385,36 @@ async def gather_prep(self, meta, mode): # console.print(f"Debug: meta['filelist'] after population: {meta.get('filelist', 'Not Set')}") # Reuse information from trackers with fallback - if search_term: # Ensure there's a valid search term - found_match = False + found_match = False + + if search_term: + # console.print(f"[blue]Starting search with search_term: {search_term}[/blue]") if str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true": + # console.print(f"[blue]Searching PTP for: {search_term}[/blue]") ptp = PTP(config=self.config) - # console.print(f"[cyan]Attempting to search PTP with search_term: {search_term}[/cyan]") - meta, found_match = await self.update_metadata_from_tracker('PTP', ptp, meta, search_term, search_file_folder) + meta, match = await self.update_metadata_from_tracker('PTP', ptp, meta, search_term, search_file_folder) + found_match = found_match or match + # console.print(f"[blue]PTP search complete, found_match: {found_match}[/blue]") - if not found_match and str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": - # console.print(f"[cyan]Attempting to search HDB with search_term: {search_term}[/cyan]") + if str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": + # console.print(f"[blue]Searching HDB for: {search_term}[/blue]") hdb = HDB(config=self.config) - meta, found_match = await self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder) + meta, match = await self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder) + found_match = found_match or match + # console.print(f"[blue]HDB search complete, found_match: {found_match}[/blue]") - if not found_match and str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": - # console.print(f"[cyan]Attempting to search BLU with search_term: {search_term}[/cyan]") + if str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": + # console.print(f"[blue]Searching BLU for: {search_term}[/blue]") blu = BLU(config=self.config) - meta, found_match = await self.update_metadata_from_tracker('BLU', blu, meta, search_term, search_file_folder) + meta, match = await self.update_metadata_from_tracker('BLU', blu, meta, search_term, search_file_folder) + found_match = found_match or match + # console.print(f"[blue]BLU 
search complete, found_match: {found_match}[/blue]") if not found_match: console.print("[yellow]No matches found on any trackers.[/yellow]") + else: + console.print(f"[green]Match found: {found_match}[/green]") else: console.print("[yellow]Warning: No valid search term available, skipping tracker updates.[/yellow]") diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 7e4be9878..85fe032df 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -150,20 +150,21 @@ async def get_imdb_from_torrent_id(self, ptp_torrent_id): if response.status_code == 200: response = response.json() imdb_id = response['ImdbId'] + ptp_infohash = None for torrent in response['Torrents']: if torrent.get('Id', 0) == str(ptp_torrent_id): ptp_infohash = torrent.get('InfoHash', None) - return imdb_id, ptp_infohash + return imdb_id, ptp_infohash, None elif int(response.status_code) in [400, 401, 403]: console.print(response.text) - return None, None + return None, None, None elif int(response.status_code) == 503: console.print("[bold yellow]PTP Unavailable (503)") - return None, None + return None, None, None else: - return None, None + return None, None, None except Exception: - return None, None + return None, None, None async def get_ptp_description(self, ptp_torrent_id, is_disc): params = { @@ -181,7 +182,7 @@ async def get_ptp_description(self, ptp_torrent_id, is_disc): await asyncio.sleep(1) ptp_desc = response.text - # console.print(f"[yellow]Raw description received:\n{ptp_desc[:500]}...") # Show first 500 characters for brevity + # console.print(f"[yellow]Raw description received:\n{ptp_desc[:3800]}...") # Show first 500 characters for brevity bbcode = BBCODE() desc, imagelist = bbcode.clean_ptp_description(ptp_desc, is_disc) @@ -190,7 +191,7 @@ async def get_ptp_description(self, ptp_torrent_id, is_disc): console.print(f"[cyan]Description after cleaning:[yellow]\n{desc[:500]}...") # Show first 500 characters for brevity # Allow user to edit or discard the description - console.print("[cyan]Do you want to edit or discard the description?[/cyan]") + console.print("[cyan]Do you want to edit, discard or keep the description?[/cyan]") edit_choice = input("[cyan]Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: [/cyan]") if edit_choice.lower() == 'e': From a677c55c9f8134134ba55b62c044a1f27e73c402 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 1 Sep 2024 10:14:09 +1000 Subject: [PATCH 126/741] Fix passed HDB ID handling --- src/prep.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/src/prep.py b/src/prep.py index 835e7675d..b9aede7a9 100644 --- a/src/prep.py +++ b/src/prep.py @@ -211,19 +211,23 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met meta['skip_gen_desc'] = True elif tracker_name == "HDB": - # console.print(f"[blue]Handling HDB tracker[/blue]") - if meta.get(tracker_key) is not None: + if meta.get('hdb') is not None: meta[manual_key] = meta[tracker_key] console.print(f"[cyan]{tracker_name} ID found in meta, reusing existing ID: {meta[tracker_key]}[/cyan]") - imdb, tvdb_id, hdb_name, meta['ext_torrenthash'], tracker_id = await tracker_instance.search_filename(search_term, search_file_folder) + + # Use get_info_from_torrent_id function if ID is found in meta + imdb, tvdb_id, hdb_name, meta['ext_torrenthash'] = await tracker_instance.get_info_from_torrent_id(meta[tracker_key]) + meta['tvdb_id'] = str(tvdb_id) if tvdb_id else meta.get('tvdb_id') meta['hdb_name'] = hdb_name - if tracker_id: - 
meta[tracker_key] = tracker_id found_match = True + else: console.print(f"[yellow]No ID found in meta for HDB, searching by file name[/yellow]") + + # Use search_filename function if ID is not found in meta imdb, tvdb_id, hdb_name, meta['ext_torrenthash'], tracker_id = await tracker_instance.search_filename(search_term, search_file_folder) + meta['tvdb_id'] = str(tvdb_id) if tvdb_id else meta.get('tvdb_id') meta['hdb_name'] = hdb_name if tracker_id: @@ -242,7 +246,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met meta['hdb_name'] = None found_match = False else: - console.print(f"[yellow]Could not find a matching release on {tracker_name}.[/yellow]") + # console.print(f"[yellow]Could not find a matching release on {tracker_name}.[/yellow]") found_match = False console.print(f"[cyan]Finished processing tracker: {tracker_name} with found_match: {found_match}[/cyan]") From f58a0158f001a8899e26de714049756fdb704324 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 1 Sep 2024 11:04:36 +1000 Subject: [PATCH 127/741] Fix BLU ID searching Should have better description handling also --- src/bbcode.py | 77 +++++++++++++++++++++--------------------- src/trackers/COMMON.py | 65 +++++++++++++++++++++++++++++------ 2 files changed, 92 insertions(+), 50 deletions(-) diff --git a/src/bbcode.py b/src/bbcode.py index 0dfa3f352..7b736e490 100644 --- a/src/bbcode.py +++ b/src/bbcode.py @@ -156,16 +156,16 @@ def clean_ptp_description(self, desc, is_disc): return desc, imagelist def clean_unit3d_description(self, desc, site): - # Unescape html + # Unescape HTML desc = html.unescape(desc) - # End my suffering + # Replace carriage returns with newlines desc = desc.replace('\r\n', '\n') # Remove links to site site_netloc = urllib.parse.urlparse(site).netloc site_regex = rf"(\[url[\=\]]https?:\/\/{site_netloc}/[^\]]+])([^\[]+)(\[\/url\])?" 
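        # Illustrative example (hypothetical input): with site_netloc
        # "blutopia.xyz", BBCode such as
        # "[url=https://blutopia.xyz/torrents/123]My other upload[/url]" is
        # captured here as an (opening tag, visible text, closing tag) tuple,
        # which the loop below rejoins so the tracker-specific link can be
        # stripped from the description.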
site_url_tags = re.findall(site_regex, desc) - if site_url_tags != []: + if site_url_tags: for site_url_tag in site_url_tags: site_url_tag = ''.join(site_url_tag) url_tag_regex = rf"(\[url[\=\]]https?:\/\/{site_netloc}[^\]]+])" @@ -184,51 +184,50 @@ def clean_unit3d_description(self, desc, site): desc = desc.replace(spoilers[i], f"SPOILER_PLACEHOLDER-{i} ") spoiler_placeholders.append(spoilers[i]) - # Get Images from outside spoilers + # Get Images from [img] tags and remove them from the description imagelist = [] - url_tags = re.findall(r"\[url=[\s\S]*?\[\/url\]", desc) - if url_tags != []: - for tag in url_tags: - image = re.findall(r"\[img[\s\S]*?\[\/img\]", tag) - if len(image) == 1: - image_dict = {} - img_url = image[0].lower().replace('[img]', '').replace('[/img]', '') - image_dict['img_url'] = image_dict['raw_url'] = re.sub(r"\[img[\s\S]*\]", "", img_url) - url_tag = tag.replace(image[0], '') - image_dict['web_url'] = re.match(r"\[url=[\s\S]*?\]", url_tag, flags=re.IGNORECASE)[0].lower().replace('[url=', '')[:-1] - imagelist.append(image_dict) - desc = desc.replace(tag, '') - - # Remove bot signatures - bot_signature_regex = r"\[center\]\s*\[img=\d+\]https:\/\/blutopia\.xyz\/favicon\.ico\[\/img\]\s*\[b\]Uploaded Using \[url=https:\/\/github\.com\/HDInnovations\/UNIT3D\]UNIT3D\[\/url\] Auto Uploader\[\/b\]\s*\[img=\d+\]https:\/\/blutopia\.xyz\/favicon\.ico\[\/img\]\s*\[\/center\]" - desc = re.sub(bot_signature_regex, "", desc, flags=re.IGNORECASE) - desc = re.sub(r"\[center\].*Created by L4G's Upload Assistant.*\[\/center\]", "", desc, flags=re.IGNORECASE) + img_tags = re.findall(r"\[img[^\]]*\](.*?)\[/img\]", desc, re.IGNORECASE) + if img_tags: + for img_url in img_tags: + image_dict = { + 'img_url': img_url.strip(), + 'raw_url': img_url.strip(), + 'web_url': img_url.strip(), + } + imagelist.append(image_dict) + # Remove the [img] tag and its contents from the description + desc = re.sub(rf"\[img[^\]]*\]{re.escape(img_url)}\[/img\]", '', desc, flags=re.IGNORECASE) - # Replace spoiler tags - if spoiler_placeholders != []: + # Restore spoiler tags + if spoiler_placeholders: for i, spoiler in enumerate(spoiler_placeholders): desc = desc.replace(f"SPOILER_PLACEHOLDER-{i} ", spoiler) - # Check for empty [center] tags + # Check for and clean up empty [center] tags centers = re.findall(r"\[center[\s\S]*?\[\/center\]", desc) - if centers != []: + if centers: for center in centers: - full_center = center - replace = ['[center]', ' ', '\n', '[/center]'] - for each in replace: - center = center.replace(each, '') - if center == "": - desc = desc.replace(full_center, '') + # If [center] contains only whitespace or empty tags, remove the entire tag + cleaned_center = re.sub(r'\[center\]\s*\[\/center\]', '', center) + cleaned_center = re.sub(r'\[center\]\s+', '[center]', cleaned_center) + cleaned_center = re.sub(r'\s*\[\/center\]', '[/center]', cleaned_center) + if cleaned_center == '[center][/center]': + desc = desc.replace(center, '') + else: + desc = desc.replace(center, cleaned_center.strip()) - # Convert Comparison spoilers to [comparison=] - desc = self.convert_collapse_to_comparison(desc, "spoiler", spoilers) + # Remove bot signatures + bot_signature_regex = r"\[center\]\s*\[img=\d+\]https:\/\/blutopia\.xyz\/favicon\.ico\[\/img\]\s*\[b\]Uploaded Using \[url=https:\/\/github\.com\/HDInnovations\/UNIT3D\]UNIT3D\[\/url\] Auto Uploader\[\/b\]\s*\[img=\d+\]https:\/\/blutopia\.xyz\/favicon\.ico\[\/img\]\s*\[\/center\]" + desc = re.sub(bot_signature_regex, "", desc, flags=re.IGNORECASE) + desc = 
re.sub(r"\[center\].*Created by L4G's Upload Assistant.*\[\/center\]", "", desc, flags=re.IGNORECASE) - # Strip blank lines: - desc = desc.strip('\n') - desc = re.sub("\n\n+", "\n\n", desc) - while desc.startswith('\n'): - desc = desc.replace('\n', '', 1) - desc = desc.strip('\n') + # Ensure no dangling tags and remove extra blank lines + desc = re.sub(r'\n\s*\n', '\n', desc) # Remove multiple consecutive blank lines + desc = re.sub(r'\n\n+', '\n\n', desc) # Ensure no excessive blank lines + desc = desc.strip() # Final cleanup of trailing newlines and spaces + + # Strip trailing whitespace and newlines: + desc = desc.rstrip() if desc.replace('\n', '') == '': return "", imagelist diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 1fbcd8767..f9f9c646c 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -149,23 +149,31 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, id=None, f # Build the params for the API request params = {'api_token': self.config['TRACKERS'][tracker].get('api_key', '')} - # Determine the URL based on whether we're searching by ID or file name - if id: - url = f"{torrent_url}{id}" - console.print(f"[green]Searching {tracker} by ID: [bold yellow]{id}[/bold yellow]") - elif file_name: + # Determine the URL based on whether we're searching by file name or ID + if file_name: url = f"{search_url}?file_name={file_name}" console.print(f"[green]Searching {tracker} by file name: [bold yellow]{file_name}[/bold yellow]") + elif id: + url = f"{torrent_url}{id}?" + console.print(f"[green]Searching {tracker} by ID: [bold yellow]{id}[/bold yellow] via {url}") else: console.print("[red]No ID or file name provided for search.[/red]") return None, None, None, None, None, None, None, None, None response = requests.get(url=url, params=params) + # console.print(f"Requested URL: {response.url}") + # console.print(f"Status Code: {response.status_code}") try: - # console.print(f"[green]Raw response from {tracker}: {response.text}[/green]") - response = response.json() - data = response.get('data', []) + json_response = response.json() + # console.print(json_response) + except ValueError: + # console.print(f"Response Text: {response.text}") + return None, None, None, None, None, None, None, None, None + + try: + # Handle response when searching by file name (which might return a 'data' array) + data = json_response.get('data', []) if data: attributes = data[0].get('attributes', {}) @@ -178,7 +186,6 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, id=None, f imdb = attributes.get('imdb_id') infohash = attributes.get('info_hash') - # Process the description and imagelist if the description exists if description: bbcode = BBCODE() description, imagelist = bbcode.clean_unit3d_description(description, torrent_url) @@ -186,7 +193,7 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, id=None, f console.print(f"[blue]Extracted description: [yellow]{description}") # Allow user to edit or discard the description - console.print("[cyan]Do you want to edit or discard the description?[/cyan]") + console.print("[cyan]Do you want to edit, discard or keep the description?[/cyan]") edit_choice = input("[cyan]Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: [/cyan]") if edit_choice.lower() == 'e': @@ -202,7 +209,43 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, id=None, f else: console.print(f"[yellow]No description found for {tracker}.[/yellow]") else: - 
console.print(f"[yellow]No data found in the response for {tracker}.[/yellow]") + console.print(f"[yellow]No data found in the response for {tracker} when searching by file name.[/yellow]") + + # Handle response when searching by ID + if id and not data: + attributes = json_response.get('attributes', {}) + + # Extract data from the attributes + category = attributes.get('category') + description = attributes.get('description') + tmdb = attributes.get('tmdb_id') + tvdb = attributes.get('tvdb_id') + mal = attributes.get('mal_id') + imdb = attributes.get('imdb_id') + infohash = attributes.get('info_hash') + + if description: + bbcode = BBCODE() + description, imagelist = bbcode.clean_unit3d_description(description, torrent_url) + console.print(f"[green]Successfully grabbed description from {tracker}") + console.print(f"[blue]Extracted description: [yellow]{description}") + + # Allow user to edit or discard the description + console.print("[cyan]Do you want to edit, discard or keep the description?[/cyan]") + edit_choice = input("[cyan]Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: [/cyan]") + + if edit_choice.lower() == 'e': + edited_description = click.edit(description) + if edited_description: + description = edited_description.strip() + console.print(f"[green]Final description after editing:[/green] {description}") + elif edit_choice.lower() == 'd': + description = None + console.print("[yellow]Description discarded.[/yellow]") + else: + console.print(f"[green]Keeping the original description.[/green]") + else: + console.print(f"[yellow]No description found for {tracker}.[/yellow]") except Exception as e: console.print_exception() From 0d2dc342f55049f1c49e2a29eaed00c91aa444e4 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 1 Sep 2024 11:33:03 +1000 Subject: [PATCH 128/741] Print the custom descriptions as some form of feedback --- src/prep.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index b9aede7a9..a47afd9f0 100644 --- a/src/prep.py +++ b/src/prep.py @@ -249,7 +249,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met # console.print(f"[yellow]Could not find a matching release on {tracker_name}.[/yellow]") found_match = False - console.print(f"[cyan]Finished processing tracker: {tracker_name} with found_match: {found_match}[/cyan]") + # console.print(f"[cyan]Finished processing tracker: {tracker_name} with found_match: {found_match}[/cyan]") return meta, found_match async def handle_image_list(self, meta, tracker_name): @@ -2978,7 +2978,7 @@ async def gen_desc(self, meta): if meta.get('ptp', None) is not None and str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true" and desc_source in ['PTP', None]: if meta.get('skip_gen_desc', False): - console.print("[cyan]Skipping description generation as PTP description was retained.") + console.print("[cyan]Something went wrong with PTP description.") return meta ptp = PTP(config=self.config) ptp_desc, imagelist = await ptp.get_ptp_description(meta['ptp'], meta['is_disc']) @@ -3001,6 +3001,7 @@ async def gen_desc(self, meta): if template_desc.strip() != "": description.write(template_desc) description.write("\n") + console.print(f"[INFO] Description from template '{meta['desc_template']}' used:\n{template_desc}") if meta['nfo'] is not False: description.write("[code]") @@ -3009,6 +3010,7 @@ async def gen_desc(self, meta): description.write("[/code]") description.write("\n") meta['description'] = "CUSTOM" + 
console.print(f"[INFO] Description from NFO file '{nfo}' used:\n{nfo_content}") if desclink is not None: parsed = urllib.parse.urlparse(desclink.replace('/raw/', '/')) @@ -3021,17 +3023,20 @@ async def gen_desc(self, meta): description.write(requests.get(raw).text) description.write("\n") meta['description'] = "CUSTOM" + console.print(f"[INFO] Description from link '{desclink}' used:\n{desclink_content}") if descfile is not None: if os.path.isfile(descfile): text = open(descfile, 'r').read() description.write(text) meta['description'] = "CUSTOM" + console.print(f"[INFO] Description from file '{descfile}' used:\n{text}") if meta['desc'] is not None: description.write(meta['desc']) description.write("\n") meta['description'] = "CUSTOM" + console.print(f"[INFO] Custom description used:\n{meta['desc']}") description.write("\n") return meta From 780f0915c395b960d7026de841118a8228f81975 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 1 Sep 2024 11:51:41 +1000 Subject: [PATCH 129/741] Strip those pesky bot images --- src/bbcode.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/bbcode.py b/src/bbcode.py index 7b736e490..421a8dce7 100644 --- a/src/bbcode.py +++ b/src/bbcode.py @@ -198,6 +198,13 @@ def clean_unit3d_description(self, desc, site): # Remove the [img] tag and its contents from the description desc = re.sub(rf"\[img[^\]]*\]{re.escape(img_url)}\[/img\]", '', desc, flags=re.IGNORECASE) + # Filter out bot images from imagelist + bot_image_urls = [ + "https://blutopia.xyz/favicon.ico", # Example bot image URL + # Add any other known bot image URLs here + ] + imagelist = [img for img in imagelist if img['img_url'] not in bot_image_urls] + # Restore spoiler tags if spoiler_placeholders: for i, spoiler in enumerate(spoiler_placeholders): From 59d3b34520e24cf6cffe20f6845ece01bcb70005 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 1 Sep 2024 12:12:40 +1000 Subject: [PATCH 130/741] Only auto search if tracker is default tracker --- src/prep.py | 49 +++++++++++++++++++++++++++---------------------- 1 file changed, 27 insertions(+), 22 deletions(-) diff --git a/src/prep.py b/src/prep.py index a47afd9f0..902189aaf 100644 --- a/src/prep.py +++ b/src/prep.py @@ -120,8 +120,8 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met if blu_category.upper() in ['MOVIE', 'TV SHOW', 'FANRES']: meta['category'] = 'TV' if blu_category.upper() == 'TV SHOW' else blu_category.upper() if not meta.get('image_list'): # Only handle images if image_list is not already populated - meta['image_list'] = blu_imagelist - if meta.get('image_list'): + if blu_imagelist: # Ensure blu_imagelist is not empty before setting + meta['image_list'] = blu_imagelist await self.handle_image_list(meta, tracker_name) if blu_filename: meta['blu_filename'] = blu_filename # Store the filename in meta for later use @@ -393,27 +393,32 @@ async def gather_prep(self, meta, mode): if search_term: # console.print(f"[blue]Starting search with search_term: {search_term}[/blue]") + default_trackers = self.config['TRACKERS'].get('default_trackers', "").split(", ") + found_match = False - if str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true": - # console.print(f"[blue]Searching PTP for: {search_term}[/blue]") - ptp = PTP(config=self.config) - meta, match = await self.update_metadata_from_tracker('PTP', ptp, meta, search_term, search_file_folder) - found_match = found_match or match - # console.print(f"[blue]PTP search complete, found_match: {found_match}[/blue]") - - if 
str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": - # console.print(f"[blue]Searching HDB for: {search_term}[/blue]") - hdb = HDB(config=self.config) - meta, match = await self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder) - found_match = found_match or match - # console.print(f"[blue]HDB search complete, found_match: {found_match}[/blue]") - - if str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": - # console.print(f"[blue]Searching BLU for: {search_term}[/blue]") - blu = BLU(config=self.config) - meta, match = await self.update_metadata_from_tracker('BLU', blu, meta, search_term, search_file_folder) - found_match = found_match or match - # console.print(f"[blue]BLU search complete, found_match: {found_match}[/blue]") + if "PTP" in default_trackers: + if str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true": + # console.print(f"[blue]Searching PTP for: {search_term}[/blue]") + ptp = PTP(config=self.config) + meta, match = await self.update_metadata_from_tracker('PTP', ptp, meta, search_term, search_file_folder) + found_match = found_match or match + # console.print(f"[blue]PTP search complete, found_match: {found_match}[/blue]") + + if "HDB" in default_trackers: + if str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": + # console.print(f"[blue]Searching HDB for: {search_term}[/blue]") + hdb = HDB(config=self.config) + meta, match = await self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder) + found_match = found_match or match + # console.print(f"[blue]HDB search complete, found_match: {found_match}[/blue]") + + if "BLU" in default_trackers: + if str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": + # console.print(f"[blue]Searching BLU for: {search_term}[/blue]") + blu = BLU(config=self.config) + meta, match = await self.update_metadata_from_tracker('BLU', blu, meta, search_term, search_file_folder) + found_match = found_match or match + # console.print(f"[blue]BLU search complete, found_match: {found_match}[/blue]") if not found_match: console.print("[yellow]No matches found on any trackers.[/yellow]") From b56392bc1802692ebdc25c948ebb7abd73e09284 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 1 Sep 2024 12:18:37 +1000 Subject: [PATCH 131/741] Skip searching other sites if match accepted --- src/prep.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/src/prep.py b/src/prep.py index 902189aaf..0fa86306b 100644 --- a/src/prep.py +++ b/src/prep.py @@ -394,31 +394,33 @@ async def gather_prep(self, meta, mode): if search_term: # console.print(f"[blue]Starting search with search_term: {search_term}[/blue]") default_trackers = self.config['TRACKERS'].get('default_trackers', "").split(", ") - found_match = False - if "PTP" in default_trackers: + if "PTP" in default_trackers and not found_match: if str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true": # console.print(f"[blue]Searching PTP for: {search_term}[/blue]") ptp = PTP(config=self.config) meta, match = await self.update_metadata_from_tracker('PTP', ptp, meta, search_term, search_file_folder) - found_match = found_match or match - # console.print(f"[blue]PTP search complete, found_match: {found_match}[/blue]") + if match: + found_match = True + # console.print(f"[blue]PTP search complete, found_match: {found_match}[/blue]") - if "HDB" in default_trackers: + if "HDB" in default_trackers 
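Taken together, the two search commits gate each lookup on membership in default_trackers and on no earlier match having been accepted, so at most one site is queried to completion. The control flow in isolation, with stub search functions standing in for the real tracker calls:

def search_ptp(term):
    return False  # stub: no match found

def search_hdb(term):
    return True   # stub: match found and accepted

default_trackers = ["PTP", "HDB", "BLU"]
found_match = False

if "PTP" in default_trackers and not found_match:
    found_match = search_ptp("example")
if "HDB" in default_trackers and not found_match:
    found_match = search_hdb("example")
if "BLU" in default_trackers and not found_match:
    print("searching BLU")  # skipped in this run: HDB already matched

print(found_match)  # True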
and not found_match: if str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": # console.print(f"[blue]Searching HDB for: {search_term}[/blue]") hdb = HDB(config=self.config) meta, match = await self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder) - found_match = found_match or match - # console.print(f"[blue]HDB search complete, found_match: {found_match}[/blue]") + if match: + found_match = True + # console.print(f"[blue]HDB search complete, found_match: {found_match}[/blue]") - if "BLU" in default_trackers: + if "BLU" in default_trackers and not found_match: if str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": # console.print(f"[blue]Searching BLU for: {search_term}[/blue]") blu = BLU(config=self.config) meta, match = await self.update_metadata_from_tracker('BLU', blu, meta, search_term, search_file_folder) - found_match = found_match or match - # console.print(f"[blue]BLU search complete, found_match: {found_match}[/blue]") + if match: + found_match = True + # console.print(f"[blue]BLU search complete, found_match: {found_match}[/blue]") if not found_match: console.print("[yellow]No matches found on any trackers.[/yellow]") From 9f538339c3e5165b0e502dd57a9d596a92fe54dd Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 1 Sep 2024 12:37:34 +1000 Subject: [PATCH 132/741] Fix BLU image feedback --- src/prep.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/prep.py b/src/prep.py index 0fa86306b..f62e6eb4e 100644 --- a/src/prep.py +++ b/src/prep.py @@ -122,7 +122,8 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met if not meta.get('image_list'): # Only handle images if image_list is not already populated if blu_imagelist: # Ensure blu_imagelist is not empty before setting meta['image_list'] = blu_imagelist - await self.handle_image_list(meta, tracker_name) + if meta.get('image_list'): # Double-check if image_list is set before handling it + await self.handle_image_list(meta, tracker_name) if blu_filename: meta['blu_filename'] = blu_filename # Store the filename in meta for later use found_match = True @@ -155,8 +156,11 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met meta['blu_desc'] = blu_desc if blu_category.upper() in ['MOVIE', 'TV SHOW', 'FANRES']: meta['category'] = 'TV' if blu_category.upper() == 'TV SHOW' else blu_category.upper() - if not meta.get('image_list'): - meta['image_list'] = blu_imagelist + if not meta.get('image_list'): # Only handle images if image_list is not already populated + if blu_imagelist: # Ensure blu_imagelist is not empty before setting + meta['image_list'] = blu_imagelist + if meta.get('image_list'): # Double-check if image_list is set before handling it + await self.handle_image_list(meta, tracker_name) if blu_filename: meta['blu_filename'] = blu_filename found_match = True From 5d919321428f903f0a0091bfd37bf369dba01e39 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 1 Sep 2024 13:03:48 +1000 Subject: [PATCH 133/741] Lint clean recent changes --- src/bbcode.py | 4 ++-- src/prep.py | 24 ++++++++++++------------ src/trackers/COMMON.py | 4 ++-- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/src/bbcode.py b/src/bbcode.py index 421a8dce7..1afd03bb4 100644 --- a/src/bbcode.py +++ b/src/bbcode.py @@ -38,7 +38,7 @@ def __init__(self): def clean_ptp_description(self, desc, is_disc): # console.print(f"[yellow]Cleaning PTP description...") - + # Convert Bullet Points to 
- desc = desc.replace("•", "-") @@ -150,7 +150,7 @@ def clean_ptp_description(self, desc, is_disc): desc = desc.strip('\n') if desc.replace('\n', '').strip() == '': - console.print(f"[yellow]Description is empty after cleaning.") + console.print("[yellow]Description is empty after cleaning.") return "", imagelist return desc, imagelist diff --git a/src/prep.py b/src/prep.py index f62e6eb4e..eb4155d41 100644 --- a/src/prep.py +++ b/src/prep.py @@ -127,13 +127,13 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met if blu_filename: meta['blu_filename'] = blu_filename # Store the filename in meta for later use found_match = True - console.print(f"[green]BLU data successfully updated in meta[/green]") + console.print("[green]BLU data successfully updated in meta[/green]") else: console.print(f"[yellow]Skipped {tracker_name}, moving to the next site.[/yellow]") else: console.print(f"[yellow]No valid data found on {tracker_name}[/yellow]") else: - console.print(f"[yellow]No ID found in meta for BLU, searching by file name[/yellow]") + console.print("[yellow]No ID found in meta for BLU, searching by file name[/yellow]") blu_tmdb, blu_imdb, blu_tvdb, blu_mal, blu_desc, blu_category, meta['ext_torrenthash'], blu_imagelist, blu_filename = await COMMON(self.config).unit3d_torrent_info( "BLU", tracker_instance.torrent_url, @@ -164,7 +164,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met if blu_filename: meta['blu_filename'] = blu_filename found_match = True - console.print(f"[green]BLU data successfully updated in meta[/green]") + console.print("[green]BLU data successfully updated in meta[/green]") else: console.print(f"[yellow]Skipped {tracker_name}, moving to the next site.[/yellow]") else: @@ -177,7 +177,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met # console.print(f"[yellow]No PTP ID in meta, searching by search term[/yellow]") imdb, ptp_torrent_id, meta['ext_torrenthash'] = await tracker_instance.get_ptp_id_imdb(search_term, search_file_folder) if ptp_torrent_id: - meta['ptp'] = ptp_torrent_id + meta['ptp'] = ptp_torrent_id meta['imdb'] = str(imdb).zfill(7) if imdb else None else: ptp_torrent_id = meta['ptp'] @@ -201,13 +201,13 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met if meta.get('image_list'): await self.handle_image_list(meta, tracker_name) meta['skip_gen_desc'] = True - console.print(f"[green]PTP description and images added to metadata.[/green]") + console.print("[green]PTP description and images added to metadata.[/green]") if await self.prompt_user_for_confirmation("Do you want to keep the description from PTP?"): meta['skip_gen_desc'] = True found_match = True else: - console.print(f"[yellow]Description discarded from PTP[/yellow]") + console.print("[yellow]Description discarded from PTP[/yellow]") meta['skip_gen_desc'] = True meta['description'] = None else: @@ -218,20 +218,20 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met if meta.get('hdb') is not None: meta[manual_key] = meta[tracker_key] console.print(f"[cyan]{tracker_name} ID found in meta, reusing existing ID: {meta[tracker_key]}[/cyan]") - + # Use get_info_from_torrent_id function if ID is found in meta imdb, tvdb_id, hdb_name, meta['ext_torrenthash'] = await tracker_instance.get_info_from_torrent_id(meta[tracker_key]) - + meta['tvdb_id'] = str(tvdb_id) if tvdb_id else meta.get('tvdb_id') meta['hdb_name'] = hdb_name found_match = True - + else: - 
console.print(f"[yellow]No ID found in meta for HDB, searching by file name[/yellow]") - + console.print("[yellow]No ID found in meta for HDB, searching by file name[/yellow]") + # Use search_filename function if ID is not found in meta imdb, tvdb_id, hdb_name, meta['ext_torrenthash'], tracker_id = await tracker_instance.search_filename(search_term, search_file_folder) - + meta['tvdb_id'] = str(tvdb_id) if tvdb_id else meta.get('tvdb_id') meta['hdb_name'] = hdb_name if tracker_id: diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index f9f9c646c..d8edd0f7b 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -205,7 +205,7 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, id=None, f description = None console.print("[yellow]Description discarded.[/yellow]") else: - console.print(f"[green]Keeping the original description.[/green]") + console.print("[green]Keeping the original description.[/green]") else: console.print(f"[yellow]No description found for {tracker}.[/yellow]") else: @@ -243,7 +243,7 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, id=None, f description = None console.print("[yellow]Description discarded.[/yellow]") else: - console.print(f"[green]Keeping the original description.[/green]") + console.print("[green]Keeping the original description.[/green]") else: console.print(f"[yellow]No description found for {tracker}.[/yellow]") From 57e0b2b97eb9cce8dc3c0d92190d35861d9a151b Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 1 Sep 2024 15:59:01 +1000 Subject: [PATCH 134/741] More linting --- .flake8 | 2 +- .github/workflows/.flake8 | 2 +- cogs/commands.py | 2 +- data/example-config.py | 18 ++-- src/clients.py | 8 +- src/discparse.py | 32 +++---- src/prep.py | 126 ++++++++++++++-------------- src/search.py | 20 ++--- src/trackers/ACM.py | 8 +- src/trackers/AITHER.py | 6 +- src/trackers/AL.py | 117 +++++++++++++------------- src/trackers/BHD.py | 6 +- src/trackers/BHDTV.py | 6 +- src/trackers/BLU.py | 6 +- src/trackers/CBR.py | 6 +- src/trackers/COMMON.py | 2 +- src/trackers/FL.py | 57 +++++++------ src/trackers/FNP.py | 131 +++++++++++++---------------- src/trackers/HDB.py | 9 +- src/trackers/HDT.py | 106 +++++++++++------------- src/trackers/HP.py | 131 ++++++++++++++--------------- src/trackers/HUNO.py | 120 +++++++++++++-------------- src/trackers/JPTV.py | 142 ++++++++++++++------------------ src/trackers/LCD.py | 128 +++++++++++++--------------- src/trackers/LST.py | 6 +- src/trackers/LT.py | 139 ++++++++++++++----------------- src/trackers/MTV.py | 4 +- src/trackers/OE.py | 124 +++++++++++++--------------- src/trackers/OTW.py | 131 +++++++++++++---------------- src/trackers/PTP.py | 24 +++--- src/trackers/RF.py | 102 +++++++++++------------ src/trackers/UNIT3D_TEMPLATE.py | 10 +-- upload.py | 6 +- 33 files changed, 807 insertions(+), 930 deletions(-) diff --git a/.flake8 b/.flake8 index ac7343518..1116f5152 100644 --- a/.flake8 +++ b/.flake8 @@ -1,2 +1,2 @@ [flake8] -max-line-length = 1000 \ No newline at end of file +ignore = F501 \ No newline at end of file diff --git a/.github/workflows/.flake8 b/.github/workflows/.flake8 index 8a386a47b..68fb2c09f 100644 --- a/.github/workflows/.flake8 +++ b/.github/workflows/.flake8 @@ -1,2 +1,2 @@ [flake8] -max-line-length = 1000 +ignore = F501 diff --git a/cogs/commands.py b/cogs/commands.py index a02b7362c..373d24531 100644 --- a/cogs/commands.py +++ b/cogs/commands.py @@ -280,7 +280,7 @@ async def send_embed_and_upload(self, ctx, meta): channel = 
message.channel if meta['nohash'] is False: if meta.get('torrenthash', None) is not None: - reuse_torrent = await client.find_existing_torrent(meta) + reuse_torrent = await client.find_existing_torrent(meta) # noqa F821 if reuse_torrent is not None: prep.create_base_from_existing_torrent(reuse_torrent, meta['base_dir'], meta['uuid']) diff --git a/data/example-config.py b/data/example-config.py index 4bff88201..4160cfef4 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -225,7 +225,7 @@ "passkey": "HDB passkey", "announce_url": "https://hdbits.org/announce/Custom_Announce_URL", "anon": False, - }, + }, "MANUAL": { # Uncomment and replace link with filebrowser (https://github.com/filebrowser/filebrowser) link to the Upload-Assistant directory, this will link to your filebrowser instead of uploading to uguu.se # "filebrowser" : "https://domain.tld/filebrowser/files/Upload-Assistant/" @@ -322,14 +322,14 @@ # "/data/dir2", # ] "discord_emojis": { - "BLU": "💙", - "BHD": "🎉", - "AITHER": "🛫", - "STC": "📺", - "ACM": "🍙", - "MANUAL": "📩", - "UPLOAD": "✅", - "CANCEL": "🚫" + "BLU": "💙", + "BHD": "🎉", + "AITHER": "🛫", + "STC": "📺", + "ACM": "🍙", + "MANUAL": "📩", + "UPLOAD": "✅", + "CANCEL": "🚫" } } } diff --git a/src/clients.py b/src/clients.py index a0cc51ea8..5972c8cd4 100644 --- a/src/clients.py +++ b/src/clients.py @@ -53,7 +53,7 @@ async def add_to_client(self, meta, tracker): await self.qbittorrent(meta['path'], torrent, local_path, remote_path, client, meta['is_disc'], meta['filelist'], meta) elif torrent_client.lower() == "deluge": if meta['type'] == "DISC": - path = os.path.dirname(meta['path']) + path = os.path.dirname(meta['path']) # noqa F841 self.deluge(meta['path'], torrent_path, torrent, local_path, remote_path, client, meta) elif torrent_client.lower() == "watch": shutil.copy(torrent_path, client['watch_folder']) @@ -343,7 +343,7 @@ def deluge(self, path, torrent_path, torrent, local_path, remote_path, client, m client.connect() if client.connected is True: console.print("Connected to Deluge") - isdir = os.path.isdir(path) + isdir = os.path.isdir(path) # noqa F841 # Remote path mount if local_path.lower() in path.lower() and local_path.lower() != remote_path.lower(): path = path.replace(local_path, remote_path) @@ -395,8 +395,8 @@ def add_fast_resume(self, metainfo, datapath, torrent): priority=1, mtime=int(os.path.getmtime(filepath)), completed=( - (offset + fileinfo["length"] + piece_length - 1) // piece_length - - offset // piece_length + (offset + fileinfo["length"] + piece_length - 1) // piece_length - + offset // piece_length ), )) offset += fileinfo["length"] diff --git a/src/discparse.py b/src/discparse.py index 0a1cb28c0..b885e3261 100644 --- a/src/discparse.py +++ b/src/discparse.py @@ -132,9 +132,9 @@ def parse_bdinfo(self, bdinfo_input, files, path): else: three_dim = "" try: - bit_depth = split2[n+6].strip() - hdr_dv = split2[n+7].strip() - color = split2[n+8].strip() + bit_depth = split2[n + 6].strip() + hdr_dv = split2[n + 7].strip() + color = split2[n + 8].strip() except Exception: bit_depth = "" hdr_dv = "" @@ -142,15 +142,15 @@ def parse_bdinfo(self, bdinfo_input, files, path): bdinfo['video'].append({ 'codec': split2[0].strip(), 'bitrate': split2[1].strip(), - 'res': split2[n+2].strip(), - 'fps': split2[n+3].strip(), - 'aspect_ratio': split2[n+4].strip(), - 'profile': split2[n+5].strip(), + 'res': split2[n + 2].strip(), + 'fps': split2[n + 3].strip(), + 'aspect_ratio': split2[n + 4].strip(), + 'profile': split2[n + 5].strip(), 'bit_depth': 
bit_depth, 'hdr_dv': hdr_dv, 'color': color, '3d': three_dim, - }) + }) elif line.startswith("audio:"): if "(" in l: l = l.split("(")[0] @@ -170,12 +170,12 @@ def parse_bdinfo(self, bdinfo_input, files, path): bdinfo['audio'].append({ 'language': split2[0].strip(), 'codec': split2[1].strip(), - 'channels': split2[n+2].strip(), - 'sample_rate': split2[n+3].strip(), - 'bitrate': split2[n+4].strip(), + 'channels': split2[n + 2].strip(), + 'sample_rate': split2[n + 3].strip(), + 'bitrate': split2[n + 4].strip(), 'bit_depth': bit_depth, # Also DialNorm, but is not in use anywhere yet 'atmos_why_you_be_like_this': fuckatmos, - }) + }) elif line.startswith("disc title:"): title = l.split(':', 1)[1] bdinfo['title'] = title @@ -193,10 +193,10 @@ def parse_bdinfo(self, bdinfo_input, files, path): stripped = line.split() m2ts = {} bd_file = stripped[0] - time_in = stripped[1] + time_in = stripped[1] # noqa F841 bd_length = stripped[2] - bd_size = stripped[3] - bd_bitrate = stripped[4] + bd_size = stripped[3] # noqa F841 + bd_bitrate = stripped[4] # noqa F841 m2ts['file'] = bd_file m2ts['length'] = bd_length bdinfo['files'].append(m2ts) @@ -243,7 +243,7 @@ async def get_dvdinfo(self, discs): each['vob_mi_full'] = MediaInfo.parse(vob, output='STRING', full=False, mediainfo_options={'inform_version': '1'}).replace('\r\n', '\n') each['ifo_mi_full'] = MediaInfo.parse(ifo, output='STRING', full=False, mediainfo_options={'inform_version': '1'}).replace('\r\n', '\n') - size = sum(os.path.getsize(f) for f in os.listdir('.') if os.path.isfile(f))/float(1 << 30) + size = sum(os.path.getsize(f) for f in os.listdir('.') if os.path.isfile(f)) / float(1 << 30) if size <= 7.95: dvd_size = "DVD9" if size <= 4.37: diff --git a/src/prep.py b/src/prep.py index eb4155d41..ecf2d5806 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- from src.args import Args from src.console import console -from src.exceptions import * +from src.exceptions import * # noqa: F403 from src.trackers.PTP import PTP from src.trackers.BLU import BLU from src.trackers.HDB import HDB @@ -315,7 +315,6 @@ async def gather_prep(self, meta, mode): meta['sd'] = self.is_sd(meta['resolution']) mi = None - mi_dump = None elif meta['is_disc'] == "DVD": video, meta['scene'], meta['imdb'] = self.is_scene(meta['path'], meta.get('imdb', None)) @@ -542,7 +541,7 @@ async def get_disc(self, meta): is_disc = None videoloc = meta['path'] bdinfo = None - bd_summary = None + bd_summary = None # noqa: F841 discs = [] parse = DiscParse() for path, directories, files in os. 
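The dvdinfo hunk above sizes the disc by summing file sizes and dividing by float(1 << 30), i.e. bytes to GiB, then buckets the result against single- and dual-layer DVD capacities. The same arithmetic in isolation; dir_size_gib is a hypothetical helper and 4.2 a pretend measurement:

import os

def dir_size_gib(path):
    """Sum of the regular files directly inside *path*, in GiB (2**30 bytes)."""
    return sum(
        os.path.getsize(os.path.join(path, name))
        for name in os.listdir(path)
        if os.path.isfile(os.path.join(path, name))
    ) / float(1 << 30)

size = 4.2  # pretend result, in GiB
dvd_size = None
if size <= 7.95:   # dual-layer capacity
    dvd_size = "DVD9"
if size <= 4.37:   # single-layer capacity
    dvd_size = "DVD5"
print(dvd_size)    # DVD5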
walk(meta['path']): @@ -863,11 +862,11 @@ def mi_resolution(self, res, guess, width, scan, height, actual_height): "1280x540p": "720p", "1280x576p": "720p", "1024x576p": "576p", "576p": "576p", "1024x576i": "576i", "576i": "576i", - "854x480p": "480p", "480p": "480p", + "854x480p": "480p", "480p": "480p", "854x480i": "480i", "480i": "480i", "720x576p": "576p", "576p": "576p", "720x576i": "576i", "576i": "576i", - "720x480p": "480p", "480p": "480p", + "720x480p": "480p", "480p": "480p", "720x480i": "480i", "480i": "480i", "15360x8640p": "8640p", "8640p": "8640p", "7680x4320p": "4320p", "4320p": "4320p", @@ -1032,7 +1031,7 @@ def dvd_screenshots(self, meta, disc_num, num_screens=None): sar = 1 for track in ifo_mi.tracks: if track.track_type == "Video": - length = float(track.duration)/1000 + length = float(track.duration)/1000 # noqa F841 par = float(track.pixel_aspect_ratio) dar = float(track.display_aspect_ratio) width = float(track.width) @@ -1112,50 +1111,50 @@ def _is_vob_good(n, loops, num_screens): return voblength, n else: return 300, n - try: - voblength, n = _is_vob_good(n, 0, num_screens) - img_time = random.randint(round(voblength/5), round(voblength - voblength/5)) - ss_times = self.valid_ss_time(ss_times, num_screens+1, voblength) - ff = ffmpeg.input(f"{meta['discs'][disc_num]['path']}/VTS_{main_set[n]}", ss=ss_times[-1]) - if w_sar != 1 or h_sar != 1: - ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) - ( - ff - .output(image, vframes=1, pix_fmt="rgb24") - .overwrite_output() - .global_args('-loglevel', loglevel) - .run(quiet=debug) - ) - except Exception: - console.print(traceback.format_exc()) - self.optimize_images(image) - n += 1 - try: - if os.path.getsize(Path(image)) <= 31000000 and self.img_host == "imgbb": - i += 1 - elif os.path.getsize(Path(image)) <= 10000000 and self.img_host in ["imgbox", 'pixhost']: - i += 1 - elif os.path.getsize(Path(image)) <= 75000: - console.print("[yellow]Image is incredibly small (and is most likely to be a single color), retaking") - retake = True - time.sleep(1) - elif self.img_host == "ptpimg": - i += 1 - elif self.img_host == "lensdump": - i += 1 - elif self.img_host == "ptscreens": - i += 1 - else: - console.print("[red]Image too large for your image host, retaking") - retake = True - time.sleep(1) - looped = 0 - except Exception: - if looped >= 25: - console.print('[red]Failed to take screenshots') - exit() - looped += 1 - progress.advance(screen_task) + try: + voblength, n = _is_vob_good(n, 0, num_screens) + # img_time = random.randint(round(voblength/5), round(voblength - voblength/5)) + ss_times = self.valid_ss_time(ss_times, num_screens + 1, voblength) + ff = ffmpeg.input(f"{meta['discs'][disc_num]['path']}/VTS_{main_set[n]}", ss=ss_times[-1]) + if w_sar != 1 or h_sar != 1: + ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) + ( + ff + .output(image, vframes=1, pix_fmt="rgb24") + .overwrite_output() + .global_args('-loglevel', loglevel) + .run(quiet=debug) + ) + except Exception: + console.print(traceback.format_exc()) + self.optimize_images(image) + n += 1 + try: + if os.path.getsize(Path(image)) <= 31000000 and self.img_host == "imgbb": + i += 1 + elif os.path.getsize(Path(image)) <= 10000000 and self.img_host in ["imgbox", 'pixhost']: + i += 1 + elif os.path.getsize(Path(image)) <= 75000: + console.print("[yellow]Image is incredibly small (and is most likely to be a single color), retaking") + retake = True + time.sleep(1) + elif self.img_host == "ptpimg": + i += 1 + 
elif self.img_host == "lensdump": + i += 1 + elif self.img_host == "ptscreens": + i += 1 + else: + console.print("[red]Image too large for your image host, retaking") + retake = True + time.sleep(1) + looped = 0 + except Exception: + if looped >= 25: + console.print('[red]Failed to take screenshots') + exit() + looped += 1 + progress.advance(screen_task) # remove smallest image smallest = "" smallestsize = 99**99 @@ -1291,7 +1290,7 @@ def valid_ss_time(self, ss_times, num_screens, length): while valid_time is not True: valid_time = True if ss_times != []: - sst = random.randint(round(length/5), round(length/2)) + sst = random.randint(round(length / 5), round(length / 2)) for each in ss_times: tolerance = length / 10 / num_screens if abs(sst - each) <= tolerance: @@ -1299,7 +1298,7 @@ def valid_ss_time(self, ss_times, num_screens, length): if valid_time is True: ss_times.append(sst) else: - ss_times.append(random.randint(round(length/5), round(length/2))) + ss_times.append(random.randint(round(length / 5), round(length / 2))) return ss_times def optimize_images(self, image): @@ -1797,7 +1796,7 @@ def get_audio_v2(self, mi, meta, bdinfo): dual = "Dual-Audio" elif eng and not orig and meta['original_language'] not in ['zxx', 'xx', None] and not meta.get('no_dub', False): dual = "Dubbed" - except Exception as e: + except Exception: console.print(traceback.format_exc()) pass @@ -1855,7 +1854,7 @@ def get_audio_v2(self, mi, meta, bdinfo): "DTS-HD High": "DTS-HD HRA", "Free Lossless Audio Codec": "FLAC", "DTS-HD Master Audio": "DTS-HD MA" - } + } search_format = True # Ensure commercial and additional are not None before iterating @@ -1942,7 +1941,7 @@ def get_source(self, type, video, path, is_disc, meta): if track.track_type == "Video": system = track.standard if system not in ("PAL", "NTSC"): - raise WeirdSystem + raise WeirdSystem # noqa: F405 except Exception: try: other = guessit(video)['other'] @@ -2391,7 +2390,6 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i img_host = meta['imghost'] # Use the correctly updated image host from meta image_list = [] - newhost_list = [] if custom_img_list: image_glob = custom_img_list @@ -2499,7 +2497,7 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i break # Update progress bar and print the result on the same line - progress.console.print(f"[cyan]Uploaded image {i+1}/{total_screens}: {raw_url}", end='\r') + progress.console.print(f"[cyan]Uploaded image {i + 1}/{total_screens}: {raw_url}", end='\r') # Add the image details to the list image_dict = {'img_url': img_url, 'raw_url': raw_url, 'web_url': web_url} @@ -2674,7 +2672,7 @@ async def get_season_episode(self, video, meta): if meta['anime'] is False: try: if meta.get('manual_date'): - raise ManualDateException + raise ManualDateException # noqa: F405 try: guess_year = guessit(video)['year'] except Exception: @@ -2780,7 +2778,7 @@ async def get_season_episode(self, video, meta): url = "https://thexem.info/map/single" response = requests.post(url, params=params).json() if response['result'] == "failure": - raise XEMNotFound + raise XEMNotFound # noqa: F405 if meta['debug']: console.log(f"[cyan]TheXEM Absolute -> Standard[/cyan]\n{response}") season_int = int(response['data']['scene']['season']) # Convert to integer @@ -2818,7 +2816,7 @@ async def get_season_episode(self, video, meta): season = f"S{str(season_int).zfill(2)}" difference = diff else: - raise XEMNotFound + raise XEMNotFound # noqa: F405 except Exception: if meta['debug']: 
console.print_exception() @@ -2908,7 +2906,7 @@ def get_service(self, video, tag, audio, guess_title): 'Velocity': 'VLCT', 'VMEO': 'VMEO', 'Vimeo': 'VMEO', 'VRV': 'VRV', 'VUDU': 'VUDU', 'WME': 'WME', 'WatchMe': 'WME', 'WNET': 'WNET', 'W Network': 'WNET', 'WWEN': 'WWEN', 'WWE Network': 'WWEN', 'XBOX': 'XBOX', 'Xbox Video': 'XBOX', 'YHOO': 'YHOO', 'Yahoo': 'YHOO', 'YT': 'YT', 'ZDF': 'ZDF', 'iP': 'iP', 'BBC iPlayer': 'iP', 'iQIYI': 'iQIYI', 'iT': 'iT', 'iTunes': 'iT' - } + } video_name = re.sub(r"[.()]", " ", video.replace(tag, '').replace(guess_title, '')) if "DTS-HD MA" in audio: @@ -2972,7 +2970,7 @@ async def gen_desc(self, meta): desclink = meta.get('desclink', None) descfile = meta.get('descfile', None) - ptp_desc = blu_desc = "" + ptp_desc = "" desc_source = [] imagelist = [] with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: @@ -3021,7 +3019,7 @@ async def gen_desc(self, meta): description.write("[/code]") description.write("\n") meta['description'] = "CUSTOM" - console.print(f"[INFO] Description from NFO file '{nfo}' used:\n{nfo_content}") + console.print(f"[INFO] Description from NFO file '{nfo}' used:\n{nfo_content}") # noqa: F405 if desclink is not None: parsed = urllib.parse.urlparse(desclink.replace('/raw/', '/')) @@ -3034,7 +3032,7 @@ async def gen_desc(self, meta): description.write(requests.get(raw).text) description.write("\n") meta['description'] = "CUSTOM" - console.print(f"[INFO] Description from link '{desclink}' used:\n{desclink_content}") + console.print(f"[INFO] Description from link '{desclink}' used:\n{desclink_content}") # noqa: F405 if descfile is not None: if os.path.isfile(descfile): diff --git a/src/search.py b/src/search.py index f8d7324c9..d658e175c 100644 --- a/src/search.py +++ b/src/search.py @@ -12,13 +12,13 @@ def __init__(self, config): pass async def searchFile(self, filename): - os_info = platform.platform() + os_info = platform.platform() # noqa F841 filename = filename.lower() files_total = [] if filename == "": console.print("nothing entered") return - file_found = False + file_found = False # noqa F841 words = filename.split() async def search_file(search_dir): @@ -30,11 +30,11 @@ async def search_file(search_dir): l_name = name.lower() os_info = platform.platform() if await self.file_search(l_name, words): - file_found = True + file_found = True # noqa F841 if ('Windows' in os_info): - files_total_search.append(root+'\\'+name) + files_total_search.append(root + '\\' + name) else: - files_total_search.append(root+'/'+name) + files_total_search.append(root + '/' + name) return files_total_search config_dir = self.config['DISCORD']['search_dir'] if isinstance(config_dir, list): @@ -46,13 +46,13 @@ async def search_file(search_dir): return files_total async def searchFolder(self, foldername): - os_info = platform.platform() + os_info = platform.platform() # noqa F841 foldername = foldername.lower() folders_total = [] if foldername == "": console.print("nothing entered") return - folders_found = False + folders_found = False # noqa F841 words = foldername.split() async def search_dir(search_dir): @@ -66,11 +66,11 @@ async def search_dir(search_dir): os_info = platform.platform() if await self.file_search(l_name, words): - folder_found = True + folder_found = True # noqa F841 if ('Windows' in os_info): - folders_total_search.append(root+'\\'+name) + folders_total_search.append(root + '\\' + name) else: - folders_total_search.append(root+'/'+name) + folders_total_search.append(root + '/' 
+ name) return folders_total_search config_dir = self.config['DISCORD']['search_dir'] diff --git a/src/trackers/ACM.py b/src/trackers/ACM.py index 18a5fc7df..cbf191477 100644 --- a/src/trackers/ACM.py +++ b/src/trackers/ACM.py @@ -32,7 +32,7 @@ async def get_cat_id(self, category_name): category_id = { 'MOVIE': '1', 'TV': '2', - }.get(category_name, '0') + }.get(category_name, '0') return category_id async def get_type(self, meta): @@ -84,7 +84,7 @@ async def get_type_id(self, type): 'SDTV': '13', 'DVD 9': '16', 'HDTV': '17' - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): @@ -97,7 +97,7 @@ async def get_res_id(self, resolution): '576i': '4', '480p': '5', '480i': '5' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id # ACM rejects uploads with more that 4 keywords @@ -304,7 +304,7 @@ async def edit_name(self, meta): name = meta.get('name') aka = meta.get('aka') original_title = meta.get('original_title') - year = str(meta.get('year')) + year = str(meta.get('year')) # noqa F841 audio = meta.get('audio') source = meta.get('source') is_disc = meta.get('is_disc') diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index a9d485699..7436862a7 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -133,7 +133,7 @@ async def get_cat_id(self, category_name): category_id = { 'MOVIE': '1', 'TV': '2', - }.get(category_name, '0') + }.get(category_name, '0') return category_id async def get_type_id(self, type): @@ -144,7 +144,7 @@ async def get_type_id(self, type): 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '3' - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): @@ -160,7 +160,7 @@ async def get_res_id(self, resolution): '576i': '7', '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id async def search_existing(self, meta): diff --git a/src/trackers/AL.py b/src/trackers/AL.py index 4aecdd66f..cadd0dbca 100644 --- a/src/trackers/AL.py +++ b/src/trackers/AL.py @@ -2,7 +2,6 @@ # import discord import asyncio import requests -import os import platform from str2bool import str2bool @@ -28,22 +27,22 @@ def __init__(self, config): self.signature = None self.banned_groups = [""] pass - + async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '1') + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '1') return category_id async def get_type_id(self, type): type_id = { 'BDMV': '1', - 'DISC': '1', + 'DISC': '1', 'REMUX': '2', 'ENCODE': '3', - 'WEBDL': '4', - 'WEBRIP': '5', + 'WEBDL': '4', + 'WEBRIP': '5', 'HDTV': '6', 'DVDISO': '7', 'DVDRIP': '8', @@ -51,23 +50,23 @@ async def get_type_id(self, type): 'BDRIP': '10', 'COLOR': '11', 'MONO': '12' - }.get(type, '1') + }.get(type, '1') return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', - '4320p': '1', - '2160p': '2', - '1440p' : '3', + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', '1080p': '3', - '1080i':'4', - '720p': '5', - '576p': '6', + '1080i': '4', + '720p': '5', + '576p': '6', '576i': '7', - '480p': '8', + '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id async def upload(self, meta): @@ -80,12 +79,12 @@ async def upload(self, meta): region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) name = await self.edit_name(meta) - if meta['anon'] == 0 and 
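Every tracker module touched in this series resolves category, type and resolution through the same idiom: a literal dict consumed by .get with a fallback id. One representative mapping, taken from the diffs above and runnable on its own:

def get_res_id(resolution):
    # Unknown resolutions fall through to '10' ("Other" on most UNIT3D sites).
    return {
        '8640p': '10', '4320p': '1', '2160p': '2', '1440p': '3',
        '1080p': '3', '1080i': '4', '720p': '5', '576p': '6',
        '576i': '7', '480p': '8', '480i': '9',
    }.get(resolution, '10')

print(get_res_id('2160p'))  # '2'
print(get_res_id('240p'))   # '10'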
bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -95,34 +94,34 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : name, - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - + if region_id != 0: data['region_id'] = region_id if distributor_id != 0: @@ -134,18 +133,18 @@ async def upload(self, meta): 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - - if meta['debug'] == False: + + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() @@ -153,12 +152,12 @@ async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if 
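The anonymity guard that the lint pass moved from == False to is False reads awkwardly inline; unpacked, it says the upload is anonymous unless both the per-run flag and the tracker config decline it. A condensed, runnable restatement; resolve_anon is a hypothetical helper name:

from str2bool import str2bool

def resolve_anon(meta_anon, config_anon="False"):
    """Return 1 (anonymous) unless both the run flag and the config say no."""
    config_wants_anon = bool(str2bool(str(config_anon)))
    if meta_anon == 0 and config_wants_anon is False:
        return 0
    return 1

print(resolve_anon(0, "False"))  # 0: nobody asked for anonymity
print(resolve_anon(0, "True"))   # 1: the config forces it
print(resolve_anon(1, "False"))  # 1: the run flag forces it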
meta.get('edition', "") != "": params['name'] = params['name'] + f" {meta['edition']}" @@ -170,7 +169,7 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) @@ -178,5 +177,5 @@ async def search_existing(self, meta): # Got this from CBR and changed the encoding rename async def edit_name(self, meta): - name = meta['uuid'].replace('.mkv','').replace('.mp4','').replace(".", " ").replace("DDP2 0","DDP2.0").replace("DDP5 1","DDP5.1").replace("H 264","x264").replace("H 265","x265").replace("DD+7 1","DDP7.1").replace("AAC2 0","AAC2.0").replace('DD5 1','DD5.1').replace('DD2 0','DD2.0').replace('TrueHD 7 1','TrueHD 7.1').replace('DTS-HD MA 7 1','DTS-HD MA 7.1').replace('DTS-HD MA 5 1','DTS-HD MA 5.1').replace("TrueHD 5 1","TrueHD 5.1").replace("DTS-X 7 1","DTS-X 7.1").replace("DTS-X 5 1","DTS-X 5.1").replace("FLAC 2 0","FLAC 2.0").replace("FLAC 2 0","FLAC 2.0").replace("FLAC 5 1","FLAC 5.1").replace("DD1 0","DD1.0").replace("DTS ES 5 1","DTS ES 5.1").replace("DTS5 1","DTS 5.1").replace("AAC1 0","AAC1.0").replace("DD+5 1","DDP5.1").replace("DD+2 0","DDP2.0").replace("DD+1 0","DDP1.0") - return name \ No newline at end of file + name = meta['uuid'].replace('.mkv', '').replace('.mp4', '').replace(".", " ").replace("DDP2 0", "DDP2.0").replace("DDP5 1", "DDP5.1").replace("H 264", "x264").replace("H 265", "x265").replace("DD+7 1", "DDP7.1").replace("AAC2 0", "AAC2.0").replace('DD5 1', 'DD5.1').replace('DD2 0', 'DD2.0').replace('TrueHD 7 1', 'TrueHD 7.1').replace('DTS-HD MA 7 1', 'DTS-HD MA 7.1').replace('DTS-HD MA 5 1', 'DTS-HD MA 5.1').replace("TrueHD 5 1", "TrueHD 5.1").replace("DTS-X 7 1", "DTS-X 7.1").replace("DTS-X 5 1", "DTS-X 5.1").replace("FLAC 2 0", "FLAC 2.0").replace("FLAC 2 0", "FLAC 2.0").replace("FLAC 5 1", "FLAC 5.1").replace("DD1 0", "DD1.0").replace("DTS ES 5 1", "DTS ES 5.1").replace("DTS5 1", "DTS 5.1").replace("AAC1 0", "AAC1.0").replace("DD+5 1", "DDP5.1").replace("DD+2 0", "DDP2.0").replace("DD+1 0", "DDP1.0") + return name diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index 66dfbaa61..25bea57c3 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -53,7 +53,7 @@ async def upload(self, meta): torrent_file = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" files = { 'mediainfo': mi_dump, - } + } if os.path.exists(torrent_file): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files['file'] = open_torrent.read() @@ -123,7 +123,7 @@ async def get_cat_id(self, category_name): category_id = { 'MOVIE': '1', 'TV': '2', - }.get(category_name, '1') + }.get(category_name, '1') return category_id async def get_source(self, source): @@ -135,7 +135,7 @@ async def get_source(self, source): "Web": "WEB", "HDTV": "HDTV", "UHDTV": "HDTV", - "NTSC": "DVD", "NTSC DVD": "DVD", + "NTSC": "DVD", "NTSC DVD": "DVD", "PAL": "DVD", "PAL DVD": "DVD", } diff --git a/src/trackers/BHDTV.py b/src/trackers/BHDTV.py index 7d7969067..d675fdaa5 100644 --- a/src/trackers/BHDTV.py +++ b/src/trackers/BHDTV.py @@ -51,7 +51,7 @@ async def upload(self, meta): str2bool(self.config['TRACKERS'][self.tracker].get('anon', "False"))) is False: anon = 0 else: - anon = 1 + anon = 1 # noqa F841 if meta['bdinfo'] is not None: mi_dump 
= None @@ -122,7 +122,7 @@ async def get_cat_id(self, meta): async def get_type_movie_id(self, meta): type_id = '0' - test = meta['type'] + test = meta['type'] # noqa F841 if meta['type'] == 'DISC': if meta['3D']: type_id = '46' @@ -181,7 +181,7 @@ async def get_res_id(self, resolution): '1080p': '3', '1080i': '2', '720p': '1' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id async def edit_desc(self, meta): diff --git a/src/trackers/BLU.py b/src/trackers/BLU.py index 571e8451d..54509c9b7 100644 --- a/src/trackers/BLU.py +++ b/src/trackers/BLU.py @@ -123,7 +123,7 @@ async def get_cat_id(self, category_name, edition): 'MOVIE': '1', 'TV': '2', 'FANRES': '3' - }.get(category_name, '0') + }.get(category_name, '0') if category_name == 'MOVIE' and 'FANRES' in edition: category_id = '3' return category_id @@ -136,7 +136,7 @@ async def get_type_id(self, type): 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '12' - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): @@ -152,7 +152,7 @@ async def get_res_id(self, resolution): '576i': '7', '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id async def derived_dv_layer(self, meta): diff --git a/src/trackers/CBR.py b/src/trackers/CBR.py index 2c26c0897..8a58fd0b8 100644 --- a/src/trackers/CBR.py +++ b/src/trackers/CBR.py @@ -112,7 +112,7 @@ async def get_cat_id(self, category_name, edition, meta): 'MOVIE': '1', 'TV': '2', 'ANIMES': '4' - }.get(category_name, '0') + }.get(category_name, '0') if meta['anime'] is True and category_id == '2': category_id = '4' return category_id @@ -125,7 +125,7 @@ async def get_type_id(self, type): 'WEBDL': '4', 'WEBRIP': '5', 'HDTV': '6' - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): @@ -140,7 +140,7 @@ async def get_res_id(self, resolution): '480p': '8', '480i': '9', 'Other': '10', - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id async def search_existing(self, meta): diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index d8edd0f7b..b46b12e2d 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -143,7 +143,7 @@ async def unit3d_distributor_ids(self, distributor): return distributor_id async def unit3d_torrent_info(self, tracker, torrent_url, search_url, id=None, file_name=None): - tmdb = imdb = tvdb = description = category = infohash = mal = files = None + tmdb = imdb = tvdb = description = category = infohash = mal = files = None # noqa F841 imagelist = [] # Build the params for the API request diff --git a/src/trackers/FL.py b/src/trackers/FL.py index d2cd9ae31..a7c67d028 100644 --- a/src/trackers/FL.py +++ b/src/trackers/FL.py @@ -11,7 +11,7 @@ from bs4 import BeautifulSoup from src.trackers.COMMON import COMMON -from src.exceptions import * +from src.exceptions import * # noqa F403 from src.console import console @@ -64,10 +64,10 @@ async def get_category_id(self, meta): # 2 = DVD cat_id = 2 if has_ro_sub: - # 3 = DVD + RO + # 3 = DVD + RO cat_id = 3 - if meta.get('anime', False) == True: + if meta.get('anime', False) is True: # 24 = Anime cat_id = 24 return cat_id @@ -98,11 +98,10 @@ async def edit_name(self, meta): fl_name = ' '.join(fl_name.split()) fl_name = re.sub(r"[^0-9a-zA-ZÀ-ÿ. 
&+'\-\[\]]+", "", fl_name) fl_name = fl_name.replace(' ', '.').replace('..', '.') - return fl_name - + return fl_name ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### + ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### # noqa E266 ############################################################### async def upload(self, meta): @@ -115,9 +114,9 @@ async def upload(self, meta): # Confirm the correct naming order for FL cli_ui.info(f"Filelist name: {fl_name}") - if meta.get('unattended', False) == False: + if meta.get('unattended', False) is False: fl_confirm = cli_ui.ask_yes_no("Correct?", default=False) - if fl_confirm != True: + if fl_confirm is not True: fl_name_manually = cli_ui.ask_string("Please enter a proper name", default="") if fl_name_manually == "": console.print('No proper name given') @@ -128,10 +127,10 @@ async def upload(self, meta): # Torrent File Naming # Note: Don't Edit .torrent filename after creation, SubsPlease anime releases (because of their weird naming) are an exception - if meta.get('anime', True) == True and meta.get('tag', '') == '-SubsPlease': + if meta.get('anime', True) is True and meta.get('tag', '') == '-SubsPlease': torrentFileName = fl_name else: - if meta.get('isdir', False) == False: + if meta.get('isdir', False) is False: torrentFileName = meta.get('uuid') torrentFileName = os.path.splitext(torrentFileName)[0] else: @@ -140,26 +139,26 @@ async def upload(self, meta): # Download new .torrent from site fl_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', newline='').read() torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read() with open(torrent_path, 'rb') as torrentFile: torrentFileName = unidecode(torrentFileName) files = { - 'file' : (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent") + 'file': (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent") } data = { - 'name' : fl_name, - 'type' : cat_id, - 'descr' : fl_desc.strip(), - 'nfo' : mi_dump + 'name': fl_name, + 'type': cat_id, + 'descr': fl_desc.strip(), + 'nfo': mi_dump } if int(meta.get('imdb_id', '').replace('tt', '')) != 0: data['imdbid'] = meta.get('imdb_id', '').replace('tt', '') data['description'] = meta['imdb_info'].get('genres', '') - if self.uploader_name not in ("", None) and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if self.uploader_name not in ("", None) and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: data['epenis'] = self.uploader_name if has_ro_audio: data['materialro'] = 'on' @@ -192,7 +191,7 @@ async def upload(self, meta): console.print(data) console.print("\n\n") console.print(up.text) - raise UploadException(f"Upload to FL Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') + raise UploadException(f"Upload to FL Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') # noqa F405 return async def search_existing(self, meta): @@ -201,8 +200,8 @@ async def search_existing(self, meta): cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/FL.pkl") with 
open(cookiefile, 'rb') as cf: session.cookies.update(pickle.load(cf)) - - search_url = f"https://filelist.io/browse.php" + + search_url = "https://filelist.io/browse.php" if int(meta['imdb_id'].replace('tt', '')) != 0: params = { 'search': meta['imdb_id'], @@ -215,7 +214,7 @@ async def search_existing(self, meta): 'cat': await self.get_category_id(meta), 'searchin': '0' } - + r = session.get(search_url, params=params) await asyncio.sleep(0.5) soup = BeautifulSoup(r.text, 'html.parser') @@ -231,10 +230,10 @@ async def validate_credentials(self, meta): if not os.path.exists(cookiefile): await self.login(cookiefile) vcookie = await self.validate_cookies(meta, cookiefile) - if vcookie != True: + if vcookie is not True: console.print('[red]Failed to validate cookies. Please confirm that the site is up and your passkey is valid.') recreate = cli_ui.ask_yes_no("Log in again and create new session?") - if recreate == True: + if recreate is True: if os.path.exists(cookiefile): os.remove(cookiefile) await self.login(cookiefile) @@ -267,7 +266,7 @@ async def login(self, cookiefile): r = session.get("https://filelist.io/login.php") await asyncio.sleep(0.5) soup = BeautifulSoup(r.text, 'html.parser') - validator = soup.find('input', {'name' : 'validator'}).get('value') + validator = soup.find('input', {'name': 'validator'}).get('value') data = { 'validator': validator, 'username': self.username, @@ -314,7 +313,7 @@ async def edit_desc(self, meta): if meta['is_disc'] != 'BDMV': url = "https://up.img4k.net/api/description" data = { - 'mediainfo' : open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r').read(), + 'mediainfo': open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r').read(), } if int(meta['imdb_id'].replace('tt', '')) != 0: data['imdbURL'] = f"tt{meta['imdb_id']}" @@ -327,10 +326,10 @@ async def edit_desc(self, meta): else: # BD Description Generator final_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_EXT.txt", 'r', encoding='utf-8').read() - if final_desc.strip() != "": # Use BD_SUMMARY_EXT and bbcode format it + if final_desc.strip() != "": # Use BD_SUMMARY_EXT and bbcode format it final_desc = final_desc.replace('[/pre][/quote]', f'[/pre][/quote]\n\n{desc}\n', 1) final_desc = final_desc.replace('DISC INFO:', '[pre][quote=BD_Info][b][color=#FF0000]DISC INFO:[/color][/b]').replace('PLAYLIST REPORT:', '[b][color=#FF0000]PLAYLIST REPORT:[/color][/b]').replace('VIDEO:', '[b][color=#FF0000]VIDEO:[/color][/b]').replace('AUDIO:', '[b][color=#FF0000]AUDIO:[/color][/b]').replace('SUBTITLES:', '[b][color=#FF0000]SUBTITLES:[/color][/b]') - final_desc += "[/pre][/quote]\n" # Closed bbcode tags + final_desc += "[/pre][/quote]\n" # Closed bbcode tags # Upload screens and append to the end of the description url = "https://up.img4k.net/api/description" screen_glob = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['filename']}-*.png") @@ -341,10 +340,10 @@ async def edit_desc(self, meta): final_desc += response.text.replace('\r\n', '\n') descfile.write(final_desc) - if self.signature != None: + if self.signature is not None: descfile.write(self.signature) descfile.close() - + async def get_ro_tracks(self, meta): has_ro_audio = has_ro_sub = False if meta.get('is_disc', '') != 'BDMV': diff --git a/src/trackers/FNP.py b/src/trackers/FNP.py index 8c7ecd0fe..4608925ce 100644 --- a/src/trackers/FNP.py +++ b/src/trackers/FNP.py @@ -3,7 +3,6 @@ import asyncio import requests from str2bool import str2bool -import os import platform from 
src.trackers.COMMON import COMMON @@ -19,60 +18,50 @@ class FNP(): Upload """ - ############################################################### - ######## EDIT ME ######## - ############################################################### - - # ALSO EDIT CLASS NAME ABOVE - def __init__(self, config): self.config = config self.tracker = 'FNP' self.source_flag = 'FnP' self.upload_url = 'https://fearnopeer.com/api/torrents/upload' self.search_url = 'https://fearnopeer.com/api/torrents/filter' - self.signature = f"\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = [""] pass - + async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '0') return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', + 'DISC': '1', 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', + 'WEBDL': '4', + 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '3' - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', - '4320p': '1', - '2160p': '2', - '1440p' : '3', + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', '1080p': '3', - '1080i':'4', - '720p': '5', - '576p': '6', + '1080i': '4', + '720p': '5', + '576p': '6', '576i': '7', - '480p': '8', + '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### - async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) @@ -82,12 +71,12 @@ async def upload(self, meta): await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -97,34 +86,34 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : meta['name'], - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': meta['name'], + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 
'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - + if region_id != 0: data['region_id'] = region_id if distributor_id != 0: @@ -136,35 +125,31 @@ async def upload(self, meta): 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - - if meta['debug'] == False: + + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta.get('edition', "") != "": params['name'] = params['name'] + f" {meta['edition']}" @@ -176,8 +161,8 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes \ No newline at end of file + return dupes diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py index 4a3ef86ff..97e8710dd 100644 --- a/src/trackers/HDB.py +++ b/src/trackers/HDB.py @@ -8,11 +8,12 @@ from unidecode import unidecode from urllib.parse import urlparse, quote from src.trackers.COMMON import COMMON -from src.exceptions import * +from src.exceptions import * # noqa F403 from src.console import console -from datetime import datetime, date +from datetime import datetime from torf import Torrent + class HDB(): def __init__(self, config): @@ -90,7 +91,7 @@ async def get_res_id(self, resolution): '576i': '7', '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id async def get_tags(self, meta): @@ -313,7 +314,7 @@ async def upload(self, meta): console.print(data) console.print("\n\n") console.print(up.text) - raise UploadException(f"Upload to HDB Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') + raise UploadException(f"Upload to HDB Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') # noqa F405 return async def search_existing(self, meta): diff --git a/src/trackers/HDT.py b/src/trackers/HDT.py index 6bcf76964..a46abb831 100644 --- a/src/trackers/HDT.py +++ b/src/trackers/HDT.py @@ -2,22 +2,18 @@ import asyncio import re import os -import json -import glob import cli_ui -import pickle -from pathlib import Path from str2bool import str2bool from bs4 import BeautifulSoup from unidecode import unidecode from pymediainfo import MediaInfo - from src.trackers.COMMON import COMMON -from src.exceptions import * +from src.exceptions import * # noqa F403 from src.console import console + class HDT(): - + def __init__(self, config): self.config = config self.tracker = 'HDT' @@ -26,7 +22,7 @@ def __init__(self, config): self.password = config['TRACKERS'][self.tracker].get('password', '').strip() self.signature = None self.banned_groups = [""] - + async def get_category_id(self, meta): if meta['category'] == 'MOVIE': # BDMV @@ -37,7 +33,7 @@ async def get_category_id(self, meta): if meta['resolution'] in ('1080p', '1080i'): # 1 = Movie/Blu-Ray cat_id = 1 - + # REMUX if meta.get('type', '') == 'REMUX': if meta.get('uhd', '') == 'UHD' and meta['resolution'] == '2160p': @@ -46,7 +42,7 @@ async def get_category_id(self, meta): else: # 2 = Movie/Remux cat_id = 2 - + # REST OF THE STUFF if meta.get('type', '') not in ("DISC", "REMUX"): if meta['resolution'] == '2160p': @@ -68,7 +64,7 @@ async def get_category_id(self, meta): if meta['resolution'] in ('1080p', '1080i'): # 59 = TV Show/Blu-ray cat_id = 59 - + # REMUX if meta.get('type', '') == 'REMUX': if meta.get('uhd', '') == 'UHD' and meta['resolution'] == '2160p': @@ -77,7 +73,7 @@ async def get_category_id(self, meta): else: # 60 = TV Show/Remux cat_id = 60 - + # REST OF THE STUFF if meta.get('type', '') not in ("DISC", "REMUX"): if meta['resolution'] == '2160p': @@ -89,11 +85,8 @@ async def get_category_id(self, meta): elif meta['resolution'] == '720p': # 38 = TV Show/720p cat_id = 38 - - return cat_id - - + return cat_id async def edit_name(self, meta): hdt_name = meta['name'] @@ -103,14 +96,14 @@ async def edit_name(self, meta): hdt_name = hdt_name.replace(meta['audio'], meta['audio'].replace(' ', '', 1)) if 'DV' in meta.get('hdr', ''): hdt_name = hdt_name.replace(' DV ', ' DoVi ') - + hdt_name = ' '.join(hdt_name.split()) hdt_name = re.sub(r"[^0-9a-zA-ZÀ-ÿ. 
&+'\-\[\]]+", "", hdt_name) hdt_name = hdt_name.replace(':', '').replace('..', ' ').replace(' ', ' ') return hdt_name ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### + ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### # noqa E266 ############################################################### async def upload(self, meta): @@ -122,9 +115,9 @@ async def upload(self, meta): # Confirm the correct naming order for HDT cli_ui.info(f"HDT name: {hdt_name}") - if meta.get('unattended', False) == False: + if meta.get('unattended', False) is False: hdt_confirm = cli_ui.ask_yes_no("Correct?", default=False) - if hdt_confirm != True: + if hdt_confirm is not True: hdt_name_manually = cli_ui.ask_string("Please enter a proper name", default="") if hdt_name_manually == "": console.print('No proper name given') @@ -132,7 +125,7 @@ async def upload(self, meta): return else: hdt_name = hdt_name_manually - + # Upload hdt_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', newline='').read() torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" @@ -140,18 +133,18 @@ async def upload(self, meta): with open(torrent_path, 'rb') as torrentFile: torrentFileName = unidecode(hdt_name) files = { - 'torrent' : (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent") + 'torrent': (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent") } data = { - 'filename' : hdt_name, - 'category' : cat_id, - 'info' : hdt_desc.strip() + 'filename': hdt_name, + 'category': cat_id, + 'info': hdt_desc.strip() } # 3D if "3D" in meta.get('3d', ''): data['3d'] = 'true' - + # HDR if "HDR" in meta.get('hdr', ''): if "HDR10+" in meta['hdr']: @@ -161,19 +154,19 @@ async def upload(self, meta): data['HDR10'] = 'true' if "DV" in meta.get('hdr', ''): data['DolbyVision'] = 'true' - + # IMDB if int(meta.get('imdb_id', '').replace('tt', '')) != 0: data['infosite'] = f"https://www.imdb.com/title/tt{meta['imdb_id']}/" - + # Full Season Pack if int(meta.get('tv_pack', '0')) != 0: data['season'] = 'true' else: data['season'] = 'false' - + # Anonymous check - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: data['anonymous'] = 'false' else: data['anonymous'] = 'true' @@ -200,35 +193,34 @@ async def upload(self, meta): console.print(data) console.print("\n\n") console.print(up.text) - raise UploadException(f"Upload to HDT Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') + raise UploadException(f"Upload to HDT Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') # noqa F405 return - - + async def search_existing(self, meta): dupes = [] with requests.Session() as session: common = COMMON(config=self.config) cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/HDT.txt") session.cookies.update(await common.parseCookieFile(cookiefile)) - - search_url = f"https://hd-torrents.org/torrents.php" + + search_url = "https://hd-torrents.org/torrents.php" csrfToken = await self.get_csrfToken(session, search_url) if int(meta['imdb_id'].replace('tt', '')) != 0: params = { - 'csrfToken' : csrfToken, - 'search' : meta['imdb_id'], - 'active' : '0', - 'options' : '2', - 'category[]' : await self.get_category_id(meta) + 'csrfToken': csrfToken, + 
'search': meta['imdb_id'], + 'active': '0', + 'options': '2', + 'category[]': await self.get_category_id(meta) } else: params = { - 'csrfToken' : csrfToken, - 'search' : meta['title'], - 'category[]' : await self.get_category_id(meta), - 'options' : '3' + 'csrfToken': csrfToken, + 'search': meta['title'], + 'category[]': await self.get_category_id(meta), + 'options': '3' } - + r = session.get(search_url, params=params) await asyncio.sleep(0.5) soup = BeautifulSoup(r.text, 'html.parser') @@ -236,19 +228,17 @@ async def search_existing(self, meta): for each in find: if each['href'].startswith('details.php?id='): dupes.append(each.text) - + return dupes - async def validate_credentials(self, meta): cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/HDT.txt") vcookie = await self.validate_cookies(meta, cookiefile) - if vcookie != True: + if vcookie is not True: console.print('[red]Failed to validate cookies. Please confirm that the site is up or export a fresh cookie file from the site') return False return True - - + async def validate_cookies(self, meta, cookiefile): common = COMMON(config=self.config) url = "https://hd-torrents.org/index.php" @@ -267,10 +257,8 @@ async def validate_cookies(self, meta, cookiefile): return False else: return False - - - """ + """ Old login method, disabled because of site's DDOS protection. Better to use exported cookies. @@ -299,14 +287,13 @@ async def login(self, cookiefile): return """ - async def get_csrfToken(self, session, url): r = session.get(url) await asyncio.sleep(0.5) soup = BeautifulSoup(r.text, 'html.parser') - csrfToken = soup.find('input', {'name' : 'csrfToken'}).get('value') + csrfToken = soup.find('input', {'name': 'csrfToken'}).get('value') return csrfToken - + async def edit_desc(self, meta): # base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', newline='') as descfile: @@ -315,18 +302,18 @@ async def edit_desc(self, meta): video = meta['filelist'][0] mi_template = os.path.abspath(f"{meta['base_dir']}/data/templates/MEDIAINFO.txt") if os.path.exists(mi_template): - media_info = MediaInfo.parse(video, output="STRING", full=False, mediainfo_options={"inform" : f"file://{mi_template}"}) + media_info = MediaInfo.parse(video, output="STRING", full=False, mediainfo_options={"inform": f"file://{mi_template}"}) descfile.write(f"""[left][font=consolas]\n{media_info}\n[/font][/left]\n""") else: console.print("[bold red]Couldn't find the MediaInfo template") console.print("[green]Using normal MediaInfo for the description.") - + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8') as MI: descfile.write(f"""[left][font=consolas]\n{MI.read()}\n[/font][/left]\n\n""") else: with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8') as BD_SUMMARY: descfile.write(f"""[left][font=consolas]\n{BD_SUMMARY.read()}\n[/font][/left]\n\n""") - + # Add Screenshots images = meta['image_list'] if len(images) > 0: @@ -336,4 +323,3 @@ async def edit_desc(self, meta): descfile.write(f' ') descfile.close() - diff --git a/src/trackers/HP.py b/src/trackers/HP.py index 7c11e0744..3250213be 100644 --- a/src/trackers/HP.py +++ b/src/trackers/HP.py @@ -2,12 +2,12 @@ # import discord import asyncio import requests -import os import platform from str2bool import str2bool from src.trackers.COMMON import COMMON -from src.console import console +from src.console import 
console + class HP(): """ @@ -18,9 +18,6 @@ class HP(): Upload """ - ############################################################### - ######## EDIT ME ######## - ############################################################### def __init__(self, config): self.config = config self.tracker = 'HP' @@ -30,45 +27,41 @@ def __init__(self, config): self.signature = None self.banned_groups = [""] pass - + async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '0') return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', + 'DISC': '1', 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', + 'WEBDL': '4', + 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '3' - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', - '4320p': '1', - '2160p': '2', - '1440p' : '3', + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', '1080p': '3', - '1080i':'4', - '720p': '5', - '576p': '6', + '1080i': '4', + '720p': '5', + '576p': '6', '576i': '7', - '480p': '8', + '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### - async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) @@ -78,12 +71,12 @@ async def upload(self, meta): await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -93,34 +86,34 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : meta['name'], - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': meta['name'], + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 
'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - + if region_id != 0: data['region_id'] = region_id if distributor_id != 0: @@ -132,41 +125,37 @@ async def upload(self, meta): 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - - if meta['debug'] == False: + + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta['category'] == 'TV': params['name'] = params['name'] + f"{meta.get('season', '')}{meta.get('episode', '')}" if meta.get('edition', "") != "": params['name'] = params['name'] + meta['edition'] - + try: response = requests.get(url=self.search_url, params=params) response = response.json() @@ -175,8 +164,8 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes \ No newline at end of file + return dupes diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index ee13c0338..9d80b2c27 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -2,7 +2,6 @@ # import discord import asyncio import requests -from difflib import SequenceMatcher from str2bool import str2bool import os import re @@ -11,6 +10,7 @@ from src.trackers.COMMON import COMMON from src.console import console + class HUNO(): """ Edit for Tracker: @@ -29,7 +29,6 @@ def __init__(self, config): self.banned_groups = ["4K4U, Bearfish, BiTOR, BONE, D3FiL3R, d3g, DTR, ELiTE, EVO, eztv, EzzRips, FGT, HashMiner, HETeam, HEVCBay, HiQVE, HR-DR, iFT, ION265, iVy, JATT, Joy, LAMA, m3th, MeGusta, MRN, Musafirboy, OEPlus, Pahe.in, PHOCiS, PSA, RARBG, RMTeam, ShieldBearer, SiQ, TBD, Telly, TSP, VXT, WKS, YAWNiX, YIFY, YTS"] pass - async def upload(self, meta): common = COMMON(config=self.config) await common.unit3d_edit_desc(meta, self.tracker, self.signature) @@ -37,17 +36,17 @@ async def upload(self, meta): cat_id = await self.get_cat_id(meta['category']) type_id = await self.get_type_id(meta) resolution_id = await self.get_res_id(meta['resolution']) - if meta['anon'] == 0 and bool(str2bool(self.config['TRACKERS']['HUNO'].get('anon', "False"))) == False: + if meta['anon'] == 0 and bool(str2bool(self.config['TRACKERS']['HUNO'].get('anon', "False"))) is False: anon = 0 else: anon = 1 # adding logic to check if its an encode or webrip and not HEVC as only HEVC encodes and webrips are allowed if meta['video_codec'] != "HEVC" and (meta['type'] == "ENCODE" or meta['type'] == "WEBRIP"): - console.print(f'[bold red]Only x265/HEVC encodes are allowed') + console.print('[bold red]Only x265/HEVC encodes are allowed') return - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -57,22 +56,22 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[HUNO]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : await self.get_name(meta), - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : await self.is_plex_friendly(meta), - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], + 'name': await self.get_name(meta), + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': await self.is_plex_friendly(meta), + 'sd': meta['sd'], + 'keywords': meta['keywords'], 'season_pack': meta.get('tv_pack', 0), # 'featured' : 0, # 'free' : 0, @@ -95,18 +94,18 @@ async def upload(self, meta): 'api_token': tracker_config['api_key'].strip() } - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) # adding torrent link to comment of torrent file t_id = 
response.json()['data'].split(".")[1].split("/")[3] await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://hawke.uno/torrents/" + t_id) - except: + except Exception: console.print("It may have uploaded, go check") return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() @@ -124,7 +123,7 @@ def get_audio(self, meta): if language == "zxx": language = "Silent" return f'{codec} {channels} {language}' - + def get_basename(self, meta): path = next(iter(meta['filelist']), meta['path']) return os.path.basename(path) @@ -136,8 +135,8 @@ async def get_name(self, meta): basename = self.get_basename(meta) hc = meta.get('hardcoded-subs') type = meta.get('type', "") - title = meta.get('title',"") - alt_title = meta.get('aka', "") + title = meta.get('title', "") + alt_title = meta.get('aka', "") # noqa F841 year = meta.get('year', "") resolution = meta.get('resolution', "") audio = self.get_audio(meta) @@ -156,7 +155,7 @@ async def get_name(self, meta): hdr = meta.get('hdr', "") if not hdr.strip(): hdr = "SDR" - distributor = meta.get('distributor', "") + distributor = meta.get('distributor', "") # noqa F841 video_codec = meta.get('video_codec', "") video_encode = meta.get('video_encode', "").replace(".", "") if 'x265' in basename: @@ -170,57 +169,55 @@ async def get_name(self, meta): search_year = year scale = "DS4K" if "DS4K" in basename.upper() else "RM4K" if "RM4K" in basename.upper() else "" - #YAY NAMING FUN - if meta['category'] == "MOVIE": #MOVIE SPECIFIC - if type == "DISC": #Disk + # YAY NAMING FUN + if meta['category'] == "MOVIE": # MOVIE SPECIFIC + if type == "DISC": # Disk if meta['is_disc'] == 'BDMV': name = f"{title} ({year}) {three_d} {edition} ({resolution} {region} {uhd} {source} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" elif meta['is_disc'] == 'DVD': name = f"{title} ({year}) {edition} ({resolution} {dvd_size} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" elif meta['is_disc'] == 'HDDVD': name = f"{title} ({year}) {edition} ({resolution} {source} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" - elif type == "REMUX" and source == "BluRay": #BluRay Remux + elif type == "REMUX" and source == "BluRay": # BluRay Remux name = f"{title} ({year}) {three_d} {edition} ({resolution} {uhd} {source} {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" - elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD"): #DVD Remux + elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD"): # DVD Remux name = f"{title} ({year}) {edition} (DVD {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" - elif type == "ENCODE": #Encode + elif type == "ENCODE": # Encode name = f"{title} ({year}) {edition} ({resolution} {scale} {uhd} {source} {hybrid} {video_encode} {hdr} {audio} {tag}) {repack}" - elif type in ("WEBDL", "WEBRIP"): #WEB + elif type in ("WEBDL", "WEBRIP"): # WEB name = f"{title} ({year}) {edition} ({resolution} {scale} {uhd} {service} WEB-DL {hybrid} {video_encode} {hdr} {audio} {tag}) {repack}" - elif type == "HDTV": #HDTV + elif type == "HDTV": # HDTV name = f"{title} ({year}) {edition} ({resolution} HDTV {hybrid} {video_encode} {audio} {tag}) {repack}" - elif meta['category'] == "TV": #TV SPECIFIC - if type == "DISC": #Disk + elif meta['category'] == "TV": # TV SPECIFIC + if type == "DISC": # Disk if meta['is_disc'] == 'BDMV': name = f"{title} ({search_year}) {season}{episode} {three_d} {edition} 
({resolution} {region} {uhd} {source} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" if meta['is_disc'] == 'DVD': name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} {dvd_size} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" elif meta['is_disc'] == 'HDDVD': name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} {source} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" - elif type == "REMUX" and source == "BluRay": #BluRay Remux - name = f"{title} ({search_year}) {season}{episode} {three_d} {edition} ({resolution} {uhd} {source} {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" #SOURCE - elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD"): #DVD Remux - name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} DVD {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" #SOURCE - elif type == "ENCODE": #Encode - name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} {scale} {uhd} {source} {hybrid} {video_encode} {hdr} {audio} {tag}) {repack}" #SOURCE - elif type in ("WEBDL", "WEBRIP"): #WEB + elif type == "REMUX" and source == "BluRay": # BluRay Remux + name = f"{title} ({search_year}) {season}{episode} {three_d} {edition} ({resolution} {uhd} {source} {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" # SOURCE + elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD"): # DVD Remux + name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} DVD {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" # SOURCE + elif type == "ENCODE": # Encode + name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} {scale} {uhd} {source} {hybrid} {video_encode} {hdr} {audio} {tag}) {repack}" # SOURCE + elif type in ("WEBDL", "WEBRIP"): # WEB name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} {scale} {uhd} {service} WEB-DL {hybrid} {video_encode} {hdr} {audio} {tag}) {repack}" - elif type == "HDTV": #HDTV + elif type == "HDTV": # HDTV name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} HDTV {hybrid} {video_encode} {audio} {tag}) {repack}" if hc: name = re.sub(r'((\([0-9]{4}\)))', r'\1 Ensubbed', name) return ' '.join(name.split()).replace(": ", " - ") - async def get_cat_id(self, category_name): category_id = { 'MOVIE': '1', 'TV': '2', - }.get(category_name, '0') + }.get(category_name, '0') return category_id - async def get_type_id(self, meta): basename = self.get_basename(meta) type = meta['type'] @@ -236,43 +233,40 @@ async def get_type_id(self, meta): else: return '0' - async def get_res_id(self, resolution): resolution_id = { - 'Other':'10', + 'Other': '10', '4320p': '1', '2160p': '2', '1080p': '3', - '1080i':'4', + '1080i': '4', '720p': '5', '576p': '6', '576i': '7', '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id - async def is_plex_friendly(self, meta): lossy_audio_codecs = ["AAC", "DD", "DD+", "OPUS"] - if any(l in meta["audio"] for l in lossy_audio_codecs): + if any(l in meta["audio"] for l in lossy_audio_codecs): # noqa E741 return 1 return 0 - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS']['HUNO']['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : 
"" + 'api_token': self.config['TRACKERS']['HUNO']['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta['category'] == 'TV': params['name'] = f"{meta.get('season', '')}{meta.get('episode', '')}" @@ -286,7 +280,7 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) diff --git a/src/trackers/JPTV.py b/src/trackers/JPTV.py index 06253aebe..1502d8787 100644 --- a/src/trackers/JPTV.py +++ b/src/trackers/JPTV.py @@ -2,7 +2,6 @@ # import discord import asyncio import requests -import os import platform from str2bool import str2bool @@ -19,12 +18,6 @@ class JPTV(): Upload """ - ############################################################### - ######## EDIT ME ######## - ############################################################### - - # ALSO EDIT CLASS NAME ABOVE - def __init__(self, config): self.config = config self.tracker = 'JPTV' @@ -34,54 +27,46 @@ def __init__(self, config): self.signature = None self.banned_groups = [""] pass - + async def get_cat_id(self, meta): category_id = { - 'MOVIE': '1', - 'TV': '2', + 'MOVIE': '1', + 'TV': '2', }.get(meta['category'], '0') if meta['anime']: category_id = { - 'MOVIE': '7', - 'TV': '9', + 'MOVIE': '7', + 'TV': '9', }.get(meta['category'], '0') return category_id async def get_type_id(self, type): type_id = { - 'DISC': '16', + 'DISC': '16', 'REMUX': '18', - 'WEBDL': '4', - 'WEBRIP': '5', + 'WEBDL': '4', + 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '3' - }.get(type, '0') - # DVDISO 17 - # DVDRIP 1 - # TS (Raw) 14 - # Re-encode 15 + }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', - '4320p': '1', - '2160p': '2', - '1440p' : '3', + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', '1080p': '3', - '1080i':'4', - '720p': '5', - '576p': '6', + '1080i': '4', + '720p': '5', + '576p': '6', '576i': '7', - '480p': '8', + '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### - async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) @@ -92,12 +77,12 @@ async def upload(self, meta): region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) jptv_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = "" for each in meta['discs']: mi_dump = mi_dump + each['summary'].strip() + "\n\n" @@ -108,34 +93,34 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = 
{'torrent': open_torrent} data = { - 'name' : jptv_name, - 'description' : desc, - 'mediainfo' : mi_dump, - # 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': jptv_name, + 'description': desc, + 'mediainfo': mi_dump, + # 'bdinfo' : bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - + if region_id != 0: data['region_id'] = region_id if distributor_id != 0: @@ -147,35 +132,31 @@ async def upload(self, meta): 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - - if meta['debug'] == False: + + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdb' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdb': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta.get('edition', "") != "": params['name'] = params['name'] + f" {meta['edition']}" @@ -190,21 +171,20 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') await asyncio.sleep(5) return dupes - async def edit_name(self, meta): name = meta.get('name') aka = meta.get('aka') original_title = meta.get('original_title') - year = str(meta.get('year')) + year = str(meta.get('year')) # noqa F841 audio = meta.get('audio') - source = meta.get('source') - is_disc = meta.get('is_disc') + source = meta.get('source') # noqa F841 + is_disc = meta.get('is_disc') # noqa F841 if aka != '': # ugly fix to remove the extra space in the title aka = aka + ' ' @@ -217,4 +197,4 @@ async def edit_name(self, meta): name = name.replace(audio.strip().replace(" ", " "), audio.replace(" ", "")) name = name.replace("DD+ ", "DD+") - return name \ No newline at end of file + return name diff --git a/src/trackers/LCD.py b/src/trackers/LCD.py index 8e02f8d77..6ca5cac61 100644 --- a/src/trackers/LCD.py +++ b/src/trackers/LCD.py @@ -2,7 +2,6 @@ # import discord import asyncio import requests -import os import platform from str2bool import str2bool @@ -10,7 +9,6 @@ from src.console import console - class LCD(): """ Edit for Tracker: @@ -25,11 +23,11 @@ def __init__(self, config): self.source_flag = 'LOCADORA' self.search_url = 'https://locadora.cc/api/torrents/filter' self.torrent_url = 'https://locadora.cc/api/torrents/' - self.upload_url = 'https://locadora.cc/api/torrents/upload' - self.signature = f"\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" + self.upload_url = 'https://locadora.cc/api/torrents/upload' + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = [""] pass - + async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) @@ -40,12 +38,12 @@ async def upload(self, meta): region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -55,31 +53,31 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[LCD]{meta['clean_name']}.torrent", 'rb') files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")} data = { - 'name' : name, - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': 
meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 @@ -96,74 +94,67 @@ async def upload(self, meta): params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - - if meta['debug'] == False: + + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - async def get_cat_id(self, category_name, edition, meta): category_id = { - 'MOVIE': '1', + 'MOVIE': '1', 'TV': '2', 'ANIMES': '6' - }.get(category_name, '0') - if meta['anime'] == True and category_id == '2': + }.get(category_name, '0') + if meta['anime'] is True and category_id == '2': category_id = '6' return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', + 'DISC': '1', 'REMUX': '2', 'ENCODE': '3', - 'WEBDL': '4', - 'WEBRIP': '5', + 'WEBDL': '4', + 'WEBRIP': '5', 'HDTV': '6' - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { -# '8640p':'10', - '4320p': '1', - '2160p': '2', -# '1440p' : '2', + # '8640p':'10', + '4320p': '1', + '2160p': '2', + # '1440p' : '2', '1080p': '3', - '1080i':'34', - '720p': '5', - '576p': '6', + '1080i': '34', + '720p': '5', + '576p': '6', '576i': '7', - '480p': '8', + '480p': '8', '480i': '9', 'Other': '10', - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id - - - async def search_existing(self, meta): dupes = [] console.print("[yellow]Buscando por duplicatas no tracker...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category'], meta.get('edition', ''), meta), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category'], meta.get('edition', ''), meta), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta['category'] == 'TV': params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" @@ -177,15 +168,14 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Não foi possivel buscar no tracker torrents duplicados. 
O tracker está offline ou sua api está incorreta')
             await asyncio.sleep(5)
 
         return dupes
 
     async def edit_name(self, meta):
-
-
-        name = meta['uuid'].replace('.mkv','').replace('.mp4','').replace(".", " ").replace("DDP2 0","DDP2.0").replace("DDP5 1","DDP5.1").replace("H 264","H.264").replace("H 265","H.264").replace("DD+7 1","DD+7.1").replace("AAC2 0","AAC2.0").replace('DD5 1','DD5.1').replace('DD2 0','DD2.0').replace('TrueHD 7 1','TrueHD 7.1').replace('DTS-HD MA 7 1','DTS-HD MA 7.1').replace('-C A A','-C.A.A')
-        
+
+        name = meta['uuid'].replace('.mkv', '').replace('.mp4', '').replace(".", " ").replace("DDP2 0", "DDP2.0").replace("DDP5 1", "DDP5.1").replace("H 264", "H.264").replace("H 265", "H.265").replace("DD+7 1", "DD+7.1").replace("AAC2 0", "AAC2.0").replace('DD5 1', 'DD5.1').replace('DD2 0', 'DD2.0').replace('TrueHD 7 1', 'TrueHD 7.1').replace('DTS-HD MA 7 1', 'DTS-HD MA 7.1').replace('-C A A', '-C.A.A')
+
         return name
diff --git a/src/trackers/LST.py b/src/trackers/LST.py
index bf25df687..ff56ad739 100644
--- a/src/trackers/LST.py
+++ b/src/trackers/LST.py
@@ -35,7 +35,7 @@ async def get_cat_id(self, category_name, keywords, service):
             'MOVIE': '1',
             'TV': '2',
             'Anime': '6',
-            }.get(category_name, '0') 
+            }.get(category_name, '0')
         if category_name == 'TV' and 'anime' in keywords:
             category_id = '6'
         elif category_name == 'TV' and 'hentai' in service:
@@ -50,7 +50,7 @@ async def get_type_id(self, type):
             'WEBRIP': '5',
             'HDTV': '6',
             'ENCODE': '3'
-            }.get(type, '0') 
+            }.get(type, '0')
         return type_id
 
     async def get_res_id(self, resolution):
@@ -66,7 +66,7 @@ async def get_res_id(self, resolution):
             '576i': '7',
             '480p': '8',
             '480i': '9'
-            }.get(resolution, '10') 
+            }.get(resolution, '10')
         return resolution_id
 
     async def upload(self, meta):
diff --git a/src/trackers/LT.py b/src/trackers/LT.py
index b5965c843..367428636 100644
--- a/src/trackers/LT.py
+++ b/src/trackers/LT.py
@@ -2,7 +2,6 @@
 # import discord
 import asyncio
 import requests
-import os
 import platform
 from str2bool import str2bool
 
@@ -19,12 +18,6 @@ class LT():
         Upload
     """
 
-    ###############################################################
-    ########                    EDIT ME                    ########
-    ###############################################################
-
-    # ALSO EDIT CLASS NAME ABOVE
-
     def __init__(self, config):
         self.config = config
         self.tracker = 'LT'
@@ -34,57 +27,57 @@ def __init__(self, config):
         self.signature = ''
         self.banned_groups = [""]
         pass
-
+
     async def get_cat_id(self, category_name, meta):
         category_id = {
-            'MOVIE': '1', 
+            'MOVIE': '1',
             'TV': '2',
             'ANIME': '5',
             'TELENOVELAS': '8',
-            'Doramas & Turcas': '20', 
-            }.get(category_name, '0') 
-        #if is anime
-        if meta['anime'] == True and category_id == '2':
+            'Doramas & Turcas': '20',
+            }.get(category_name, '0')
+        # if is anime
+        if meta['anime'] is True and category_id == '2':
             category_id = '5'
-        #elif is telenovela
+        # elif is telenovela
         elif category_id == '2' and ("telenovela" in meta['keywords'] or "telenovela" in meta['overview']):
             category_id = '8'
-        #if is TURCAS o Doramas
-        #elif meta["original_language"] in ['ja', 'ko', 'tr'] and category_id == '2' and 'Drama' in meta['genres'] :
-            #category_id = '20'
+        # if is TURCAS o Doramas
+        # elif meta["original_language"] in ['ja', 'ko', 'tr'] and category_id == '2' and 'Drama' in meta['genres'] :
+        # category_id = '20'
         return category_id
 
     async def get_type_id(self, type):
         type_id = {
-            'DISC': '1', 
+            'DISC': '1',
             'REMUX': '2',
-            'WEBDL': '4', 
-            'WEBRIP': '5', 
+            'WEBDL': '4',
+            'WEBRIP': '5',
             'HDTV': '6',
             'ENCODE': '3'
-            }.get(type, '0') 
+            }.get(type, '0')
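+        # Illustrative note on the lookup idiom above (example inputs are
+        # hypothetical): the dict literal plus .get() acts as a switch with a
+        # default, so a mapped key such as 'REMUX' resolves to '2', while an
+        # unmapped value like 'BDRIP' falls back to the '0' default.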
        return type_id

    async def get_res_id(self, resolution):
        resolution_id = {
-            '8640p':'10',
-            '4320p': '1',
-            '2160p': '2',
-            '1440p' : '3',
+            '8640p': '10',
+            '4320p': '1',
+            '2160p': '2',
+            '1440p': '3',
            '1080p': '3',
-            '1080i':'4',
-            '720p': '5',
-            '576p': '6',
+            '1080i': '4',
+            '720p': '5',
+            '576p': '6',
            '576i': '7',
-            '480p': '8',
+            '480p': '8',
            '480i': '9'
-            }.get(resolution, '10')
+        }.get(resolution, '10')
        return resolution_id

    async def edit_name(self, meta):
        lt_name = meta['name'].replace('Dubbed', '').replace('Dual-Audio', '').replace('  ', ' ').strip()
        # Check if audio Spanish exists, if not append [SUBS] at the end
-        if meta['type'] != 'DISC': #DISC don't have mediainfo
+        if meta['type'] != 'DISC':  # DISC don't have mediainfo
            audio_language_list = meta['mediainfo']['media']['track'][0].get('Audio_Language_List', '')
            if 'Spanish' not in audio_language_list and '[SUBS]' not in lt_name:
                if not meta['tag']:
@@ -93,11 +86,6 @@ async def edit_name(self, meta):
                    lt_name = lt_name.replace(meta['tag'], f" [SUBS]{meta['tag']}")
        return lt_name

-
-    ###############################################################
-    ######   STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED   ######
-    ###############################################################
-
    async def upload(self, meta):
        common = COMMON(config=self.config)
        await common.edit_torrent(meta, self.tracker, self.source_flag)
@@ -105,15 +93,15 @@ async def upload(self, meta):
        type_id = await self.get_type_id(meta['type'])
        resolution_id = await self.get_res_id(meta['resolution'])
        await common.unit3d_edit_desc(meta, self.tracker, self.signature)
-        #region_id = await common.unit3d_region_ids(meta.get('region'))
+        # region_id = await common.unit3d_region_ids(meta.get('region'))
        distributor_id = await common.unit3d_distributor_ids(meta.get('distributor'))
        lt_name = await self.edit_name(meta)
-        if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False:
+        if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False:
            anon = 0
        else:
            anon = 1

-        if meta['bdinfo'] != None:
+        if meta['bdinfo'] is not None:
            mi_dump = None
            bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read()
        else:
@@ -123,35 +111,34 @@ async def upload(self, meta):
        open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb')
        files = {'torrent': open_torrent}
        data = {
-            'name' : lt_name,
-            'description' : desc,
-            'mediainfo' : mi_dump,
-            'bdinfo' : bd_dump,
-            'category_id' : cat_id,
-            'type_id' : type_id,
-            'resolution_id' : resolution_id,
-            'tmdb' : meta['tmdb'],
-            'imdb' : meta['imdb_id'].replace('tt', ''),
-            'tvdb' : meta['tvdb_id'],
-            'mal' : meta['mal_id'],
-            'igdb' : 0,
-            'anonymous' : anon,
-            'stream' : meta['stream'],
-            'sd' : meta['sd'],
-            'keywords' : meta['keywords'],
-            'personal_release' : int(meta.get('personalrelease', False)),
-            'internal' : 0,
-            'featured' : 0,
-            'free' : 0,
-            'doubleup' : 0,
-            'sticky' : 0,
+            'name': lt_name,
+            'description': desc,
+            'mediainfo': mi_dump,
+            'bdinfo': bd_dump,
+            'category_id': cat_id,
+            'type_id': type_id,
+            'resolution_id': resolution_id,
+            'tmdb': meta['tmdb'],
+            'imdb': meta['imdb_id'].replace('tt', ''),
+            'tvdb': meta['tvdb_id'],
+            'mal': meta['mal_id'],
+            'igdb': 0,
+            'anonymous': anon,
+            'stream': meta['stream'],
+            'sd': meta['sd'],
+            'keywords': meta['keywords'],
+            'personal_release': int(meta.get('personalrelease', False)),
+            'internal': 0,
+            'featured': 0,
+            'free': 0,
+            'doubleup': 0,
+            'sticky': 0,
        }
        # Internal
-        if self.config['TRACKERS'][self.tracker].get('internal', False) == True:
+        if self.config['TRACKERS'][self.tracker].get('internal', False) is True:
            if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])):
                data['internal'] = 1
-
        if distributor_id != 0:
            data['distributor_id'] = distributor_id
        if meta.get('category') == "TV":
@@ -161,33 +148,31 @@ async def upload(self, meta):
            'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})'
        }
        params = {
-            'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip()
+            'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip()
        }
-        if meta['debug'] == False:
+        if meta['debug'] is False:
            response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params)
            try:
                console.print(response.json())
-            except:
+            except Exception:
                console.print("It may have uploaded, go check")
-                return
+                return
        else:
-            console.print(f"[cyan]Request Data:")
+            console.print("[cyan]Request Data:")
            console.print(data)
        open_torrent.close()

-
-
    async def search_existing(self, meta):
        dupes = []
        console.print("[yellow]Searching for existing torrents on site...")
        params = {
-            'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(),
-            'tmdbId' : meta['tmdb'],
-            'categories[]' : await self.get_cat_id(meta['category'], meta),
-            'types[]' : await self.get_type_id(meta['type']),
-            'resolutions[]' : await self.get_res_id(meta['resolution']),
-            'name' : ""
+            'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(),
+            'tmdbId': meta['tmdb'],
+            'categories[]': await self.get_cat_id(meta['category'], meta),
+            'types[]': await self.get_type_id(meta['type']),
+            'resolutions[]': await self.get_res_id(meta['resolution']),
+            'name': ""
        }
        if meta['category'] == 'TV':
            params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}"
@@ -201,7 +186,7 @@ async def search_existing(self, meta):
                # difference = SequenceMatcher(None, meta['clean_name'], result).ratio()
                # if difference >= 0.05:
                dupes.append(result)
-        except:
+        except Exception:
            console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect')
            await asyncio.sleep(5)
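A note on the anon handling that this patch touches in every tracker above and below: the upload is anonymous if either the run requested it or the tracker's per-site config enables it. A standalone sketch of that shared logic, with the meta and config shapes assumed from these diffs:

    from str2bool import str2bool

    def resolve_anon(meta: dict, tracker_cfg: dict) -> int:
        # Anonymous (1) if the run asked for it OR the per-tracker config enables it.
        cfg_anon = bool(str2bool(str(tracker_cfg.get('anon', "False"))))
        if meta.get('anon', 0) == 0 and cfg_anon is False:
            return 0
        return 1

    # resolve_anon({'anon': 0}, {'anon': 'False'}) -> 0
    # resolve_anon({'anon': 0}, {'anon': 'true'})  -> 1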
diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py
index b630cc779..656a59f0c 100644
--- a/src/trackers/MTV.py
+++ b/src/trackers/MTV.py
@@ -303,7 +303,7 @@ async def get_res_id(self, resolution):
            '576i': '0',
            '480p': '480',
            '480i': '480'
-            }.get(resolution, '10')
+        }.get(resolution, '10')
        return resolution_id

    async def get_cat_id(self, meta):
@@ -344,7 +344,7 @@ async def get_source_id(self, meta):
            'MIXED': '11',
            'Unknown': '12',
            'ENCODE': '7'
-            }.get(meta['type'], '0')
+        }.get(meta['type'], '0')
        return type_id

    async def get_origin_id(self, meta):
diff --git a/src/trackers/OE.py b/src/trackers/OE.py
index a9f6c6d09..a1b4d3156 100644
--- a/src/trackers/OE.py
+++ b/src/trackers/OE.py
@@ -2,15 +2,13 @@
 # import discord
 import asyncio
 import requests
-from difflib import SequenceMatcher
 from str2bool import str2bool
-import json
-import os
 import platform

 from src.trackers.COMMON import COMMON
 from src.console import console

+
 class OE():
    """
    Edit for Tracker:
@@ -25,10 +23,10 @@ def __init__(self, config):
        self.source_flag = 'OE'
        self.search_url = 'https://onlyencodes.cc/api/torrents/filter'
        self.upload_url = 'https://onlyencodes.cc/api/torrents/upload'
-        self.signature = f"\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]"
+        self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]"
        self.banned_groups = ['0neshot', '3LT0N', '4K4U', '4yEo', '$andra', '[Oj]', 'AFG', 'AkihitoSubs', 'AniHLS', 'Anime Time', 'AnimeRG', 'AniURL', 'AR', 'AROMA', 'ASW', 'aXXo', 'BakedFish', 'BiTOR', 'BHDStudio', 'BRrip', 'bonkai', 'Cleo', 'CM8', 'C4K', 'CrEwSaDe', 'core', 'd3g', 'DDR', 'DeadFish', 'DeeJayAhmed', 'DNL', 'ELiTE', 'EMBER', 'eSc', 'EVO', 'EZTV', 'FaNGDiNG0', 'FGT', 'fenix', 'FUM', 'FRDS', 'FROZEN', 'GalaxyTV', 'GalaxyRG', 'GERMini', 'Grym', 'GrymLegacy', 'HAiKU', 'HD2DVD', 'HDTime', 'Hi10', 'ION10', 'iPlanet', 'JacobSwaggedUp', 'JIVE', 'Judas', 'KiNGDOM', 'LAMA', 'Leffe', 'LiGaS', 'LOAD', 'LycanHD', 'MeGusta,' 'MezRips,' 'mHD,' 'Mr.Deadpool', 'mSD', 'NemDiggers', 'neoHEVC', 'NeXus', 'NhaNc3', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'NOIVTC', 'pahe.in', 'PlaySD', 'playXD', 'PRODJi', 'ProRes', 'project-gxs', 'PSA', 'QaS', 'Ranger', 'RAPiDCOWS', 'RARBG', 'Raze', 'RCDiVX', 'RDN', 'Reaktor', 'REsuRRecTioN', 'RMTeam', 'ROBOTS', 'rubix', 'SANTi', 'SHUTTERSHIT', 'SpaceFish', 'SPASM', 'SSA', 'TBS', 'Telly,' 'Tenrai-Sensei,' 'TERMiNAL,' 'TM', 'topaz', 'TSP', 'TSPxL', 'Trix', 'URANiME', 'UTR', 'VipapkSudios', 'ViSION', 'WAF', 'Wardevil', 'x0r', 'xRed', 'XS', 'YakuboEncodes', 'YIFY', 'YTS', 'YuiSubs', 'ZKBL', 'ZmN', 'ZMNT']
        pass
-    
+
    async def upload(self, meta):
        common = COMMON(config=self.config)
        await common.edit_torrent(meta, self.tracker, self.source_flag)
@@ -37,11 +35,11 @@ async def upload(self, meta):
        type_id = await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('video_codec'), meta.get('category', ""))
        resolution_id = await self.get_res_id(meta['resolution'])
        oe_name = await self.edit_name(meta)
-        if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False:
+        if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False:
            anon = 0
        else:
            anon = 1
-        if meta['bdinfo'] != None:
+        if meta['bdinfo'] is not None:
            mi_dump = None
            bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read()
        else:
@@ -51,34 +49,34 @@ async def upload(self, meta):
        open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb')
        files = {'torrent': open_torrent}
        data = {
-            'name' : oe_name,
-            'description' : desc,
-            'mediainfo' : mi_dump,
-            'bdinfo' : bd_dump,
-            'category_id' : cat_id,
-            'type_id' : type_id,
-            'resolution_id' : resolution_id,
-            'tmdb' : meta['tmdb'],
-            'imdb' : meta['imdb_id'].replace('tt', ''),
-            'tvdb' : meta['tvdb_id'],
-            'mal' : meta['mal_id'],
-            'igdb' : 0,
-            'anonymous' : anon,
-            'stream' : meta['stream'],
-            'sd' : meta['sd'],
-            'keywords' : meta['keywords'],
-            'personal_release' : int(meta.get('personalrelease', False)),
-            'internal' : 0,
-            'featured' : 0,
-            'free' : 0,
-            'doubleup' : 0,
-            'sticky' : 0,
+            'name': oe_name,
+            'description': desc,
+            'mediainfo': mi_dump,
+            'bdinfo': bd_dump,
+            'category_id': cat_id,
+            'type_id': type_id,
+            'resolution_id': resolution_id,
+            'tmdb': meta['tmdb'],
+            'imdb': meta['imdb_id'].replace('tt', ''),
+            'tvdb': meta['tvdb_id'],
+            'mal': meta['mal_id'],
+            'igdb': 0,
+            'anonymous': anon,
+            'stream': meta['stream'],
+            'sd': meta['sd'],
+            'keywords': meta['keywords'],
+            'personal_release': int(meta.get('personalrelease', False)),
+            'internal': 0,
+            'featured': 0,
+            'free': 0,
+            'doubleup': 0,
+            'sticky': 0,
        }
        # Internal
-        if self.config['TRACKERS'][self.tracker].get('internal', False) == True:
+        if self.config['TRACKERS'][self.tracker].get('internal', False) is True:
            if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])):
                data['internal'] = 1
-
+
        if meta.get('category') == "TV":
            data['season_number'] = meta.get('season_int', '0')
            data['episode_number'] = meta.get('episode_int', '0')
@@ -88,41 +86,39 @@ async def upload(self, meta):
        params = {
            'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip()
        }
-
-        if meta['debug'] == False:
+
+        if meta['debug'] is False:
            response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params)
            try:
-
+
                console.print(response.json())
-            except:
+            except Exception:
                console.print("It may have uploaded, go check")
                open_torrent.close()
-                return
+                return
        else:
-            console.print(f"[cyan]Request Data:")
+            console.print("[cyan]Request Data:")
            console.print(data)
        open_torrent.close()

-
-
    async def edit_name(self, meta):
        oe_name = meta.get('name')
        return oe_name

    async def get_cat_id(self, category_name):
        category_id = {
-            'MOVIE': '1', 
-            'TV': '2', 
-            }.get(category_name, '0')
+            'MOVIE': '1',
+            'TV': '2',
+        }.get(category_name, '0')
        return category_id

    async def get_type_id(self, type, tv_pack, video_codec, category):
        type_id = {
-            'DISC': '19', 
+            'DISC': '19',
            'REMUX': '20',
            'WEBDL': '21', 
-            }.get(type, '0')
-        if type == "WEBRIP": 
+        }.get(type, '0')
+        if type == "WEBRIP":
            if video_codec == "HEVC":
                # x265 Encode
                type_id = '10'
@@ -146,34 +142,30 @@ async def get_type_id(self, type, tv_pack, video_codec, category):

    async def get_res_id(self, resolution):
        resolution_id = {
-            '8640p':'10',
-            '4320p': '1',
-            '2160p': '2',
-            '1440p' : '3',
+            '8640p': '10',
+            '4320p': '1',
+            '2160p': '2',
+            '1440p': '3',
            '1080p': '3',
-            '1080i':'4',
-            '720p': '5',
-            '576p': '6',
+            '1080i': '4',
+            '720p': '5',
+            '576p': '6',
            '576i': '7',
-            '480p': '8',
+            '480p': '8',
            '480i': '9'
-            }.get(resolution, '10')
+        }.get(resolution, '10')
        return resolution_id

-
-
-
-
    async def search_existing(self, meta):
        dupes = []
        console.print("[yellow]Searching for existing torrents on site...")
        params = {
-            'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(),
-            'tmdbId' : meta['tmdb'],
-            'categories[]' : await self.get_cat_id(meta['category']),
-            'types[]' : await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('sd', 0), meta.get('category', "")),
-            'resolutions[]' : await self.get_res_id(meta['resolution']),
-            'name' : ""
+            'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(),
+            'tmdbId': meta['tmdb'],
+            'categories[]': await self.get_cat_id(meta['category']),
+            'types[]': await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('sd', 0), meta.get('category', "")),
+            'resolutions[]': await self.get_res_id(meta['resolution']),
+            'name': ""
        }
        if meta['category'] == 'TV':
            params['name'] = f"{meta.get('season', '')}{meta.get('episode', '')}"
@@ -185,8 +177,8 @@ async def search_existing(self, meta):
            for each in response['data']:
                result = [each][0]['attributes']['name']
                dupes.append(result)
-        except:
+        except Exception:
            console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect')
            await asyncio.sleep(5)

-    return dupes
\ No newline at end of file
+    return dupes
diff --git a/src/trackers/OTW.py b/src/trackers/OTW.py
index 8834f5510..8c12ccd22 100644
--- a/src/trackers/OTW.py
+++ b/src/trackers/OTW.py
@@ -3,7 +3,6 @@
 import asyncio
 import requests
 from str2bool import str2bool
-import os
 import platform

 from src.trackers.COMMON import COMMON
@@ -19,60 +18,50 @@ class OTW():
        Upload
    """

-    ###############################################################
-    ########                    EDIT ME                    ########
-    ###############################################################
-
-    # ALSO EDIT CLASS NAME ABOVE
-
    def __init__(self, config):
        self.config = config
        self.tracker = 'OTW'
        self.source_flag = 'OTW'
        self.upload_url = 'https://oldtoons.world/api/torrents/upload'
        self.search_url = 'https://oldtoons.world/api/torrents/filter'
-        self.signature = f"\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]"
+        self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]"
        self.banned_groups = [""]
        pass
-    
+
    async def get_cat_id(self, category_name):
        category_id = {
-            'MOVIE': '1', 
-            'TV': '2', 
-            }.get(category_name, '0')
+            'MOVIE': '1',
+            'TV': '2',
+        }.get(category_name, '0')
        return category_id

    async def get_type_id(self, type):
        type_id = {
-            'DISC': '1', 
+            'DISC': '1',
            'REMUX': '2',
-            'WEBDL': '4', 
-            'WEBRIP': '5', 
+            'WEBDL': '4',
+            'WEBRIP': '5',
            'HDTV': '6',
            'ENCODE': '3'
-            }.get(type, '0')
+        }.get(type, '0')
        return type_id

    async def get_res_id(self, resolution):
        resolution_id = {
-            '8640p':'10',
-            '4320p': '1',
-            '2160p': '2',
-            '1440p' : '3',
+            '8640p': '10',
+            '4320p': '1',
+            '2160p': '2',
+            '1440p': '3',
            '1080p': '3',
-            '1080i':'4',
-            '720p': '5',
-            '576p': '6',
+            '1080i': '4',
+            '720p': '5',
+            '576p': '6',
            '576i': '7',
-            '480p': '8',
+            '480p': '8',
            '480i': '9'
-            }.get(resolution, '10')
+        }.get(resolution, '10')
        return resolution_id

-    ###############################################################
-    ######   STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED   ######
-    ###############################################################
-
    async def upload(self, meta):
        common = COMMON(config=self.config)
        await common.edit_torrent(meta, self.tracker, self.source_flag)
@@ -82,12 +71,12 @@ async def upload(self, meta):
        await common.unit3d_edit_desc(meta, self.tracker, self.signature)
        region_id = await common.unit3d_region_ids(meta.get('region'))
        distributor_id = await common.unit3d_distributor_ids(meta.get('distributor'))
-        if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False:
+        if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False:
            anon = 0
        else:
            anon = 1

-        if meta['bdinfo'] != None:
+        if meta['bdinfo'] is not None:
            mi_dump = None
            bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read()
        else:
@@ -97,34 +86,34 @@ async def upload(self, meta):
        open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb')
        files = {'torrent': open_torrent}
        data = {
-            'name' : meta['name'],
-            'description' : desc,
-            'mediainfo' : mi_dump,
-            'bdinfo' : bd_dump,
-            'category_id' : cat_id,
-            'type_id' : type_id,
-            'resolution_id' : resolution_id,
-            'tmdb' : meta['tmdb'],
-            'imdb' : meta['imdb_id'].replace('tt', ''),
-            'tvdb' : meta['tvdb_id'],
-            'mal' : meta['mal_id'],
-            'igdb' : 0,
-            'anonymous' : anon,
-            'stream' : meta['stream'],
-            'sd' : meta['sd'],
-            'keywords' : meta['keywords'],
-            'personal_release' : int(meta.get('personalrelease', False)),
-            'internal' : 0,
-            'featured' : 0,
-            'free' : 0,
-            'doubleup' : 0,
-            'sticky' : 0,
+            'name': meta['name'],
+            'description': desc,
+            'mediainfo': mi_dump,
+            'bdinfo': bd_dump,
+            'category_id': cat_id,
+            'type_id': type_id,
+            'resolution_id': resolution_id,
+            'tmdb': meta['tmdb'],
+            'imdb': meta['imdb_id'].replace('tt', ''),
+            'tvdb': meta['tvdb_id'],
+            'mal': meta['mal_id'],
+            'igdb': 0,
+            'anonymous': anon,
+            'stream': meta['stream'],
+            'sd': meta['sd'],
+            'keywords': meta['keywords'],
+            'personal_release': int(meta.get('personalrelease', False)),
+            'internal': 0,
+            'featured': 0,
+            'free': 0,
+            'doubleup': 0,
+            'sticky': 0,
        }
        # Internal
-        if self.config['TRACKERS'][self.tracker].get('internal', False) == True:
+        if self.config['TRACKERS'][self.tracker].get('internal', False) is True:
            if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])):
                data['internal'] = 1
-
+
        if region_id != 0:
            data['region_id'] = region_id
        if distributor_id != 0:
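Every UNIT3D upload() in this patch reduces to the same HTTP call: a multipart POST carrying the .torrent file, the metadata dict, a User-Agent header, and the API key as a query parameter. A condensed sketch of that shared flow (the function name and argument shapes are illustrative, not part of the codebase):

    import platform
    import requests

    def post_torrent(upload_url: str, api_key: str, torrent_path: str, data: dict) -> dict:
        headers = {'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})'}
        params = {'api_token': api_key.strip()}
        with open(torrent_path, 'rb') as torrent:
            files = {'torrent': torrent}
            response = requests.post(url=upload_url, files=files, data=data, headers=headers, params=params)
        return response.json()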
@@ -136,35 +125,31 @@ async def upload(self, meta):
            'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})'
        }
        params = {
-            'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip()
+            'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip()
        }
-
-        if meta['debug'] == False:
+
+        if meta['debug'] is False:
            response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params)
            try:
                console.print(response.json())
-            except:
+            except Exception:
                console.print("It may have uploaded, go check")
-                return
+                return
        else:
-            console.print(f"[cyan]Request Data:")
+            console.print("[cyan]Request Data:")
            console.print(data)
        open_torrent.close()

-
-
-
-
    async def search_existing(self, meta):
        dupes = []
        console.print("[yellow]Searching for existing torrents on site...")
        params = {
-            'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(),
-            'tmdbId' : meta['tmdb'],
-            'categories[]' : await self.get_cat_id(meta['category']),
-            'types[]' : await self.get_type_id(meta['type']),
-            'resolutions[]' : await self.get_res_id(meta['resolution']),
-            'name' : ""
+            'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(),
+            'tmdbId': meta['tmdb'],
+            'categories[]': await self.get_cat_id(meta['category']),
+            'types[]': await self.get_type_id(meta['type']),
+            'resolutions[]': await self.get_res_id(meta['resolution']),
+            'name': ""
        }
        if meta.get('edition', "") != "":
            params['name'] = params['name'] + f" {meta['edition']}"
@@ -176,8 +161,8 @@ async def search_existing(self, meta):
            # difference = SequenceMatcher(None, meta['clean_name'], result).ratio()
            # if difference >= 0.05:
            dupes.append(result)
-        except:
+        except Exception:
            console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect')
            await asyncio.sleep(5)

-    return dupes
\ No newline at end of file
+    return dupes
diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py
index 85fe032df..e710769a3 100644
--- a/src/trackers/PTP.py
+++ b/src/trackers/PTP.py
@@ -14,7 +14,7 @@
 from pymediainfo import MediaInfo
 from src.trackers.COMMON import COMMON
 from src.bbcode import BBCODE
-from src.exceptions import *
+from src.exceptions import *  # noqa F403
 from src.console import console
 from torf import Torrent
 from datetime import datetime
@@ -33,7 +33,7 @@ def __init__(self, config):
        self.password = config['TRACKERS']['PTP'].get('password', '').strip()
        self.web_source = str2bool(str(config['TRACKERS']['PTP'].get('add_web_source_to_desc', True)))
        self.user_agent = f'Upload Assistant/2.1 ({platform.system()} {platform.release()})'
-        self.banned_groups = ['aXXo', 'BMDru', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'd3g', 'DNL', 'FaNGDiNG0', 'HD2DVD', 'HDTime', 'ION10', 'iPlanet', 
+        self.banned_groups = ['aXXo', 'BMDru', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'd3g', 'DNL', 'FaNGDiNG0', 'HD2DVD', 'HDTime', 'ION10', 'iPlanet',
                              'KiNGDOM', 'mHD', 'mSD', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'OFT', 'PRODJi', 'SANTi', 'SPiRiT', 'STUTTERSHIT', 'ViSION', 'VXT', 'WAF', 'x0r', 'YIFY',]

@@ -187,8 +187,8 @@ async def get_ptp_description(self, ptp_torrent_id, is_disc):
        bbcode = BBCODE()
        desc, imagelist = bbcode.clean_ptp_description(ptp_desc, is_disc)

-        console.print(f"[bold green]Successfully grabbed description from PTP")
-        console.print(f"[cyan]Description after cleaning:[yellow]\n{desc[:500]}...")  # Show first 500 characters for brevity
+        console.print("[bold green]Successfully grabbed description from PTP")
+        console.print(f"[cyan]Description after cleaning:[yellow]\n{desc[:1000]}...")  # Show first 1000 characters for brevity

        # Allow user to edit or discard the description
        console.print("[cyan]Do you want to edit, discard or keep the description?[/cyan]")
@@ -203,7 +203,7 @@ async def get_ptp_description(self, ptp_torrent_id, is_disc):
            desc = None
            console.print("[yellow]Description discarded.[/yellow]")
        else:
-            console.print(f"[green]Keeping the original description.[/green]")
+            console.print("[green]Keeping the original description.[/green]")

        return desc, imagelist

@@ -713,14 +713,14 @@ async def get_AntiCsrfToken(self, meta):
            resp = loginresponse.json()
            try:
                if resp["Result"] != "Ok":
-                    raise LoginException("Failed to login to PTP. Probably due to the bad user name, password, announce url, or 2FA code.")
+                    raise LoginException("Failed to login to PTP. Probably due to the bad user name, password, announce url, or 2FA code.")  # noqa F405
                AntiCsrfToken = resp["AntiCsrfToken"]
                with open(cookiefile, 'wb') as cf:
                    pickle.dump(session.cookies, cf)
            except Exception:
-                raise LoginException(f"Got exception while loading JSON login response from PTP. Response: {loginresponse.text}")
+                raise LoginException(f"Got exception while loading JSON login response from PTP. Response: {loginresponse.text}")  # noqa F405
        except Exception:
-            raise LoginException(f"Got exception while loading JSON login response from PTP. Response: {loginresponse.text}")
+            raise LoginException(f"Got exception while loading JSON login response from PTP. Response: {loginresponse.text}")  # noqa F405
        return AntiCsrfToken

@@ -728,7 +728,7 @@ async def validate_login(self, response):
        if response.text.find("""""") != -1:
            console.print("Looks like you are not logged in to PTP. Probably due to the bad user name, password, or expired session.")
        elif "Your popcorn quota has been reached, come back later!" in response.text:
-            raise LoginException("Your PTP request/popcorn quota has been reached, try again later")
+            raise LoginException("Your PTP request/popcorn quota has been reached, try again later")  # noqa F405
        else:
            loggedIn = True
        return loggedIn
@@ -760,7 +760,7 @@ async def fill_upload_form(self, groupID, meta):
            "subtitles[]": ptp_subtitles,
            "trumpable[]": ptp_trumpable,
            "AntiCsrfToken": await self.get_AntiCsrfToken(meta)
-            }
+        }
        if data["remaster_year"] != "" or data["remaster_title"] != "":
            data["remaster"] = "on"
        if resolution == "Other":
@@ -887,11 +887,11 @@ async def upload(self, meta, url, data):
        if match is not None:
            errorMessage = match.group(1)

-        raise UploadException(f"Upload to PTP failed: {errorMessage} ({response.status_code}). (We are still on the upload page.)")
+        raise UploadException(f"Upload to PTP failed: {errorMessage} ({response.status_code}). (We are still on the upload page.)")  # noqa F405

    # URL format in case of successful upload: https://passthepopcorn.me/torrents.php?id=9329&torrentid=91868
    match = re.match(r".*?passthepopcorn\.me/torrents\.php\?id=(\d+)&torrentid=(\d+)", response.url)
    if match is None:
        console.print(url)
        console.print(data)
-        raise UploadException(f"Upload to PTP failed: result URL {response.url} ({response.status_code}) is not the expected one.")
+        raise UploadException(f"Upload to PTP failed: result URL {response.url} ({response.status_code}) is not the expected one.")  # noqa F405
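The `# noqa F405` markers in the PTP diff exist because `from src.exceptions import *` leaves flake8 unable to prove that LoginException and UploadException are defined (F403 flags the star import itself, F405 each possibly-undefined name). Note that flake8 only scopes a suppression to specific codes when a colon is used (`# noqa: F405`); the bare form silences every check on the line. Dropping the star import would remove the need entirely; a hypothetical explicit form:

    # instead of: from src.exceptions import *   (triggers F403/F405)
    from src.exceptions import LoginException, UploadException

    def fail_login(reason: str):
        raise LoginException(reason)  # no suppression needed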
diff --git a/src/trackers/RF.py b/src/trackers/RF.py
index aa108340d..1019c2253 100644
--- a/src/trackers/RF.py
+++ b/src/trackers/RF.py
@@ -2,13 +2,13 @@
 # import discord
 import asyncio
 import requests
-import os
 import platform
 from str2bool import str2bool

 from src.trackers.COMMON import COMMON
 from src.console import console

+
 class RF():
    """
    Edit for Tracker:
@@ -18,19 +18,16 @@ class RF():
        Upload
    """

-    ###############################################################
-    ########                    EDIT ME                    ########
-    ###############################################################
    def __init__(self, config):
        self.config = config
        self.tracker = 'RF'
        self.source_flag = 'ReelFliX'
        self.upload_url = 'https://reelflix.xyz/api/torrents/upload'
        self.search_url = 'https://reelflix.xyz/api/torrents/filter'
-        self.forum_link = "\n[center][url=https://github.com/L4GSP1KE/Upload-Assistant]Created by Upload Assistant[/url][/center]"
+        self.forum_link = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]"
        self.banned_groups = [""]
        pass
-    
+
    async def upload(self, meta):
        common = COMMON(config=self.config)
        await common.edit_torrent(meta, self.tracker, self.source_flag)
@@ -41,11 +38,11 @@ async def upload(self, meta):
        type_id = await self.get_type_id(meta['type'])
        resolution_id = await self.get_res_id(meta['resolution'])
        stt_name = await self.edit_name(meta)
-        if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False:
+        if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False:
            anon = 0
        else:
            anon = 1
-        if meta['bdinfo'] != None:
+        if meta['bdinfo'] is not None:
            mi_dump = None
            bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read()
        else:
@@ -55,34 +52,34 @@ async def upload(self, meta):
        open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb')
        files = {'torrent': open_torrent}
        data = {
-            'name' : stt_name,
-            'description' : desc,
-            'mediainfo' : mi_dump,
-            'bdinfo' : bd_dump,
-            'category_id' : cat_id,
-            'type_id' : type_id,
-            'resolution_id' : resolution_id,
-            'tmdb' : meta['tmdb'],
-            'imdb' : meta['imdb_id'].replace('tt', ''),
-            'tvdb' : meta['tvdb_id'],
-            'mal' : meta['mal_id'],
-            'igdb' : 0,
-            'anonymous' : anon,
-            'stream' : meta['stream'],
-            'sd' : meta['sd'],
-            'keywords' : meta['keywords'],
-            'personal_release' : int(meta.get('personalrelease', False)),
-            'internal' : 0,
-            'featured' : 0,
-            'free' : 0,
-            'doubleup' : 0,
-            'sticky' : 0,
+            'name': stt_name,
+            'description': desc,
+            'mediainfo': mi_dump,
+            'bdinfo': bd_dump,
+            'category_id': cat_id,
+            'type_id': type_id,
+            'resolution_id': resolution_id,
+            'tmdb': meta['tmdb'],
+            'imdb': meta['imdb_id'].replace('tt', ''),
+            'tvdb': meta['tvdb_id'],
+            'mal': meta['mal_id'],
+            'igdb': 0,
+            'anonymous': anon,
+            'stream': meta['stream'],
+            'sd': meta['sd'],
+            'keywords': meta['keywords'],
+            'personal_release': int(meta.get('personalrelease', False)),
+            'internal': 0,
+            'featured': 0,
+            'free': 0,
+            'doubleup': 0,
+            'sticky': 0,
        }
        # Internal
-        if self.config['TRACKERS'][self.tracker].get('internal', False) == True:
+        if self.config['TRACKERS'][self.tracker].get('internal', False) is True:
            if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])):
                data['internal'] = 1
-
+
        if region_id != 0:
            data['region_id'] = region_id
        if distributor_id != 0:
@@ -95,20 +92,18 @@ async def upload(self, meta):
        }
        if meta.get('category') == "TV":
            console.print('[bold red]This site only ALLOWS Movies.')
-        if meta['debug'] == False:
+        if meta['debug'] is False:
            response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params)
            try:
                console.print(response.json())
-            except:
+            except Exception:
                console.print("It may have uploaded, go check")
                return
        else:
-            console.print(f"[cyan]Request Data:")
+            console.print("[cyan]Request Data:")
            console.print(data)
        open_torrent.close()

-
-
    async def edit_name(self, meta):
        stt_name = meta['name']
        return stt_name
@@ -116,7 +111,7 @@ async def edit_name(self, meta):
    async def get_cat_id(self, category_name):
        category_id = {
            'MOVIE': '1',
-            }.get(category_name, '0')
+        }.get(category_name, '0')
        return category_id

    async def get_type_id(self, type):
@@ -125,10 +120,10 @@ async def get_type_id(self, type):
            'REMUX': '40',
            'WEBDL': '42',
            'WEBRIP': '45',
-            #'FANRES': '6',
+            # 'FANRES': '6',
            'ENCODE': '41',
            'HDTV': '35',
-            }.get(type, '0')
+        }.get(type, '0')
        return type_id

    async def get_res_id(self, resolution):
@@ -139,25 +134,24 @@ async def get_res_id(self, resolution):
            # '1440p' : '3',
            '1080p': '3',
            '1080i': '4',
-            '720p': '5',
-            '576p': '6',
-            '576i': '7',
-            '480p': '8',
-            '480i': '9'
-            }.get(resolution, '10')
+            '720p': '5',
+            '576p': '6',
+            '576i': '7',
+            '480p': '8',
+            '480i': '9'
+        }.get(resolution, '10')
        return resolution_id

-
    async def search_existing(self, meta):
        dupes = []
        console.print("[yellow]Searching for existing torrents on site...")
        params = {
-            'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(),
-            'tmdbId' : meta['tmdb'],
-            'categories[]' : await self.get_cat_id(meta['category']),
-            'types[]' : await self.get_type_id(meta['type']),
-            'resolutions[]' : await self.get_res_id(meta['resolution']),
-            'name' : ""
+            'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(),
+            'tmdbId': meta['tmdb'],
+            'categories[]': await self.get_cat_id(meta['category']),
+            'types[]': await self.get_type_id(meta['type']),
+            'resolutions[]': await self.get_res_id(meta['resolution']),
+            'name': ""
        }
        if meta['category'] == 'TV':
            console.print('[bold red]Unable to search site for TV as this site only ALLOWS Movies')
@@ -172,7 +166,7 @@ async def search_existing(self, meta):
            # difference = SequenceMatcher(None, meta['clean_name'], result).ratio()
            # if difference >= 0.05:
            dupes.append(result)
-        except:
+        except Exception:
            console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect')
            await asyncio.sleep(5)
diff --git a/src/trackers/UNIT3D_TEMPLATE.py b/src/trackers/UNIT3D_TEMPLATE.py
index 996bab254..e778b6df0 100644
--- a/src/trackers/UNIT3D_TEMPLATE.py
+++ b/src/trackers/UNIT3D_TEMPLATE.py
@@ -19,7 +19,7 @@ class UNIT3D_TEMPLATE():
    """
    ###############################################################
-    ########                    EDIT ME                    ########
+    ########                    EDIT ME                    ########  noqa E266
    ###############################################################

    # ALSO EDIT CLASS NAME ABOVE
@@ -38,7 +38,7 @@ async def get_cat_id(self, category_name):
        category_id = {
            'MOVIE': '1',
            'TV': '2',
-            }.get(category_name, '0')
+        }.get(category_name, '0')
        return category_id

    async def get_type_id(self, type):
@@ -49,7 +49,7 @@ async def get_type_id(self, type):
            'WEBRIP': '5',
            'HDTV': '6',
            'ENCODE': '3'
-            }.get(type, '0')
+        }.get(type, '0')
        return type_id

    async def get_res_id(self, resolution):
@@ -65,11 +65,11 @@ async def get_res_id(self, resolution):
            '576i': '7',
            '480p': '8',
            '480i': '9'
-            }.get(resolution, '10')
+        }.get(resolution, '10')
        return resolution_id

    ###############################################################
-    ######   STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED   ######
+    ######   STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED   ######  noqa E266
    ###############################################################

    async def upload(self, meta):
diff --git a/upload.py b/upload.py
index 33c83499c..a5a9e5318 100644
--- a/upload.py
+++ b/upload.py
@@ -136,11 +136,11 @@ async def do_the_thing(base_dir):
            p1 = split_path[0]
            for i, each in enumerate(split_path):
                try:
-                    if os.path.exists(p1) and not os.path.exists(f"{p1} {split_path[i+1]}"):
+                    if os.path.exists(p1) and not os.path.exists(f"{p1} {split_path[i + 1]}"):
                        queue.append(p1)
-                        p1 = split_path[i+1]
+                        p1 = split_path[i + 1]
                    else:
-                        p1 += f" {split_path[i+1]}"
+                        p1 += f" {split_path[i + 1]}"
                except IndexError:
                    if os.path.exists(p1):
                        queue.append(p1)
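For context, the upload.py hunk above sits inside do_the_thing()'s queue parsing, which greedily re-joins space-separated CLI arguments until each accumulated prefix names an existing path. Reconstructed as a standalone function (the surrounding boilerplate is assumed):

    import os

    def rebuild_queue(split_path: list) -> list:
        # Emit a queue entry whenever an accumulated prefix is a real path.
        queue = []
        p1 = split_path[0]
        for i, each in enumerate(split_path):
            try:
                if os.path.exists(p1) and not os.path.exists(f"{p1} {split_path[i + 1]}"):
                    queue.append(p1)
                    p1 = split_path[i + 1]
                else:
                    p1 += f" {split_path[i + 1]}"
            except IndexError:
                if os.path.exists(p1):
                    queue.append(p1)
        return queue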
From ed0fc3bae3dc7ac963997fe2f7c807190c62deb8 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Sun, 1 Sep 2024 16:04:56 +1000
Subject: [PATCH 135/741] Fix ignore

---
 .flake8             | 2 +-
 README.md           | 2 +-
 cogs/commands.py    | 6 +++---
 src/trackers/PTP.py | 4 ++--
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/.flake8 b/.flake8
index 1116f5152..517d063c0 100644
--- a/.flake8
+++ b/.flake8
@@ -1,2 +1,2 @@
 [flake8]
-ignore = F501
\ No newline at end of file
+max-line-length = 3000
\ No newline at end of file
diff --git a/README.md b/README.md
index 0346df7be..36fefa840 100644
--- a/README.md
+++ b/README.md
@@ -15,7 +15,7 @@ A simple tool to take the work out of uploading.
 - Can re-use existing torrents instead of hashing new
 - Generates proper name for your upload using Mediainfo/BDInfo and TMDb/IMDb conforming to site rules
 - Checks for existing releases already on site
- - Uploads to PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN/RTF/OTW/FNP/CBR/UTP/AL
+ - Uploads to PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN/RTF/OTW/FNP/CBR/UTP/AL/HDB
 - Adds to your client with fast resume, seeding instantly (rtorrent/qbittorrent/deluge/watch folder)
 - ALL WITH MINIMAL INPUT!
 - Currently works with .mkv/.mp4/Blu-ray/DVD/HD-DVDs
diff --git a/cogs/commands.py b/cogs/commands.py
index 373d24531..c40d66123 100644
--- a/cogs/commands.py
+++ b/cogs/commands.py
@@ -8,10 +8,10 @@
 from src.trackers.STC import STC
 from src.trackers.LCD import LCD
 from src.trackers.CBR import CBR
-from data.config import config
+from data.config import config  # type: ignore

-import discord
-from discord.ext import commands
+import discord  # type: ignore
+from discord.ext import commands  # type: ignore
 import os
 from datetime import datetime
 import asyncio
diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py
index e710769a3..a7165408e 100644
--- a/src/trackers/PTP.py
+++ b/src/trackers/PTP.py
@@ -180,10 +180,10 @@ async def get_ptp_description(self, ptp_torrent_id, is_disc):
        console.print(f"[yellow]Requesting description from {url} with ID {ptp_torrent_id}")
        response = requests.get(url, params=params, headers=headers)
        await asyncio.sleep(1)
-
+
        ptp_desc = response.text
        # console.print(f"[yellow]Raw description received:\n{ptp_desc[:3800]}...")  # Show first 500 characters for brevity
-
+
        bbcode = BBCODE()
        desc, imagelist = bbcode.clean_ptp_description(ptp_desc, is_disc)
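Two clarifications on this patch: pyflakes' F501 covers invalid %-format literals, while the long-line check is pycodestyle's E501, so the old `ignore = F501` most likely targeted the wrong code; raising max-line-length achieves the intended effect instead. The `# type: ignore` comments in cogs/commands.py quiet type checkers for imports that resolve without stubs in this environment, e.g.:

    import discord  # type: ignore  # no stubs available here
    from discord.ext import commands  # type: ignore

    bot = commands.Bot(command_prefix="!", intents=discord.Intents.default())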
From 8c46c2854b35e36cf985b18a9b66c0f5abe73319 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Sun, 1 Sep 2024 19:15:30 +1000
Subject: [PATCH 136/741] Linted

---
 .flake8                   |   2 +-
 .github/workflows/.flake8 |   2 +-
 src/discparse.py          |  10 +--
 src/trackers/PTER.py      | 157 ++++++++++++++++++--------------------
 src/trackers/R4E.py       |  96 +++++++++++------------
 src/trackers/SN.py        |   1 -
 src/trackers/STC.py       | 129 ++++++++++++++-----------------
 src/trackers/STT.py       | 113 +++++++++++++--------------
 src/trackers/TDC.py       | 129 ++++++++++++++-----------------
 src/trackers/THR.py       |  97 ++++++++++-------------
 src/trackers/TTG.py       | 132 ++++++++++++++------------------
 src/trackers/UTP.py       |  87 ++++++++++-----------
 12 files changed, 439 insertions(+), 516 deletions(-)

diff --git a/.flake8 b/.flake8
index 517d063c0..9270bb728 100644
--- a/.flake8
+++ b/.flake8
@@ -1,2 +1,2 @@
 [flake8]
-max-line-length = 3000
\ No newline at end of file
+max-line-length = 6000
\ No newline at end of file
diff --git a/.github/workflows/.flake8 b/.github/workflows/.flake8
index 68fb2c09f..229297b69 100644
--- a/.github/workflows/.flake8
+++ b/.github/workflows/.flake8
@@ -1,2 +1,2 @@
 [flake8]
-ignore = F501
+max-line-length = 6000
diff --git a/src/discparse.py b/src/discparse.py
index b885e3261..b4e6b246e 100644
--- a/src/discparse.py
+++ b/src/discparse.py
@@ -105,7 +105,7 @@ def parse_bdinfo(self, bdinfo_input, files, path):
        bdinfo['subtitles'] = list()
        bdinfo['path'] = path
        lines = bdinfo_input.splitlines()
-        for l in lines:
+        for l in lines:  # noqa E741
            line = l.strip().lower()
            if line.startswith("*"):
                line = l.replace("*", "").strip().lower()
@@ -115,7 +115,7 @@ def parse_bdinfo(self, bdinfo_input, files, path):
            if line.startswith("disc size:"):
                size = l.split(':', 1)[1]
                size = size.split('bytes', 1)[0].replace(',', '')
-                size = float(size)/float(1 << 30)
+                size = float(size) / float(1 << 30)
                bdinfo['size'] = size
            if line.startswith("length:"):
                length = l.split(':', 1)[1]
@@ -153,8 +153,8 @@ def parse_bdinfo(self, bdinfo_input, files, path):
                })
            elif line.startswith("audio:"):
                if "(" in l:
-                    l = l.split("(")[0]
-                    l = l.strip()
+                    l = l.split("(")[0]  # noqa E741
+                    l = l.strip()  # noqa E741
                split1 = l.split(':', 1)[1]
                split2 = split1.split('/')
                n = 0
@@ -164,7 +164,7 @@ def parse_bdinfo(self, bdinfo_input, files, path):
                else:
                    fuckatmos = ""
                try:
-                    bit_depth = split2[n+5].strip()
+                    bit_depth = split2[n + 5].strip()
                except Exception:
                    bit_depth = ""
                bdinfo['audio'].append({
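Aside from the `# noqa E741` markers for the ambiguous loop variable `l`, the discparse.py hunks only adjust spacing around the size math, which converts BDInfo's comma-grouped byte count to GiB. Extracted as a minimal sketch:

    def parse_disc_size(line: str) -> float:
        # "Disc Size: 24,098,273,280 bytes" -> size in GiB
        raw = line.split(':', 1)[1].split('bytes', 1)[0].replace(',', '')
        return float(raw) / float(1 << 30)

    # parse_disc_size("Disc Size: 24,098,273,280 bytes") -> ~22.44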
diff --git a/src/trackers/PTER.py b/src/trackers/PTER.py
index 71eb1c2a1..50e44367a 100644
--- a/src/trackers/PTER.py
+++ b/src/trackers/PTER.py
@@ -1,19 +1,16 @@
 from bs4 import BeautifulSoup
 import requests
-import asyncio
 import re
 import os
 from pathlib import Path
-import traceback
 import json
 import glob
 from str2bool import str2bool
-import cli_ui
 import pickle
 from unidecode import unidecode
-from urllib.parse import urlparse, quote
+from urllib.parse import urlparse
 from src.trackers.COMMON import COMMON
-from src.exceptions import *
+from src.exceptions import *  # noqa E403
 from src.console import console
@@ -23,23 +20,23 @@ def __init__(self, config):
        self.config = config
        self.tracker = 'PTER'
        self.source_flag = 'PTER'
-        self.passkey = str(config['TRACKERS']['PTER'].get('passkey', '')).strip() 
+        self.passkey = str(config['TRACKERS']['PTER'].get('passkey', '')).strip()
        self.username = config['TRACKERS']['PTER'].get('username', '').strip()
        self.password = config['TRACKERS']['PTER'].get('password', '').strip()
        self.rehost_images = config['TRACKERS']['PTER'].get('img_rehost', False)
        self.ptgen_api = config['TRACKERS']['PTER'].get('ptgen_api').strip()
-        self.ptgen_retry=3
+        self.ptgen_retry = 3
        self.signature = None
        self.banned_groups = [""]

    async def validate_credentials(self, meta):
        vcookie = await self.validate_cookies(meta)
-        if vcookie != True:
+        if vcookie is not True:
            console.print('[red]Failed to validate cookies. Please confirm that the site is up and your passkey is valid.')
            return False
        return True
-    
+
    async def validate_cookies(self, meta):
        common = COMMON(config=self.config)
        url = "https://pterclub.com"
@@ -48,7 +45,7 @@ async def validate_cookies(self, meta):
        with requests.Session() as session:
            session.cookies.update(await common.parseCookieFile(cookiefile))
            resp = session.get(url=url)
-            
+
            if meta['debug']:
                console.print('[cyan]Cookies:')
                console.print(session.cookies.get_dict())
@@ -61,7 +58,7 @@ async def validate_cookies(self, meta):
        else:
            console.print("[bold red]Missing Cookie File. (data/cookies/PTER.txt)")
            return False
-    
+
    async def search_existing(self, meta):
        dupes = []
        common = COMMON(config=self.config)
@@ -79,9 +76,9 @@ async def search_existing(self, meta):
        soup = BeautifulSoup(r.text, 'lxml')
        rows = soup.select('table.torrents > tr:has(table.torrentname)')
        for row in rows:
-            text=row.select_one('a[href^="details.php?id="]')
-            if text != None:
-                release=text.attrs['title']
+            text = row.select_one('a[href^="details.php?id="]')
+            if text is not None:
+                release = text.attrs['title']
                if release:
                    dupes.append(release)
        else:
@@ -91,27 +88,27 @@ async def search_existing(self, meta):

    async def get_type_category_id(self, meta):
        cat_id = "EXIT"
-        
+
        if meta['category'] == 'MOVIE':
            cat_id = 401
-        
+
        if meta['category'] == 'TV':
            cat_id = 404
-        
+
        if 'documentary' in meta.get("genres", "").lower() or 'documentary' in meta.get("keywords", "").lower():
            cat_id = 402
-        
+
        if 'Animation' in meta.get("genres", "").lower() or 'Animation' in meta.get("keywords", "").lower():
            cat_id = 403
-        
+
        return cat_id
-    
+
    async def get_area_id(self, meta):
-        
-        area_id=8
-        area_map = { #To do
+
+        area_id = 8
+        area_map = {  # To do
            "中国大陆": 1, "中国香港": 2, "中国台湾": 3, "美国": 4, "日本": 6, "韩国": 5,
-            "印度": 7, "法国": 4, "意大利": 4, "德国": 4, "西班牙": 4, "葡萄牙": 4, 
+            "印度": 7, "法国": 4, "意大利": 4, "德国": 4, "西班牙": 4, "葡萄牙": 4,
            "英国": 4, "阿根廷": 8, "澳大利亚": 4, "比利时": 4, "巴西": 8, "加拿大": 4,
            "瑞士": 4, "智利": 8,
        }
@@ -120,25 +117,23 @@ async def get_area_id(self, meta):
            if area in regions:
                return area_map[area]
        return area_id
-    
-    
    async def get_type_medium_id(self, meta):
        medium_id = "EXIT"
        # 1 = UHD Discs
        if meta.get('is_disc', '') in ("BDMV", "HD DVD"):
-            if meta['resolution']=='2160p':
+            if meta['resolution'] == '2160p':
                medium_id = 1
            else:
-                medium_id = 2 #BD Discs
-        
+                medium_id = 2  # BD Discs
+
        if meta.get('is_disc', '') == "DVD":
-            medium_id = 7 
-        
+            medium_id = 7
+
        # 4 = HDTV
        if meta.get('type', '') == "HDTV":
            medium_id = 4
-        
+
        # 6 = Encode
        if meta.get('type', '') in ("ENCODE", "WEBRIP"):
            medium_id = 6
@@ -163,9 +158,8 @@ async def edit_desc(self, meta):
            if int(meta.get('imdb_id', '0').replace('tt', '')) != 0:
                ptgen = await common.ptgen(meta, self.ptgen_api, self.ptgen_retry)
                if ptgen.strip() != '':
-                    descfile.write(ptgen) 
+                    descfile.write(ptgen)

-        bbcode = BBCODE()
        if meta.get('discs', []) != []:
            discs = meta['discs']
@@ -189,33 +183,33 @@ async def edit_desc(self, meta):
            desc = desc.replace('[img]', '[img]')
            desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE)
            descfile.write(desc)
-        
-        if self.rehost_images == True:
+
+        if self.rehost_images is True:
            console.print("[green]Rehosting Images...")
            images = await self.pterimg_upload(meta)
-            if len(images) > 0: 
+            if len(images) > 0:
                descfile.write("[center]")
                for each in range(len(images[:int(meta['screens'])])):
                    web_url = images[each]['web_url']
                    img_url = images[each]['img_url']
                    descfile.write(f"[url={web_url}][img]{img_url}[/img][/url]")
-                descfile.write("[/center]") 
+                descfile.write("[/center]")
        else:
            images = meta['image_list']
-            if len(images) > 0: 
+            if len(images) > 0:
                descfile.write("[center]")
                for each in range(len(images[:int(meta['screens'])])):
                    web_url = images[each]['web_url']
                    img_url = images[each]['img_url']
                    descfile.write(f"[url={web_url}][img]{img_url}[/img][/url]")
                descfile.write("[/center]")
-        
-        if self.signature != None:
+
+        if self.signature is not None:
            descfile.write("\n\n")
            descfile.write(self.signature)
        descfile.close()

-    async def get_auth_token(self,meta):
+    async def get_auth_token(self, meta):
        if not os.path.exists(f"{meta['base_dir']}/data/cookies"):
            Path(f"{meta['base_dir']}/data/cookies").mkdir(parents=True, exist_ok=True)
        cookiefile = f"{meta['base_dir']}/data/cookies/Pterimg.pickle"
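get_auth_token, continued below, keeps the image host's login alive by pickling the requests cookie jar to disk and reloading it on the next run. The restore half of that round trip as a self-contained sketch:

    import os
    import pickle
    import requests

    def load_session(cookiefile: str) -> requests.Session:
        # Restore a previously pickled cookie jar into a fresh session, if one exists.
        session = requests.Session()
        if os.path.exists(cookiefile):
            with open(cookiefile, 'rb') as cf:
                session.cookies.update(pickle.load(cf))
        return session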
@@ -228,23 +222,23 @@ async def get_auth_token(self, meta):
            loggedIn = await self.validate_login(r)
        else:
            console.print("[yellow]Pterimg Cookies not found. Creating new session.")
-        if loggedIn == True:
+        if loggedIn is True:
            auth_token = re.search(r'auth_token.*?\"(\w+)\"', r.text).groups()[0]
        else:
            data = {
-                'login-subject': self.username, 
-                'password': self.password, 
+                'login-subject': self.username,
+                'password': self.password,
                'keep-login': 1
            }
            r = session.get("https://s3.pterclub.com")
            data['auth_token'] = re.search(r'auth_token.*?\"(\w+)\"', r.text).groups()[0]
-            loginresponse = session.post(url='https://s3.pterclub.com/login',data=data)
+            loginresponse = session.post(url='https://s3.pterclub.com/login', data=data)
            if not loginresponse.ok:
-                raise LoginException("Failed to login to Pterimg. ")
+                raise LoginException("Failed to login to Pterimg. ")  # noqa #F405
            auth_token = re.search(r'auth_token = *?\"(\w+)\"', loginresponse.text).groups()[0]
            with open(cookiefile, 'wb') as cf:
                pickle.dump(session.cookies, cf)
-        
+
        return auth_token

    async def validate_login(self, response):
@@ -256,14 +250,14 @@ async def validate_login(self, response):

    async def pterimg_upload(self, meta):
        images = glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['filename']}-*.png")
-        url='https://s3.pterclub.com'
-        image_list=[]
+        url = 'https://s3.pterclub.com'
+        image_list = []
        data = {
            'type': 'file',
-            'action': 'upload', 
-            'nsfw': 0, 
+            'action': 'upload',
+            'nsfw': 0,
            'auth_token': await self.get_auth_token(meta)
-            }
+        }
        cookiefile = f"{meta['base_dir']}/data/cookies/Pterimg.pickle"
        with requests.Session() as session:
            if os.path.exists(cookiefile):
@@ -278,17 +272,17 @@ async def pterimg_upload(self, meta):
                except json.decoder.JSONDecodeError:
                    res = {}
                if not req.ok:
-                    if res['error']['message'] in ('重复上传','Duplicated upload'):
+                    if res['error']['message'] in ('重复上传', 'Duplicated upload'):
                        continue
-                    raise(f'HTTP {req.status_code}, reason: {res["error"]["message"]}')
+                    raise (f'HTTP {req.status_code}, reason: {res["error"]["message"]}')
                image_dict = {}
                image_dict['web_url'] = res['image']['url']
                image_dict['img_url'] = res['image']['url']
-                image_list.append(image_dict) 
+                image_list.append(image_dict)
        return image_list

    async def get_anon(self, anon):
-        if anon == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False:
+        if anon == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False:
            anon = 'no'
        else:
            anon = 'yes'
@@ -304,11 +298,11 @@ async def edit_name(self, meta):
            pter_name = pter_name.replace(meta["aka"], '')
        pter_name = pter_name.replace('PQ10', 'HDR')

-        if meta['type'] == 'WEBDL' and meta.get('has_encode_settings', False) == True:
+        if meta['type'] == 'WEBDL' and meta.get('has_encode_settings', False) is True:
            pter_name = pter_name.replace('H.264', 'x264')

        return pter_name
-    
+
    async def is_zhongzi(self, meta):
        if meta.get('is_disc', '') != 'BDMV':
            mi = meta['mediainfo']
@@ -316,11 +310,11 @@ async def is_zhongzi(self, meta):
                if track['@type'] == "Text":
                    language = track.get('Language')
                    if language == "zh":
-                        return 'yes' 
+                        return 'yes'
        else:
            for language in meta['bdinfo']['subtitles']:
                if language == "Chinese":
-                    return 'yes' 
+                    return 'yes'
        return None

    async def upload(self, meta):
@@ -328,39 +322,39 @@ async def upload(self, meta):
        common = COMMON(config=self.config)
        await common.edit_torrent(meta, self.tracker, self.source_flag)

-        desc_file=f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt"
+        desc_file = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt"
        if not os.path.exists(desc_file):
            await self.edit_desc(meta)
-        
+
        pter_name = await self.edit_name(meta)
-        
-        if meta['bdinfo'] != None:
+
+        if meta['bdinfo'] is not None:
            mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8')
        else:
            mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8')

        pter_desc = open(desc_file, 'r').read()
        torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent"
-        
+
        with open(torrent_path, 'rb') as torrentFile:
            if len(meta['filelist']) == 1:
                torrentFileName = unidecode(os.path.basename(meta['video']).replace(' ', '.'))
            else:
                torrentFileName = unidecode(os.path.basename(meta['path']).replace(' ', '.'))
            files = {
-                'file' : (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent"),
+                'file': (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent"),
            }
-            #use chinese small_descr
+            # use chinese small_descr
            if meta['ptgen']["trans_title"] != ['']:
-                small_descr=''
+                small_descr = ''
                for title_ in meta['ptgen']["trans_title"]:
-                    small_descr+=f'{title_} / '
-                small_descr+="| 类别:"+meta['ptgen']["genre"][0]
-                small_descr=small_descr.replace('/ |','|')
+                    small_descr += f'{title_} / '
+                small_descr += "| 类别:" + meta['ptgen']["genre"][0]
+                small_descr = small_descr.replace('/ |', '|')
            else:
-                small_descr=meta['title']
-            data= {
+                small_descr = meta['title']
+            data = {
                "name": pter_name,
                "small_descr": small_descr,
                "descr": pter_desc,
@@ -371,11 +365,11 @@ async def upload(self, meta):
                "zhongzi": await self.is_zhongzi(meta)
            }

-            if meta.get('personalrelease', False) == True:
-                data["pr"] = "yes" 
+            if meta.get('personalrelease', False) is True:
+                data["pr"] = "yes"

            url = "https://pterclub.com/takeupload.php"
-            
+
            # Submit
            if meta['debug']:
                console.print(url)
@@ -388,15 +382,15 @@ async def upload(self, meta):
                up = session.post(url=url, data=data, files=files)
                torrentFile.close()
                mi_dump.close()
-                
+
                if up.url.startswith("https://pterclub.com/details.php?id="):
-                    console.print(f"[green]Uploaded to: [yellow]{up.url.replace('&uploaded=1','')}[/yellow][/green]")
+                    console.print(f"[green]Uploaded to: [yellow]{up.url.replace('&uploaded=1', '')}[/yellow][/green]")
                    id = re.search(r"(id=)(\d+)", urlparse(up.url).query).group(2)
                    await self.download_new_torrent(id, torrent_path)
                else:
                    console.print(data)
                    console.print("\n\n")
-                    raise UploadException(f"Upload to Pter Failed: result URL {up.url} ({up.status_code}) was not expected", 'red')
+                    raise UploadException(f"Upload to Pter Failed: result URL {up.url} ({up.status_code}) was not expected", 'red')  # noqa #F405
        return

    async def download_new_torrent(self, id, torrent_path):
@@ -408,6 +402,3 @@ async def download_new_torrent(self, id, torrent_path):
        else:
            console.print("[red]There was an issue downloading the new .torrent from pter")
            console.print(r.text)
-
-
-    
\ No newline at end of file
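PTER's download_new_torrent above re-fetches the site-registered .torrent after a successful upload and overwrites the local copy. A minimal sketch of the idea (the URL and params are site-specific and assumed here):

    import requests

    def save_torrent(url: str, params: dict, torrent_path: str) -> bool:
        # Fetch the freshly registered .torrent and write it over the local file.
        r = requests.get(url=url, params=params)
        if r.status_code == 200:
            with open(torrent_path, 'wb') as tor:
                tor.write(r.content)
            return True
        return False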
diff --git a/src/trackers/R4E.py b/src/trackers/R4E.py
index 0528c10cc..c3ba5abe5 100644
--- a/src/trackers/R4E.py
+++ b/src/trackers/R4E.py
@@ -2,16 +2,14 @@
 # import discord
 import asyncio
 import requests
-from difflib import SequenceMatcher
 from str2bool import str2bool
-import json
 import tmdbsimple as tmdb
-import os
 import platform

 from src.trackers.COMMON import COMMON
 from src.console import console

+
 class R4E():
    """
    Edit for Tracker:
@@ -28,7 +26,7 @@ def __init__(self, config):
        self.signature = None
        self.banned_groups = [""]
        pass
-    
+
    async def upload(self, meta):
        common = COMMON(config=self.config)
        await common.edit_torrent(meta, self.tracker, self.source_flag)
@@ -36,11 +34,11 @@ async def upload(self, meta):
        type_id = await self.get_type_id(meta['resolution'])
        await common.unit3d_edit_desc(meta, self.tracker, self.signature)
        name = await self.edit_name(meta)
-        if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS']['R4E'].get('anon', "False")))) == False:
+        if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS']['R4E'].get('anon', "False")))) is False:
            anon = 0
        else:
            anon = 1
-        if meta['bdinfo'] != None:
+        if meta['bdinfo'] is not None:
            mi_dump = None
            bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read()
        else:
@@ -50,21 +48,21 @@ async def upload(self, meta):
        open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[R4E]{meta['clean_name']}.torrent", 'rb')
        files = {'torrent': open_torrent}
        data = {
-            'name' : name,
-            'description' : desc,
-            'mediainfo' : mi_dump,
-            'bdinfo' : bd_dump,
-            'category_id' : cat_id,
-            'type_id' : type_id,
-            'tmdb' : meta['tmdb'],
-            'imdb' : meta['imdb_id'].replace('tt', ''),
-            'tvdb' : meta['tvdb_id'],
-            'mal' : meta['mal_id'],
-            'igdb' : 0,
-            'anonymous' : anon,
-            'stream' : meta['stream'],
-            'sd' : meta['sd'],
-            'keywords' : meta['keywords'],
+            'name': name,
+            'description': desc,
+            'mediainfo': mi_dump,
+            'bdinfo': bd_dump,
+            'category_id': cat_id,
+            'type_id': type_id,
+            'tmdb': meta['tmdb'],
+            'imdb': meta['imdb_id'].replace('tt', ''),
+            'tvdb': meta['tvdb_id'],
+            'mal': meta['mal_id'],
+            'igdb': 0,
+            'anonymous': anon,
+            'stream': meta['stream'],
+            'sd': meta['sd'],
+            'keywords': meta['keywords'],
            # 'personal_release' : int(meta.get('personalrelease', False)),  NOT IMPLEMENTED on R4E
            # 'internal' : 0,
            # 'featured' : 0,
@@ -79,21 +77,19 @@ async def upload(self, meta):
        if meta.get('category') == "TV":
            data['season_number'] = meta.get('season_int', '0')
            data['episode_number'] = meta.get('episode_int', '0')
-        if meta['debug'] == False:
+        if meta['debug'] is False:
            response = requests.post(url=url, files=files, data=data, headers=headers)
            try:
-                
+
                console.print(response.json())
-            except:
+            except Exception:
                console.print("It may have uploaded, go check")
-                return
+                return
        else:
-            console.print(f"[cyan]Request Data:")
+            console.print("[cyan]Request Data:")
            console.print(data)
        open_torrent.close()

-
-
    async def edit_name(self, meta):
        name = meta['name']
        return name
@@ -103,34 +99,34 @@ async def get_cat_id(self, category_name, tmdb_id):
            movie = tmdb.Movies(tmdb_id)
            movie_info = movie.info()
            is_docu = self.is_docu(movie_info['genres'])
-            category_id = '70' # Motorsports Movie
+            category_id = '70'  # Motorsports Movie
            if is_docu:
-                category_id = '66' # Documentary
+                category_id = '66'  # Documentary
        elif category_name == 'TV':
            tv = tmdb.TV(tmdb_id)
            tv_info = tv.info()
            is_docu = self.is_docu(tv_info['genres'])
-            category_id = '79' # TV Series
+            category_id = '79'  # TV Series
            if is_docu:
-                category_id = '2' # TV Documentary
+                category_id = '2'  # TV Documentary
        else:
-            category_id = '24' 
+            category_id = '24'
        return category_id

    async def get_type_id(self, type):
        type_id = {
-            '8640p':'2160p',
-            '4320p': '2160p',
-            '2160p': '2160p',
-            '1440p' : '1080p',
+            '8640p': '2160p',
+            '4320p': '2160p',
+            '2160p': '2160p',
+            '1440p': '1080p',
            '1080p': '1080p',
-            '1080i':'1080i',
-            '720p': '720p',
-            '576p': 'SD',
+            '1080i': '1080i',
+            '720p': '720p',
+            '576p': 'SD',
            '576i': 'SD',
-            '480p': 'SD',
+            '480p': 'SD',
            '480i': 'SD'
-            }.get(type, '10')
+        }.get(type, '10')
        return type_id

    async def is_docu(self, genres):
@@ -138,18 +134,18 @@ async def is_docu(self, genres):
        for each in genres:
            if each['id'] == 99:
                is_docu = True
-        return is_docu    
+        return is_docu

    async def search_existing(self, meta):
        dupes = []
        console.print("[yellow]Searching for existing torrents on site...")
        url = "https://racing4everyone.eu/api/torrents/filter"
        params = {
-            'api_token' : self.config['TRACKERS']['R4E']['api_key'].strip(),
-            'tmdb' : meta['tmdb'],
-            'categories[]' : await self.get_cat_id(meta['category']),
-            'types[]' : await self.get_type_id(meta['type']),
-            'name' : ""
+            'api_token': self.config['TRACKERS']['R4E']['api_key'].strip(),
+            'tmdb': meta['tmdb'],
+            'categories[]': await self.get_cat_id(meta['category']),
+            'types[]': await self.get_type_id(meta['type']),
+            'name': ""
        }
        if meta['category'] == 'TV':
            params['name'] = f"{meta.get('season', '')}{meta.get('episode', '')}"
@@ -163,8 +159,8 @@ async def search_existing(self, meta):
            # difference = SequenceMatcher(None, meta['clean_name'], result).ratio()
            # if difference >= 0.05:
            dupes.append(result)
-        except:
+        except Exception:
            console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect')
            await asyncio.sleep(5)

-    return dupes
\ No newline at end of file
+    return dupes
diff --git a/src/trackers/SN.py b/src/trackers/SN.py
index b987d3f37..04547ce89 100644
--- a/src/trackers/SN.py
+++ b/src/trackers/SN.py
@@ -1,7 +1,6 @@
 # -*- coding: utf-8 -*-
 import requests
 import asyncio
-import traceback

 from src.trackers.COMMON import COMMON
 from src.console import console
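R4E's category logic keys off TMDb genre id 99 (Documentary). Independent of this lint pass, note that is_docu is declared async but called without await in get_cat_id, so the truthiness test there may not behave as it reads. The genre check itself, in synchronous form:

    import tmdbsimple as tmdb

    def is_documentary(genres: list) -> bool:
        # TMDb represents Documentary as genre id 99.
        return any(each['id'] == 99 for each in genres)

    # movie_info = tmdb.Movies(tmdb_id).info()
    # is_documentary(movie_info['genres'])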
diff --git a/src/trackers/STC.py b/src/trackers/STC.py
index 71d70ce2f..8e8c9ef52 100644
--- a/src/trackers/STC.py
+++ b/src/trackers/STC.py
@@ -1,14 +1,12 @@
 # -*- coding: utf-8 -*-
 import asyncio
 import requests
-from difflib import SequenceMatcher
 from str2bool import str2bool
-import json
-import os
 import platform

 from src.trackers.COMMON import COMMON
-from src.console import console 
+from src.console import console
+

 class STC():
    """
@@ -27,7 +25,7 @@ def __init__(self, config):
        self.signature = '\n[center][url=https://skipthecommericals.xyz/pages/1]Please Seed[/url][/center]'
        self.banned_groups = [""]
        pass
-    
+
    async def upload(self, meta):
        common = COMMON(config=self.config)
        await common.edit_torrent(meta, self.tracker, self.source_flag)
@@ -36,11 +34,11 @@ async def upload(self, meta):
        type_id = await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('sd', 0), meta.get('category', ""))
        resolution_id = await self.get_res_id(meta['resolution'])
        stc_name = await self.edit_name(meta)
-        if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False:
+        if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False:
            anon = 0
        else:
            anon = 1
-        if meta['bdinfo'] != None:
+        if meta['bdinfo'] is not None:
            mi_dump = None
            bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read()
        else:
@@ -50,34 +48,34 @@ async def upload(self, meta):
        open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb')
        files = {'torrent': open_torrent}
        data = {
-            'name' : stc_name,
-            'description' : desc,
-            'mediainfo' : mi_dump,
-            'bdinfo' : bd_dump,
-            'category_id' : cat_id,
-            'type_id' : type_id,
-            'resolution_id' : resolution_id,
-            'tmdb' : meta['tmdb'],
-            'imdb' : meta['imdb_id'].replace('tt', ''),
-            'tvdb' : meta['tvdb_id'],
-            'mal' : meta['mal_id'],
-            'igdb' : 0,
-            'anonymous' : anon,
-            'stream' : meta['stream'],
-            'sd' : meta['sd'],
-            'keywords' : meta['keywords'],
-            'personal_release' : int(meta.get('personalrelease', False)),
-            'internal' : 0,
-            'featured' : 0,
-            'free' : 0,
-            'doubleup' : 0,
-            'sticky' : 0,
+            'name': stc_name,
+            'description': desc,
+            'mediainfo': mi_dump,
+            'bdinfo': bd_dump,
+            'category_id': cat_id,
+            'type_id': type_id,
+            'resolution_id': resolution_id,
+            'tmdb': meta['tmdb'],
+            'imdb': meta['imdb_id'].replace('tt', ''),
+            'tvdb': meta['tvdb_id'],
+            'mal': meta['mal_id'],
+            'igdb': 0,
+            'anonymous': anon,
+            'stream': meta['stream'],
+            'sd': meta['sd'],
+            'keywords': meta['keywords'],
+            'personal_release': int(meta.get('personalrelease', False)),
+            'internal': 0,
+            'featured': 0,
+            'free': 0,
+            'doubleup': 0,
+            'sticky': 0,
        }
        # Internal
-        if self.config['TRACKERS'][self.tracker].get('internal', False) == True:
+        if self.config['TRACKERS'][self.tracker].get('internal', False) is True:
            if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])):
                data['internal'] = 1
-        
+
        if meta.get('category') == "TV":
            data['season_number'] = meta.get('season_int', '0')
            data['episode_number'] = meta.get('episode_int', '0')
@@ -87,43 +85,41 @@ async def upload(self, meta):
        params = {
            'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip()
        }
-        
-        if meta['debug'] == False:
+
+        if meta['debug'] is False:
            response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params)
            try:
-                
+
                console.print(response.json())
-            except:
+            except Exception:
                console.print("It may have uploaded, go check")
                open_torrent.close()
-                return
+                return
        else:
-            console.print(f"[cyan]Request Data:")
+            console.print("[cyan]Request Data:")
            console.print(data)
        open_torrent.close()

-
-
    async def edit_name(self, meta):
        stc_name = meta.get('name')
        return stc_name

    async def get_cat_id(self, category_name):
        category_id = {
-            'MOVIE': '1', 
-            'TV': '2', 
-            }.get(category_name, '0')
+            'MOVIE': '1',
+            'TV': '2',
+        }.get(category_name, '0')
        return category_id

    async def get_type_id(self, type, tv_pack, sd, category):
        type_id = {
-            'DISC': '1', 
+            'DISC': '1',
            'REMUX': '2',
-            'WEBDL': '4', 
-            'WEBRIP': '5', 
+            'WEBDL': '4',
+            'WEBRIP': '5',
            'HDTV': '6',
            'ENCODE': '3'
-            }.get(type, '0')
+        }.get(type, '0')
        if tv_pack == 1:
            if sd == 1:
                # Season SD
@@ -146,34 +142,30 @@ async def get_type_id(self, type, tv_pack, sd, category):

    async def get_res_id(self, resolution):
        resolution_id = {
-            '8640p':'10',
-            '4320p': '1',
-            '2160p': '2',
-            '1440p' : '3',
+            '8640p': '10',
+            '4320p': '1',
+            '2160p': '2',
+            '1440p': '3',
            '1080p': '3',
-            '1080i':'4',
-            '720p': '5',
-            '576p': '6',
+            '1080i': '4',
+            '720p': '5',
+            '576p': '6',
            '576i': '7',
-            '480p': '8',
+            '480p': '8',
            '480i': '9'
-            }.get(resolution, '10')
+        }.get(resolution, '10')
        return resolution_id

-
-
-
-
-
-
    async def search_existing(self, meta):
        dupes = []
        console.print("[yellow]Searching for existing torrents on site...")
        params = {
-            'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(),
-            'tmdbId' : meta['tmdb'],
-            'categories[]' : await self.get_cat_id(meta['category']),
-            'types[]' : await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('sd', 0), meta.get('category', "")),
-            'resolutions[]' : await self.get_res_id(meta['resolution']),
-            'name' : ""
+            'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(),
+            'tmdbId': meta['tmdb'],
+            'categories[]': await self.get_cat_id(meta['category']),
+            'types[]': await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('sd', 0), meta.get('category', "")),
+            'resolutions[]': await self.get_res_id(meta['resolution']),
+            'name': ""
        }
        if meta['category'] == 'TV':
            params['name'] = f"{meta.get('season', '')}{meta.get('episode', '')}"
@@ -185,8 +177,8 @@ async def search_existing(self, meta):
            for each in response['data']:
                result = [each][0]['attributes']['name']
                dupes.append(result)
-        except:
+        except Exception:
            console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect')
            await asyncio.sleep(5)

-    return dupes
\ No newline at end of file
+    return dupes
int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } headers = { 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' @@ -81,26 +79,24 @@ async def upload(self, meta): 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - + if meta.get('category') == "TV": console.print('[bold red]This site only ALLOWS Movies.') - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - async def edit_name(self, meta): stt_name = meta['name'] return stt_name @@ -108,47 +104,46 @@ async def edit_name(self, meta): async def get_cat_id(self, category_name): category_id = { 'MOVIE': '1', - }.get(category_name, '0') + }.get(category_name, '0') return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', + 'DISC': '1', 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', + 'WEBDL': '4', + 'WEBRIP': '5', 'FANRES': '6', 'ENCODE': '3' - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { - # '8640p':'10', - '4320p': '1', - '2160p': '2', + # '8640p':'10', + '4320p': '1', + '2160p': '2', # '1440p' : '3', '1080p': '3', '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '11') + '720p': '5', + '576p': '6', + '576i': '7', + '480p': '8', + '480i': '9' + }.get(resolution, '11') return resolution_id - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta['category'] == 'TV': console.print('[bold red]Unable to search site for TV as this site only ALLOWS Movies.') @@ -163,8 +158,8 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes \ No newline at end of file + return dupes diff --git a/src/trackers/TDC.py b/src/trackers/TDC.py index 74801157a..cd795249e 100644 --- a/src/trackers/TDC.py +++ b/src/trackers/TDC.py @@ -2,11 +2,11 @@ # import discord import asyncio import requests -import os from str2bool import str2bool from src.trackers.COMMON import COMMON -from src.console import console +from src.console import console + class TDC(): """ @@ -17,9 +17,6 @@ class TDC(): Upload """ - ############################################################### - ######## EDIT ME ######## - ############################################################### def __init__(self, config): self.config = config self.tracker = 'TDC' @@ -29,45 +26,41 @@ def __init__(self, config): self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = [""] pass - + async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '0') return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', + 'DISC': '1', 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', + 'WEBDL': '4', + 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '3' - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', - '4320p': '1', - '2160p': '2', - '1440p' : '3', + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', '1080p': '3', - '1080i':'4', - '720p': '5', - '576p': '6', + '1080i': '4', + '720p': '5', + '576p': '6', '576i': '7', - '480p': '8', + '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### - async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) @@ -77,12 +70,12 @@ async def upload(self, meta): await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -92,34 +85,34 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : meta['name'], - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', 
False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': meta['name'], + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - + if region_id != 0: data['region_id'] = region_id if distributor_id != 0: @@ -131,41 +124,37 @@ async def upload(self, meta): 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:53.0) Gecko/20100101 Firefox/53.0' } params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - - if meta['debug'] == False: + + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta['category'] == 'TV': params['name'] = params['name'] + f"{meta.get('season', '')}{meta.get('episode', '')}" if meta.get('edition', "") != "": params['name'] = params['name'] + meta['edition'] - + try: response = requests.get(url=self.search_url, params=params) response = response.json() @@ -174,7 +163,7 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') await asyncio.sleep(5) diff --git a/src/trackers/THR.py b/src/trackers/THR.py index f1f74e15b..84a97ad3f 100644 --- a/src/trackers/THR.py +++ b/src/trackers/THR.py @@ -4,15 +4,13 @@ import requests import json import glob -from difflib import SequenceMatcher import cli_ui -import base64 import os import re import platform from unidecode import unidecode -from src.console import console +from src.console import console class THR(): @@ -29,19 +27,19 @@ def __init__(self, config): self.password = config['TRACKERS']['THR'].get('password') self.banned_groups = [""] pass - + async def upload(self, session, meta): await self.edit_torrent(meta) cat_id = await self.get_cat_id(meta) subs = self.get_subtitles(meta) - pronfo = await self.edit_desc(meta) + pronfo = await self.edit_desc(meta) # noqa #F841 thr_name = unidecode(meta['name'].replace('DD+', 'DDP')) # Confirm the correct naming order for FL cli_ui.info(f"THR name: {thr_name}") - if meta.get('unattended', False) == False: + if meta.get('unattended', False) is False: thr_confirm = cli_ui.ask_yes_no("Correct?", default=False) - if thr_confirm != True: + if thr_confirm is not True: thr_name_manually = cli_ui.ask_string("Please enter a proper name", default="") if thr_name_manually == "": console.print('No proper name given') @@ -51,7 +49,6 @@ async def upload(self, session, meta): thr_name = thr_name_manually torrent_name = re.sub(r"[^0-9a-zA-Z. '\-\[\]]+", " ", thr_name) - if meta.get('is_disc', '') == 'BDMV': mi_file = None # bd_file = f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8' @@ -65,39 +62,38 @@ async def upload(self, session, meta): with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[THR]DESCRIPTION.txt", 'r') as f: desc = f.read() f.close() - + torrent_path = os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/[THR]{meta['clean_name']}.torrent") with open(torrent_path, 'rb') as f: tfile = f.read() f.close() - - #Upload Form + + # Upload Form url = 'https://www.torrenthr.org/takeupload.php' files = { - 'tfile' : (f'{torrent_name}.torrent', tfile) + 'tfile': (f'{torrent_name}.torrent', tfile) } payload = { - 'name' : thr_name, - 'descr' : desc, - 'type' : cat_id, - 'url' : f"https://www.imdb.com/title/tt{meta.get('imdb_id').replace('tt', '')}/", - 'tube' : meta.get('youtube', '') + 'name': thr_name, + 'descr': desc, + 'type': cat_id, + 'url': f"https://www.imdb.com/title/tt{meta.get('imdb_id').replace('tt', '')}/", + 'tube': meta.get('youtube', '') } headers = { - 'User-Agent' : f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } - #If pronfo fails, put mediainfo into THR parser + # If pronfo fails, put mediainfo into THR parser if meta.get('is_disc', '') != 'BDMV': files['nfo'] = ("MEDIAINFO.txt", mi_file) if subs != []: payload['subs[]'] = tuple(subs) - - if meta['debug'] == False: + if meta['debug'] is False: thr_upload_prompt = True else: thr_upload_prompt = cli_ui.ask_yes_no("send to takeupload.php?", default=False) - if thr_upload_prompt == True: + if thr_upload_prompt is True: await asyncio.sleep(0.5) response = session.post(url=url, files=files, data=payload, headers=headers) try: @@ -105,18 +101,16 @@ async def upload(self, session, meta): console.print(response.text) if response.url.endswith('uploaded=1'): console.print(f'[green]Successfully Uploaded at: {response.url}') - #Check if actually uploaded - except: + # Check if actually 
uploaded + except Exception: if meta['debug']: console.print(response.text) console.print("It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(payload) - - - + async def get_cat_id(self, meta): if meta['category'] == "MOVIE": if meta.get('is_disc') == "BMDV": @@ -133,7 +127,7 @@ async def get_cat_id(self, meta): cat = '7' else: cat = '34' - elif meta.get('anime') != False: + elif meta.get('anime') is not False: cat = '31' return cat @@ -156,27 +150,23 @@ def get_subtitles(self, meta): if sub_langs != []: subs = [] sub_lang_map = { - 'hr' : 1, 'en' : 2, 'bs' : 3, 'sr' : 4, 'sl' : 5, - 'Croatian' : 1, 'English' : 2, 'Bosnian' : 3, 'Serbian' : 4, 'Slovenian' : 5 + 'hr': 1, 'en': 2, 'bs': 3, 'sr': 4, 'sl': 5, + 'Croatian': 1, 'English': 2, 'Bosnian': 3, 'Serbian': 4, 'Slovenian': 5 } for sub in sub_langs: language = sub_lang_map.get(sub) - if language != None: + if language is not None: subs.append(language) return subs - - - - async def edit_torrent(self, meta): if os.path.exists(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent"): THR_torrent = Torrent.read(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") THR_torrent.metainfo['announce'] = self.config['TRACKERS']['THR']['announce_url'] THR_torrent.metainfo['info']['source'] = "[https://www.torrenthr.org] TorrentHR.org" Torrent.copy(THR_torrent).write(f"{meta['base_dir']}/tmp/{meta['uuid']}/[THR]{meta['clean_name']}.torrent", overwrite=True) - return - + return + async def edit_desc(self, meta): pronfo = False base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() @@ -213,11 +203,11 @@ async def edit_desc(self, meta): for image in image_glob: url = "https://img2.torrenthr.org/api/1/upload" data = { - 'key' : self.config['TRACKERS']['THR'].get('img_api'), + 'key': self.config['TRACKERS']['THR'].get('img_api'), # 'source' : base64.b64encode(open(image, "rb").read()).decode('utf8') } - files = {'source' : open(image, 'rb')} - response = requests.post(url, data = data, files=files) + files = {'source': open(image, 'rb')} + response = requests.post(url, data=data, files=files) try: response = response.json() # med_url = response['image']['medium']['url'] @@ -239,22 +229,22 @@ async def edit_desc(self, meta): # ProNFO pronfo_url = f"https://www.pronfo.com/api/v1/access/upload/{self.config['TRACKERS']['THR'].get('pronfo_api_key', '')}" data = { - 'content' : open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r').read(), - 'theme' : self.config['TRACKERS']['THR'].get('pronfo_theme', 'gray'), - 'rapi' : self.config['TRACKERS']['THR'].get('pronfo_rapi_id') + 'content': open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r').read(), + 'theme': self.config['TRACKERS']['THR'].get('pronfo_theme', 'gray'), + 'rapi': self.config['TRACKERS']['THR'].get('pronfo_rapi_id') } response = requests.post(pronfo_url, data=data) try: response = response.json() - if response.get('error', True) == False: + if response.get('error', True) is False: mi_img = response.get('url') desc.write(f"\n[img]{mi_img}[/img]\n") pronfo = True - except: + except Exception: console.print('[bold red]Error parsing pronfo response, using THR parser instead') if meta['debug']: console.print(f"[red]{response}") - console.print(response.text) + console.print(response.text) for each in image_list[:int(meta['screens'])]: desc.write(f"\n[img]{each}[/img]\n") @@ -267,9 +257,6 @@ async def edit_desc(self, meta): desc.close() return pronfo - - - 
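
The edit_desc routine above tries the ProNFO rendering service first and only falls back to THR's own MediaInfo parser when the response cannot be parsed or reports an error. A minimal sketch of that try-then-fall-back pattern, detached from the tracker code (the endpoint and payload keys here are illustrative placeholders, not the real ProNFO contract):

import requests

def render_nfo(content, endpoint, api_key):
    # Sketch only: returns (rendered_ok, image_url). On any failure the
    # caller keeps rendered_ok == False and uses the site-side parser.
    try:
        response = requests.post(endpoint, data={'content': content, 'key': api_key}, timeout=30)
        data = response.json()
        if data.get('error', True) is False:
            return True, data.get('url')
    except (ValueError, requests.RequestException):
        pass
    return False, None
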
def search_existing(self, session, imdb_id): from bs4 import BeautifulSoup imdb_id = imdb_id.replace('tt', '') @@ -288,12 +275,12 @@ def search_existing(self, session, imdb_id): def login(self, session): url = 'https://www.torrenthr.org/takelogin.php' payload = { - 'username' : self.username, - 'password' : self.password, - 'ssl' : 'yes' + 'username': self.username, + 'password': self.password, + 'ssl': 'yes' } headers = { - 'User-Agent' : f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } resp = session.post(url, headers=headers, data=payload) if resp.url == "https://www.torrenthr.org/index.php": diff --git a/src/trackers/TTG.py b/src/trackers/TTG.py index 6795d13cc..94b27bc7d 100644 --- a/src/trackers/TTG.py +++ b/src/trackers/TTG.py @@ -4,15 +4,12 @@ import asyncio import re import os -from pathlib import Path -import traceback -import json import cli_ui from str2bool import str2bool from unidecode import unidecode -from urllib.parse import urlparse, quote +from urllib.parse import urlparse from src.trackers.COMMON import COMMON -from src.exceptions import * +from src.exceptions import * # noqa #F405 from src.console import console @@ -28,11 +25,10 @@ def __init__(self, config): self.passan = str(config['TRACKERS']['TTG'].get('login_answer', '')).strip() self.uid = str(config['TRACKERS']['TTG'].get('user_id', '')).strip() self.passkey = str(config['TRACKERS']['TTG'].get('announce_url', '')).strip().split('/')[-1] - + self.signature = None self.banned_groups = [""] - async def edit_name(self, meta): ttg_name = meta['name'] @@ -48,46 +44,45 @@ async def get_type_id(self, meta): if meta['category'] == "MOVIE": # 51 = DVDRip if meta['resolution'].startswith("720"): - type_id = 52 # 720p + type_id = 52 # 720p if meta['resolution'].startswith("1080"): - type_id = 53 # 1080p/i + type_id = 53 # 1080p/i if meta['is_disc'] == "BDMV": - type_id = 54 # Blu-ray disc - + type_id = 54 # Blu-ray disc + elif meta['category'] == "TV": if meta.get('tv_pack', 0) != 1: # TV Singles if meta['resolution'].startswith("720"): - type_id = 69 # 720p TV EU/US + type_id = 69 # 720p TV EU/US if lang in ('ZH', 'CN', 'CMN'): - type_id = 76 # Chinese + type_id = 76 # Chinese if meta['resolution'].startswith("1080"): - type_id = 70 # 1080 TV EU/US + type_id = 70 # 1080 TV EU/US if lang in ('ZH', 'CN', 'CMN'): - type_id = 75 # Chinese + type_id = 75 # Chinese if lang in ('KR', 'KO'): - type_id = 75 # Korean + type_id = 75 # Korean if lang in ('JA', 'JP'): - type_id = 73 # Japanese + type_id = 73 # Japanese else: # TV Packs - type_id = 87 # EN/US + type_id = 87 # EN/US if lang in ('KR', 'KO'): - type_id = 99 # Korean + type_id = 99 # Korean if lang in ('JA', 'JP'): - type_id = 88 # Japanese + type_id = 88 # Japanese if lang in ('ZH', 'CN', 'CMN'): - type_id = 90 # Chinese - - + type_id = 90 # Chinese + if "documentary" in meta.get("genres", "").lower().replace(' ', '').replace('-', '') or 'documentary' in meta.get("keywords", "").lower().replace(' ', '').replace('-', ''): if meta['resolution'].startswith("720"): - type_id = 62 # 720p + type_id = 62 # 720p if meta['resolution'].startswith("1080"): - type_id = 63 # 1080 + type_id = 63 # 1080 if meta.get('is_disc', '') == 'BDMV': - type_id = 64 # BDMV - + type_id = 64 # BDMV + if "animation" in meta.get("genres", "").lower().replace(' ', '').replace('-', '') or 'animation' in meta.get("keywords", "").lower().replace(' ', '').replace('-', ''): if meta.get('sd', 1) == 0: 
type_id = 58 @@ -104,16 +99,12 @@ async def get_type_id(self, meta): return type_id async def get_anon(self, anon): - if anon == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if anon == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 'no' else: anon = 'yes' return anon - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### - async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) @@ -121,16 +112,16 @@ async def upload(self, meta): ttg_name = await self.edit_name(meta) # FORM - # type = category dropdown - # name = name - # descr = description - # anonymity = "yes" / "no" - # nodistr = "yes" / "no" (exclusive?) not required - # imdb_c = tt123456 - # + # type = category dropdown + # name = name + # descr = description + # anonymity = "yes" / "no" + # nodistr = "yes" / "no" (exclusive?) not required + # imdb_c = tt123456 + # # POST > upload/upload - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8') else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8') @@ -143,21 +134,20 @@ async def upload(self, meta): else: torrentFileName = unidecode(os.path.basename(meta['path']).replace(' ', '.')) files = { - 'file' : (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent"), - 'nfo' : ("torrent.nfo", mi_dump) + 'file': (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent"), + 'nfo': ("torrent.nfo", mi_dump) } data = { - 'MAX_FILE_SIZE' : '4000000', - 'team' : '', - 'hr' : 'no', - 'name' : ttg_name, - 'type' : await self.get_type_id(meta), - 'descr' : ttg_desc.rstrip(), - + 'MAX_FILE_SIZE': '4000000', + 'team': '', + 'hr': 'no', + 'name': ttg_name, + 'type': await self.get_type_id(meta), + 'descr': ttg_desc.rstrip(), + + 'anonymity': await self.get_anon(meta['anon']), + 'nodistr': 'no', - 'anonymity' : await self.get_anon(meta['anon']), - 'nodistr' : 'no', - } url = "https://totheglory.im/takeupload.php" if int(meta['imdb_id'].replace('tt', '')) != 0: @@ -175,7 +165,7 @@ async def upload(self, meta): up = session.post(url=url, data=data, files=files) torrentFile.close() mi_dump.close() - + if up.url.startswith("https://totheglory.im/details.php?id="): console.print(f"[green]Uploaded to: [yellow]{up.url}[/yellow][/green]") id = re.search(r"(id=)(\d+)", urlparse(up.url).query).group(2) @@ -184,17 +174,16 @@ async def upload(self, meta): console.print(data) console.print("\n\n") console.print(up.text) - raise UploadException(f"Upload to TTG Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') + raise UploadException(f"Upload to TTG Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') # noqa #F405 return - async def search_existing(self, meta): dupes = [] with requests.Session() as session: cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/TTG.pkl") with open(cookiefile, 'rb') as cf: session.cookies.update(pickle.load(cf)) - + if int(meta['imdb_id'].replace('tt', '')) != 0: imdb = f"imdb{meta['imdb_id'].replace('tt', '')}" else: @@ -218,18 +207,15 @@ async def search_existing(self, meta): return dupes - - - async def validate_credentials(self, meta): cookiefile = 
os.path.abspath(f"{meta['base_dir']}/data/cookies/TTG.pkl") if not os.path.exists(cookiefile): await self.login(cookiefile) vcookie = await self.validate_cookies(meta, cookiefile) - if vcookie != True: + if vcookie is not True: console.print('[red]Failed to validate cookies. Please confirm that the site is up and your passkey is valid.') recreate = cli_ui.ask_yes_no("Log in again and create new session?") - if recreate == True: + if recreate is True: if os.path.exists(cookiefile): os.remove(cookiefile) await self.login(cookiefile) @@ -238,7 +224,7 @@ async def validate_credentials(self, meta): else: return False return True - + async def validate_cookies(self, meta, cookiefile): url = "https://totheglory.im" if os.path.exists(cookiefile): @@ -259,7 +245,7 @@ async def validate_cookies(self, meta, cookiefile): async def login(self, cookiefile): url = "https://totheglory.im/takelogin.php" - data={ + data = { 'username': self.username, 'password': self.password, 'passid': self.passid, @@ -270,11 +256,11 @@ async def login(self, cookiefile): await asyncio.sleep(0.5) if response.url.endswith('2fa.php'): soup = BeautifulSoup(response.text, 'html.parser') - auth_token = soup.find('input', {'name' : 'authenticity_token'}).get('value') + auth_token = soup.find('input', {'name': 'authenticity_token'}).get('value') two_factor_data = { - 'otp' : console.input('[yellow]TTG 2FA Code: '), - 'authenticity_token' : auth_token, - 'uid' : self.uid + 'otp': console.input('[yellow]TTG 2FA Code: '), + 'authenticity_token': auth_token, + 'uid': self.uid } two_factor_url = "https://totheglory.im/take2fa.php" response = session.post(two_factor_url, data=two_factor_data) @@ -290,8 +276,6 @@ async def login(self, cookiefile): console.print(response.url) return - - async def edit_desc(self, meta): base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as descfile: @@ -301,10 +285,10 @@ async def edit_desc(self, meta): if int(meta.get('imdb_id', '0').replace('tt', '')) != 0: ptgen = await common.ptgen(meta) if ptgen.strip() != '': - descfile.write(ptgen) + descfile.write(ptgen) # Add This line for all web-dls - if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '' and meta.get('description', None) == None: + if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '' and meta.get('description', None) is None: descfile.write(f"[center][b][color=#ff00ff][size=3]{meta['service_longname']}的无损REMUX片源,没有转码/This release is sourced from {meta['service_longname']} and is not transcoded, just remuxed from the direct {meta['service_longname']} stream[/size][/color][/b][/center]") bbcode = BBCODE() if meta.get('discs', []) != []: @@ -330,14 +314,14 @@ async def edit_desc(self, meta): desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) descfile.write(desc) images = meta['image_list'] - if len(images) > 0: + if len(images) > 0: descfile.write("[center]") for each in range(len(images[:int(meta['screens'])])): web_url = images[each]['web_url'] img_url = images[each]['img_url'] descfile.write(f"[url={web_url}][img]{img_url}[/img][/url]") descfile.write("[/center]") - if self.signature != None: + if self.signature is not None: descfile.write("\n\n") descfile.write(self.signature) descfile.close() @@ -350,4 +334,4 @@ async def download_new_torrent(self, id, torrent_path): tor.write(r.content) else: console.print("[red]There was an issue downloading the new .torrent from TTG") - 
console.print(r.text) \ No newline at end of file + console.print(r.text) diff --git a/src/trackers/UTP.py b/src/trackers/UTP.py index 496a52fd3..d6bf86d65 100644 --- a/src/trackers/UTP.py +++ b/src/trackers/UTP.py @@ -3,12 +3,12 @@ import asyncio import requests from str2bool import str2bool -import os import platform from src.trackers.COMMON import COMMON from src.console import console + class UTP(): """ Edit for Tracker: @@ -24,7 +24,7 @@ def __init__(self, config): self.search_url = 'https://utp.to/api/torrents/filter' self.torrent_url = 'https://utp.to/api/torrents/' self.upload_url = 'https://utp.to/api/torrents/upload' - self.signature = f"\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = [] pass @@ -37,12 +37,12 @@ async def upload(self, meta): resolution_id = await self.get_res_id(meta['resolution']) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2obool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -52,31 +52,31 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[UTOPIA]{meta['clean_name']}.torrent", 'rb') files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")} data = { - 'name' : meta['name'], - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': meta['name'], + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 @@ -94,29 +94,25 @@ async def upload(self, meta): 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, 
headers=headers, params=params)
            try:
                console.print(response.json())
-            except:
+            except Exception:
                console.print("It may have uploaded, go check")
                return
        else:
-            console.print(f"[cyan]Request Data:")
+            console.print("[cyan]Request Data:")
            console.print(data)
        open_torrent.close()
 
-
-
-
    async def get_cat_id(self, category_name, edition):
        category_id = {
            'MOVIE': '1',
            'TV': '2',
            'FANRES': '3'
-            }.get(category_name, '0')
+        }.get(category_name, '0')
        if category_name == 'MOVIE' and 'FANRES' in edition:
            category_id = '3'
        return category_id
@@ -129,7 +125,7 @@ async def get_type_id(self, type):
            'WEBRIP': '5',
            'HDTV': '6',
            'ENCODE': '3'
-            }.get(type, '0')
+        }.get(type, '0')
        return type_id
 
    async def get_res_id(self, resolution):
@@ -138,22 +134,19 @@
            '4320p': '1',
            '2160p': '2',
            '1080p': '3',
            '1080i': '4'
-            }.get(resolution, '1')
+        }.get(resolution, '1')
        return resolution_id
 
-
-
-
    async def search_existing(self, meta):
        dupes = []
        console.print("[yellow]Searching for existing torrents on site...")
        params = {
-            'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(),
-            'tmdbId' : meta['tmdb'],
-            'categories[]' : await self.get_cat_id(meta['category'], meta.get('edition', '')),
-            'types[]' : await self.get_type_id(meta['type']),
-            'resolutions[]' : await self.get_res_id(meta['resolution']),
-            'name' : ""
+            'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(),
+            'tmdbId': meta['tmdb'],
+            'categories[]': await self.get_cat_id(meta['category'], meta.get('edition', '')),
+            'types[]': await self.get_type_id(meta['type']),
+            'resolutions[]': await self.get_res_id(meta['resolution']),
+            'name': ""
        }
        if meta['category'] == 'TV':
            params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}"
@@ -167,7 +160,7 @@ async def search_existing(self, meta):
                # difference = SequenceMatcher(None, meta['clean_name'], result).ratio()
                # if difference >= 0.05:
                dupes.append(result)
-        except:
+        except Exception:
            console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect')
            await asyncio.sleep(5)

From 4e508c04981b9b7980e2a7fa2df058f53a19eb77 Mon Sep 17 00:00:00 2001
From: Tiberio <90521592+tiberio87@users.noreply.github.com>
Date: Sun, 1 Sep 2024 12:10:15 +0200
Subject: [PATCH 137/741] Update README.md

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 4f32372b0..b163b3707 100644
--- a/README.md
+++ b/README.md
@@ -15,7 +15,7 @@ A simple tool to take the work out of uploading.
   - Can re-use existing torrents instead of hashing new
   - Generates proper name for your upload using Mediainfo/BDInfo and TMDb/IMDb conforming to site rules
   - Checks for existing releases already on site
-  - Uploads to PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN/RTF/OTW/FNP/CBR/UTP/SHRI
+  - Uploads to PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN/RTF/OTW/FNP/CBR/UTP/HDB/AL/SHRI
   - Adds to your client with fast resume, seeding instantly (rtorrent/qbittorrent/deluge/watch folder)
   - ALL WITH MINIMAL INPUT!
   - Currently works with .mkv/.mp4/Blu-ray/DVD/HD-DVDs
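
The two commits that follow wire a new site into the script in the usual two steps: a per-tracker block in data/example-config.py, then an import plus entries in upload.py's api_trackers list and tracker_class_map. A hedged sketch of the config shape and of how the tracker classes read it ('XYZ' is a hypothetical site key, not one of the sites added here):

config = {
    'TRACKERS': {
        'XYZ': {
            'api_key': 'XYZ api key',
            'announce_url': 'https://xyz.example/announce/customannounceurl',
            # 'anon': False
        },
    },
}

# Tracker classes pull their section the same way the diffs above do:
api_key = config['TRACKERS']['XYZ']['api_key'].strip()
announce = config['TRACKERS']['XYZ']['announce_url']
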

From 5f88767be6b165c3c2972daa7014925039d666d1 Mon Sep 17 00:00:00 2001
From: Tiberio <90521592+tiberio87@users.noreply.github.com>
Date: Sun, 1 Sep 2024 12:13:19 +0200
Subject: [PATCH 138/741] Update example-config.py

---
 data/example-config.py | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/data/example-config.py b/data/example-config.py
index 6b6f2bbec..fede8f8b5 100644
--- a/data/example-config.py
+++ b/data/example-config.py
@@ -214,6 +214,18 @@
        "announce_url" : "https://UTP/announce/customannounceurl",
        # "anon" : False
    },
+    "AL": {
+        "api_key": "AL api key",
+        "announce_url": "https://animelovers.club/announce/customannounceurl",
+        # "anon" : False
+    },
+    "HDB": {
+        "useAPI": True,
+        "username": "HDB username",
+        "passkey": "HDB passkey",
+        "announce_url": "https://hdbits.org/announce/Custom_Announce_URL",
+        "anon": False,
+    },
    "SHRI" :{
        "api_key" : "SHRI api key",
        "announce_url" : "https://shareisland.org/announce/customannounceurl",

From add55a8db28623e729e7d1d5f6381a437ff1a082 Mon Sep 17 00:00:00 2001
From: Tiberio <90521592+tiberio87@users.noreply.github.com>
Date: Sun, 1 Sep 2024 12:18:05 +0200
Subject: [PATCH 139/741] Update upload.py

---
 upload.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/upload.py b/upload.py
index 9aebc13b4..3d94576fc 100644
--- a/upload.py
+++ b/upload.py
@@ -37,6 +37,7 @@
 from src.trackers.FNP import FNP
 from src.trackers.CBR import CBR
 from src.trackers.UTP import UTP
+from src.trackers.AL import AL
 from src.trackers.SHRI import SHRI
 import json
 from pathlib import Path
@@ -248,12 +249,12 @@ async def do_the_thing(base_dir):
    ####### Upload to Trackers #######
    ####################################
    common = COMMON(config=config)
-    api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM','LCD','LST','HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'OTW', 'FNP', 'CBR', 'UTP','SHRI']
+    api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM','LCD','LST','HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'OTW', 'FNP', 'CBR', 'UTP','AL', 'HDB', 'SHRI']
    http_trackers = ['HDB', 'TTG', 'FL', 'PTER', 'HDT', 'MTV']
    tracker_class_map = {
        'BLU' : BLU, 'BHD': BHD, 'AITHER' : AITHER, 'STC' : STC, 'R4E' : R4E, 'THR' : THR, 'STT' : STT, 'HP' : HP, 'PTP' : PTP, 'RF' : RF, 'SN' : SN, 'ACM' : ACM, 'HDB' : HDB, 'LCD': LCD, 'TTG' : TTG, 'LST' : LST, 'HUNO': HUNO, 'FL' : FL, 'LT' : LT, 'NBL' : NBL, 'ANT' : ANT, 'PTER': PTER, 'JPTV' : JPTV,
-        'TL' : TL, 'TDC' : TDC, 'HDT' : HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF':RTF, 'OTW': OTW, 'FNP': FNP, 'CBR': CBR, 'UTP': UTP, 'SHRI': SHRI}
+        'TL' : TL, 'TDC' : TDC, 'HDT' : HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF':RTF, 'OTW': OTW, 'FNP': FNP, 'CBR': CBR, 'UTP': UTP, 'AL': AL, 'SHRI': SHRI}
 
    for tracker in trackers:
        if meta['name'].endswith('DUPE?'):
@@ -586,4 +587,4 @@ def get_missing(meta):
        loop = asyncio.get_event_loop()
        loop.run_until_complete(do_the_thing(base_dir))
    else:
-        asyncio.run(do_the_thing(base_dir))
\ No newline at end of file
+        asyncio.run(do_the_thing(base_dir))

From e42c87ed96e51f5009c1e94c2820497a9704611b Mon Sep 17 00:00:00 2001
From: Audionut
Date: Sun, 1 Sep 2024 20:29:22 +1000
Subject: [PATCH 140/741] Improve HDB searching

---
 src/bbcode.py       | 10 ++++++--
 src/prep.py         | 22 ++++++++--------
 src/trackers/HDB.py | 61 +++++++++++++++++++++++++++++++--------------
 3 files changed, 61 insertions(+), 32 deletions(-)
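
The HDB.py portion of this commit switches folder (disc) uploads to searching HDB by the Blu-ray disc title, which it pulls out of the BDInfo summary written earlier to BD_SUMMARY_00.txt. A stripped-down sketch of that extraction, mirroring the code further down (error handling trimmed to the file-missing case):

def read_disc_title(bd_summary_path):
    # Returns the value after 'Disc Title:' in a BDInfo summary, or None.
    try:
        with open(bd_summary_path, 'r', encoding='utf-8') as file:
            for line in file:
                if "Disc Title:" in line:
                    return line.split("Disc Title:")[1].strip()
    except FileNotFoundError:
        return None
    return None
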
diff --git a/src/bbcode.py b/src/bbcode.py
index 1afd03bb4..6ae031d1a 100644
--- a/src/bbcode.py
+++ b/src/bbcode.py
@@ -224,8 +224,14 @@ def clean_unit3d_description(self, desc, site):
            desc = desc.replace(center, cleaned_center.strip())
 
        # Remove bot signatures
-        bot_signature_regex = r"\[center\]\s*\[img=\d+\]https:\/\/blutopia\.xyz\/favicon\.ico\[\/img\]\s*\[b\]Uploaded Using \[url=https:\/\/github\.com\/HDInnovations\/UNIT3D\]UNIT3D\[\/url\] Auto Uploader\[\/b\]\s*\[img=\d+\]https:\/\/blutopia\.xyz\/favicon\.ico\[\/img\]\s*\[\/center\]"
-        desc = re.sub(bot_signature_regex, "", desc, flags=re.IGNORECASE)
+        bot_signature_regex = r"""
+            \[center\]\s*\[img=\d+\]https:\/\/blutopia\.xyz\/favicon\.ico\[\/img\]\s*\[b\]
+            Uploaded\sUsing\s\[url=https:\/\/github\.com\/HDInnovations\/UNIT3D\]UNIT3D\[\/url\]\s
+            Auto\sUploader\[\/b\]\s*\[img=\d+\]https:\/\/blutopia\.xyz\/favicon\.ico\[\/img\]\s*\[\/center\]|
+            \[center\]\s*\[b\]Uploaded\sUsing\s\[url=https:\/\/github\.com\/HDInnovations\/UNIT3D\]UNIT3D\[\/url\]
+            \sAuto\sUploader\[\/b\]\s*\[\/center\]
+        """
+        desc = re.sub(bot_signature_regex, "", desc, flags=re.IGNORECASE | re.VERBOSE)
        desc = re.sub(r"\[center\].*Created by L4G's Upload Assistant.*\[\/center\]", "", desc, flags=re.IGNORECASE)
 
        # Ensure no dangling tags and remove extra blank lines
diff --git a/src/prep.py b/src/prep.py
index ecf2d5806..3f8a63cab 100644
--- a/src/prep.py
+++ b/src/prep.py
@@ -230,7 +230,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met
                console.print("[yellow]No ID found in meta for HDB, searching by file name[/yellow]")
 
                # Use search_filename function if ID is not found in meta
-                imdb, tvdb_id, hdb_name, meta['ext_torrenthash'], tracker_id = await tracker_instance.search_filename(search_term, search_file_folder)
+                imdb, tvdb_id, hdb_name, meta['ext_torrenthash'], tracker_id = await tracker_instance.search_filename(search_term, search_file_folder, meta)
                meta['tvdb_id'] = str(tvdb_id) if tvdb_id else meta.get('tvdb_id')
                meta['hdb_name'] = hdb_name
@@ -241,7 +241,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met
        if found_match:
            if imdb or tvdb_id or hdb_name:
                console.print(f"[green]{tracker_name} data found: IMDb ID: {imdb}, TVDb ID: {meta['tvdb_id']}, HDB Name: {meta['hdb_name']}[/green]")
-                if await self.prompt_user_for_confirmation(f"Do you want to keep the data found on {tracker_name}?"):
+                if await self.prompt_user_for_confirmation(f"Do you want to use the ID's found on {tracker_name}?"):
                    console.print(f"[green]{tracker_name} data retained.[/green]")
                else:
                    console.print(f"[yellow]{tracker_name} data discarded.[/yellow]")
@@ -407,15 +407,6 @@ async def gather_prep(self, meta, mode):
                        found_match = True
                # console.print(f"[blue]PTP search complete, found_match: {found_match}[/blue]")
 
-            if "HDB" in default_trackers and not found_match:
-                if str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true":
-                    # console.print(f"[blue]Searching HDB for: {search_term}[/blue]")
-                    hdb = HDB(config=self.config)
-                    meta, match = await self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder)
-                    if match:
-                        found_match = True
-                    # console.print(f"[blue]HDB search complete, found_match: {found_match}[/blue]")
-
            if "BLU" in default_trackers and not found_match:
                if str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true":
                    # console.print(f"[blue]Searching BLU for: {search_term}[/blue]")
@@ -425,6 +416,15 @@ async def gather_prep(self, meta, mode):
                        found_match = True
                    # console.print(f"[blue]BLU search
complete, found_match: {found_match}[/blue]") + if "HDB" in default_trackers and not found_match: + if str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": + # console.print(f"[blue]Searching HDB for: {search_term}[/blue]") + hdb = HDB(config=self.config) + meta, match = await self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder) + if match: + found_match = True + # console.print(f"[blue]HDB search complete, found_match: {found_match}[/blue]") + if not found_match: console.print("[yellow]No matches found on any trackers.[/yellow]") else: diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py index 97e8710dd..921a64d96 100644 --- a/src/trackers/HDB.py +++ b/src/trackers/HDB.py @@ -509,26 +509,49 @@ async def get_info_from_torrent_id(self, hdb_id): console.print("Failed to get info from HDB ID. Either the site is down or your credentials are invalid") return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash - async def search_filename(self, search_term, search_file_folder): + async def search_filename(self, search_term, search_file_folder, meta): hdb_imdb = hdb_tvdb = hdb_name = hdb_torrenthash = hdb_id = None url = "https://hdbits.org/api/torrents" - if search_file_folder == 'folder': # Handling disc case - data = { - "username": self.username, - "passkey": self.passkey, - "limit": 100, - "folder_in_torrent": os.path.basename(search_term) # Using folder name for search - } - console.print(f"[green]Searching HDB for folder: [bold yellow]{os.path.basename(search_term)}[/bold yellow]") + # Handle disc case + if search_file_folder == 'folder' and meta.get('is_disc'): + bd_summary_path = os.path.join(meta['base_dir'], 'tmp', meta['uuid'], 'BD_SUMMARY_00.txt') + bd_summary = None + + # Parse the BD_SUMMARY_00.txt file to extract the Disc Title + try: + with open(bd_summary_path, 'r', encoding='utf-8') as file: + for line in file: + if "Disc Title:" in line: + bd_summary = line.split("Disc Title:")[1].strip() + break + + if bd_summary: + data = { + "username": self.username, + "passkey": self.passkey, + "limit": 100, + "search": bd_summary # Using the Disc Title for search + } + console.print(f"[green]Searching HDB for disc title: [bold yellow]{bd_summary}[/bold yellow]") + console.print(f"[yellow]Using this data: {data}") + else: + console.print(f"[red]Error: 'Disc Title' not found in {bd_summary_path}[/red]") + return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash, hdb_id + + except FileNotFoundError: + console.print(f"[red]Error: File not found at {bd_summary_path}[/red]") + return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash, hdb_id + else: # Handling non-disc case data = { "username": self.username, "passkey": self.passkey, "limit": 100, - "file_in_torrent": os.path.basename(search_term) # Using filename for search + "file_in_torrent": os.path.basename(search_term) } console.print(f"[green]Searching HDB for file: [bold yellow]{os.path.basename(search_term)}[/bold yellow]") + console.print(f"[yellow]Using this data: {data}") response = requests.get(url, json=data) @@ -537,21 +560,21 @@ async def search_filename(self, search_term, search_file_folder): response_json = response.json() # console.print(f"[green]HDB API response: {response_json}[/green]") # Log the entire response for debugging - # Check if 'data' key is present if 'data' not in response_json: console.print(f"[red]Error: 'data' key not found in HDB API response. 
Full response: {response_json}[/red]")
                return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash, hdb_id
 
            if response_json['data'] != []:
                for each in response_json['data']:
-                    if search_file_folder == 'folder' or each['numfiles'] == len(search_term):  # Handle folder or filelist match
-                        hdb_imdb = each.get('imdb', {'id': None}).get('id')
-                        hdb_tvdb = each.get('tvdb', {'id': None}).get('id')
-                        hdb_name = each['name']
-                        hdb_torrenthash = each['hash']
-                        hdb_id = each['id']
-                        console.print(f'[bold green]Matched release with HDB ID: [yellow]https://hdbits.org/details.php?id={hdb_id}[/yellow][/bold green]')
-                        return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash, hdb_id
+                    hdb_imdb = each.get('imdb', {'id': None}).get('id')
+                    hdb_tvdb = each.get('tvdb', {'id': None}).get('id')
+                    hdb_name = each['name']
+                    hdb_torrenthash = each['hash']
+                    hdb_id = each['id']
+                    console.print(f'[bold green]Matched release with HDB ID: [yellow]https://hdbits.org/details.php?id={hdb_id}[/yellow][/bold green]')
+                    return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash, hdb_id
+            else:
+                console.print('[yellow]No data found in the HDB API response[/yellow]')
        except Exception as e:
            console.print_exception()
            console.print(f"[red]Failed to parse HDB API response. Error: {str(e)}[/red]")

From 67ef332ea1da2f1fb98553a622c3f3e443f5038e Mon Sep 17 00:00:00 2001
From: Audionut
Date: Sun, 1 Sep 2024 21:01:02 +1000
Subject: [PATCH 141/741] Improved BLU description handling

---
 src/prep.py            |  99 +++++++++++++---------------
 src/trackers/COMMON.py | 142 +++++++++++++++++++++--------------------
 2 files changed, 119 insertions(+), 122 deletions(-)

diff --git a/src/prep.py b/src/prep.py
index 3f8a63cab..51cdac16d 100644
--- a/src/prep.py
+++ b/src/prep.py
@@ -90,11 +90,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met
        manual_key = f"{tracker_key}_manual"
        found_match = False
 
-        # console.print(f"[cyan]Starting update_metadata_from_tracker for: {tracker_name}[/cyan]")
-
-        # Handle each tracker separately
        if tracker_name == "BLU":
-            # console.print(f"[blue]Handling BLU tracker[/blue]")
            if meta.get(tracker_key) is not None:
                console.print(f"[cyan]{tracker_name} ID found in meta, reusing existing ID: {meta[tracker_key]}[/cyan]")
                blu_tmdb, blu_imdb, blu_tvdb, blu_mal, blu_desc, blu_category, meta['ext_torrenthash'], blu_imagelist, blu_filename = await COMMON(self.config).unit3d_torrent_info(
@@ -103,33 +99,29 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met
                    tracker_instance.search_url,
                    id=meta[tracker_key]
                )
-                # console.print(f"[blue]BLU search by ID complete[/blue]")
 
                if blu_tmdb not in [None, '0'] or blu_imdb not in [None, '0'] or blu_tvdb not in [None, '0']:
                    console.print(f"[green]Valid data found on {tracker_name}, setting meta values[/green]")
-                    if await self.prompt_user_for_id_selection(blu_tmdb, blu_imdb, blu_tvdb, blu_filename):
-                        if blu_tmdb not in [None, '0']:
-                            meta['tmdb_manual'] = blu_tmdb
-                        if blu_imdb not in [None, '0']:
-                            meta['imdb'] = str(blu_imdb).zfill(7)  # Pad IMDb ID with leading zeros
-                        if blu_tvdb not in [None, '0']:
-                            meta['tvdb_id'] = blu_tvdb
-                        if blu_mal not in [None, '0']:
-                            meta['mal'] = blu_mal
-                        if blu_desc not in [None, '0', '']:
-                            meta['blu_desc'] = blu_desc
-                        if blu_category.upper() in ['MOVIE', 'TV SHOW', 'FANRES']:
-                            meta['category'] = 'TV' if blu_category.upper() == 'TV SHOW' else blu_category.upper()
-                        if not meta.get('image_list'):  # Only handle images if image_list is not already populated
-                            if blu_imagelist:  # Ensure blu_imagelist is not empty
before setting - meta['image_list'] = blu_imagelist - if meta.get('image_list'): # Double-check if image_list is set before handling it - await self.handle_image_list(meta, tracker_name) - if blu_filename: - meta['blu_filename'] = blu_filename # Store the filename in meta for later use - found_match = True - console.print("[green]BLU data successfully updated in meta[/green]") - else: - console.print(f"[yellow]Skipped {tracker_name}, moving to the next site.[/yellow]") + if blu_tmdb not in [None, '0']: + meta['tmdb_manual'] = blu_tmdb + if blu_imdb not in [None, '0']: + meta['imdb'] = str(blu_imdb).zfill(7) # Pad IMDb ID with leading zeros + if blu_tvdb not in [None, '0']: + meta['tvdb_id'] = blu_tvdb + if blu_mal not in [None, '0']: + meta['mal'] = blu_mal + if blu_desc not in [None, '0', '']: + meta['blu_desc'] = blu_desc + if blu_category.upper() in ['MOVIE', 'TV SHOW', 'FANRES']: + meta['category'] = 'TV' if blu_category.upper() == 'TV SHOW' else blu_category.upper() + if not meta.get('image_list'): # Only handle images if image_list is not already populated + if blu_imagelist: # Ensure blu_imagelist is not empty before setting + meta['image_list'] = blu_imagelist + if meta.get('image_list'): # Double-check if image_list is set before handling it + await self.handle_image_list(meta, tracker_name) + if blu_filename: + meta['blu_filename'] = blu_filename # Store the filename in meta for later use + found_match = True + console.print("[green]BLU data successfully updated in meta[/green]") else: console.print(f"[yellow]No valid data found on {tracker_name}[/yellow]") else: @@ -140,33 +132,32 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met tracker_instance.search_url, file_name=search_term ) - # console.print(f"[blue]BLU search by file name complete[/blue]") + if blu_tmdb not in [None, '0'] or blu_imdb not in [None, '0'] or blu_tvdb not in [None, '0']: console.print(f"[green]Valid data found on {tracker_name} using file name, setting meta values[/green]") - if await self.prompt_user_for_id_selection(blu_tmdb, blu_imdb, blu_tvdb, blu_filename): - if blu_tmdb not in [None, '0']: - meta['tmdb_manual'] = blu_tmdb - if blu_imdb not in [None, '0']: - meta['imdb'] = str(blu_imdb).zfill(7) - if blu_tvdb not in [None, '0']: - meta['tvdb_id'] = blu_tvdb - if blu_mal not in [None, '0']: - meta['mal'] = blu_mal - if blu_desc not in [None, '0', '']: - meta['blu_desc'] = blu_desc - if blu_category.upper() in ['MOVIE', 'TV SHOW', 'FANRES']: - meta['category'] = 'TV' if blu_category.upper() == 'TV SHOW' else blu_category.upper() - if not meta.get('image_list'): # Only handle images if image_list is not already populated - if blu_imagelist: # Ensure blu_imagelist is not empty before setting - meta['image_list'] = blu_imagelist - if meta.get('image_list'): # Double-check if image_list is set before handling it - await self.handle_image_list(meta, tracker_name) - if blu_filename: - meta['blu_filename'] = blu_filename - found_match = True - console.print("[green]BLU data successfully updated in meta[/green]") - else: - console.print(f"[yellow]Skipped {tracker_name}, moving to the next site.[/yellow]") + + if blu_tmdb not in [None, '0']: + meta['tmdb_manual'] = blu_tmdb + if blu_imdb not in [None, '0']: + meta['imdb'] = str(blu_imdb).zfill(7) + if blu_tvdb not in [None, '0']: + meta['tvdb_id'] = blu_tvdb + if blu_mal not in [None, '0']: + meta['mal'] = blu_mal + if blu_desc not in [None, '0', '']: + meta['blu_desc'] = blu_desc + if blu_category.upper() in ['MOVIE', 'TV SHOW', 
'FANRES']: + meta['category'] = 'TV' if blu_category.upper() == 'TV SHOW' else blu_category.upper() + if not meta.get('image_list'): # Only handle images if image_list is not already populated + if blu_imagelist: # Ensure blu_imagelist is not empty before setting + meta['image_list'] = blu_imagelist + if meta.get('image_list'): # Double-check if image_list is set before handling it + await self.handle_image_list(meta, tracker_name) + if blu_filename: + meta['blu_filename'] = blu_filename + + found_match = True + console.print("[green]BLU data successfully updated in meta[/green]") else: console.print(f"[yellow]No valid data found on {tracker_name}[/yellow]") diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index b46b12e2d..5cd5f7866 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -142,6 +142,26 @@ async def unit3d_distributor_ids(self, distributor): }.get(distributor, 0) return distributor_id + async def prompt_user_for_id_selection(self, blu_tmdb=None, blu_imdb=None, blu_tvdb=None, blu_filename=None, imdb=None): + if imdb: + imdb = str(imdb).zfill(7) # Convert to string and ensure IMDb ID is 7 characters long by adding leading zeros + console.print(f"[cyan]Found IMDb ID: https://www.imdb.com/title/tt{imdb}[/cyan]") + if blu_tmdb or blu_imdb or blu_tvdb: + if blu_imdb: + blu_imdb = str(blu_imdb).zfill(7) # Convert to string and ensure IMDb ID is 7 characters long by adding leading zeros + console.print("[cyan]Found the following IDs on BLU:") + console.print(f"TMDb ID: {blu_tmdb}") + console.print(f"IMDb ID: https://www.imdb.com/title/tt{blu_imdb}") + console.print(f"TVDb ID: {blu_tvdb}") + console.print(f"Filename: {blu_filename}") # Ensure filename is printed if available + + selection = input("Do you want to use this ID? 
(y/n): ").strip().lower() + return selection == 'y' + + async def prompt_user_for_confirmation(self, message): + selection = input(f"{message} (y/n): ").strip().lower() + return selection == 'y' + async def unit3d_torrent_info(self, tracker, torrent_url, search_url, id=None, file_name=None): tmdb = imdb = tvdb = description = category = infohash = mal = files = None # noqa F841 imagelist = [] @@ -161,14 +181,13 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, id=None, f return None, None, None, None, None, None, None, None, None response = requests.get(url=url, params=params) - # console.print(f"Requested URL: {response.url}") - # console.print(f"Status Code: {response.status_code}") try: json_response = response.json() - # console.print(json_response) + + # console.print(f"[blue]Raw API Response: {json_response}[/blue]") + except ValueError: - # console.print(f"Response Text: {response.text}") return None, None, None, None, None, None, None, None, None try: @@ -185,76 +204,63 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, id=None, f mal = attributes.get('mal_id') imdb = attributes.get('imdb_id') infohash = attributes.get('info_hash') - - if description: - bbcode = BBCODE() - description, imagelist = bbcode.clean_unit3d_description(description, torrent_url) - console.print(f"[green]Successfully grabbed description from {tracker}") - console.print(f"[blue]Extracted description: [yellow]{description}") - - # Allow user to edit or discard the description - console.print("[cyan]Do you want to edit, discard or keep the description?[/cyan]") - edit_choice = input("[cyan]Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: [/cyan]") - - if edit_choice.lower() == 'e': - edited_description = click.edit(description) - if edited_description: - description = edited_description.strip() - console.print(f"[green]Final description after editing:[/green] {description}") - elif edit_choice.lower() == 'd': - description = None - console.print("[yellow]Description discarded.[/yellow]") - else: - console.print("[green]Keeping the original description.[/green]") - else: - console.print(f"[yellow]No description found for {tracker}.[/yellow]") else: - console.print(f"[yellow]No data found in the response for {tracker} when searching by file name.[/yellow]") - - # Handle response when searching by ID - if id and not data: - attributes = json_response.get('attributes', {}) - - # Extract data from the attributes - category = attributes.get('category') - description = attributes.get('description') - tmdb = attributes.get('tmdb_id') - tvdb = attributes.get('tvdb_id') - mal = attributes.get('mal_id') - imdb = attributes.get('imdb_id') - infohash = attributes.get('info_hash') - - if description: - bbcode = BBCODE() - description, imagelist = bbcode.clean_unit3d_description(description, torrent_url) - console.print(f"[green]Successfully grabbed description from {tracker}") - console.print(f"[blue]Extracted description: [yellow]{description}") - - # Allow user to edit or discard the description - console.print("[cyan]Do you want to edit, discard or keep the description?[/cyan]") - edit_choice = input("[cyan]Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: [/cyan]") - - if edit_choice.lower() == 'e': - edited_description = click.edit(description) - if edited_description: - description = edited_description.strip() - console.print(f"[green]Final description after editing:[/green] {description}") - elif edit_choice.lower() == 'd': - 
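
The edit flow shown above leans on click.edit(), which opens the user's configured editor and returns None when the editor is closed without saving, hence the truthiness check before stripping. A minimal sketch of the same pattern, assuming click is installed:

    import click

    description = "original description"
    edited = click.edit(description)  # returns None if the editor exits without saving
    if edited is not None:
        description = edited.strip()
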
description = None - console.print("[yellow]Description discarded.[/yellow]") - else: - console.print("[green]Keeping the original description.[/green]") + # Handle response when searching by ID + if id and not data: + attributes = json_response.get('attributes', {}) + + # Extract data from the attributes + category = attributes.get('category') + description = attributes.get('description') + tmdb = attributes.get('tmdb_id') + tvdb = attributes.get('tvdb_id') + mal = attributes.get('mal_id') + imdb = attributes.get('imdb_id') + infohash = attributes.get('info_hash') + + # Handle file name extraction + files = attributes.get('files', []) + if files: + if len(files) == 1: + file_name = files[0]['name'] + else: + file_name = [file['name'] for file in files[:5]] # Return up to 5 filenames + + console.print(f"[blue]Extracted filename(s): {file_name}[/blue]") # Print the extracted filename(s) + + if tmdb or imdb or tvdb: + console.print(f"[green]Valid IDs found: TMDb: {tmdb}, IMDb: {imdb}, TVDb: {tvdb}[/green]") + if not await self.prompt_user_for_id_selection(tmdb, imdb, tvdb, file_name): + console.print("[yellow]User chose to skip based on IDs.[/yellow]") + return None, None, None, None, None, None, None, None, None + + if description: + bbcode = BBCODE() + description, imagelist = bbcode.clean_unit3d_description(description, torrent_url) + console.print(f"[green]Successfully grabbed description from {tracker}") + console.print(f"[blue]Extracted description: [yellow]{description}") + + # Allow user to edit or discard the description + console.print("[cyan]Do you want to edit, discard or keep the description?[/cyan]") + edit_choice = input("[cyan]Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: [/cyan]") + + if edit_choice.lower() == 'e': + edited_description = click.edit(description) + if edited_description: + description = edited_description.strip() + console.print(f"[green]Final description after editing:[/green] {description}") + elif edit_choice.lower() == 'd': + description = None + console.print("[yellow]Description discarded.[/yellow]") else: - console.print(f"[yellow]No description found for {tracker}.[/yellow]") + console.print("[green]Keeping the original description.[/green]") + + return tmdb, imdb, tvdb, mal, description, category, infohash, imagelist, file_name except Exception as e: console.print_exception() console.print(f"[yellow]Invalid Response from {tracker} API. 
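
Worth noting about the filename extraction above: a single-file torrent yields a plain string, while a multi-file torrent yields a list capped at five names. Illustrated with hypothetical data:

    files = [{'name': 'movie.mkv'}]
    file_name = files[0]['name'] if len(files) == 1 else [f['name'] for f in files[:5]]
    # -> 'movie.mkv'

    files = [{'name': f'episode{i}.mkv'} for i in range(8)]
    file_name = files[0]['name'] if len(files) == 1 else [f['name'] for f in files[:5]]
    # -> ['episode0.mkv', 'episode1.mkv', ..., 'episode4.mkv']
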
Error: {str(e)}[/yellow]") - - if description: # Ensure description is only printed if it's not None - console.print(f"[green]Final description to be returned:[/green] {description}") - - return tmdb, imdb, tvdb, mal, description, category, infohash, imagelist, file_name + return None, None, None, None, None, None, None, None, None async def parseCookieFile(self, cookiefile): """Parse a cookies.txt file and return a dictionary of key value pairs From bbea2f37f892a39444883a18a674bc1e605fb7ce Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 1 Sep 2024 21:49:27 +1000 Subject: [PATCH 142/741] Improved PTP file searching --- src/prep.py | 16 ++++++++-------- src/trackers/HDB.py | 2 +- src/trackers/PTP.py | 44 +++++++++++++++++++++++++------------------- 3 files changed, 34 insertions(+), 28 deletions(-) diff --git a/src/prep.py b/src/prep.py index 51cdac16d..304c7e333 100644 --- a/src/prep.py +++ b/src/prep.py @@ -162,20 +162,20 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met console.print(f"[yellow]No valid data found on {tracker_name}[/yellow]") elif tracker_name == "PTP": - # console.print(f"[blue]Handling PTP tracker[/blue]") - + imdb_id = None # Ensure imdb_id is defined + # Check if the PTP ID is already in meta if meta.get('ptp') is None: - # console.print(f"[yellow]No PTP ID in meta, searching by search term[/yellow]") - imdb, ptp_torrent_id, meta['ext_torrenthash'] = await tracker_instance.get_ptp_id_imdb(search_term, search_file_folder) + # No PTP ID in meta, search by search term + imdb_id, ptp_torrent_id, ptp_torrent_hash = await tracker_instance.get_ptp_id_imdb(search_term, search_file_folder, meta) if ptp_torrent_id: meta['ptp'] = ptp_torrent_id - meta['imdb'] = str(imdb).zfill(7) if imdb else None + meta['imdb'] = str(imdb_id).zfill(7) if imdb_id else None else: ptp_torrent_id = meta['ptp'] console.print(f"[cyan]PTP ID found in meta: {ptp_torrent_id}, using it to get IMDb ID[/cyan]") - imdb, _, meta['ext_torrenthash'] = await tracker_instance.get_imdb_from_torrent_id(ptp_torrent_id) - if imdb: - meta['imdb'] = str(imdb).zfill(7) + imdb_id, _, meta['ext_torrenthash'] = await tracker_instance.get_imdb_from_torrent_id(ptp_torrent_id) + if imdb_id: + meta['imdb'] = str(imdb_id).zfill(7) console.print(f"[green]IMDb ID found: tt{meta['imdb']}[/green]") else: console.print(f"[yellow]Could not find IMDb ID using PTP ID: {ptp_torrent_id}[/yellow]") diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py index 921a64d96..cbe0b8975 100644 --- a/src/trackers/HDB.py +++ b/src/trackers/HDB.py @@ -534,7 +534,7 @@ async def search_filename(self, search_term, search_file_folder, meta): "search": bd_summary # Using the Disc Title for search } console.print(f"[green]Searching HDB for disc title: [bold yellow]{bd_summary}[/bold yellow]") - console.print(f"[yellow]Using this data: {data}") + # console.print(f"[yellow]Using this data: {data}") else: console.print(f"[red]Error: 'Disc Title' not found in {bd_summary_path}[/red]") return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash, hdb_id diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index a7165408e..8377c8545 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -81,7 +81,7 @@ def __init__(self, config): ("Vietnamese", "vie", "vi"): 25, } - async def get_ptp_id_imdb(self, search_term, search_file_folder): + async def get_ptp_id_imdb(self, search_term, search_file_folder, meta): imdb_id = ptp_torrent_id = None filename = str(os.path.basename(search_term)) params = { @@ -96,41 +96,46 @@ async def 
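
The hunk that follows pairs a blocking requests.get with an awaited one-second sleep, a simple politeness throttle between successive API hits rather than true async I/O. The pattern in isolation (URL and params are placeholders):

    import asyncio
    import requests

    async def throttled_get(url, params=None, headers=None):
        response = requests.get(url, params=params, headers=headers)
        await asyncio.sleep(1)  # rate-limit successive API calls
        return response
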
get_ptp_id_imdb(self, search_term, search_file_folder): response = requests.get(url, params=params, headers=headers) await asyncio.sleep(1) console.print(f"[green]Searching PTP for: [bold yellow]{filename}[/bold yellow]") + try: if response.status_code == 200: response = response.json() + # console.print(f"[blue]Raw API Response: {response}[/blue]") + if int(response['TotalResults']) >= 1: for movie in response['Movies']: if len(movie['Torrents']) >= 1: for torrent in movie['Torrents']: - if search_file_folder == 'file': - for file in torrent['FileList']: - if file['Path'] == filename: - imdb_id = movie['ImdbId'] - ptp_torrent_id = torrent['Id'] - dummy, ptp_torrent_hash = await self.get_imdb_from_torrent_id(ptp_torrent_id) - console.print(f'[bold green]Matched release with PTP ID: [yellow]{ptp_torrent_id}[/yellow][/bold green]') - return imdb_id, ptp_torrent_id, ptp_torrent_hash - if search_file_folder == 'folder': - if str(torrent['FilePath']) == filename: + for file in torrent['FileList']: + if file['Path'] == filename: imdb_id = movie['ImdbId'] ptp_torrent_id = torrent['Id'] - dummy, ptp_torrent_hash = await self.get_imdb_from_torrent_id(ptp_torrent_id) + dummy, ptp_torrent_hash, *_ = await self.get_imdb_from_torrent_id(ptp_torrent_id) console.print(f'[bold green]Matched release with PTP ID: [yellow]{ptp_torrent_id}[/yellow][/bold green]') + + # Call get_torrent_info and print the results + tinfo = await self.get_torrent_info(imdb_id, meta) + console.print(f"[cyan]Torrent Info: {tinfo}[/cyan]") + return imdb_id, ptp_torrent_id, ptp_torrent_hash - else: - console.print(f'[yellow]Could not find any release matching [bold yellow]{filename}[/bold yellow] on PTP') - return None, None, None - elif int(response.status_code) in [400, 401, 403]: + + console.print(f'[yellow]Could not find any release matching [bold yellow]{filename}[/bold yellow] on PTP') + return None, None, None + + elif response.status_code in [400, 401, 403]: console.print(f"[bold red]PTP: {response.text}") return None, None, None - elif int(response.status_code) == 503: + + elif response.status_code == 503: console.print("[bold yellow]PTP Unavailable (503)") return None, None, None + else: return None, None, None - except Exception: - pass + + except Exception as e: + console.print(f'[red]An error occurred: {str(e)}[/red]') + console.print(f'[yellow]Could not find any release matching [bold yellow]{filename}[/bold yellow] on PTP') return None, None, None @@ -250,6 +255,7 @@ async def get_torrent_info(self, imdb, meta): tinfo = {} try: response = response.json() + # console.print(f"[blue]Raw info API Response: {response}[/blue]") # title, plot, art, year, tags, Countries, Languages for key, value in response[0].items(): if value not in (None, ""): From 447c1ad50a7970a0e0dd7a65aad4a8f2465440e7 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 1 Sep 2024 22:18:33 +1000 Subject: [PATCH 143/741] Skip user confirmation step when using manual ID --- src/prep.py | 41 ++++++++++++++++++++++------------------- src/trackers/COMMON.py | 14 +++++++++----- src/trackers/PTP.py | 16 +++++++++++++++- 3 files changed, 46 insertions(+), 25 deletions(-) diff --git a/src/prep.py b/src/prep.py index 304c7e333..f44918770 100644 --- a/src/prep.py +++ b/src/prep.py @@ -170,6 +170,11 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met if ptp_torrent_id: meta['ptp'] = ptp_torrent_id meta['imdb'] = str(imdb_id).zfill(7) if imdb_id else None + + if meta.get('imdb') and await 
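
The PTP matching rewritten in the previous patch descends Movies, then Torrents, then FileList until a stored path equals the local filename. The same walk as a standalone helper, assuming the response shape shown in the diff:

    def find_torrent_by_path(movies, filename):
        for movie in movies:
            for torrent in movie.get('Torrents', []):
                for file in torrent.get('FileList', []):
                    if file.get('Path') == filename:
                        return movie['ImdbId'], torrent['Id']
        return None, None
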
self.prompt_user_for_id_selection(imdb=meta['imdb']): + console.print(f"[green]{tracker_name} IMDb ID found: tt{meta['imdb']}[/green]") + found_match = True + else: ptp_torrent_id = meta['ptp'] console.print(f"[cyan]PTP ID found in meta: {ptp_torrent_id}, using it to get IMDb ID[/cyan]") @@ -180,10 +185,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met else: console.print(f"[yellow]Could not find IMDb ID using PTP ID: {ptp_torrent_id}[/yellow]") - if meta.get('imdb') and await self.prompt_user_for_id_selection(imdb=meta['imdb']): - console.print(f"[green]{tracker_name} IMDb ID found: tt{meta['imdb']}[/green]") - found_match = True - + # Retrieve PTP description and image list ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(meta['ptp'], meta.get('is_disc', False)) if ptp_desc.strip(): meta['description'] = ptp_desc @@ -201,9 +203,9 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met console.print("[yellow]Description discarded from PTP[/yellow]") meta['skip_gen_desc'] = True meta['description'] = None - else: - console.print(f"[yellow]Skipped {tracker_name}, moving to the next site.[/yellow]") - meta['skip_gen_desc'] = True + + console.print(f"[yellow]Skipped {tracker_name}, moving to the next site.[/yellow]") + meta['skip_gen_desc'] = True elif tracker_name == "HDB": if meta.get('hdb') is not None: @@ -217,6 +219,8 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met meta['hdb_name'] = hdb_name found_match = True + # Skip user confirmation if searching by ID + console.print(f"[green]{tracker_name} data found: IMDb ID: {imdb}, TVDb ID: {meta['tvdb_id']}, HDB Name: {meta['hdb_name']}[/green]") else: console.print("[yellow]No ID found in meta for HDB, searching by file name[/yellow]") @@ -229,20 +233,19 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met meta[tracker_key] = tracker_id found_match = True - if found_match: - if imdb or tvdb_id or hdb_name: - console.print(f"[green]{tracker_name} data found: IMDb ID: {imdb}, TVDb ID: {meta['tvdb_id']}, HDB Name: {meta['hdb_name']}[/green]") - if await self.prompt_user_for_confirmation(f"Do you want to use the ID's found on {tracker_name}?"): - console.print(f"[green]{tracker_name} data retained.[/green]") + if found_match: + if imdb or tvdb_id or hdb_name: + console.print(f"[green]{tracker_name} data found: IMDb ID: {imdb}, TVDb ID: {meta['tvdb_id']}, HDB Name: {meta['hdb_name']}[/green]") + if await self.prompt_user_for_confirmation(f"Do you want to use the ID's found on {tracker_name}?"): + console.print(f"[green]{tracker_name} data retained.[/green]") + else: + console.print(f"[yellow]{tracker_name} data discarded.[/yellow]") + meta[tracker_key] = None + meta['tvdb_id'] = None + meta['hdb_name'] = None + found_match = False else: - console.print(f"[yellow]{tracker_name} data discarded.[/yellow]") - meta[tracker_key] = None - meta['tvdb_id'] = None - meta['hdb_name'] = None found_match = False - else: - # console.print(f"[yellow]Could not find a matching release on {tracker_name}.[/yellow]") - found_match = False # console.print(f"[cyan]Finished processing tracker: {tracker_name} with found_match: {found_match}[/cyan]") return meta, found_match diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 5cd5f7866..89b4022e6 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -226,13 +226,17 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, id=None, 
f else: file_name = [file['name'] for file in files[:5]] # Return up to 5 filenames - console.print(f"[blue]Extracted filename(s): {file_name}[/blue]") # Print the extracted filename(s) + console.print(f"[blue]Extracted filename(s): {file_name}[/blue]") # Print the extracted filename(s) + + # Skip the ID selection prompt if searching by ID + console.print(f"[green]Valid IDs found: TMDb: {tmdb}, IMDb: {imdb}, TVDb: {tvdb}[/green]") if tmdb or imdb or tvdb: - console.print(f"[green]Valid IDs found: TMDb: {tmdb}, IMDb: {imdb}, TVDb: {tvdb}[/green]") - if not await self.prompt_user_for_id_selection(tmdb, imdb, tvdb, file_name): - console.print("[yellow]User chose to skip based on IDs.[/yellow]") - return None, None, None, None, None, None, None, None, None + if not id: + # Only prompt the user for ID selection if not searching by ID + if not await self.prompt_user_for_id_selection(tmdb, imdb, tvdb, file_name): + console.print("[yellow]User chose to skip based on IDs.[/yellow]") + return None, None, None, None, None, None, None, None, None if description: bbcode = BBCODE() diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 8377c8545..253591966 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -106,8 +106,9 @@ async def get_ptp_id_imdb(self, search_term, search_file_folder, meta): for movie in response['Movies']: if len(movie['Torrents']) >= 1: for torrent in movie['Torrents']: + # First, try matching in filelist > path for file in torrent['FileList']: - if file['Path'] == filename: + if file.get('Path') == filename: imdb_id = movie['ImdbId'] ptp_torrent_id = torrent['Id'] dummy, ptp_torrent_hash, *_ = await self.get_imdb_from_torrent_id(ptp_torrent_id) @@ -118,6 +119,19 @@ async def get_ptp_id_imdb(self, search_term, search_file_folder, meta): console.print(f"[cyan]Torrent Info: {tinfo}[/cyan]") return imdb_id, ptp_torrent_id, ptp_torrent_hash + + # If no match in filelist > path, check directly in filepath + if torrent.get('FilePath') == filename: + imdb_id = movie['ImdbId'] + ptp_torrent_id = torrent['Id'] + dummy, ptp_torrent_hash, *_ = await self.get_imdb_from_torrent_id(ptp_torrent_id) + console.print(f'[bold green]Matched release with PTP ID: [yellow]{ptp_torrent_id}[/yellow][/bold green]') + + # Call get_torrent_info and print the results + tinfo = await self.get_torrent_info(imdb_id, meta) + console.print(f"[cyan]Torrent Info: {tinfo}[/cyan]") + + return imdb_id, ptp_torrent_id, ptp_torrent_hash console.print(f'[yellow]Could not find any release matching [bold yellow]{filename}[/bold yellow] on PTP') return None, None, None From 8ff7f94c1b020561888e0aa27dcf874df1fba06c Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 1 Sep 2024 22:48:40 +1000 Subject: [PATCH 144/741] Skip auto ID handling if manual tracker ID is set --- src/prep.py | 55 ++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 40 insertions(+), 15 deletions(-) diff --git a/src/prep.py b/src/prep.py index f44918770..33686b61f 100644 --- a/src/prep.py +++ b/src/prep.py @@ -389,35 +389,60 @@ async def gather_prep(self, meta, mode): found_match = False if search_term: - # console.print(f"[blue]Starting search with search_term: {search_term}[/blue]") - default_trackers = self.config['TRACKERS'].get('default_trackers', "").split(", ") - - if "PTP" in default_trackers and not found_match: - if str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true": - # console.print(f"[blue]Searching PTP for: {search_term}[/blue]") + # Check if specific trackers are already set in meta + 
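
The manual-tracker cascade introduced just below checks meta for 'ptp', then 'hdb', then 'blu'. Given the same meta dict, it can equally be written as a first-match expression (a sketch, same precedence order):

    meta = {'ptp': None, 'hdb': '98765', 'blu': None}  # hypothetical values
    specific_tracker = next(
        (name for key, name in (('ptp', 'PTP'), ('hdb', 'HDB'), ('blu', 'BLU'))
         if meta.get(key)),
        None,
    )
    # -> 'HDB'
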
specific_tracker = None + if meta.get('ptp'): + specific_tracker = 'PTP' + elif meta.get('hdb'): + specific_tracker = 'HDB' + elif meta.get('blu'): + specific_tracker = 'BLU' + + # If a specific tracker is found, only process that one + if specific_tracker: + console.print(f"[blue]Processing only the {specific_tracker} tracker based on meta.[/blue]") + + if specific_tracker == 'PTP' and str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true": ptp = PTP(config=self.config) meta, match = await self.update_metadata_from_tracker('PTP', ptp, meta, search_term, search_file_folder) if match: found_match = True - # console.print(f"[blue]PTP search complete, found_match: {found_match}[/blue]") - if "BLU" in default_trackers and not found_match: - if str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": - # console.print(f"[blue]Searching BLU for: {search_term}[/blue]") + elif specific_tracker == 'BLU' and str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": blu = BLU(config=self.config) meta, match = await self.update_metadata_from_tracker('BLU', blu, meta, search_term, search_file_folder) if match: found_match = True - # console.print(f"[blue]BLU search complete, found_match: {found_match}[/blue]") - if "HDB" in default_trackers and not found_match: - if str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": - # console.print(f"[blue]Searching HDB for: {search_term}[/blue]") + elif specific_tracker == 'HDB' and str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": hdb = HDB(config=self.config) meta, match = await self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder) if match: found_match = True - # console.print(f"[blue]HDB search complete, found_match: {found_match}[/blue]") + else: + # Process all trackers if no specific tracker is set in meta + default_trackers = self.config['TRACKERS'].get('default_trackers', "").split(", ") + + if "PTP" in default_trackers and not found_match: + if str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true": + ptp = PTP(config=self.config) + meta, match = await self.update_metadata_from_tracker('PTP', ptp, meta, search_term, search_file_folder) + if match: + found_match = True + + if "BLU" in default_trackers and not found_match: + if str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": + blu = BLU(config=self.config) + meta, match = await self.update_metadata_from_tracker('BLU', blu, meta, search_term, search_file_folder) + if match: + found_match = True + + if "HDB" in default_trackers and not found_match: + if str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": + hdb = HDB(config=self.config) + meta, match = await self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder) + if match: + found_match = True if not found_match: console.print("[yellow]No matches found on any trackers.[/yellow]") From 07432deb01854caa032a43cc611c529a778a7ee0 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 1 Sep 2024 22:53:41 +1000 Subject: [PATCH 145/741] Add linter back to master --- .github/workflows/lint.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 5527ec6b5..85d13a19a 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -4,9 +4,11 @@ on: push: branches: - develop + - master pull_request: branches: - master + - develop workflow_dispatch: jobs: From 
dcdc6280afc95e161b9b734c0dfd48291ebc18d2 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 2 Sep 2024 09:26:14 +1000 Subject: [PATCH 146/741] Lint cleaning --- data/example-config.py | 12 ++-- src/trackers/SHRI.py | 121 ++++++++++++++++++++--------------------- upload.py | 12 ++-- 3 files changed, 72 insertions(+), 73 deletions(-) diff --git a/data/example-config.py b/data/example-config.py index db8ef0c7b..dfbb6a011 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -225,13 +225,13 @@ "passkey": "HDB passkey", "announce_url": "https://hdbits.org/announce/Custom_Announce_URL", "anon": False, - }, - "SHRI" :{ - "api_key" : "SHRI api key", - "announce_url" : "https://shareisland.org/announce/customannounceurl", + }, + "SHRI": { + "api_key": "SHRI api key", + "announce_url": "https://shareisland.org/announce/customannounceurl", # "anon" : "False" - }, - "MANUAL" : { + }, + "MANUAL": { # Uncomment and replace link with filebrowser (https://github.com/filebrowser/filebrowser) link to the Upload-Assistant directory, this will link to your filebrowser instead of uploading to uguu.se # "filebrowser" : "https://domain.tld/filebrowser/files/Upload-Assistant/" }, diff --git a/src/trackers/SHRI.py b/src/trackers/SHRI.py index 47fe6d614..689bfc62e 100644 --- a/src/trackers/SHRI.py +++ b/src/trackers/SHRI.py @@ -3,7 +3,6 @@ import asyncio import requests from str2bool import str2bool -import os import platform from src.trackers.COMMON import COMMON @@ -20,7 +19,7 @@ class SHRI(): """ ############################################################### - ######## EDIT ME ######## + ######## EDIT ME ######## # noqa #E266 ############################################################### # ALSO EDIT CLASS NAME ABOVE @@ -31,46 +30,46 @@ def __init__(self, config): self.source_flag = 'Shareisland' self.upload_url = 'https://shareisland.org/api/torrents/upload' self.search_url = 'https://shareisland.org/api/torrents/filter' - self.signature = f"\n[center][url=https://shareisland.org]Created by SHRI Upload Assistant[/url][/center]" + self.signature = "\n[center][url=https://shareisland.org]Created by SHRI Upload Assistant[/url][/center]" self.banned_groups = [""] pass - + async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '0') return category_id async def get_type_id(self, type): type_id = { - 'DISC': '26', + 'DISC': '26', 'REMUX': '7', - 'WEBDL': '27', - 'WEBRIP': '27', + 'WEBDL': '27', + 'WEBRIP': '27', 'HDTV': '6', 'ENCODE': '15' - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', - '4320p': '1', - '2160p': '2', - '1440p' : '3', + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', '1080p': '3', - '1080i':'4', - '720p': '5', - '576p': '6', + '1080i': '4', + '720p': '5', + '576p': '6', '576i': '7', - '480p': '8', + '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### + ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### # noqa #E266 ############################################################### async def upload(self, meta): @@ -82,12 +81,12 @@ async def upload(self, meta): await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await 
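
The anon check just below funnels the config value through str2bool so that the string "False", the string "false", and an actual boolean all behave the same. Assuming the str2bool package from PyPI:

    from str2bool import str2bool

    for raw in ("False", "false", "0", "True", "yes"):
        print(raw, bool(str2bool(str(raw))))
    # "False"/"false"/"0" -> False; "True"/"yes" -> True
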
common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -97,34 +96,34 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : meta['name'], - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': meta['name'], + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - + if region_id != 0: data['region_id'] = region_id if distributor_id != 0: @@ -136,18 +135,18 @@ async def upload(self, meta): 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - - if meta['debug'] == False: + + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() @@ -155,12 +154,12 @@ async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await 
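
The search_existing method being cleaned up here follows the usual UNIT3D filter pattern: GET the endpoint with api_token plus category/type/resolution filters, then read each result's attributes.name. Stripped to its core (endpoint and params hypothetical):

    import requests

    def find_dupes(search_url, params):
        dupes = []
        response = requests.get(url=search_url, params=params)
        for each in response.json().get('data', []):
            dupes.append(each['attributes']['name'])
        return dupes
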
self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta.get('edition', "") != "": params['name'] = params['name'] + f" {meta['edition']}" @@ -172,8 +171,8 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes \ No newline at end of file + return dupes diff --git a/upload.py b/upload.py index 446e9b1c0..5f5fde7a4 100644 --- a/upload.py +++ b/upload.py @@ -242,17 +242,17 @@ async def do_the_thing(base_dir): trackers = [s.strip().upper() for s in trackers] if meta.get('manual', False): trackers.insert(0, "MANUAL") - + #################################### - ####### Upload to Trackers ####### + ####### Upload to Trackers ####### # noqa #F266 #################################### common = COMMON(config=config) - api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM','LCD','LST','HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'OTW', 'FNP', 'CBR', 'UTP','AL', 'HDB', 'SHRI'] + api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM', 'LCD', 'LST', 'HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'HDB', 'SHRI'] http_trackers = ['HDB', 'TTG', 'FL', 'PTER', 'HDT', 'MTV'] tracker_class_map = { - 'BLU' : BLU, 'BHD': BHD, 'AITHER' : AITHER, 'STC' : STC, 'R4E' : R4E, 'THR' : THR, 'STT' : STT, 'HP' : HP, 'PTP' : PTP, 'RF' : RF, 'SN' : SN, - 'ACM' : ACM, 'HDB' : HDB, 'LCD': LCD, 'TTG' : TTG, 'LST' : LST, 'HUNO': HUNO, 'FL' : FL, 'LT' : LT, 'NBL' : NBL, 'ANT' : ANT, 'PTER': PTER, 'JPTV' : JPTV, - 'TL' : TL, 'TDC' : TDC, 'HDT' : HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF':RTF, 'OTW': OTW, 'FNP': FNP, 'CBR': CBR, 'UTP': UTP, 'AL': AL, 'SHRI': SHRI} + 'BLU': BLU, 'BHD': BHD, 'AITHER': AITHER, 'STC': STC, 'R4E': R4E, 'THR': THR, 'STT': STT, 'HP': HP, 'PTP': PTP, 'RF': RF, 'SN': SN, + 'ACM': ACM, 'HDB': HDB, 'LCD': LCD, 'TTG': TTG, 'LST': LST, 'HUNO': HUNO, 'FL': FL, 'LT': LT, 'NBL': NBL, 'ANT': ANT, 'PTER': PTER, 'JPTV': JPTV, + 'TL': TL, 'TDC': TDC, 'HDT': HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF': RTF, 'OTW': OTW, 'FNP': FNP, 'CBR': CBR, 'UTP': UTP, 'AL': AL, 'SHRI': SHRI} for tracker in trackers: if meta['name'].endswith('DUPE?'): From 4aa0d19eb1a7e029a8d7735c5dfc4413678909b4 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 2 Sep 2024 09:27:56 +1000 Subject: [PATCH 147/741] Lint --- src/trackers/HDB.py | 2 +- src/trackers/PTP.py | 22 +++++++++++----------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py index cbe0b8975..7e205a29c 100644 --- a/src/trackers/HDB.py +++ b/src/trackers/HDB.py @@ -514,7 +514,7 @@ async def search_filename(self, search_term, search_file_folder, meta): url = "https://hdbits.org/api/torrents" # Handle disc case - if search_file_folder == 'folder' and meta.get('is_disc'): + if search_file_folder == 'folder' and meta.get('is_disc'): bd_summary_path = os.path.join(meta['base_dir'], 'tmp', meta['uuid'], 'BD_SUMMARY_00.txt') bd_summary = None diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 253591966..79b479f90 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -96,12 +96,12 @@ async def get_ptp_id_imdb(self, search_term, search_file_folder, 
meta): response = requests.get(url, params=params, headers=headers) await asyncio.sleep(1) console.print(f"[green]Searching PTP for: [bold yellow]{filename}[/bold yellow]") - + try: if response.status_code == 200: response = response.json() # console.print(f"[blue]Raw API Response: {response}[/blue]") - + if int(response['TotalResults']) >= 1: for movie in response['Movies']: if len(movie['Torrents']) >= 1: @@ -113,43 +113,43 @@ async def get_ptp_id_imdb(self, search_term, search_file_folder, meta): ptp_torrent_id = torrent['Id'] dummy, ptp_torrent_hash, *_ = await self.get_imdb_from_torrent_id(ptp_torrent_id) console.print(f'[bold green]Matched release with PTP ID: [yellow]{ptp_torrent_id}[/yellow][/bold green]') - + # Call get_torrent_info and print the results tinfo = await self.get_torrent_info(imdb_id, meta) console.print(f"[cyan]Torrent Info: {tinfo}[/cyan]") - + return imdb_id, ptp_torrent_id, ptp_torrent_hash - + # If no match in filelist > path, check directly in filepath if torrent.get('FilePath') == filename: imdb_id = movie['ImdbId'] ptp_torrent_id = torrent['Id'] dummy, ptp_torrent_hash, *_ = await self.get_imdb_from_torrent_id(ptp_torrent_id) console.print(f'[bold green]Matched release with PTP ID: [yellow]{ptp_torrent_id}[/yellow][/bold green]') - + # Call get_torrent_info and print the results tinfo = await self.get_torrent_info(imdb_id, meta) console.print(f"[cyan]Torrent Info: {tinfo}[/cyan]") - + return imdb_id, ptp_torrent_id, ptp_torrent_hash console.print(f'[yellow]Could not find any release matching [bold yellow]{filename}[/bold yellow] on PTP') return None, None, None - + elif response.status_code in [400, 401, 403]: console.print(f"[bold red]PTP: {response.text}") return None, None, None - + elif response.status_code == 503: console.print("[bold yellow]PTP Unavailable (503)") return None, None, None else: return None, None, None - + except Exception as e: console.print(f'[red]An error occurred: {str(e)}[/red]') - + console.print(f'[yellow]Could not find any release matching [bold yellow]{filename}[/bold yellow] on PTP') return None, None, None From 018d5b05cd0091859da375cd491a3025b1a8aaef Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 2 Sep 2024 16:29:21 +1000 Subject: [PATCH 148/741] Allow ID searching with description and image return for Aither and LST --- src/args.py | 28 ++++++++ src/prep.py | 145 ++++++++++++++++++++--------------------- src/trackers/COMMON.py | 29 +++++---- 3 files changed, 118 insertions(+), 84 deletions(-) diff --git a/src/args.py b/src/args.py index ac8796f8b..6c9a1e9e5 100644 --- a/src/args.py +++ b/src/args.py @@ -44,6 +44,8 @@ def parse(self, args, meta): parser.add_argument('-year', '--year', dest='manual_year', nargs='?', required=False, help="Year", type=int, default=0) parser.add_argument('-ptp', '--ptp', nargs='*', required=False, help="PTP torrent id/permalink", type=str) parser.add_argument('-blu', '--blu', nargs='*', required=False, help="BLU torrent id/link", type=str) + parser.add_argument('-aither', '--aither', nargs='*', required=False, help="Aither torrent id/link", type=str) + parser.add_argument('-lst', '--lst', nargs='*', required=False, help="LST torrent id/link", type=str) parser.add_argument('-hdb', '--hdb', nargs='*', required=False, help="HDB torrent id/link", type=str) parser.add_argument('-d', '--desc', nargs='*', required=False, help="Custom Description (string)") parser.add_argument('-pb', '--desclink', nargs='*', required=False, help="Custom Description (link to hastebin/pastebin)") @@ -136,6 +138,32 @@ 
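
The --aither/--lst handling added in this patch mirrors the existing --blu parsing: accept either a bare id or a full torrent URL, and in the URL case trim any trailing slash and take the last path segment. In isolation:

    import urllib.parse

    def id_from_url(value):
        path = urllib.parse.urlparse(value).path.rstrip('/')
        return path.split('/')[-1]

    # id_from_url('https://aither.cc/torrents/12345') -> '12345'  (hypothetical URL)
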
def parse(self, args, meta): console.print('[red]Continuing without --blu') else: meta['blu'] = value2 + elif key == 'aither': + if value2.startswith('http'): + parsed = urllib.parse.urlparse(value2) + try: + aitherpath = parsed.path + if aitherpath.endswith('/'): + aitherpath = aitherpath[:-1] + meta['aither'] = aitherpath.split('/')[-1] + except Exception: + console.print('[red]Unable to parse id from url') + console.print('[red]Continuing without --aither') + else: + meta['aither'] = value2 + elif key == 'lst': + if value2.startswith('http'): + parsed = urllib.parse.urlparse(value2) + try: + lstpath = parsed.path + if lstpath.endswith('/'): + lstpath = lstpath[:-1] + meta['lst'] = lstpath.split('/')[-1] + except Exception: + console.print('[red]Unable to parse id from url') + console.print('[red]Continuing without --lst') + else: + meta['lst'] = value2 elif key == 'hdb': if value2.startswith('http'): parsed = urllib.parse.urlparse(value2) diff --git a/src/prep.py b/src/prep.py index 33686b61f..133d60db9 100644 --- a/src/prep.py +++ b/src/prep.py @@ -4,6 +4,8 @@ from src.exceptions import * # noqa: F403 from src.trackers.PTP import PTP from src.trackers.BLU import BLU +from src.trackers.AITHER import AITHER +from src.trackers.LST import LST from src.trackers.HDB import HDB from src.trackers.COMMON import COMMON @@ -65,101 +67,83 @@ def __init__(self, screens, img_host, config): self.img_host = img_host.lower() tmdb.API_KEY = config['DEFAULT']['tmdb_api'] - async def prompt_user_for_id_selection(self, blu_tmdb=None, blu_imdb=None, blu_tvdb=None, blu_filename=None, imdb=None): + async def prompt_user_for_id_selection(self, tmdb=None, imdb=None, tvdb=None, filename=None, tracker_name="BLU"): if imdb: imdb = str(imdb).zfill(7) # Convert to string and ensure IMDb ID is 7 characters long by adding leading zeros - console.print(f"[cyan]Found IMDb ID: https://www.imdb.com/title/tt{imdb}") - if blu_tmdb or blu_imdb or blu_tvdb: - if blu_imdb: - blu_imdb = str(blu_imdb).zfill(7) # Convert to string and ensure IMDb ID is 7 characters long by adding leading zeros - console.print("[cyan]Found the following IDs on BLU:") - console.print(f"TMDb ID: {blu_tmdb}") - console.print(f"IMDb ID: https://www.imdb.com/title/tt{blu_imdb}") - console.print(f"TVDb ID: {blu_tvdb}") - console.print(f"Filename: {blu_filename}") - - selection = input("Do you want to use this ID? (y/n): ").strip().lower() + console.print(f"[cyan]Found IMDb ID: https://www.imdb.com/title/tt{imdb}[/cyan]") + if tmdb or imdb or tvdb: + if imdb: + imdb = str(imdb).zfill(7) # Convert to string and ensure IMDb ID is 7 characters long by adding leading zeros + console.print(f"[cyan]Found the following IDs on {tracker_name}:") + console.print(f"TMDb ID: {tmdb}") + console.print(f"IMDb ID: https://www.imdb.com/title/tt{imdb}") + console.print(f"TVDb ID: {tvdb}") + console.print(f"Filename: {filename}") # Ensure filename is printed if available + + selection = input(f"Do you want to use these IDs from {tracker_name}? 
(y/n): ").strip().lower() return selection == 'y' async def prompt_user_for_confirmation(self, message): - selection = input(f"{message} (y/n): ").strip().lower() - return selection == 'y' + response = input(f"{message} (Y/n): ").strip().lower() + if response == '' or response == 'y': + return True + return False + + async def update_meta_with_unit3d_data(self, meta, tracker_data, tracker_name): + # Unpack the expected 9 elements, ignoring any additional ones + tmdb, imdb, tvdb, mal, desc, category, infohash, imagelist, filename, *rest = tracker_data + + if tmdb not in [None, '0']: + meta['tmdb_manual'] = tmdb + if imdb not in [None, '0']: + meta['imdb'] = str(imdb).zfill(7) + if tvdb not in [None, '0']: + meta['tvdb_id'] = tvdb + if mal not in [None, '0']: + meta['mal'] = mal + if desc not in [None, '0', '']: + meta[f'{tracker_name.lower()}_desc'] = desc + if category.upper() in ['MOVIE', 'TV SHOW', 'FANRES']: + meta['category'] = 'TV' if category.upper() == 'TV SHOW' else category.upper() + if not meta.get('image_list'): # Only handle images if image_list is not already populated + if imagelist: # Ensure imagelist is not empty before setting + meta['image_list'] = imagelist + if meta.get('image_list'): # Double-check if image_list is set before handling it + await self.handle_image_list(meta, tracker_name) + if filename: + meta[f'{tracker_name.lower()}_filename'] = filename + + console.print(f"[green]{tracker_name} data successfully updated in meta[/green]") async def update_metadata_from_tracker(self, tracker_name, tracker_instance, meta, search_term, search_file_folder): tracker_key = tracker_name.lower() manual_key = f"{tracker_key}_manual" found_match = False - if tracker_name == "BLU": + if tracker_name in ["BLU", "AITHER", "LST", ]: # Example for UNIT3D trackers if meta.get(tracker_key) is not None: console.print(f"[cyan]{tracker_name} ID found in meta, reusing existing ID: {meta[tracker_key]}[/cyan]") - blu_tmdb, blu_imdb, blu_tvdb, blu_mal, blu_desc, blu_category, meta['ext_torrenthash'], blu_imagelist, blu_filename = await COMMON(self.config).unit3d_torrent_info( - "BLU", + tracker_data = await COMMON(self.config).unit3d_torrent_info( + tracker_name, tracker_instance.torrent_url, tracker_instance.search_url, id=meta[tracker_key] ) - if blu_tmdb not in [None, '0'] or blu_imdb not in [None, '0'] or blu_tvdb not in [None, '0']: - console.print(f"[green]Valid data found on {tracker_name}, setting meta values[/green]") - if blu_tmdb not in [None, '0']: - meta['tmdb_manual'] = blu_tmdb - if blu_imdb not in [None, '0']: - meta['imdb'] = str(blu_imdb).zfill(7) # Pad IMDb ID with leading zeros - if blu_tvdb not in [None, '0']: - meta['tvdb_id'] = blu_tvdb - if blu_mal not in [None, '0']: - meta['mal'] = blu_mal - if blu_desc not in [None, '0', '']: - meta['blu_desc'] = blu_desc - if blu_category.upper() in ['MOVIE', 'TV SHOW', 'FANRES']: - meta['category'] = 'TV' if blu_category.upper() == 'TV SHOW' else blu_category.upper() - if not meta.get('image_list'): # Only handle images if image_list is not already populated - if blu_imagelist: # Ensure blu_imagelist is not empty before setting - meta['image_list'] = blu_imagelist - if meta.get('image_list'): # Double-check if image_list is set before handling it - await self.handle_image_list(meta, tracker_name) - if blu_filename: - meta['blu_filename'] = blu_filename # Store the filename in meta for later use - found_match = True - console.print("[green]BLU data successfully updated in meta[/green]") - else: - console.print(f"[yellow]No valid 
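
The validity gate above inspects only the first three tuple slots (tmdb, imdb, tvdb) and treats both None and the string '0' as empty. Standalone, with hypothetical data:

    tracker_data = ('12345', None, '0', None, '', 'MOVIE', None, [], None)
    has_ids = any(item not in [None, '0'] for item in tracker_data[:3])
    # True: tmdb ('12345') is a usable ID even though imdb and tvdb are empty
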
data found on {tracker_name}[/yellow]") else: - console.print("[yellow]No ID found in meta for BLU, searching by file name[/yellow]") - blu_tmdb, blu_imdb, blu_tvdb, blu_mal, blu_desc, blu_category, meta['ext_torrenthash'], blu_imagelist, blu_filename = await COMMON(self.config).unit3d_torrent_info( - "BLU", + console.print(f"[yellow]No ID found in meta for {tracker_name}, searching by file name[/yellow]") + tracker_data = await COMMON(self.config).unit3d_torrent_info( + tracker_name, tracker_instance.torrent_url, tracker_instance.search_url, file_name=search_term ) - if blu_tmdb not in [None, '0'] or blu_imdb not in [None, '0'] or blu_tvdb not in [None, '0']: - console.print(f"[green]Valid data found on {tracker_name} using file name, setting meta values[/green]") - - if blu_tmdb not in [None, '0']: - meta['tmdb_manual'] = blu_tmdb - if blu_imdb not in [None, '0']: - meta['imdb'] = str(blu_imdb).zfill(7) - if blu_tvdb not in [None, '0']: - meta['tvdb_id'] = blu_tvdb - if blu_mal not in [None, '0']: - meta['mal'] = blu_mal - if blu_desc not in [None, '0', '']: - meta['blu_desc'] = blu_desc - if blu_category.upper() in ['MOVIE', 'TV SHOW', 'FANRES']: - meta['category'] = 'TV' if blu_category.upper() == 'TV SHOW' else blu_category.upper() - if not meta.get('image_list'): # Only handle images if image_list is not already populated - if blu_imagelist: # Ensure blu_imagelist is not empty before setting - meta['image_list'] = blu_imagelist - if meta.get('image_list'): # Double-check if image_list is set before handling it - await self.handle_image_list(meta, tracker_name) - if blu_filename: - meta['blu_filename'] = blu_filename - - found_match = True - console.print("[green]BLU data successfully updated in meta[/green]") - else: - console.print(f"[yellow]No valid data found on {tracker_name}[/yellow]") + if any(item not in [None, '0'] for item in tracker_data[:3]): # Check for valid tmdb, imdb, or tvdb + console.print(f"[green]Valid data found on {tracker_name}, setting meta values[/green]") + await self.update_meta_with_unit3d_data(meta, tracker_data, tracker_name) + found_match = True + else: + console.print(f"[yellow]No valid data found on {tracker_name}[/yellow]") elif tracker_name == "PTP": imdb_id = None # Ensure imdb_id is defined @@ -247,7 +231,6 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met else: found_match = False - # console.print(f"[cyan]Finished processing tracker: {tracker_name} with found_match: {found_match}[/cyan]") return meta, found_match async def handle_image_list(self, meta, tracker_name): @@ -397,6 +380,10 @@ async def gather_prep(self, meta, mode): specific_tracker = 'HDB' elif meta.get('blu'): specific_tracker = 'BLU' + elif meta.get('aither'): + specific_tracker = 'AITHER' + elif meta.get('lst'): + specific_tracker = 'LST' # If a specific tracker is found, only process that one if specific_tracker: @@ -414,6 +401,18 @@ async def gather_prep(self, meta, mode): if match: found_match = True + elif specific_tracker == 'AITHER' and str(self.config['TRACKERS'].get('AITHER', {}).get('useAPI')).lower() == "true": + aither = AITHER(config=self.config) + meta, match = await self.update_metadata_from_tracker('AITHER', aither, meta, search_term, search_file_folder) + if match: + found_match = True + + elif specific_tracker == 'LST' and str(self.config['TRACKERS'].get('LST', {}).get('useAPI')).lower() == "true": + lst = LST(config=self.config) + meta, match = await self.update_metadata_from_tracker('LST', lst, meta, search_term, 
search_file_folder) + if match: + found_match = True + elif specific_tracker == 'HDB' and str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": hdb = HDB(config=self.config) meta, match = await self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 89b4022e6..9e7ebea1d 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -142,20 +142,27 @@ async def unit3d_distributor_ids(self, distributor): }.get(distributor, 0) return distributor_id - async def prompt_user_for_id_selection(self, blu_tmdb=None, blu_imdb=None, blu_tvdb=None, blu_filename=None, imdb=None): + async def prompt_user_for_id_selection(self, tmdb=None, imdb=None, tvdb=None, filename=None, tracker_name=None): + if not tracker_name: + tracker_name = "Tracker" # Fallback if tracker_name is not provided + if imdb: imdb = str(imdb).zfill(7) # Convert to string and ensure IMDb ID is 7 characters long by adding leading zeros console.print(f"[cyan]Found IMDb ID: https://www.imdb.com/title/tt{imdb}[/cyan]") - if blu_tmdb or blu_imdb or blu_tvdb: - if blu_imdb: - blu_imdb = str(blu_imdb).zfill(7) # Convert to string and ensure IMDb ID is 7 characters long by adding leading zeros - console.print("[cyan]Found the following IDs on BLU:") - console.print(f"TMDb ID: {blu_tmdb}") - console.print(f"IMDb ID: https://www.imdb.com/title/tt{blu_imdb}") - console.print(f"TVDb ID: {blu_tvdb}") - console.print(f"Filename: {blu_filename}") # Ensure filename is printed if available - - selection = input("Do you want to use this ID? (y/n): ").strip().lower() + + if any([tmdb, imdb, tvdb]): + console.print(f"[cyan]Found the following IDs on {tracker_name}:") + if tmdb: + console.print(f"TMDb ID: {tmdb}") + if imdb: + console.print(f"IMDb ID: https://www.imdb.com/title/tt{imdb}") + if tvdb: + console.print(f"TVDb ID: {tvdb}") + + if filename: + console.print(f"Filename: {filename}") # Ensure filename is printed if available + + selection = input(f"Do you want to use these IDs from {tracker_name}? 
(y/n): ").strip().lower() return selection == 'y' async def prompt_user_for_confirmation(self, message): From 541736c6a33a4df73b3303e2a31a998bda31a88a Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 2 Sep 2024 16:31:43 +1000 Subject: [PATCH 149/741] Linter and docker --- .github/workflows/docker-image.yml | 2 +- .github/workflows/lint.yml | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index 557a516bb..f33827acd 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -5,7 +5,7 @@ on: branches: - master - develop - - ID-and-Description + - unit3d-searching env: REGISTRY: ghcr.io diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 85d13a19a..4a0cc1873 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -5,6 +5,7 @@ on: branches: - develop - master + - unit3d-searching pull_request: branches: - master From daef3e2d35472a0843ffa9073f4924c3ac0684a7 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 2 Sep 2024 17:55:45 +1000 Subject: [PATCH 150/741] Capture another bot --- src/bbcode.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/bbcode.py b/src/bbcode.py index 6ae031d1a..63dd21d35 100644 --- a/src/bbcode.py +++ b/src/bbcode.py @@ -229,7 +229,8 @@ def clean_unit3d_description(self, desc, site): Uploaded\sUsing\s\[url=https:\/\/github\.com\/HDInnovations\/UNIT3D\]UNIT3D\[\/url\]\s Auto\sUploader\[\/b\]\s*\[img=\d+\]https:\/\/blutopia\.xyz\/favicon\.ico\[\/img\]\s*\[\/center\]| \[center\]\s*\[b\]Uploaded\sUsing\s\[url=https:\/\/github\.com\/HDInnovations\/UNIT3D\]UNIT3D\[\/url\] - \sAuto\sUploader\[\/b\]\s*\[\/center\] + \sAuto\sUploader\[\/b\]\s*\[\/center\]| + \[center\]\[url=https:\/\/github\.com\/z-ink\/uploadrr\]\[img=\d+\]https:\/\/i\.ibb\.co\/2NVWb0c\/uploadrr\.webp\[\/img\]\[\/url\]\[\/center\] """ desc = re.sub(bot_signature_regex, "", desc, flags=re.IGNORECASE | re.VERBOSE) desc = re.sub(r"\[center\].*Created by L4G's Upload Assistant.*\[\/center\]", "", desc, flags=re.IGNORECASE) From 9934f640934823a33ba16c12864262932aaf63b0 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 2 Sep 2024 17:57:41 +1000 Subject: [PATCH 151/741] Remove extra lint process --- .github/workflows/lint.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 4a0cc1873..85d13a19a 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -5,7 +5,6 @@ on: branches: - develop - master - - unit3d-searching pull_request: branches: - master From 58cf5ef91fcfec8a9468f468cf839d5d641b1e1b Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 2 Sep 2024 18:06:54 +1000 Subject: [PATCH 152/741] Capture the bot image --- src/bbcode.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/bbcode.py b/src/bbcode.py index 63dd21d35..6509d594e 100644 --- a/src/bbcode.py +++ b/src/bbcode.py @@ -201,6 +201,7 @@ def clean_unit3d_description(self, desc, site): # Filter out bot images from imagelist bot_image_urls = [ "https://blutopia.xyz/favicon.ico", # Example bot image URL + "https://i.ibb.co/2NVWb0c/uploadrr.webp", # Add any other known bot image URLs here ] imagelist = [img for img in imagelist if img['img_url'] not in bot_image_urls] From b4dc4aaac8dc7b350edf5796ec782cf13f01c00e Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 2 Sep 2024 19:48:41 +1000 Subject: [PATCH 153/741] Fix PTP ID and description handling --- src/prep.py | 68 
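
The bot-signature pattern extended above only works because it is compiled with re.VERBOSE, which ignores unescaped whitespace in the pattern and lets the alternation be split across lines; literal whitespace in the target text must be written as \s. A toy version of the same idea:

    import re

    desc = "[center]bot signature[/center] real description"
    pattern = r"""
        \[center\] .*? \[\/center\]
    """
    cleaned = re.sub(pattern, "", desc, flags=re.IGNORECASE | re.VERBOSE)
    # cleaned == " real description"
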
+++++++++++++++++++++------------------------ src/trackers/HDB.py | 4 +-- 2 files changed, 34 insertions(+), 38 deletions(-) diff --git a/src/prep.py b/src/prep.py index 133d60db9..2941336dd 100644 --- a/src/prep.py +++ b/src/prep.py @@ -67,22 +67,6 @@ def __init__(self, screens, img_host, config): self.img_host = img_host.lower() tmdb.API_KEY = config['DEFAULT']['tmdb_api'] - async def prompt_user_for_id_selection(self, tmdb=None, imdb=None, tvdb=None, filename=None, tracker_name="BLU"): - if imdb: - imdb = str(imdb).zfill(7) # Convert to string and ensure IMDb ID is 7 characters long by adding leading zeros - console.print(f"[cyan]Found IMDb ID: https://www.imdb.com/title/tt{imdb}[/cyan]") - if tmdb or imdb or tvdb: - if imdb: - imdb = str(imdb).zfill(7) # Convert to string and ensure IMDb ID is 7 characters long by adding leading zeros - console.print(f"[cyan]Found the following IDs on {tracker_name}:") - console.print(f"TMDb ID: {tmdb}") - console.print(f"IMDb ID: https://www.imdb.com/title/tt{imdb}") - console.print(f"TVDb ID: {tvdb}") - console.print(f"Filename: {filename}") # Ensure filename is printed if available - - selection = input(f"Do you want to use these IDs from {tracker_name}? (y/n): ").strip().lower() - return selection == 'y' - async def prompt_user_for_confirmation(self, message): response = input(f"{message} (Y/n): ").strip().lower() if response == '' or response == 'y': @@ -94,7 +78,7 @@ async def update_meta_with_unit3d_data(self, meta, tracker_data, tracker_name): tmdb, imdb, tvdb, mal, desc, category, infohash, imagelist, filename, *rest = tracker_data if tmdb not in [None, '0']: - meta['tmdb_manual'] = tmdb + meta['tmdb'] = tmdb if imdb not in [None, '0']: meta['imdb'] = str(imdb).zfill(7) if tvdb not in [None, '0']: @@ -120,7 +104,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met manual_key = f"{tracker_key}_manual" found_match = False - if tracker_name in ["BLU", "AITHER", "LST", ]: # Example for UNIT3D trackers + if tracker_name in ["BLU", "AITHER", "LST"]: # Example for UNIT3D trackers if meta.get(tracker_key) is not None: console.print(f"[cyan]{tracker_name} ID found in meta, reusing existing ID: {meta[tracker_key]}[/cyan]") tracker_data = await COMMON(self.config).unit3d_torrent_info( @@ -155,10 +139,31 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met meta['ptp'] = ptp_torrent_id meta['imdb'] = str(imdb_id).zfill(7) if imdb_id else None - if meta.get('imdb') and await self.prompt_user_for_id_selection(imdb=meta['imdb']): - console.print(f"[green]{tracker_name} IMDb ID found: tt{meta['imdb']}[/green]") + console.print(f"[green]{tracker_name} IMDb ID found: tt{meta['imdb']}[/green]") + if await self.prompt_user_for_confirmation("Do you want to use this ID data from PTP?"): + meta['skip_gen_desc'] = True found_match = True + # Retrieve PTP description and image list + ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(ptp_torrent_id, meta.get('is_disc', False)) + meta['description'] = ptp_desc + if not meta.get('image_list'): # Only handle images if image_list is not already populated + meta['image_list'] = ptp_imagelist + if meta.get('image_list'): + await self.handle_image_list(meta, tracker_name) + meta['skip_gen_desc'] = True + console.print("[green]PTP images added to metadata.[/green]") + + else: + found_match = False + meta['skip_gen_desc'] = True + meta['description'] = None + + else: + console.print("[yellow]Skipping PTP as no match found[/yellow]") + found_match 
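
A detail that matters for the confirmation flow above: prompt_user_for_confirmation (from patch 148) treats a bare Enter as yes, giving the (Y/n) prompt an accept-by-default behaviour. Reduced to its essence:

    def confirm_default_yes(message):
        response = input(f"{message} (Y/n): ").strip().lower()
        return response in ('', 'y')
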
= True + meta['skip_gen_desc'] = True + meta['description'] = None else: ptp_torrent_id = meta['ptp'] console.print(f"[cyan]PTP ID found in meta: {ptp_torrent_id}, using it to get IMDb ID[/cyan]") @@ -171,22 +176,13 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met # Retrieve PTP description and image list ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(meta['ptp'], meta.get('is_disc', False)) - if ptp_desc.strip(): - meta['description'] = ptp_desc - if not meta.get('image_list'): # Only handle images if image_list is not already populated - meta['image_list'] = ptp_imagelist - if meta.get('image_list'): - await self.handle_image_list(meta, tracker_name) - meta['skip_gen_desc'] = True - console.print("[green]PTP description and images added to metadata.[/green]") - - if await self.prompt_user_for_confirmation("Do you want to keep the description from PTP?"): - meta['skip_gen_desc'] = True - found_match = True - else: - console.print("[yellow]Description discarded from PTP[/yellow]") - meta['skip_gen_desc'] = True - meta['description'] = None + meta['description'] = ptp_desc + if not meta.get('image_list'): # Only handle images if image_list is not already populated + meta['image_list'] = ptp_imagelist + if meta.get('image_list'): + await self.handle_image_list(meta, tracker_name) + meta['skip_gen_desc'] = True + console.print("[green]PTP images added to metadata.[/green]") console.print(f"[yellow]Skipped {tracker_name}, moving to the next site.[/yellow]") meta['skip_gen_desc'] = True diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py index 7e205a29c..b07750e23 100644 --- a/src/trackers/HDB.py +++ b/src/trackers/HDB.py @@ -514,7 +514,7 @@ async def search_filename(self, search_term, search_file_folder, meta): url = "https://hdbits.org/api/torrents" # Handle disc case - if search_file_folder == 'folder' and meta.get('is_disc'): + if search_file_folder == 'folder' and meta.get('is_disc'): bd_summary_path = os.path.join(meta['base_dir'], 'tmp', meta['uuid'], 'BD_SUMMARY_00.txt') bd_summary = None @@ -551,7 +551,7 @@ async def search_filename(self, search_term, search_file_folder, meta): "file_in_torrent": os.path.basename(search_term) } console.print(f"[green]Searching HDB for file: [bold yellow]{os.path.basename(search_term)}[/bold yellow]") - console.print(f"[yellow]Using this data: {data}") + # console.print(f"[yellow]Using this data: {data}") response = requests.get(url, json=data) From b500448058b631505798b25042e225717dfe47ae Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 2 Sep 2024 19:54:27 +1000 Subject: [PATCH 154/741] Remove false console output --- src/prep.py | 3 --- src/trackers/HDB.py | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/src/prep.py b/src/prep.py index 2941336dd..00ae7eae2 100644 --- a/src/prep.py +++ b/src/prep.py @@ -184,9 +184,6 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met meta['skip_gen_desc'] = True console.print("[green]PTP images added to metadata.[/green]") - console.print(f"[yellow]Skipped {tracker_name}, moving to the next site.[/yellow]") - meta['skip_gen_desc'] = True - elif tracker_name == "HDB": if meta.get('hdb') is not None: meta[manual_key] = meta[tracker_key] diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py index b07750e23..9a4777a94 100644 --- a/src/trackers/HDB.py +++ b/src/trackers/HDB.py @@ -514,7 +514,7 @@ async def search_filename(self, search_term, search_file_folder, meta): url = "https://hdbits.org/api/torrents" # 
Handle disc case - if search_file_folder == 'folder' and meta.get('is_disc'): + if search_file_folder == 'folder' and meta.get('is_disc'): bd_summary_path = os.path.join(meta['base_dir'], 'tmp', meta['uuid'], 'BD_SUMMARY_00.txt') bd_summary = None From 8f83d9ecc90ee6d4d15720ba5dfa30568788e739 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 2 Sep 2024 21:59:38 +1000 Subject: [PATCH 155/741] Existing description image checking Will try to async the process better --- src/prep.py | 72 ++++++++++++++++++++++++++++++++++++++++----- src/trackers/PTP.py | 2 +- 2 files changed, 66 insertions(+), 8 deletions(-) diff --git a/src/prep.py b/src/prep.py index 00ae7eae2..9e6d36fd1 100644 --- a/src/prep.py +++ b/src/prep.py @@ -44,6 +44,9 @@ import cli_ui from rich.progress import Progress, TextColumn, BarColumn, TimeRemainingColumn import platform + import aiohttp + from PIL import Image + import io except ModuleNotFoundError: console.print(traceback.print_exc()) console.print('[bold red]Missing Module Found. Please reinstall required dependancies.') @@ -73,6 +76,33 @@ async def prompt_user_for_confirmation(self, message): return True return False + async def check_image_link(self, url): + async with aiohttp.ClientSession() as session: + try: + async with session.get(url) as response: + if response.status == 200: + content_type = response.headers.get('Content-Type', '').lower() + if 'image' in content_type: + # Attempt to load the image + image_data = await response.read() + try: + image = Image.open(io.BytesIO(image_data)) + image.verify() # This will check if the image is broken + console.print(f"[green]Image verified successfully: {url}[/green]") + return True + except (IOError, SyntaxError) as e: + console.print(f"[red]Image verification failed (corrupt image): {url}[/red]") + return False + else: + console.print(f"[red]Content type is not an image: {url}[/red]") + return False + else: + console.print(f"[red]Failed to retrieve image: {url} (status code: {response.status})[/red]") + return False + except Exception as e: + console.print(f"[red]Exception occurred while checking image: {url} - {str(e)}[/red]") + return False + async def update_meta_with_unit3d_data(self, meta, tracker_data, tracker_name): # Unpack the expected 9 elements, ignoring any additional ones tmdb, imdb, tvdb, mal, desc, category, infohash, imagelist, filename, *rest = tracker_data @@ -89,11 +119,21 @@ async def update_meta_with_unit3d_data(self, meta, tracker_data, tracker_name): meta[f'{tracker_name.lower()}_desc'] = desc if category.upper() in ['MOVIE', 'TV SHOW', 'FANRES']: meta['category'] = 'TV' if category.upper() == 'TV SHOW' else category.upper() + if not meta.get('image_list'): # Only handle images if image_list is not already populated if imagelist: # Ensure imagelist is not empty before setting - meta['image_list'] = imagelist - if meta.get('image_list'): # Double-check if image_list is set before handling it - await self.handle_image_list(meta, tracker_name) + valid_images = [] + for image_dict in imagelist: + img_url = image_dict.get('img_url') or image_dict.get('raw_url') # Use img_url or raw_url + if img_url and await self.check_image_link(img_url): + valid_images.append(image_dict) + else: + console.print(f"[yellow]Image link failed verification and will be skipped: {img_url}[/yellow]") + if valid_images: + meta['image_list'] = valid_images + if meta.get('image_list'): # Double-check if image_list is set before handling it + await self.handle_image_list(meta, tracker_name) + if filename: 
meta[f'{tracker_name.lower()}_filename'] = filename @@ -147,10 +187,19 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met # Retrieve PTP description and image list ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(ptp_torrent_id, meta.get('is_disc', False)) meta['description'] = ptp_desc + if not meta.get('image_list'): # Only handle images if image_list is not already populated - meta['image_list'] = ptp_imagelist - if meta.get('image_list'): + valid_images = [] + for image_dict in ptp_imagelist: + img_url = image_dict.get('img_url') or image_dict.get('raw_url') # Use img_url or raw_url + if img_url and await self.check_image_link(img_url): + valid_images.append(image_dict) + else: + console.print(f"[yellow]Image link failed verification and will be skipped: {img_url}[/yellow]") + if valid_images: + meta['image_list'] = valid_images await self.handle_image_list(meta, tracker_name) + meta['skip_gen_desc'] = True console.print("[green]PTP images added to metadata.[/green]") @@ -177,10 +226,19 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met # Retrieve PTP description and image list ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(meta['ptp'], meta.get('is_disc', False)) meta['description'] = ptp_desc + if not meta.get('image_list'): # Only handle images if image_list is not already populated - meta['image_list'] = ptp_imagelist - if meta.get('image_list'): + valid_images = [] + for image_dict in ptp_imagelist: + img_url = image_dict.get('img_url') or image_dict.get('raw_url') # Use img_url or raw_url + if img_url and await self.check_image_link(img_url): + valid_images.append(image_dict) + else: + console.print(f"[yellow]Image link failed verification and will be skipped: {img_url}[/yellow]") + if valid_images: + meta['image_list'] = valid_images await self.handle_image_list(meta, tracker_name) + meta['skip_gen_desc'] = True console.print("[green]PTP images added to metadata.[/green]") diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 79b479f90..66d5d30c7 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -211,7 +211,7 @@ async def get_ptp_description(self, ptp_torrent_id, is_disc): # Allow user to edit or discard the description console.print("[cyan]Do you want to edit, discard or keep the description?[/cyan]") - edit_choice = input("[cyan]Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: [/cyan]") + edit_choice = input("Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: ") if edit_choice.lower() == 'e': edited_description = click.edit(desc) From b8d09e8da8c3819cdf8391bec5b92066b34d9dea Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 2 Sep 2024 22:21:38 +1000 Subject: [PATCH 156/741] Proper async image checking --- src/prep.py | 39 +++++++++++++++++---------------------- 1 file changed, 17 insertions(+), 22 deletions(-) diff --git a/src/prep.py b/src/prep.py index 9e6d36fd1..026980396 100644 --- a/src/prep.py +++ b/src/prep.py @@ -76,6 +76,19 @@ async def prompt_user_for_confirmation(self, message): return True return False + async def check_images_concurrently(self, imagelist): + async def check_and_collect(image_dict): + img_url = image_dict.get('img_url') or image_dict.get('raw_url') + if img_url and await self.check_image_link(img_url): + return image_dict + else: + console.print(f"[yellow]Image link failed verification and will be skipped: {img_url}[/yellow]") + return None + + tasks = [check_and_collect(image_dict) for 
image_dict in imagelist] + results = await asyncio.gather(*tasks) + return [image for image in results if image is not None] + async def check_image_link(self, url): async with aiohttp.ClientSession() as session: try: @@ -90,7 +103,7 @@ async def check_image_link(self, url): image.verify() # This will check if the image is broken console.print(f"[green]Image verified successfully: {url}[/green]") return True - except (IOError, SyntaxError) as e: + except (IOError, SyntaxError) as e: # noqa #F841 console.print(f"[red]Image verification failed (corrupt image): {url}[/red]") return False else: @@ -122,13 +135,7 @@ async def update_meta_with_unit3d_data(self, meta, tracker_data, tracker_name): if not meta.get('image_list'): # Only handle images if image_list is not already populated if imagelist: # Ensure imagelist is not empty before setting - valid_images = [] - for image_dict in imagelist: - img_url = image_dict.get('img_url') or image_dict.get('raw_url') # Use img_url or raw_url - if img_url and await self.check_image_link(img_url): - valid_images.append(image_dict) - else: - console.print(f"[yellow]Image link failed verification and will be skipped: {img_url}[/yellow]") + valid_images = await self.check_images_concurrently(imagelist) if valid_images: meta['image_list'] = valid_images if meta.get('image_list'): # Double-check if image_list is set before handling it @@ -189,13 +196,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met meta['description'] = ptp_desc if not meta.get('image_list'): # Only handle images if image_list is not already populated - valid_images = [] - for image_dict in ptp_imagelist: - img_url = image_dict.get('img_url') or image_dict.get('raw_url') # Use img_url or raw_url - if img_url and await self.check_image_link(img_url): - valid_images.append(image_dict) - else: - console.print(f"[yellow]Image link failed verification and will be skipped: {img_url}[/yellow]") + valid_images = await self.check_images_concurrently(ptp_imagelist) if valid_images: meta['image_list'] = valid_images await self.handle_image_list(meta, tracker_name) @@ -228,13 +229,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met meta['description'] = ptp_desc if not meta.get('image_list'): # Only handle images if image_list is not already populated - valid_images = [] - for image_dict in ptp_imagelist: - img_url = image_dict.get('img_url') or image_dict.get('raw_url') # Use img_url or raw_url - if img_url and await self.check_image_link(img_url): - valid_images.append(image_dict) - else: - console.print(f"[yellow]Image link failed verification and will be skipped: {img_url}[/yellow]") + valid_images = await self.check_images_concurrently(ptp_imagelist) if valid_images: meta['image_list'] = valid_images await self.handle_image_list(meta, tracker_name) From 48db0e77bade5a0dbdaf3bac9f581d0c4ce88940 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 2 Sep 2024 22:29:10 +1000 Subject: [PATCH 157/741] Set found_match correctly --- src/prep.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index 026980396..5c7eabc20 100644 --- a/src/prep.py +++ b/src/prep.py @@ -175,6 +175,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met found_match = True else: console.print(f"[yellow]No valid data found on {tracker_name}[/yellow]") + found_match = False elif tracker_name == "PTP": imdb_id = None # Ensure imdb_id is defined @@ -211,7 +212,7 @@ async def 
update_metadata_from_tracker(self, tracker_name, tracker_instance, met else: console.print("[yellow]Skipping PTP as no match found[/yellow]") - found_match = True + found_match = False meta['skip_gen_desc'] = True meta['description'] = None else: @@ -223,6 +224,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met console.print(f"[green]IMDb ID found: tt{meta['imdb']}[/green]") else: console.print(f"[yellow]Could not find IMDb ID using PTP ID: {ptp_torrent_id}[/yellow]") + found_match = False # Retrieve PTP description and image list ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(meta['ptp'], meta.get('is_disc', False)) From ae7746b1331e3adef3eae03a09f4dd6828321170 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 2 Sep 2024 22:35:05 +1000 Subject: [PATCH 158/741] UNIT3D - enter means yes --- src/trackers/COMMON.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 9e7ebea1d..a9fb5f536 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -162,12 +162,17 @@ async def prompt_user_for_id_selection(self, tmdb=None, imdb=None, tvdb=None, fi if filename: console.print(f"Filename: {filename}") # Ensure filename is printed if available - selection = input(f"Do you want to use these IDs from {tracker_name}? (y/n): ").strip().lower() - return selection == 'y' + selection = input(f"Do you want to use these IDs from {tracker_name}? (Y/n): ").strip().lower() + if selection == '' or selection == 'y' or selection == 'yes': + return True + else: + return False async def prompt_user_for_confirmation(self, message): - selection = input(f"{message} (y/n): ").strip().lower() - return selection == 'y' + response = input(f"{message} (Y/n): ").strip().lower() + if response == '' or response == 'y': + return True + return False async def unit3d_torrent_info(self, tracker, torrent_url, search_url, id=None, file_name=None): tmdb = imdb = tvdb = description = category = infohash = mal = files = None # noqa F841 From 8d13a144d45ad8c0d2b4ce2c77263fac4d798f13 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 3 Sep 2024 14:11:49 +1000 Subject: [PATCH 159/741] Add warning if keeping images not compatible with MTV Couldn't yet figure out how to have MTV.py create screens for uploading. --- src/prep.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index 5c7eabc20..1a0dc2e91 100644 --- a/src/prep.py +++ b/src/prep.py @@ -286,12 +286,21 @@ async def handle_image_list(self, meta, tracker_name): console.print(f"[cyan]Found the following images from {tracker_name}:") for img in meta['image_list']: console.print(f"[blue]{img}[/blue]") + + approved_image_hosts = ['ptpimg', 'imgbox'] + + # Check if the images are already hosted on an approved image host + if all(any(host in img for host in approved_image_hosts) for img in meta['image_list']): + image_list = meta['image_list'] # noqa #F841 + else: + console.print("[red]Warning: Some images are not hosted on an MTV approved image host. 
MTV will fail if you keep these images.") + keep_images = await self.prompt_user_for_confirmation(f"Do you want to keep the images found on {tracker_name}?") if not keep_images: meta['image_list'] = [] - console.print(f"[yellow]Images discarded from {tracker_name}") + console.print(f"[yellow]Images discarded from {tracker_name}.") else: - console.print(f"[green]Images retained from {tracker_name}") + console.print(f"[green]Images retained from {tracker_name}.") async def gather_prep(self, meta, mode): meta['mode'] = mode From ed47d5dc6cefda918c231ed6dad79ea737b4d5a3 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 3 Sep 2024 15:02:12 +1000 Subject: [PATCH 160/741] Fix imgbox uploads Fixes https://github.com/Audionut/Upload-Assistant/issues/29 --- src/prep.py | 273 +++++++++++++++++++++++++++++----------------------- 1 file changed, 151 insertions(+), 122 deletions(-) diff --git a/src/prep.py b/src/prep.py index 1a0dc2e91..468799ca4 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2460,6 +2460,8 @@ def create_base_from_existing_torrent(self, torrentpath, base_dir, uuid): Upload Screenshots """ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=False): + import nest_asyncio + nest_asyncio.apply() os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") initial_img_host = self.config['DEFAULT'][f'img_host_{img_host_num}'] img_host = meta['imghost'] # Use the correctly updated image host from meta @@ -2475,145 +2477,172 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i image_glob.remove('POSTER.png') existing_images = meta.get('image_list', []) - # Only skip uploading if retry_mode is False and the hosts match if len(existing_images) >= total_screens and not retry_mode and img_host == initial_img_host: console.print(f"[yellow]Skipping upload because images are already uploaded to {img_host}. Existing images: {len(existing_images)}, Required: {total_screens}") return existing_images, total_screens - # Initialize the progress bar outside of the retry loop - with Progress( - TextColumn("[bold green]Uploading Screens..."), - BarColumn(), - "[cyan]{task.completed}/{task.total}", - TimeRemainingColumn() - ) as progress: - while True: - upload_task = progress.add_task(f"[green]Uploading Screens to {img_host}...", total=len(image_glob[-screens:])) + if img_host == "imgbox": + # Handle Imgbox uploads without the main progress bar + console.print("[green]Uploading Screens to Imgbox...") + image_list = asyncio.run(self.imgbox_upload(f"{meta['base_dir']}/tmp/{meta['uuid']}", image_glob)) + if not image_list: + console.print("[yellow]Imgbox failed, trying next image host") + img_host_num += 1 + img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num}') + if not img_host: + console.print("[red]All image hosts failed. 
Unable to complete uploads.") + return image_list, i + else: + return image_list, i # Return after successful Imgbox upload + else: + with Progress( + TextColumn("[bold green]Uploading Screens..."), + BarColumn(), + "[cyan]{task.completed}/{task.total}", + TimeRemainingColumn() + ) as progress: + while True: + upload_task = progress.add_task(f"[green]Uploading Screens to {img_host}...", total=len(image_glob[-screens:])) - for image in image_glob[-screens:]: - try: - timeout = 60 - if img_host == "ptpimg": - payload = { - 'format': 'json', - 'api_key': self.config['DEFAULT']['ptpimg_api'] - } - files = [('file-upload[0]', open(image, 'rb'))] - headers = {'referer': 'https://ptpimg.me/index.php'} - response = requests.post("https://ptpimg.me/upload.php", headers=headers, data=payload, files=files) - response = response.json() - ptpimg_code = response[0]['code'] - ptpimg_ext = response[0]['ext'] - img_url = f"https://ptpimg.me/{ptpimg_code}.{ptpimg_ext}" - raw_url = img_url - web_url = img_url - elif img_host == "imgbb": - url = "https://api.imgbb.com/1/upload" - data = { - 'key': self.config['DEFAULT']['imgbb_api'], - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - response = requests.post(url, data=data, timeout=timeout) - response = response.json() - img_url = response['data']['image']['url'] - raw_url = img_url - web_url = img_url - elif img_host == "ptscreens": - url = "https://ptscreens.com/api/1/upload" - data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - headers = { - 'X-API-Key': self.config['DEFAULT']['ptscreens_api'], - } - response = requests.post(url, data=data, headers=headers, timeout=timeout) - response = response.json() - if response.get('status_code') != 200: - console.print("[yellow]PT Screens failed, trying next image host") - break - img_url = response['data']['image']['url'] - raw_url = img_url - web_url = img_url - elif img_host == "pixhost": - url = "https://api.pixhost.to/images" - data = { - 'content_type': '0', - 'max_th_size': 350, - } - files = { - 'img': ('file-upload[0]', open(image, 'rb')), - } - response = requests.post(url, data=data, files=files, timeout=timeout) - if response.status_code != 200: - console.print("[yellow]Pixhost failed, trying next image host") - break - response = response.json() - raw_url = response['th_url'].replace('https://t', 'https://img').replace('/thumbs/', '/images/') - img_url = response['th_url'] - web_url = response['show_url'] - elif img_host == "lensdump": - url = "https://lensdump.com/api/1/upload" - data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - headers = { - 'X-API-Key': self.config['DEFAULT']['lensdump_api'], - } - response = requests.post(url, data=data, headers=headers, timeout=timeout) - response = response.json() - if response.get('status_code') != 200: - console.print("[yellow]Lensdump failed, trying next image host") + for image in image_glob[-screens:]: + try: + timeout = 60 + if img_host == "ptpimg": + payload = { + 'format': 'json', + 'api_key': self.config['DEFAULT']['ptpimg_api'] + } + files = [('file-upload[0]', open(image, 'rb'))] + headers = {'referer': 'https://ptpimg.me/index.php'} + response = requests.post("https://ptpimg.me/upload.php", headers=headers, data=payload, files=files) + response = response.json() + ptpimg_code = response[0]['code'] + ptpimg_ext = response[0]['ext'] + img_url = f"https://ptpimg.me/{ptpimg_code}.{ptpimg_ext}" + raw_url = img_url + web_url = img_url + elif img_host == "imgbb": + url = 
"https://api.imgbb.com/1/upload" + data = { + 'key': self.config['DEFAULT']['imgbb_api'], + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + response = requests.post(url, data=data, timeout=timeout) + response = response.json() + img_url = response['data']['image']['url'] + raw_url = img_url + web_url = img_url + elif img_host == "ptscreens": + url = "https://ptscreens.com/api/1/upload" + data = { + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + headers = { + 'X-API-Key': self.config['DEFAULT']['ptscreens_api'], + } + response = requests.post(url, data=data, headers=headers, timeout=timeout) + response = response.json() + if response.get('status_code') != 200: + console.print("[yellow]PT Screens failed, trying next image host") + break + img_url = response['data']['image']['url'] + raw_url = img_url + web_url = img_url + elif img_host == "pixhost": + url = "https://api.pixhost.to/images" + data = { + 'content_type': '0', + 'max_th_size': 350, + } + files = { + 'img': ('file-upload[0]', open(image, 'rb')), + } + response = requests.post(url, data=data, files=files, timeout=timeout) + if response.status_code != 200: + console.print("[yellow]Pixhost failed, trying next image host") + break + response = response.json() + raw_url = response['th_url'].replace('https://t', 'https://img').replace('/thumbs/', '/images/') + img_url = response['th_url'] + web_url = response['show_url'] + elif img_host == "lensdump": + url = "https://lensdump.com/api/1/upload" + data = { + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + headers = { + 'X-API-Key': self.config['DEFAULT']['lensdump_api'], + } + response = requests.post(url, data=data, headers=headers, timeout=timeout) + response = response.json() + if response.get('status_code') != 200: + console.print("[yellow]Lensdump failed, trying next image host") + break + img_url = response['data']['image']['url'] + raw_url = img_url + web_url = response['data']['url_viewer'] + else: + console.print(f"[red]Unsupported image host: {img_host}") break - img_url = response['data']['image']['url'] - raw_url = img_url - web_url = response['data']['url_viewer'] - else: - console.print(f"[red]Unsupported image host: {img_host}") - break - # Update progress bar and print the result on the same line - progress.console.print(f"[cyan]Uploaded image {i + 1}/{total_screens}: {raw_url}", end='\r') + # Update progress bar and print the result on the same line + progress.console.print(f"[cyan]Uploaded image {i + 1}/{total_screens}: {raw_url}", end='\r') - # Add the image details to the list - image_dict = {'img_url': img_url, 'raw_url': raw_url, 'web_url': web_url} - image_list.append(image_dict) - progress.advance(upload_task) - i += 1 + # Add the image details to the list + image_dict = {'img_url': img_url, 'raw_url': raw_url, 'web_url': web_url} + image_list.append(image_dict) + progress.advance(upload_task) + i += 1 - except Exception as e: - console.print(f"[yellow]Failed to upload {image} to {img_host}. Exception: {str(e)}") - break + except Exception as e: + console.print(f"[yellow]Failed to upload {image} to {img_host}. Exception: {str(e)}") + break - time.sleep(0.5) + time.sleep(0.5) - if i >= total_screens: - return_dict['image_list'] = image_list - console.print(f"\n[cyan]Completed uploading images. Total uploaded: {len(image_list)}") - return image_list, i + if i >= total_screens: + return_dict['image_list'] = image_list + console.print(f"\n[cyan]Completed uploading images. 
Total uploaded: {len(image_list)}") + return image_list, i - # If we broke out of the loop due to a failure, switch to the next host and retry - img_host_num += 1 - if img_host_num > len(self.config['DEFAULT']) - 1: - console.print("[red]All image hosts failed. Unable to complete uploads.") - return image_list, i # Or you could raise an exception if preferred + # If we broke out of the loop due to a failure, switch to the next host and retry + img_host_num += 1 + img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num}') + if not img_host: + console.print("[red]All image hosts failed. Unable to complete uploads.") + return image_list, i - img_host = self.config['DEFAULT'][f'img_host_{img_host_num}'] + # Ensure that if all attempts fail, a valid tuple is returned + return image_list, i async def imgbox_upload(self, chdir, image_glob): os.chdir(chdir) image_list = [] - # image_glob = glob.glob("*.png") - async with pyimgbox.Gallery(thumb_width=350, square_thumbs=False) as gallery: - async for submission in gallery.add(image_glob): - if not submission['success']: - console.print(f"[red]There was an error uploading to imgbox: [yellow]{submission['error']}[/yellow][/red]") - return [] - else: - image_dict = {} - image_dict['web_url'] = submission['web_url'] - image_dict['img_url'] = submission['thumbnail_url'] - image_dict['raw_url'] = submission['image_url'] - image_list.append(image_dict) + + # Initialize the progress bar + with Progress( + TextColumn("[bold green]Uploading Screens to Imgbox..."), + BarColumn(), + "[cyan]{task.completed}/{task.total}", + TimeRemainingColumn() + ) as progress: + upload_task = progress.add_task("Uploading...", total=len(image_glob)) + + async with pyimgbox.Gallery(thumb_width=350, square_thumbs=False) as gallery: + async for submission in gallery.add(image_glob): + if not submission['success']: + console.print(f"[red]There was an error uploading to imgbox: [yellow]{submission['error']}[/yellow][/red]") + return [] + else: + image_dict = {} + image_dict['web_url'] = submission['web_url'] + image_dict['img_url'] = submission['thumbnail_url'] + image_dict['raw_url'] = submission['image_url'] + image_list.append(image_dict) + + # Update the progress bar + progress.advance(upload_task) + return image_list async def get_name(self, meta): From 4352cb3072f54ffde2760455748bcb9889e75920 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 3 Sep 2024 22:44:31 +1000 Subject: [PATCH 161/741] Requirements update --- requirements.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index c63a8c332..241b76116 100644 --- a/requirements.txt +++ b/requirements.txt @@ -20,4 +20,6 @@ rich Jinja2 pyotp str2bool -click \ No newline at end of file +click +aiohttp +Pillow \ No newline at end of file From 7034c4ea71a82d7ca326a651787e1a7d984286a7 Mon Sep 17 00:00:00 2001 From: LostRager Date: Wed, 4 Sep 2024 22:35:24 +0200 Subject: [PATCH 162/741] Add modq and drafts to LST --- data/example-config.py | 4 +++- src/args.py | 3 ++- src/trackers/LST.py | 10 ++++++++++ upload.py | 27 +++++++++++++++++++++++++-- 4 files changed, 40 insertions(+), 4 deletions(-) diff --git a/data/example-config.py b/data/example-config.py index dfbb6a011..da8cf8463 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -151,7 +151,9 @@ "LST": { "api_key": "LST api key", "announce_url": "https://lst.gg/announce/customannounceurl", - # "anon" : False + # "anon" : False, + # "modq" : False, + # "draft" : False }, "LT": { "api_key": "LT api 
key", diff --git a/src/args.py b/src/args.py index 6c9a1e9e5..58bf929c4 100644 --- a/src/args.py +++ b/src/args.py @@ -69,7 +69,8 @@ def parse(self, args, meta): parser.add_argument('-nh', '--nohash', action='store_true', required=False, help="Don't hash .torrent") parser.add_argument('-rh', '--rehash', action='store_true', required=False, help="DO hash .torrent") parser.add_argument('-ps', '--piece-size-max', dest='piece_size_max', nargs='*', required=False, help="Maximum piece size in MiB", choices=[1, 2, 4, 8, 16], type=int) - parser.add_argument('-dr', '--draft', action='store_true', required=False, help="Send to drafts (BHD)") + parser.add_argument('-dr', '--draft', action='store_true', required=False, help="Send to drafts (BHD, LST)") + parser.add_argument('-mq', '--modq', action='store_true', required=False, help="Send to modQ") parser.add_argument('-mps', '--max-piece-size', nargs='*', required=False, help="Set max piece size allowed in MiB for default torrent creation (default 64 MiB)", choices=['2', '4', '8', '16', '32', '64', '128']) parser.add_argument('-client', '--client', nargs='*', required=False, help="Use this torrent client instead of default") parser.add_argument('-qbt', '--qbit-tag', dest='qbit_tag', nargs='*', required=False, help="Add to qbit with this tag") diff --git a/src/trackers/LST.py b/src/trackers/LST.py index ff56ad739..553caadd7 100644 --- a/src/trackers/LST.py +++ b/src/trackers/LST.py @@ -75,6 +75,8 @@ async def upload(self, meta): cat_id = await self.get_cat_id(meta['category'], meta.get('keywords', ''), meta.get('service', '')) type_id = await self.get_type_id(meta['type']) resolution_id = await self.get_res_id(meta['resolution']) + modq = await self.get_flag(meta, 'modq') + draft = await self.get_flag(meta, 'draft') await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) @@ -119,6 +121,8 @@ async def upload(self, meta): 'free': 0, 'doubleup': 0, 'sticky': 0, + 'mod_queue_opt_in': modq, + 'draft_queue_opt_in': draft, } # Internal @@ -152,6 +156,12 @@ async def upload(self, meta): console.print(data) open_torrent.close() + async def get_flag(self, meta, flag_name): + config_flag = self.config['TRACKERS'][self.tracker].get(flag_name) + if config_flag: + return 1 + return 1 if meta.get(flag_name, False) else 0 + async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") diff --git a/upload.py b/upload.py index 5f5fde7a4..3851c5e27 100644 --- a/upload.py +++ b/upload.py @@ -169,7 +169,7 @@ async def do_the_thing(base_dir): for key, value in saved_meta.items(): overwrite_list = [ 'trackers', 'dupe', 'debug', 'anon', 'category', 'type', 'screens', 'nohash', 'manual_edition', 'imdb', 'tmdb_manual', 'mal', 'manual', - 'hdb', 'ptp', 'blu', 'no_season', 'no_aka', 'no_year', 'no_dub', 'no_tag', 'no_seed', 'client', 'desclink', 'descfile', 'desc', 'draft', 'region', 'freeleech', + 'hdb', 'ptp', 'blu', 'no_season', 'no_aka', 'no_year', 'no_dub', 'no_tag', 'no_seed', 'client', 'desclink', 'descfile', 'desc', 'draft', 'modq', 'region', 'freeleech', 'personalrelease', 'unattended', 'season', 'episode', 'torrent_creation', 'qbit_tag', 'qbit_cat', 'skip_imghost_upload', 'imghost', 'manual_source', 'webdv', 'hardcoded-subs' ] if meta.get(key, None) != value and key in overwrite_list: @@ -247,7 +247,7 @@ async def do_the_thing(base_dir): ####### Upload to Trackers 
####### # noqa #F266 #################################### common = COMMON(config=config) - api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM', 'LCD', 'LST', 'HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'HDB', 'SHRI'] + api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM', 'LCD', 'HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'HDB', 'SHRI'] http_trackers = ['HDB', 'TTG', 'FL', 'PTER', 'HDT', 'MTV'] tracker_class_map = { 'BLU': BLU, 'BHD': BHD, 'AITHER': AITHER, 'STC': STC, 'R4E': R4E, 'THR': THR, 'STT': STT, 'HP': HP, 'PTP': PTP, 'RF': RF, 'SN': SN, @@ -346,6 +346,29 @@ async def do_the_thing(base_dir): await bhd.upload(meta) await client.add_to_client(meta, "BHD") + if tracker == "LST": + lst = LST(config=config) + modq, draft = await asyncio.gather(lst.get_flag(meta, 'modq'), lst.get_flag(meta, 'draft')) + + modq = 'Yes' if modq else 'No' + draft = 'Yes' if draft else 'No' + + upload_to_lst = meta['unattended'] or cli_ui.ask_yes_no(f"Upload to LST? (draft: {draft}) (modq: {modq}) {debug}", default=meta['unattended']) + if not upload_to_lst: + return + + console.print("Uploading to LST") + + if check_banned_group('LST', lst.banned_groups, meta): + return + + dupes = await lst.search_existing(meta) + dupes = await common.filter_dupes(dupes, meta) + meta = dupe_check(dupes, meta) + if meta['upload']: + await lst.upload(meta) + await client.add_to_client(meta, lst.tracker) + if tracker == "THR": if meta['unattended']: upload_to_thr = True From f55284d9cbecfcf1831abb7f7b62580dc24fbf5e Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 5 Sep 2024 09:29:25 +1000 Subject: [PATCH 163/741] NoneType string error on subs Should resolve https://github.com/Audionut/Upload-Assistant/issues/31 --- src/trackers/AITHER.py | 55 +++++++++++++++++++++++++++++------------- 1 file changed, 38 insertions(+), 17 deletions(-) diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 7436862a7..a329cd582 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -104,29 +104,50 @@ async def upload(self, meta): async def edit_name(self, meta): aither_name = meta['name'] - has_eng_audio = False + + # Helper function to check if English audio is present + def has_english_audio(tracks, is_bdmv=False): + for track in tracks: + if is_bdmv and track.get('language') == 'English': + return True + if not is_bdmv and track['@type'] == "Audio": + # Ensure Language is not None and is a string before checking startswith + if isinstance(track.get('Language'), str) and track.get('Language').startswith('en'): + return True + return False + + # Helper function to get audio language + def get_audio_lang(tracks, is_bdmv=False): + if is_bdmv: + return tracks[0].get('language', '').upper() if tracks else "" + return tracks[2].get('Language_String', '').upper() if len(tracks) > 2 else "" + if meta['is_disc'] != "BDMV": - with open(f"{meta.get('base_dir')}/tmp/{meta.get('uuid')}/MediaInfo.json", 'r', encoding='utf-8') as f: - mi = json.load(f) + try: + with open(f"{meta.get('base_dir')}/tmp/{meta.get('uuid')}/MediaInfo.json", 'r', encoding='utf-8') as f: + mi = json.load(f) + + audio_tracks = mi['media']['track'] + has_eng_audio = has_english_audio(audio_tracks) + if not has_eng_audio: + audio_lang = get_audio_lang(audio_tracks) + if audio_lang: + aither_name = aither_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) + except (FileNotFoundError, 
KeyError, IndexError) as e: + print(f"Error processing MediaInfo: {e}") - for track in mi['media']['track']: - if track['@type'] == "Audio": - if track.get('Language', 'None').startswith('en'): - has_eng_audio = True - if not has_eng_audio: - audio_lang = mi['media']['track'][2].get('Language_String', "").upper() - if audio_lang != "": - aither_name = aither_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) else: - for audio in meta['bdinfo']['audio']: - if audio['language'] == 'English': - has_eng_audio = True + bdinfo_audio = meta.get('bdinfo', {}).get('audio', []) + has_eng_audio = has_english_audio(bdinfo_audio, is_bdmv=True) if not has_eng_audio: - audio_lang = meta['bdinfo']['audio'][0]['language'].upper() - if audio_lang != "": + audio_lang = get_audio_lang(bdinfo_audio, is_bdmv=True) + if audio_lang: aither_name = aither_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) - if meta['category'] == "TV" and meta.get('tv_pack', 0) == 0 and meta.get('episode_title_storage', '').strip() != '' and meta['episode'].strip() != '': + + # Handle TV show episode title inclusion + if meta['category'] == "TV" and meta.get('tv_pack', 0) == 0 and meta.get('episode_title_storage', '').strip() and meta['episode'].strip(): aither_name = aither_name.replace(meta['episode'], f"{meta['episode']} {meta['episode_title_storage']}", 1) + return aither_name async def get_cat_id(self, category_name): From 97eafcd2eada43a17c38f1ab8bb40d89204c9f29 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 5 Sep 2024 09:30:19 +1000 Subject: [PATCH 164/741] Lint --- src/trackers/AITHER.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index a329cd582..18304a23c 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -104,7 +104,7 @@ async def upload(self, meta): async def edit_name(self, meta): aither_name = meta['name'] - + # Helper function to check if English audio is present def has_english_audio(tracks, is_bdmv=False): for track in tracks: @@ -115,7 +115,7 @@ def has_english_audio(tracks, is_bdmv=False): if isinstance(track.get('Language'), str) and track.get('Language').startswith('en'): return True return False - + # Helper function to get audio language def get_audio_lang(tracks, is_bdmv=False): if is_bdmv: @@ -126,7 +126,7 @@ def get_audio_lang(tracks, is_bdmv=False): try: with open(f"{meta.get('base_dir')}/tmp/{meta.get('uuid')}/MediaInfo.json", 'r', encoding='utf-8') as f: mi = json.load(f) - + audio_tracks = mi['media']['track'] has_eng_audio = has_english_audio(audio_tracks) if not has_eng_audio: From 7fb6b1ba70fb466a701bec7d1552e0e791621173 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 6 Sep 2024 07:51:59 +1000 Subject: [PATCH 165/741] Lint --- upload.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/upload.py b/upload.py index 3851c5e27..f84719679 100644 --- a/upload.py +++ b/upload.py @@ -349,19 +349,19 @@ async def do_the_thing(base_dir): if tracker == "LST": lst = LST(config=config) modq, draft = await asyncio.gather(lst.get_flag(meta, 'modq'), lst.get_flag(meta, 'draft')) - + modq = 'Yes' if modq else 'No' draft = 'Yes' if draft else 'No' - + upload_to_lst = meta['unattended'] or cli_ui.ask_yes_no(f"Upload to LST? 
(draft: {draft}) (modq: {modq}) {debug}", default=meta['unattended']) if not upload_to_lst: return - + console.print("Uploading to LST") if check_banned_group('LST', lst.banned_groups, meta): return - + dupes = await lst.search_existing(meta) dupes = await common.filter_dupes(dupes, meta) meta = dupe_check(dupes, meta) From 78cd997329c75c6291f3cb39cd39e58414e79dec Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 6 Sep 2024 15:41:26 +1000 Subject: [PATCH 166/741] Continue if LST is skipped by user --- upload.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/upload.py b/upload.py index f84719679..e6630fc4a 100644 --- a/upload.py +++ b/upload.py @@ -355,12 +355,12 @@ async def do_the_thing(base_dir): upload_to_lst = meta['unattended'] or cli_ui.ask_yes_no(f"Upload to LST? (draft: {draft}) (modq: {modq}) {debug}", default=meta['unattended']) if not upload_to_lst: - return + continue console.print("Uploading to LST") if check_banned_group('LST', lst.banned_groups, meta): - return + continue dupes = await lst.search_existing(meta) dupes = await common.filter_dupes(dupes, meta) From 7cec387151502326335df662e0dc34ab7fdedde8 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 6 Sep 2024 16:24:10 +1000 Subject: [PATCH 167/741] RTF - save returned API token to config --- src/trackers/RTF.py | 52 +++++++++++++++++++++++++++++++++++++-------- 1 file changed, 43 insertions(+), 9 deletions(-) diff --git a/src/trackers/RTF.py b/src/trackers/RTF.py index 232cbda2c..07b78d7e8 100644 --- a/src/trackers/RTF.py +++ b/src/trackers/RTF.py @@ -5,6 +5,7 @@ import base64 import re import datetime +import httpx from src.trackers.COMMON import COMMON from src.console import console @@ -121,7 +122,7 @@ async def search_existing(self, meta): return dupes - # tests if API key valid site API key expires every week so a new one has to be generated. + # Tests if stored API key is valid. Site API key expires every week so a new one has to be generated. 
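The generate_new_api hunk below persists the weekly RTF token by rewriting data/config.py in place with a single re.sub: group 1 captures everything from "RTF": { up to and including the opening single quote of the api_key value, group 2 captures everything from the closing quote to the end of the block, and the fresh token is spliced between the two groups. A minimal, self-contained sketch of that same substitution, with an invented CONFIG snippet and token value standing in for the real config file on disk:

import re

# Invented sample config text; the real code reads data/config.py from disk.
CONFIG = """
"RTF": {
    "username": "user",
    "password": "pass",
    "api_key": 'old-token',
},
"""

NEW_TOKEN = "fresh-token-from-login"  # stand-in for response.json()["token"]

# Same pattern as the patch: keep both captured groups and replace only the
# quoted value between them.
updated = re.sub(
    r'("RTF":\s*{[^}]*"api_key":\s*\')[^\']*(\'[^\}]*})',
    rf'\1{NEW_TOKEN}\2',
    CONFIG,
)
print(updated)  # the api_key line now reads 'fresh-token-from-login'

One caveat worth noting: the replacement string is built with an f-string, so a token containing backslashes would be misread as group references; real-world use might prefer a callable replacement such as lambda m: m.group(1) + NEW_TOKEN + m.group(2).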
async def api_test(self, meta): headers = { 'accept': 'application/json', @@ -146,12 +147,45 @@ async def generate_new_api(self, meta): 'password': self.config['TRACKERS'][self.tracker]['password'], } - response = requests.post('https://retroflix.club/api/login', headers=headers, json=json_data) + base_dir = meta.get('base_dir', '.') + config_path = f"{base_dir}/data/config.py" - if response.status_code == 201: - console.print('[bold green]Using New API key generated for this upload') - console.print('[bold green]Please update your L4G config with the below RTF API Key for future uploads') - console.print(f'[bold yellow]{response.json()["token"]}') - self.config['TRACKERS'][self.tracker]['api_key'] = response.json()["token"] - else: - console.print(f'[bold red]Error getting new API key got error code {response.status_code}, Please check username and password in config') + try: + async with httpx.AsyncClient() as client: + response = await client.post('https://retroflix.club/api/login', headers=headers, json=json_data) + + if response.status_code == 201: + token = response.json().get("token") + if token: + console.print('[bold green]Saving and using New API key generated for this upload') + console.print(f'[bold yellow]{token}') + + # Update the in-memory config dictionary + self.config['TRACKERS'][self.tracker]['api_key'] = token + + # Now we update the config file on disk using utf-8 encoding + with open(config_path, 'r', encoding='utf-8') as file: + config_data = file.read() + + # Find the RTF tracker and replace the api_key value + new_config_data = re.sub( + r'("RTF":\s*{[^}]*"api_key":\s*\')[^\']*(\'[^\}]*})', # Match the api_key content only between single quotes + rf'\1{token}\2', # Replace only the content inside the quotes without adding extra backslashes + config_data + ) + + # Write the updated config back to the file + with open(config_path, 'w', encoding='utf-8') as file: + file.write(new_config_data) + + console.print(f'[bold green]API Key successfully saved to {config_path}') + else: + console.print('[bold red]API response does not contain a token.') + else: + console.print(f'[bold red]Error getting new API key: {response.status_code}, please check username and password in the config.') + + except httpx.RequestError as e: + console.print(f'[bold red]An error occurred while requesting the API: {str(e)}') + + except Exception as e: + console.print(f'[bold red]An unexpected error occurred: {str(e)}') From c46988c1ad7553beefdd1618588b773b164267f3 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 6 Sep 2024 16:48:00 +1000 Subject: [PATCH 168/741] Force python version and start graceful user shutdown --- src/prep.py | 14 +++++++++----- upload.py | 18 ++++++++---------- 2 files changed, 17 insertions(+), 15 deletions(-) diff --git a/src/prep.py b/src/prep.py index 468799ca4..87248664a 100644 --- a/src/prep.py +++ b/src/prep.py @@ -70,11 +70,15 @@ def __init__(self, screens, img_host, config): self.img_host = img_host.lower() tmdb.API_KEY = config['DEFAULT']['tmdb_api'] - async def prompt_user_for_confirmation(self, message): - response = input(f"{message} (Y/n): ").strip().lower() - if response == '' or response == 'y': - return True - return False + async def prompt_user_for_confirmation(self, message: str) -> bool: + try: + response = input(f"{message} (Y/n): ").strip().lower() + if response in ["y", "yes", ""]: + return True + return False + except EOFError: + console.print("[bold red]Input was interrupted.") + return False async def check_images_concurrently(self, imagelist): 
async def check_and_collect(image_dict): diff --git a/upload.py b/upload.py index e6630fc4a..5121aaf52 100644 --- a/upload.py +++ b/upload.py @@ -602,13 +602,11 @@ def get_missing(meta): if __name__ == '__main__': pyver = platform.python_version_tuple() - if int(pyver[0]) != 3: - console.print("[bold red]Python2 Detected, please use python3") - exit() - else: - if int(pyver[1]) <= 6: - console.print("[bold red]Python <= 3.6 Detected, please use Python >=3.7") - loop = asyncio.get_event_loop() - loop.run_until_complete(do_the_thing(base_dir)) - else: - asyncio.run(do_the_thing(base_dir)) + if int(pyver[0]) != 3 or int(pyver[1]) < 12: + console.print("[bold red]Python version is too low. Please use Python 3.12 or higher.") + sys.exit(1) + + try: + asyncio.run(do_the_thing(base_dir)) # Pass the correct base_dir value here + except (KeyboardInterrupt, SystemExit): + console.print("[bold red]Program interrupted. Exiting.") From efb36b34d73982ace2ac43bf66f635a3883c631d Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 6 Sep 2024 17:40:03 +1000 Subject: [PATCH 169/741] Skip auto search if imagelist is populated If the imagelist is populated, search and/or screenshot taking has already been done. Delete meta.json in tmp directory to force search --- src/prep.py | 133 +++++++++++++++++++++++++++------------------------- 1 file changed, 68 insertions(+), 65 deletions(-) diff --git a/src/prep.py b/src/prep.py index 87248664a..c860718c8 100644 --- a/src/prep.py +++ b/src/prep.py @@ -429,87 +429,90 @@ async def gather_prep(self, meta, mode): # Debugging information after population # console.print(f"Debug: meta['filelist'] after population: {meta.get('filelist', 'Not Set')}") - # Reuse information from trackers with fallback - found_match = False - - if search_term: - # Check if specific trackers are already set in meta - specific_tracker = None - if meta.get('ptp'): - specific_tracker = 'PTP' - elif meta.get('hdb'): - specific_tracker = 'HDB' - elif meta.get('blu'): - specific_tracker = 'BLU' - elif meta.get('aither'): - specific_tracker = 'AITHER' - elif meta.get('lst'): - specific_tracker = 'LST' - - # If a specific tracker is found, only process that one - if specific_tracker: - console.print(f"[blue]Processing only the {specific_tracker} tracker based on meta.[/blue]") - - if specific_tracker == 'PTP' and str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true": - ptp = PTP(config=self.config) - meta, match = await self.update_metadata_from_tracker('PTP', ptp, meta, search_term, search_file_folder) - if match: - found_match = True - - elif specific_tracker == 'BLU' and str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": - blu = BLU(config=self.config) - meta, match = await self.update_metadata_from_tracker('BLU', blu, meta, search_term, search_file_folder) - if match: - found_match = True - - elif specific_tracker == 'AITHER' and str(self.config['TRACKERS'].get('AITHER', {}).get('useAPI')).lower() == "true": - aither = AITHER(config=self.config) - meta, match = await self.update_metadata_from_tracker('AITHER', aither, meta, search_term, search_file_folder) - if match: - found_match = True - - elif specific_tracker == 'LST' and str(self.config['TRACKERS'].get('LST', {}).get('useAPI')).lower() == "true": - lst = LST(config=self.config) - meta, match = await self.update_metadata_from_tracker('LST', lst, meta, search_term, search_file_folder) - if match: - found_match = True - - elif specific_tracker == 'HDB' and str(self.config['TRACKERS'].get('HDB', 
{}).get('useAPI')).lower() == "true": - hdb = HDB(config=self.config) - meta, match = await self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder) - if match: - found_match = True - else: - # Process all trackers if no specific tracker is set in meta - default_trackers = self.config['TRACKERS'].get('default_trackers', "").split(", ") - - if "PTP" in default_trackers and not found_match: - if str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true": + if not meta.get('image_list'): + # Reuse information from trackers with fallback + found_match = False + + if search_term: + # Check if specific trackers are already set in meta + specific_tracker = None + if meta.get('ptp'): + specific_tracker = 'PTP' + elif meta.get('hdb'): + specific_tracker = 'HDB' + elif meta.get('blu'): + specific_tracker = 'BLU' + elif meta.get('aither'): + specific_tracker = 'AITHER' + elif meta.get('lst'): + specific_tracker = 'LST' + + # If a specific tracker is found, only process that one + if specific_tracker: + console.print(f"[blue]Processing only the {specific_tracker} tracker based on meta.[/blue]") + + if specific_tracker == 'PTP' and str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true": ptp = PTP(config=self.config) meta, match = await self.update_metadata_from_tracker('PTP', ptp, meta, search_term, search_file_folder) if match: found_match = True - if "BLU" in default_trackers and not found_match: - if str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": + elif specific_tracker == 'BLU' and str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": blu = BLU(config=self.config) meta, match = await self.update_metadata_from_tracker('BLU', blu, meta, search_term, search_file_folder) if match: found_match = True - if "HDB" in default_trackers and not found_match: - if str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": + elif specific_tracker == 'AITHER' and str(self.config['TRACKERS'].get('AITHER', {}).get('useAPI')).lower() == "true": + aither = AITHER(config=self.config) + meta, match = await self.update_metadata_from_tracker('AITHER', aither, meta, search_term, search_file_folder) + if match: + found_match = True + + elif specific_tracker == 'LST' and str(self.config['TRACKERS'].get('LST', {}).get('useAPI')).lower() == "true": + lst = LST(config=self.config) + meta, match = await self.update_metadata_from_tracker('LST', lst, meta, search_term, search_file_folder) + if match: + found_match = True + + elif specific_tracker == 'HDB' and str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": hdb = HDB(config=self.config) meta, match = await self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder) if match: found_match = True - - if not found_match: - console.print("[yellow]No matches found on any trackers.[/yellow]") + else: + # Process all trackers if no specific tracker is set in meta + default_trackers = self.config['TRACKERS'].get('default_trackers', "").split(", ") + + if "PTP" in default_trackers and not found_match: + if str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true": + ptp = PTP(config=self.config) + meta, match = await self.update_metadata_from_tracker('PTP', ptp, meta, search_term, search_file_folder) + if match: + found_match = True + + if "BLU" in default_trackers and not found_match: + if str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": + blu = 
BLU(config=self.config) + meta, match = await self.update_metadata_from_tracker('BLU', blu, meta, search_term, search_file_folder) + if match: + found_match = True + + if "HDB" in default_trackers and not found_match: + if str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": + hdb = HDB(config=self.config) + meta, match = await self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder) + if match: + found_match = True + + if not found_match: + console.print("[yellow]No matches found on any trackers.[/yellow]") + else: + console.print(f"[green]Match found: {found_match}[/green]") else: - console.print(f"[green]Match found: {found_match}[/green]") + console.print("[yellow]Warning: No valid search term available, skipping tracker updates.[/yellow]") else: - console.print("[yellow]Warning: No valid search term available, skipping tracker updates.[/yellow]") + console.print("Skipping existing search as meta already populated") # Take Screenshots if meta['is_disc'] == "BDMV": From d0c11c1ea753065a9c11c61c0c6ff398c7460531 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 6 Sep 2024 17:53:43 +1000 Subject: [PATCH 170/741] Fix HDB dual upload call Missed that a change was reverted in https://github.com/Audionut/Upload-Assistant/pull/25/files --- upload.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/upload.py b/upload.py index 5121aaf52..3abab7f25 100644 --- a/upload.py +++ b/upload.py @@ -248,7 +248,7 @@ async def do_the_thing(base_dir): #################################### common = COMMON(config=config) api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM', 'LCD', 'HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'HDB', 'SHRI'] - http_trackers = ['HDB', 'TTG', 'FL', 'PTER', 'HDT', 'MTV'] + http_trackers = ['TTG', 'FL', 'PTER', 'HDT', 'MTV'] tracker_class_map = { 'BLU': BLU, 'BHD': BHD, 'AITHER': AITHER, 'STC': STC, 'R4E': R4E, 'THR': THR, 'STT': STT, 'HP': HP, 'PTP': PTP, 'RF': RF, 'SN': SN, 'ACM': ACM, 'HDB': HDB, 'LCD': LCD, 'TTG': TTG, 'LST': LST, 'HUNO': HUNO, 'FL': FL, 'LT': LT, 'NBL': NBL, 'ANT': ANT, 'PTER': PTER, 'JPTV': JPTV, From 67402b52e36f5dad68d15898fec9366572dd40b8 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 6 Sep 2024 19:27:40 +1000 Subject: [PATCH 171/741] Refactor upload process Update modq/draft support. 
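The modq/draft handling consolidated here follows one precedence rule, visible in the get_flag helper this patch adds to AITHER and BLU below (and that LST already gained in PATCH 162): a truthy "modq"/"draft" entry in the tracker's config always forces the flag on; otherwise the CLI switch (-mq/--modq, -dr/--draft) recorded in meta decides. A condensed sketch of that precedence, with invented config/meta dicts standing in for self.config['TRACKERS'][tracker] and the parsed arguments:

# Invented example dicts; the real helper reads self.config and meta.
def resolve_flag(tracker_config: dict, meta: dict, flag_name: str) -> int:
    if tracker_config.get(flag_name):  # a truthy config value forces the flag on
        return 1
    return 1 if meta.get(flag_name, False) else 0  # otherwise the CLI switch decides

assert resolve_flag({"modq": True}, {}, "modq") == 1  # forced by tracker config
assert resolve_flag({}, {"modq": True}, "modq") == 1  # enabled via -mq/--modq
assert resolve_flag({}, {}, "modq") == 0              # off by default
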
Will close https://github.com/Audionut/Upload-Assistant/issues/34 --- .github/workflows/docker-image.yml | 2 +- data/example-config.py | 6 +- src/trackers/AITHER.py | 63 ++++++++++++----- src/trackers/BLU.py | 8 +++ upload.py | 105 +++++++++++++++-------------- 5 files changed, 114 insertions(+), 70 deletions(-) diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index f33827acd..7e839bcb7 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -5,7 +5,7 @@ on: branches: - master - develop - - unit3d-searching + - upload-refactor env: REGISTRY: ghcr.io diff --git a/data/example-config.py b/data/example-config.py index da8cf8463..e0d222cba 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -44,7 +44,8 @@ "useAPI": False, # Set to True if using BLU "api_key": "BLU api key", "announce_url": "https://blutopia.cc/announce/customannounceurl", - # "anon" : False + # "anon" : False, + # "modq" : False }, "BHD": { "api_key": "BHD api key", @@ -71,7 +72,8 @@ "AITHER": { "api_key": "AITHER api key", "announce_url": "https://aither.cc/announce/customannounceurl", - # "anon" : False + # "anon" : False, + # "modq" : False }, "R4E": { "api_key": "R4E api key", diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 7436862a7..600a58472 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -37,6 +37,7 @@ async def upload(self, meta): cat_id = await self.get_cat_id(meta['category']) type_id = await self.get_type_id(meta['type']) resolution_id = await self.get_res_id(meta['resolution']) + modq = await self.get_flag(meta, 'modq') name = await self.edit_name(meta) if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 @@ -74,6 +75,7 @@ async def upload(self, meta): 'free': 0, 'doubleup': 0, 'sticky': 0, + 'mod_queue_opt_in': modq, } headers = { 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' @@ -102,31 +104,58 @@ async def upload(self, meta): console.print(data) open_torrent.close() + async def get_flag(self, meta, flag_name): + config_flag = self.config['TRACKERS'][self.tracker].get(flag_name) + if config_flag: + return 1 + return 1 if meta.get(flag_name, False) else 0 + async def edit_name(self, meta): aither_name = meta['name'] - has_eng_audio = False + + # Helper function to check if English audio is present + def has_english_audio(tracks, is_bdmv=False): + for track in tracks: + if is_bdmv and track.get('language') == 'English': + return True + if not is_bdmv and track['@type'] == "Audio": + # Ensure Language is not None and is a string before checking startswith + if isinstance(track.get('Language'), str) and track.get('Language').startswith('en'): + return True + return False + + # Helper function to get audio language + def get_audio_lang(tracks, is_bdmv=False): + if is_bdmv: + return tracks[0].get('language', '').upper() if tracks else "" + return tracks[2].get('Language_String', '').upper() if len(tracks) > 2 else "" + if meta['is_disc'] != "BDMV": - with open(f"{meta.get('base_dir')}/tmp/{meta.get('uuid')}/MediaInfo.json", 'r', encoding='utf-8') as f: - mi = json.load(f) + try: + with open(f"{meta.get('base_dir')}/tmp/{meta.get('uuid')}/MediaInfo.json", 'r', encoding='utf-8') as f: + mi = json.load(f) + + audio_tracks = mi['media']['track'] + has_eng_audio = has_english_audio(audio_tracks) + if not has_eng_audio: + audio_lang = get_audio_lang(audio_tracks) + if audio_lang: + aither_name = 
aither_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) + except (FileNotFoundError, KeyError, IndexError) as e: + print(f"Error processing MediaInfo: {e}") - for track in mi['media']['track']: - if track['@type'] == "Audio": - if track.get('Language', 'None').startswith('en'): - has_eng_audio = True - if not has_eng_audio: - audio_lang = mi['media']['track'][2].get('Language_String', "").upper() - if audio_lang != "": - aither_name = aither_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) else: - for audio in meta['bdinfo']['audio']: - if audio['language'] == 'English': - has_eng_audio = True + bdinfo_audio = meta.get('bdinfo', {}).get('audio', []) + has_eng_audio = has_english_audio(bdinfo_audio, is_bdmv=True) if not has_eng_audio: - audio_lang = meta['bdinfo']['audio'][0]['language'].upper() - if audio_lang != "": + audio_lang = get_audio_lang(bdinfo_audio, is_bdmv=True) + if audio_lang: aither_name = aither_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) - if meta['category'] == "TV" and meta.get('tv_pack', 0) == 0 and meta.get('episode_title_storage', '').strip() != '' and meta['episode'].strip() != '': + + # Handle TV show episode title inclusion + if meta['category'] == "TV" and meta.get('tv_pack', 0) == 0 and meta.get('episode_title_storage', '').strip() and meta['episode'].strip(): aither_name = aither_name.replace(meta['episode'], f"{meta['episode']} {meta['episode_title_storage']}", 1) + return aither_name async def get_cat_id(self, category_name): diff --git a/src/trackers/BLU.py b/src/trackers/BLU.py index 54509c9b7..ba543788f 100644 --- a/src/trackers/BLU.py +++ b/src/trackers/BLU.py @@ -46,6 +46,7 @@ async def upload(self, meta): cat_id = await self.get_cat_id(meta['category'], meta.get('edition', '')) type_id = await self.get_type_id(meta['type']) resolution_id = await self.get_res_id(meta['resolution']) + modq = await self.get_flag(meta, 'modq') region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: @@ -85,6 +86,7 @@ async def upload(self, meta): 'free': 0, 'doubleup': 0, 'sticky': 0, + 'mod_queue_opt_in': modq, } # Internal if self.config['TRACKERS'][self.tracker].get('internal', False) is True: @@ -118,6 +120,12 @@ async def upload(self, meta): console.print(data) open_torrent.close() + async def get_flag(self, meta, flag_name): + config_flag = self.config['TRACKERS'][self.tracker].get(flag_name) + if config_flag: + return 1 + return 1 if meta.get(flag_name, False) else 0 + async def get_cat_id(self, category_name, edition): category_id = { 'MOVIE': '1', diff --git a/upload.py b/upload.py index 3abab7f25..ce50538cb 100644 --- a/upload.py +++ b/upload.py @@ -247,17 +247,46 @@ async def do_the_thing(base_dir): ####### Upload to Trackers ####### # noqa #F266 #################################### common = COMMON(config=config) - api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM', 'LCD', 'HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'HDB', 'SHRI'] + api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM', 'LCD', 'HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'HDB', 'SHRI', 'LST', 'BHD'] http_trackers = ['TTG', 'FL', 'PTER', 'HDT', 'MTV'] tracker_class_map = { 'BLU': 
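As an aside for readers of the edit_name() refactor above: the helper is easy to exercise in isolation. A minimal, runnable sketch with fabricated MediaInfo-style data (the field names mirror the hunk; the sample tracks are invented):

def has_english_audio(tracks, is_bdmv=False):
    # Mirrors the helper in the AITHER hunk: BDMV audio entries expose
    # 'language', while MediaInfo JSON tracks use '@type' and 'Language'.
    for track in tracks:
        if is_bdmv and track.get('language') == 'English':
            return True
        if not is_bdmv and track.get('@type') == 'Audio':
            lang = track.get('Language')
            # Language can be absent or None, so guard before startswith()
            if isinstance(lang, str) and lang.startswith('en'):
                return True
    return False

sample_tracks = [
    {'@type': 'General'},
    {'@type': 'Video'},
    {'@type': 'Audio', 'Language': 'fr', 'Language_String': 'French'},
]
print(has_english_audio(sample_tracks))  # False -> resolution gets a language prefix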
BLU, 'BHD': BHD, 'AITHER': AITHER, 'STC': STC, 'R4E': R4E, 'THR': THR, 'STT': STT, 'HP': HP, 'PTP': PTP, 'RF': RF, 'SN': SN, 'ACM': ACM, 'HDB': HDB, 'LCD': LCD, 'TTG': TTG, 'LST': LST, 'HUNO': HUNO, 'FL': FL, 'LT': LT, 'NBL': NBL, 'ANT': ANT, 'PTER': PTER, 'JPTV': JPTV, 'TL': TL, 'TDC': TDC, 'HDT': HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF': RTF, 'OTW': OTW, 'FNP': FNP, 'CBR': CBR, 'UTP': UTP, 'AL': AL, 'SHRI': SHRI} + tracker_capabilities = { + 'LST': {'mod_q': False, 'draft': True}, + 'BLU': {'mod_q': True, 'draft': False}, + 'AITHER': {'mod_q': True, 'draft': False}, + 'BHD': {'draft_live': True}, + } + + async def check_mod_q_and_draft(tracker_class, meta, debug): + modq, draft = None, None + + tracker_caps = tracker_capabilities.get(tracker_class.tracker, {}) + + # Handle BHD specific draft/live logic + if tracker_class.tracker == 'BHD' and tracker_caps.get('draft_live'): + draft_int = await tracker_class.get_live(meta) + draft = "Draft" if draft_int == 0 else "Live" + + # Handle mod_q and draft for other trackers + else: + if tracker_caps.get('mod_q'): + modq = await tracker_class.get_flag(meta, 'modq') + modq = 'Yes' if modq else 'No' + if tracker_caps.get('draft'): + draft = await tracker_class.get_flag(meta, 'draft') + draft = 'Yes' if draft else 'No' + + return modq, draft + for tracker in trackers: + tracker = tracker.replace(" ", "").upper().strip() if meta['name'].endswith('DUPE?'): meta['name'] = meta['name'].replace(' DUPE?', '') - tracker = tracker.replace(" ", "").upper().strip() + if meta['debug']: debug = "(DEBUG)" else: @@ -265,21 +294,42 @@ async def do_the_thing(base_dir): if tracker in api_trackers: tracker_class = tracker_class_map[tracker](config=config) + + # Confirm upload if meta['unattended']: upload_to_tracker = True else: - upload_to_tracker = cli_ui.ask_yes_no(f"Upload to {tracker_class.tracker}? {debug}", default=meta['unattended']) + upload_to_tracker = cli_ui.ask_yes_no( + f"Upload to {tracker_class.tracker}? {debug}", + default=meta['unattended'] + ) + if upload_to_tracker: + # Get mod_q, draft, or draft/live depending on the tracker + modq, draft = await check_mod_q_and_draft(tracker_class, meta, debug) + + # Print mod_q and draft info if relevant + if modq is not None: + console.print(f"(modq: {modq})") + if draft is not None: + console.print(f"(draft: {draft})") + console.print(f"Uploading to {tracker_class.tracker}") + + # Check if the group is banned for the tracker if check_banned_group(tracker_class.tracker, tracker_class.banned_groups, meta): continue + + # Perform the existing checks for dupes if tracker == "RTF": await tracker_class.api_test(meta) + dupes = await tracker_class.search_existing(meta) dupes = await common.filter_dupes(dupes, meta) - # note BHDTV does not have search implemented. meta = dupe_check(dupes, meta) - if meta['upload'] is True: + + # Proceed with upload if the meta is set to upload + if meta['upload']: await tracker_class.upload(meta) if tracker == 'SN': await asyncio.sleep(16) @@ -324,51 +374,6 @@ async def do_the_thing(base_dir): console.print(f"[green]{meta['name']}") console.print(f"[green]Files can be found at: [yellow]{url}[/yellow]") - if tracker == "BHD": - bhd = BHD(config=config) - draft_int = await bhd.get_live(meta) - if draft_int == 0: - draft = "Draft" - else: - draft = "Live" - if meta['unattended']: - upload_to_bhd = True - else: - upload_to_bhd = cli_ui.ask_yes_no(f"Upload to BHD? 
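The tracker_capabilities table and check_mod_q_and_draft() above replace the hand-rolled BHD and LST blocks removed further down. The pattern, reduced to a runnable toy (capability flags copied from the hunks; get_flag() here is a simplified stand-in for the per-tracker method):

import asyncio

tracker_capabilities = {
    'BLU': {'mod_q': True, 'draft': False},
    'AITHER': {'mod_q': True, 'draft': False},
    'BHD': {'draft_live': True},
}

async def get_flag(meta, flag_name):
    # simplified stand-in for tracker_class.get_flag() in the real code
    return 1 if meta.get(flag_name, False) else 0

async def check_mod_q_and_draft(tracker, meta):
    modq = draft = None
    caps = tracker_capabilities.get(tracker, {})
    if caps.get('mod_q'):
        modq = 'Yes' if await get_flag(meta, 'modq') else 'No'
    if caps.get('draft'):
        draft = 'Yes' if await get_flag(meta, 'draft') else 'No'
    return modq, draft

print(asyncio.run(check_mod_q_and_draft('BLU', {'modq': True})))  # ('Yes', None)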
({draft}) {debug}", default=meta['unattended']) - if upload_to_bhd: - console.print("Uploading to BHD") - if check_banned_group("BHD", bhd.banned_groups, meta): - continue - dupes = await bhd.search_existing(meta) - dupes = await common.filter_dupes(dupes, meta) - meta = dupe_check(dupes, meta) - if meta['upload'] is True: - await bhd.upload(meta) - await client.add_to_client(meta, "BHD") - - if tracker == "LST": - lst = LST(config=config) - modq, draft = await asyncio.gather(lst.get_flag(meta, 'modq'), lst.get_flag(meta, 'draft')) - - modq = 'Yes' if modq else 'No' - draft = 'Yes' if draft else 'No' - - upload_to_lst = meta['unattended'] or cli_ui.ask_yes_no(f"Upload to LST? (draft: {draft}) (modq: {modq}) {debug}", default=meta['unattended']) - if not upload_to_lst: - continue - - console.print("Uploading to LST") - - if check_banned_group('LST', lst.banned_groups, meta): - continue - - dupes = await lst.search_existing(meta) - dupes = await common.filter_dupes(dupes, meta) - meta = dupe_check(dupes, meta) - if meta['upload']: - await lst.upload(meta) - await client.add_to_client(meta, lst.tracker) - if tracker == "THR": if meta['unattended']: upload_to_thr = True From c25caf7ce56bcdcb2fdeb4b3c0516e982a76a485 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 6 Sep 2024 19:29:48 +1000 Subject: [PATCH 172/741] Lint --- upload.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/upload.py b/upload.py index ce50538cb..2b9094de3 100644 --- a/upload.py +++ b/upload.py @@ -265,12 +265,12 @@ async def check_mod_q_and_draft(tracker_class, meta, debug): modq, draft = None, None tracker_caps = tracker_capabilities.get(tracker_class.tracker, {}) - + # Handle BHD specific draft/live logic if tracker_class.tracker == 'BHD' and tracker_caps.get('draft_live'): draft_int = await tracker_class.get_live(meta) draft = "Draft" if draft_int == 0 else "Live" - + # Handle mod_q and draft for other trackers else: if tracker_caps.get('mod_q'): @@ -307,7 +307,7 @@ async def check_mod_q_and_draft(tracker_class, meta, debug): if upload_to_tracker: # Get mod_q, draft, or draft/live depending on the tracker modq, draft = await check_mod_q_and_draft(tracker_class, meta, debug) - + # Print mod_q and draft info if relevant if modq is not None: console.print(f"(modq: {modq})") @@ -315,11 +315,11 @@ async def check_mod_q_and_draft(tracker_class, meta, debug): console.print(f"(draft: {draft})") console.print(f"Uploading to {tracker_class.tracker}") - + # Check if the group is banned for the tracker if check_banned_group(tracker_class.tracker, tracker_class.banned_groups, meta): continue - + # Perform the existing checks for dupes if tracker == "RTF": await tracker_class.api_test(meta) @@ -327,7 +327,7 @@ async def check_mod_q_and_draft(tracker_class, meta, debug): dupes = await tracker_class.search_existing(meta) dupes = await common.filter_dupes(dupes, meta) meta = dupe_check(dupes, meta) - + # Proceed with upload if the meta is set to upload if meta['upload']: await tracker_class.upload(meta) From 6b16cdcead7da37ea9b21a8aebbf578a2327a323 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 6 Sep 2024 19:42:44 +1000 Subject: [PATCH 173/741] LST does have both options --- upload.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/upload.py b/upload.py index 2b9094de3..d0f222d02 100644 --- a/upload.py +++ b/upload.py @@ -255,7 +255,7 @@ async def do_the_thing(base_dir): 'TL': TL, 'TDC': TDC, 'HDT': HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF': RTF, 'OTW': OTW, 'FNP': FNP, 
'CBR': CBR, 'UTP': UTP, 'AL': AL, 'SHRI': SHRI} tracker_capabilities = { - 'LST': {'mod_q': False, 'draft': True}, + 'LST': {'mod_q': True, 'draft': True}, 'BLU': {'mod_q': True, 'draft': False}, 'AITHER': {'mod_q': True, 'draft': False}, 'BHD': {'draft_live': True}, @@ -300,7 +300,7 @@ async def check_mod_q_and_draft(tracker_class, meta, debug): upload_to_tracker = True else: upload_to_tracker = cli_ui.ask_yes_no( - f"Upload to {tracker_class.tracker}? {debug}", + f"Upload to {tracker_class.tracker}? {debug}", default=meta['unattended'] ) From 34c81458fc719da1b0319625c6227aedf77b65d5 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 7 Sep 2024 18:04:30 +1000 Subject: [PATCH 174/741] Handle edge case description PTP --- src/bbcode.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/bbcode.py b/src/bbcode.py index 6509d594e..755cea4ca 100644 --- a/src/bbcode.py +++ b/src/bbcode.py @@ -123,7 +123,7 @@ def clean_ptp_description(self, desc, is_disc): desc = re.sub(r"\[img=[\s\S]*?\]", "", desc, flags=re.IGNORECASE) # Extract loose images and add to imagelist as dictionaries - loose_images = re.findall(r"(https?:\/\/.*\.(?:png|jpg))", nocomp, flags=re.IGNORECASE) + loose_images = re.findall(r"(https?:\/\/[^\s\[\]]+\.(?:png|jpg))", nocomp, flags=re.IGNORECASE) if loose_images: for img_url in loose_images: image_dict = { From 04bd843850dd59c68614fe867612cc677de1ff5e Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 7 Sep 2024 20:48:38 +1000 Subject: [PATCH 175/741] Slight adjustment to get_flag --- src/trackers/AITHER.py | 5 +++-- src/trackers/BLU.py | 5 +++-- src/trackers/LST.py | 5 +++-- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 600a58472..0dc0c05ab 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -106,8 +106,9 @@ async def upload(self, meta): async def get_flag(self, meta, flag_name): config_flag = self.config['TRACKERS'][self.tracker].get(flag_name) - if config_flag: - return 1 + if config_flag is not None: + return 1 if config_flag else 0 + return 1 if meta.get(flag_name, False) else 0 async def edit_name(self, meta): diff --git a/src/trackers/BLU.py b/src/trackers/BLU.py index ba543788f..9af559cf5 100644 --- a/src/trackers/BLU.py +++ b/src/trackers/BLU.py @@ -122,8 +122,9 @@ async def upload(self, meta): async def get_flag(self, meta, flag_name): config_flag = self.config['TRACKERS'][self.tracker].get(flag_name) - if config_flag: - return 1 + if config_flag is not None: + return 1 if config_flag else 0 + return 1 if meta.get(flag_name, False) else 0 async def get_cat_id(self, category_name, edition): diff --git a/src/trackers/LST.py b/src/trackers/LST.py index 553caadd7..10f667251 100644 --- a/src/trackers/LST.py +++ b/src/trackers/LST.py @@ -158,8 +158,9 @@ async def upload(self, meta): async def get_flag(self, meta, flag_name): config_flag = self.config['TRACKERS'][self.tracker].get(flag_name) - if config_flag: - return 1 + if config_flag is not None: + return 1 if config_flag else 0 + return 1 if meta.get(flag_name, False) else 0 async def search_existing(self, meta): From eb8d2bdbf81f490ddd2a314dda6fbd048fb9c674 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 7 Sep 2024 21:23:29 +1000 Subject: [PATCH 176/741] More graceful exiting --- upload.py | 56 ++++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 45 insertions(+), 11 deletions(-) diff --git a/upload.py b/upload.py index d0f222d02..9a3339e08 100644 --- a/upload.py +++ b/upload.py @@ -295,14 
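The tightened pattern in the PTP description cleaner above deserves a second look: the old .* is greedy, so a single match could swallow BBCode brackets and several URLs at once. A runnable comparison on a fabricated description:

import re

desc = "[url=https://a.example/x.png]link[/url] text https://b.example/shot.jpg"

old = re.findall(r"(https?:\/\/.*\.(?:png|jpg))", desc, flags=re.IGNORECASE)
new = re.findall(r"(https?:\/\/[^\s\[\]]+\.(?:png|jpg))", desc, flags=re.IGNORECASE)

print(old)  # one run-on match spanning the bracket and both URLs
print(new)  # ['https://a.example/x.png', 'https://b.example/shot.jpg']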
+295,16 @@ async def check_mod_q_and_draft(tracker_class, meta, debug): if tracker in api_trackers: tracker_class = tracker_class_map[tracker](config=config) - # Confirm upload if meta['unattended']: upload_to_tracker = True else: - upload_to_tracker = cli_ui.ask_yes_no( - f"Upload to {tracker_class.tracker}? {debug}", - default=meta['unattended'] - ) + try: + upload_to_tracker = cli_ui.ask_yes_no( + f"Upload to {tracker_class.tracker}? {debug}", + default=meta['unattended'] + ) + except (KeyboardInterrupt, EOFError): + sys.exit(1) # Exit immediately if upload_to_tracker: # Get mod_q, draft, or draft/live depending on the tracker @@ -339,10 +339,18 @@ async def check_mod_q_and_draft(tracker_class, meta, debug): if tracker in http_trackers: tracker_class = tracker_class_map[tracker](config=config) + if meta['unattended']: upload_to_tracker = True else: - upload_to_tracker = cli_ui.ask_yes_no(f"Upload to {tracker_class.tracker}? {debug}", default=meta['unattended']) + try: + upload_to_tracker = cli_ui.ask_yes_no( + f"Upload to {tracker_class.tracker}? {debug}", + default=meta['unattended'] + ) + except (KeyboardInterrupt, EOFError): + sys.exit(1) # Exit immediately + if upload_to_tracker: console.print(f"Uploading to {tracker}") if check_banned_group(tracker_class.tracker, tracker_class.banned_groups, meta): @@ -378,7 +388,13 @@ async def check_mod_q_and_draft(tracker_class, meta, debug): if meta['unattended']: upload_to_thr = True else: - upload_to_thr = cli_ui.ask_yes_no(f"Upload to THR? {debug}", default=meta['unattended']) + try: + upload_to_thr = cli_ui.ask_yes_no( + f"Upload to THR? {debug}", + default=meta['unattended'] + ) + except (KeyboardInterrupt, EOFError): + sys.exit(1) # Exit immediately if upload_to_thr: console.print("Uploading to THR") # nable to get IMDB id/Youtube Link @@ -407,8 +423,15 @@ async def check_mod_q_and_draft(tracker_class, meta, debug): if meta['unattended']: upload_to_ptp = True else: - upload_to_ptp = cli_ui.ask_yes_no(f"Upload to {tracker}? {debug}", default=meta['unattended']) - if upload_to_ptp: + try: + upload_to_ptp = cli_ui.ask_yes_no( + f"Upload to {tracker}? {debug}", + default=meta['unattended'] + ) + except (KeyboardInterrupt, EOFError): + sys.exit(1) # Exit immediately + + if upload_to_ptp: # Ensure the variable is defined before this check console.print(f"Uploading to {tracker}") if meta.get('imdb_id', '0') == '0': imdb_id = cli_ui.ask_string("Unable to find IMDB id, please enter e.g.(tt1234567)") @@ -445,7 +468,14 @@ async def check_mod_q_and_draft(tracker_class, meta, debug): if meta['unattended']: upload_to_tracker = True else: - upload_to_tracker = cli_ui.ask_yes_no(f"Upload to {tracker_class.tracker}? {debug}", default=meta['unattended']) + try: + upload_to_ptp = cli_ui.ask_yes_no( + f"Upload to {tracker}? 
{debug}", + default=meta['unattended'] + ) + except (KeyboardInterrupt, EOFError): + sys.exit(1) # Exit immediately + if upload_to_tracker: console.print(f"Uploading to {tracker_class.tracker}") if check_banned_group(tracker_class.tracker, tracker_class.banned_groups, meta): @@ -509,7 +539,11 @@ def get_confirmation(meta): cli_ui.info_section(cli_ui.yellow, "Is this correct?") cli_ui.info(f"Name: {meta['name']}") - confirm = cli_ui.ask_yes_no("Correct?", default=False) + try: + confirm = cli_ui.ask_yes_no("Correct?", default=False) + except (KeyboardInterrupt, EOFError): + sys.exit(1) # Exit immediately + else: cli_ui.info(f"Name: {meta['name']}") confirm = True From b9302bb6f49c38dbeea18dc48f39f12c4e767935 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 7 Sep 2024 22:21:42 +1000 Subject: [PATCH 177/741] More graceful existing Fix some bbcode rendering in unit3d trackers Hopefully fix MTV warning for wrong host urls --- src/bbcode.py | 14 ++++++++++---- src/prep.py | 6 +++--- src/trackers/COMMON.py | 25 ++++++++++++++++--------- 3 files changed, 29 insertions(+), 16 deletions(-) diff --git a/src/bbcode.py b/src/bbcode.py index 755cea4ca..983469c4e 100644 --- a/src/bbcode.py +++ b/src/bbcode.py @@ -198,6 +198,12 @@ def clean_unit3d_description(self, desc, site): # Remove the [img] tag and its contents from the description desc = re.sub(rf"\[img[^\]]*\]{re.escape(img_url)}\[/img\]", '', desc, flags=re.IGNORECASE) + # Now, remove matching URLs from [URL] tags + for img in imagelist: + img_url = re.escape(img['img_url']) + desc = re.sub(rf"\[URL={img_url}\]\[/URL\]", '', desc, flags=re.IGNORECASE) + desc = re.sub(rf"\[URL={img_url}\]\[img[^\]]*\]{img_url}\[/img\]\[/URL\]", '', desc, flags=re.IGNORECASE) + # Filter out bot images from imagelist bot_image_urls = [ "https://blutopia.xyz/favicon.ico", # Example bot image URL @@ -236,10 +242,10 @@ def clean_unit3d_description(self, desc, site): desc = re.sub(bot_signature_regex, "", desc, flags=re.IGNORECASE | re.VERBOSE) desc = re.sub(r"\[center\].*Created by L4G's Upload Assistant.*\[\/center\]", "", desc, flags=re.IGNORECASE) - # Ensure no dangling tags and remove extra blank lines - desc = re.sub(r'\n\s*\n', '\n', desc) # Remove multiple consecutive blank lines - desc = re.sub(r'\n\n+', '\n\n', desc) # Ensure no excessive blank lines - desc = desc.strip() # Final cleanup of trailing newlines and spaces + # Remove leftover [img] or [URL] tags in the description + desc = re.sub(r"\[img\][\s\S]*?\[\/img\]", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"\[img=[\s\S]*?\]", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"\[URL=[\s\S]*?\]\[\/URL\]", "", desc, flags=re.IGNORECASE) # Strip trailing whitespace and newlines: desc = desc.rstrip() diff --git a/src/prep.py b/src/prep.py index c860718c8..f93e12faa 100644 --- a/src/prep.py +++ b/src/prep.py @@ -47,6 +47,7 @@ import aiohttp from PIL import Image import io + import sys except ModuleNotFoundError: console.print(traceback.print_exc()) console.print('[bold red]Missing Module Found. 
Please reinstall required dependancies.') @@ -77,8 +78,7 @@ async def prompt_user_for_confirmation(self, message: str) -> bool: return True return False except EOFError: - console.print("[bold red]Input was interrupted.") - return False + sys.exit(1) async def check_images_concurrently(self, imagelist): async def check_and_collect(image_dict): @@ -294,7 +294,7 @@ async def handle_image_list(self, meta, tracker_name): approved_image_hosts = ['ptpimg', 'imgbox'] # Check if the images are already hosted on an approved image host - if all(any(host in img for host in approved_image_hosts) for img in meta['image_list']): + if all(any(host in image['raw_url'] for host in approved_image_hosts) for image in meta['image_list']): image_list = meta['image_list'] # noqa #F841 else: console.print("[red]Warning: Some images are not hosted on an MTV approved image host. MTV will fail if you keep these images.") diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index a9fb5f536..b9a9270d9 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -4,6 +4,7 @@ import re import json import click +import sys from src.bbcode import BBCODE from src.console import console @@ -163,10 +164,13 @@ async def prompt_user_for_id_selection(self, tmdb=None, imdb=None, tvdb=None, fi console.print(f"Filename: {filename}") # Ensure filename is printed if available selection = input(f"Do you want to use these IDs from {tracker_name}? (Y/n): ").strip().lower() - if selection == '' or selection == 'y' or selection == 'yes': - return True - else: - return False + try: + if selection == '' or selection == 'y' or selection == 'yes': + return True + else: + return False + except (KeyboardInterrupt, EOFError): + sys.exit(1) async def prompt_user_for_confirmation(self, message): response = input(f"{message} (Y/n): ").strip().lower() @@ -246,15 +250,18 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, id=None, f if tmdb or imdb or tvdb: if not id: # Only prompt the user for ID selection if not searching by ID - if not await self.prompt_user_for_id_selection(tmdb, imdb, tvdb, file_name): - console.print("[yellow]User chose to skip based on IDs.[/yellow]") - return None, None, None, None, None, None, None, None, None + try: + if not await self.prompt_user_for_id_selection(tmdb, imdb, tvdb, file_name): + console.print("[yellow]User chose to skip based on IDs.[/yellow]") + return None, None, None, None, None, None, None, None, None + except (KeyboardInterrupt, EOFError): + sys.exit(1) if description: bbcode = BBCODE() description, imagelist = bbcode.clean_unit3d_description(description, torrent_url) console.print(f"[green]Successfully grabbed description from {tracker}") - console.print(f"[blue]Extracted description: [yellow]{description}") + console.print(f"[blue]Extracted description: [yellow]{description}", markup=False) # Allow user to edit or discard the description console.print("[cyan]Do you want to edit, discard or keep the description?[/cyan]") @@ -264,7 +271,7 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, id=None, f edited_description = click.edit(description) if edited_description: description = edited_description.strip() - console.print(f"[green]Final description after editing:[/green] {description}") + console.print(f"[green]Final description after editing:[/green] {description}", markup=False) elif edit_choice.lower() == 'd': description = None console.print("[yellow]Description discarded.[/yellow]") From 53727f36fc8743cfce7f78a2b5287fa1eb8c04c6 Mon Sep 17 
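The small change to the MTV host check above is a genuine bug fix: image_list holds dicts, and 'host in img' on a dict tests its keys, so the old expression could never match a hosted URL. Demonstrated standalone (sample URLs invented):

approved_image_hosts = ['ptpimg', 'imgbox']
image_list = [{'img_url': 'https://ptpimg.me/abc123.png', 'raw_url': 'https://ptpimg.me/abc123.png'}]

broken = all(any(host in img for host in approved_image_hosts) for img in image_list)
fixed = all(any(host in img['raw_url'] for host in approved_image_hosts) for img in image_list)
print(broken, fixed)  # False True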
00:00:00 2001 From: Audionut Date: Sun, 8 Sep 2024 00:21:32 +1000 Subject: [PATCH 178/741] Add OE filename/description parsing --- src/args.py | 14 ++++++++++++++ src/prep.py | 19 ++++++++++++++----- src/trackers/AITHER.py | 1 + src/trackers/COMMON.py | 3 ++- src/trackers/LST.py | 1 + src/trackers/OE.py | 1 + 6 files changed, 33 insertions(+), 6 deletions(-) diff --git a/src/args.py b/src/args.py index 58bf929c4..988c22715 100644 --- a/src/args.py +++ b/src/args.py @@ -46,6 +46,7 @@ def parse(self, args, meta): parser.add_argument('-blu', '--blu', nargs='*', required=False, help="BLU torrent id/link", type=str) parser.add_argument('-aither', '--aither', nargs='*', required=False, help="Aither torrent id/link", type=str) parser.add_argument('-lst', '--lst', nargs='*', required=False, help="LST torrent id/link", type=str) + parser.add_argument('-oe', '--oe', nargs='*', required=False, help="OE torrent id/link", type=str) parser.add_argument('-hdb', '--hdb', nargs='*', required=False, help="HDB torrent id/link", type=str) parser.add_argument('-d', '--desc', nargs='*', required=False, help="Custom Description (string)") parser.add_argument('-pb', '--desclink', nargs='*', required=False, help="Custom Description (link to hastebin/pastebin)") @@ -165,6 +166,19 @@ def parse(self, args, meta): console.print('[red]Continuing without --lst') else: meta['lst'] = value2 + elif key == 'oe': + if value2.startswith('http'): + parsed = urllib.parse.urlparse(value2) + try: + oepath = parsed.path + if oepath.endswith('/'): + oepath = oepath[:-1] + meta['oe'] = oepath.split('/')[-1] + except Exception: + console.print('[red]Unable to parse id from url') + console.print('[red]Continuing without --oe') + else: + meta['oe'] = value2 elif key == 'hdb': if value2.startswith('http'): parsed = urllib.parse.urlparse(value2) diff --git a/src/prep.py b/src/prep.py index f93e12faa..b09f485ef 100644 --- a/src/prep.py +++ b/src/prep.py @@ -6,6 +6,7 @@ from src.trackers.BLU import BLU from src.trackers.AITHER import AITHER from src.trackers.LST import LST +from src.trackers.OE import OE from src.trackers.HDB import HDB from src.trackers.COMMON import COMMON @@ -155,7 +156,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met manual_key = f"{tracker_key}_manual" found_match = False - if tracker_name in ["BLU", "AITHER", "LST"]: # Example for UNIT3D trackers + if tracker_name in ["BLU", "AITHER", "LST", "OE"]: if meta.get(tracker_key) is not None: console.print(f"[cyan]{tracker_name} ID found in meta, reusing existing ID: {meta[tracker_key]}[/cyan]") tracker_data = await COMMON(self.config).unit3d_torrent_info( @@ -446,6 +447,8 @@ async def gather_prep(self, meta, mode): specific_tracker = 'AITHER' elif meta.get('lst'): specific_tracker = 'LST' + elif meta.get('oe'): + specific_tracker = 'OE' # If a specific tracker is found, only process that one if specific_tracker: @@ -475,6 +478,12 @@ async def gather_prep(self, meta, mode): if match: found_match = True + elif specific_tracker == 'OE' and str(self.config['TRACKERS'].get('OE', {}).get('useAPI')).lower() == "true": + oe = OE(config=self.config) + meta, match = await self.update_metadata_from_tracker('OE', oe, meta, search_term, search_file_folder) + if match: + found_match = True + elif specific_tracker == 'HDB' and str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": hdb = HDB(config=self.config) meta, match = await self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder) @@ -1324,8 
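The new --oe argument parses an id out of a pasted URL the same way the other tracker arguments do. The core of that parsing, runnable on its own (the example URL is fabricated):

import urllib.parse

def id_from_url(value):
    # take the last path segment, tolerating a trailing slash
    if not value.startswith('http'):
        return value
    path = urllib.parse.urlparse(value).path
    if path.endswith('/'):
        path = path[:-1]
    return path.split('/')[-1]

print(id_from_url('https://onlyencodes.cc/torrents/12345/'))  # 12345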
+1333,8 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non .global_args('-loglevel', loglevel) .run(quiet=debug) ) - except Exception: - console.print(traceback.format_exc()) + except (KeyboardInterrupt, Exception): + sys.exit(1) self.optimize_images(image_path) if os.path.getsize(Path(image_path)) <= 75000: @@ -1394,8 +1403,8 @@ def optimize_images(self, image): oxipng.optimize(image, level=6) else: oxipng.optimize(image, level=3) - except Exception: - pass + except (KeyboardInterrupt, Exception): + sys.exit(1) return """ diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 0dc0c05ab..7da76fe81 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -24,6 +24,7 @@ def __init__(self, config): self.source_flag = 'Aither' self.search_url = 'https://aither.cc/api/torrents/filter' self.upload_url = 'https://aither.cc/api/torrents/upload' + self.torrent_url = 'https://aither.cc/api/torrents/' self.signature = "\n[center][url=https://aither.cc/forums/topics/1349/posts/24958]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = ['4K4U', 'AROMA', 'd3g', 'edge2020', 'EMBER', 'EVO', 'FGT', 'FreetheFish', 'Hi10', 'HiQVE', 'ION10', 'iVy', 'Judas', 'LAMA', 'MeGusta', 'nikt0', 'OEPlus', 'OFT', 'OsC', 'PYC', 'QxR', 'Ralphy', 'RARBG', 'RetroPeeps', 'SAMPA', 'Sicario', 'Silence', 'SkipTT', 'SPDVD', 'STUTTERSHIT', 'SWTYBLZ', 'TAoE', 'TGx', 'Tigole', 'TSP', 'TSPxL', 'VXT', 'Weasley[HONE]', diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index b9a9270d9..19cc873ce 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -197,11 +197,12 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, id=None, f return None, None, None, None, None, None, None, None, None response = requests.get(url=url, params=params) + # console.print(f"[blue]Raw API Response: {response}[/blue]") try: json_response = response.json() - # console.print(f"[blue]Raw API Response: {json_response}[/blue]") + # console.print(f"[blue]Raw API Response: {json_response}[/blue]", markup=False) except ValueError: return None, None, None, None, None, None, None, None, None diff --git a/src/trackers/LST.py b/src/trackers/LST.py index 10f667251..7278f7ed4 100644 --- a/src/trackers/LST.py +++ b/src/trackers/LST.py @@ -24,6 +24,7 @@ def __init__(self, config): self.source_flag = 'LST.GG' self.upload_url = 'https://lst.gg/api/torrents/upload' self.search_url = 'https://lst.gg/api/torrents/filter' + self.torrent_url = 'https://lst.gg/api/torrents/' self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = ['aXXo', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'DNL', 'FaNGDiNG0', 'HD2DVD', 'HDTime', 'ION10', 'iPlanet', 'KiNGDOM', 'mHD', 'mSD', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'OFT', 'PRODJi', 'SANTi', 'STUTTERSHIT', 'ViSION', 'VXT', 'WAF', diff --git a/src/trackers/OE.py b/src/trackers/OE.py index a1b4d3156..8cb819518 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -23,6 +23,7 @@ def __init__(self, config): self.source_flag = 'OE' self.search_url = 'https://onlyencodes.cc/api/torrents/filter' self.upload_url = 'https://onlyencodes.cc/api/torrents/upload' + self.torrent_url = 'https://onlyencodes.cc/api/torrents/' self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = ['0neshot', '3LT0N', '4K4U', '4yEo', '$andra', '[Oj]', 'AFG', 'AkihitoSubs', 'AniHLS', 
'Anime Time', 'AnimeRG', 'AniURL', 'AR', 'AROMA', 'ASW', 'aXXo', 'BakedFish', 'BiTOR', 'BHDStudio', 'BRrip', 'bonkai', 'Cleo', 'CM8', 'C4K', 'CrEwSaDe', 'core', 'd3g', 'DDR', 'DeadFish', 'DeeJayAhmed', 'DNL', 'ELiTE', 'EMBER', 'eSc', 'EVO', 'EZTV', 'FaNGDiNG0', 'FGT', 'fenix', 'FUM', 'FRDS', 'FROZEN', 'GalaxyTV', 'GalaxyRG', 'GERMini', 'Grym', 'GrymLegacy', 'HAiKU', 'HD2DVD', 'HDTime', 'Hi10', 'ION10', 'iPlanet', 'JacobSwaggedUp', 'JIVE', 'Judas', 'KiNGDOM', 'LAMA', 'Leffe', 'LiGaS', 'LOAD', 'LycanHD', 'MeGusta,' 'MezRips,' 'mHD,' 'Mr.Deadpool', 'mSD', 'NemDiggers', 'neoHEVC', 'NeXus', 'NhaNc3', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'NOIVTC', 'pahe.in', 'PlaySD', 'playXD', 'PRODJi', 'ProRes', 'project-gxs', 'PSA', 'QaS', 'Ranger', 'RAPiDCOWS', 'RARBG', 'Raze', 'RCDiVX', 'RDN', 'Reaktor', 'REsuRRecTioN', 'RMTeam', 'ROBOTS', 'rubix', 'SANTi', 'SHUTTERSHIT', 'SpaceFish', 'SPASM', 'SSA', 'TBS', 'Telly,' 'Tenrai-Sensei,' 'TERMiNAL,' 'TM', 'topaz', 'TSP', 'TSPxL', 'Trix', 'URANiME', 'UTR', 'VipapkSudios', 'ViSION', 'WAF', 'Wardevil', 'x0r', 'xRed', 'XS', 'YakuboEncodes', 'YIFY', 'YTS', 'YuiSubs', 'ZKBL', 'ZmN', 'ZMNT'] pass From 9c38e5300e09265c5adc95573a79a58614537cdc Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 8 Sep 2024 00:36:26 +1000 Subject: [PATCH 179/741] Only print MTV warning if MTV selected --- src/prep.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index b09f485ef..bc0dabd6c 100644 --- a/src/prep.py +++ b/src/prep.py @@ -298,7 +298,10 @@ async def handle_image_list(self, meta, tracker_name): if all(any(host in image['raw_url'] for host in approved_image_hosts) for image in meta['image_list']): image_list = meta['image_list'] # noqa #F841 else: - console.print("[red]Warning: Some images are not hosted on an MTV approved image host. MTV will fail if you keep these images.") + default_trackers = self.config['TRACKERS'].get('default_trackers', '') + trackers_list = [tracker.strip() for tracker in default_trackers.split(',')] + if 'MTV' in trackers_list or 'MTV' in meta.get('trackers', ''): + console.print("[red]Warning: Some images are not hosted on an MTV approved image host. 
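One pitfall worth flagging in the OE banned-group list quoted above: entries such as 'MeGusta,' 'MezRips,' 'mHD,' have the comma inside the quotes, and Python concatenates adjacent string literals, silently merging several group names into one unmatchable entry. A toy demonstration:

groups = ['LycanHD', 'MeGusta,' 'MezRips,' 'mHD,' 'Mr.Deadpool', 'mSD']
print(len(groups))  # 3, not 6
print(groups[1])    # MeGusta,MezRips,mHD,Mr.Deadpool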
MTV will fail if you keep these images.") keep_images = await self.prompt_user_for_confirmation(f"Do you want to keep the images found on {tracker_name}?") if not keep_images: From edfe4ee97ebe76692f7fd45767a8446d02f545e6 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 8 Sep 2024 09:12:51 +1000 Subject: [PATCH 180/741] Bump docker python base --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 61a00d426..743714237 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM python:3.11 +FROM python:3.12 # Update the package list and install system dependencies including mono RUN apt-get update && \ From 349d1b14783d6addf94b4b6ea37b7349ad2cc08a Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 8 Sep 2024 09:41:09 +1000 Subject: [PATCH 181/741] Fix docker bt_backup folder checking Thanks nx --- src/clients.py | 57 +++++++++++++++++++++++++++++--------------------- 1 file changed, 33 insertions(+), 24 deletions(-) diff --git a/src/clients.py b/src/clients.py index 5972c8cd4..1b97da004 100644 --- a/src/clients.py +++ b/src/clients.py @@ -111,11 +111,15 @@ async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client torrent_path = torrent_path.replace(torrenthash.upper(), torrenthash) if meta['debug']: - console.log(f"[DEBUG] Torrent path after normalization: {torrent_path}") + console.log(f"Torrent path after normalization: {torrent_path}") # Check if torrent file exists if os.path.exists(torrent_path): - torrent = Torrent.read(torrent_path) + try: + torrent = Torrent.read(torrent_path) + except Exception as e: + console.print(f'[bold red]Error reading torrent file: {e}') + return valid, torrent_path # Reuse if disc and basename matches or --keep-folder was specified if meta.get('is_disc', None) is not None or (meta['keep_folder'] and meta['isdir']): @@ -123,7 +127,7 @@ async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client if os.path.basename(meta['path']) in torrent_filepath: valid = True if meta['debug']: - console.log(f"[DEBUG] Torrent is valid based on disc/basename or keep-folder: {valid}") + console.log(f"Torrent is valid based on disc/basename or keep-folder: {valid}") # If one file, check for folder if len(torrent.files) == len(meta['filelist']) == 1: @@ -133,7 +137,7 @@ async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client else: wrong_file = True if meta['debug']: - console.log(f"[DEBUG] Single file match status: valid={valid}, wrong_file={wrong_file}") + console.log(f"Single file match status: valid={valid}, wrong_file={wrong_file}") # Check if number of files matches number of videos elif len(torrent.files) == len(meta['filelist']): @@ -142,17 +146,16 @@ async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client local_path, remote_path = await self.remote_path_map(meta) if local_path.lower() in meta['path'].lower() and local_path.lower() != remote_path.lower(): - actual_filepath = torrent_path.replace(local_path, remote_path) - actual_filepath = torrent_path.replace(os.sep, '/') + actual_filepath = actual_filepath.replace(local_path, remote_path).replace(os.sep, '/') if meta['debug']: - console.log(f"[DEBUG] torrent_filepath: {torrent_filepath}") - console.log(f"[DEBUG] actual_filepath: {actual_filepath}") + console.log(f"Torrent_filepath: {torrent_filepath}") + console.log(f"Actual_filepath: {actual_filepath}") if torrent_filepath in actual_filepath: valid = True if meta['debug']: - console.log(f"[DEBUG] Multiple file match 
status: valid={valid}") + console.log(f"Multiple file match status: valid={valid}") else: console.print(f'[bold yellow]{torrent_path} was not found') @@ -160,23 +163,29 @@ async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client # Additional checks if the torrent is valid so far if valid: if os.path.exists(torrent_path): - reuse_torrent = Torrent.read(torrent_path) - if meta['debug']: - console.log(f"[DEBUG] Checking piece size and count: pieces={reuse_torrent.pieces}, piece_size={reuse_torrent.piece_size}") - - if (reuse_torrent.pieces >= 7000 and reuse_torrent.piece_size < 8388608) or (reuse_torrent.pieces >= 4000 and reuse_torrent.piece_size < 4194304): - err_print = "[bold yellow]Too many pieces exist in current hash. REHASHING" - valid = False - elif reuse_torrent.piece_size < 32768: - err_print = "[bold yellow]Piece size too small to reuse" - valid = False - elif wrong_file: - err_print = "[bold red] Provided .torrent has files that were not expected" + try: + reuse_torrent = Torrent.read(torrent_path) + if meta['debug']: + console.log(f"Checking piece size and count: pieces={reuse_torrent.pieces}, piece_size={reuse_torrent.piece_size}") + + # Piece size and count validations + if (reuse_torrent.pieces >= 7000 and reuse_torrent.piece_size < 8388608) or (reuse_torrent.pieces >= 4000 and reuse_torrent.piece_size < 4194304): + err_print = "[bold yellow]Too many pieces exist in current hash. REHASHING" + valid = False + elif reuse_torrent.piece_size < 32768: + err_print = "[bold yellow]Piece size too small to reuse" + valid = False + elif wrong_file: + err_print = "[bold red] Provided .torrent has files that were not expected" + valid = False + else: + err_print = f'[bold green]REUSING .torrent with infohash: [bold yellow]{torrenthash}' + except Exception as e: + console.print(f'[bold red]Error checking reuse torrent: {e}') valid = False - else: - err_print = f'[bold green]REUSING .torrent with infohash: [bold yellow]{torrenthash}' + if meta['debug']: - console.log(f"[DEBUG] Final validity after piece checks: valid={valid}") + console.log(f"Final validity after piece checks: valid={valid}") else: err_print = '[bold yellow]Unwanted Files/Folders Identified' From ea1420998bcd8ba49c5f6a8575486fbfa25fb705 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 8 Sep 2024 10:59:56 +1000 Subject: [PATCH 182/741] Cleanup --- README.md | 8 +++++--- data/example-config.py | 8 ++++---- upload.py | 39 ++++++++++----------------------------- 3 files changed, 19 insertions(+), 36 deletions(-) diff --git a/README.md b/README.md index 86f2a571f..1a1b9b00f 100644 --- a/README.md +++ b/README.md @@ -8,14 +8,15 @@ A simple tool to take the work out of uploading. - Generates and Parses MediaInfo/BDInfo. - Generates and Uploads screenshots. - Uses srrdb to fix scene filenames - - Can grab descriptions from PTP (automatically on filename match or arg) / BLU (arg) + - Can grab descriptions from PTP/BLU (automatically on filename match or arg) / Aither/LST/OE (with arg) + - Can strip existing screenshots from descriptions to skip screenshot generation and uploading - Obtains TMDb/IMDb/MAL identifiers. - Converts absolute to season episode numbering for Anime - Generates custom .torrents without useless top level folders/nfos. 
- Can re-use existing torrents instead of hashing new - Generates proper name for your upload using Mediainfo/BDInfo and TMDb/IMDb conforming to site rules - Checks for existing releases already on site - - Uploads to PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN/RTF/OTW/FNP/CBR/UTP/HDB/AL/SHRI + - Uploads to PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN/RTF/OTW/FNP/CBR/UTP/HDB/AL/SHRI/OE/TL/BHDTV/HDT/JPTV/LT/MTV/PTER/TDC/TTG/UTP - Adds to your client with fast resume, seeding instantly (rtorrent/qbittorrent/deluge/watch folder) - ALL WITH MINIMAL INPUT! - Currently works with .mkv/.mp4/Blu-ray/DVD/HD-DVDs @@ -33,7 +34,7 @@ Built with updated BDInfoCLI from https://github.com/rokibhasansagar/BDInfoCLI-n - Also needs MediaInfo and ffmpeg installed on your system - On Windows systems, ffmpeg must be added to PATH (https://windowsloop.com/install-ffmpeg-windows-10/) - On linux systems, get it from your favorite package manager - - Clone the repo to your system `git clone https://github.com/Audionut/Upload-Assistant.git` + - Clone the repo to your system `git clone https://github.com/Audionut/Upload-Assistant.git` - or download a zip of the source - Copy and Rename `data/example-config.py` to `data/config.py` - Edit `config.py` to use your information (more detailed information in the [wiki](https://github.com/Audionut/Upload-Assistant/wiki)) - tmdb_api (v3) key can be obtained from https://developers.themoviedb.org/3/getting-started/introduction @@ -50,6 +51,7 @@ Built with updated BDInfoCLI from https://github.com/rokibhasansagar/BDInfoCLI-n - To update first navigate into the Upload-Assistant directory: `cd Upload-Assistant` - Run a `git pull` to grab latest updates - Run `python3 -m pip install --user -U -r requirements.txt` to ensure dependencies are up to date + - Or download a fresh zip and overwrite existing files ## **CLI Usage:** `python3 upload.py /downloads/path/to/content --args` diff --git a/data/example-config.py b/data/example-config.py index e0d222cba..ce739e71c 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -37,7 +37,7 @@ "TRACKERS": { # Which trackers do you want to upload to? 
# Available tracker: BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL, HDB - # Remove the ones not used to save being asked everytime + # Remove the trackers from the default_trackers list that are not used, to save being asked everytime "default_trackers": "BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL, HDB", "BLU": { @@ -241,9 +241,9 @@ }, }, - # enable_search to true will automatically try and find a suitable hash to save having to rehash when creating torrents + # enable_search to True will automatically try and find a suitable hash to save having to rehash when creating torrents # Should use the qbit API, but will also use the torrent_storage_dir to find suitable hashes - # If you find issue, use the "--debug" command option to print out some related details + # If you find issue, use the "--debug" argument to print out some related details "TORRENT_CLIENTS": { # Name your torrent clients here, for example, this example is named "Client1" and is set as default_torrent_client above # All options relate to the webui, make sure you have the webui secured if it has WAN access @@ -255,7 +255,7 @@ "qbit_port": "8080", "qbit_user": "username", "qbit_pass": "password", - # "torrent_storage_dir": "path/to/BT_backup folder" + # "torrent_storage_dir": "path/to/BT_backup folder" ## use double-backslash on windows eg: "C:\\client\\backup" # Remote path mapping (docker/etc.) CASE SENSITIVE # "local_path": "/LocalPath", diff --git a/upload.py b/upload.py index 9a3339e08..249e257b7 100644 --- a/upload.py +++ b/upload.py @@ -247,8 +247,8 @@ async def do_the_thing(base_dir): ####### Upload to Trackers ####### # noqa #F266 #################################### common = COMMON(config=config) - api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM', 'LCD', 'HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'HDB', 'SHRI', 'LST', 'BHD'] - http_trackers = ['TTG', 'FL', 'PTER', 'HDT', 'MTV'] + api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM', 'LCD', 'HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'SHRI', 'LST', 'BHD', 'TL'] + http_trackers = ['HDB', 'TTG', 'FL', 'PTER', 'HDT', 'MTV'] tracker_class_map = { 'BLU': BLU, 'BHD': BHD, 'AITHER': AITHER, 'STC': STC, 'R4E': R4E, 'THR': THR, 'STT': STT, 'HP': HP, 'PTP': PTP, 'RF': RF, 'SN': SN, 'ACM': ACM, 'HDB': HDB, 'LCD': LCD, 'TTG': TTG, 'LST': LST, 'HUNO': HUNO, 'FL': FL, 'LT': LT, 'NBL': NBL, 'ANT': ANT, 'PTER': PTER, 'JPTV': JPTV, @@ -322,16 +322,17 @@ async def check_mod_q_and_draft(tracker_class, meta, debug): if check_banned_group(tracker_class.tracker, tracker_class.banned_groups, meta): continue - # Perform the existing checks for dupes - if tracker == "RTF": - await tracker_class.api_test(meta) + # Perform the existing checks for dupes except TL + if tracker != "TL": + if tracker == "RTF": + await tracker_class.api_test(meta) - dupes = await tracker_class.search_existing(meta) - dupes = await common.filter_dupes(dupes, meta) - meta = dupe_check(dupes, meta) + dupes = await tracker_class.search_existing(meta) + dupes = await common.filter_dupes(dupes, meta) + meta = dupe_check(dupes, meta) # Proceed with upload if the meta is set to upload - if meta['upload']: + if tracker == "TL" or 
meta.get('upload', False): await tracker_class.upload(meta) if tracker == 'SN': await asyncio.sleep(16) @@ -463,26 +464,6 @@ async def check_mod_q_and_draft(tracker_class, meta, debug): except Exception: console.print(traceback.print_exc()) - if tracker == "TL": - tracker_class = tracker_class_map[tracker](config=config) - if meta['unattended']: - upload_to_tracker = True - else: - try: - upload_to_ptp = cli_ui.ask_yes_no( - f"Upload to {tracker}? {debug}", - default=meta['unattended'] - ) - except (KeyboardInterrupt, EOFError): - sys.exit(1) # Exit immediately - - if upload_to_tracker: - console.print(f"Uploading to {tracker_class.tracker}") - if check_banned_group(tracker_class.tracker, tracker_class.banned_groups, meta): - continue - await tracker_class.upload(meta) - await client.add_to_client(meta, tracker_class.tracker) - def get_confirmation(meta): if meta['debug'] is True: From 24ae426ef51787c7c956b28f2af1f0054d7f04dd Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 8 Sep 2024 12:43:31 +1000 Subject: [PATCH 183/741] Prepare for master --- .github/workflows/docker-image.yml | 1 - data/example-config.py | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index 7e839bcb7..84e6adfe1 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -5,7 +5,6 @@ on: branches: - master - develop - - upload-refactor env: REGISTRY: ghcr.io diff --git a/data/example-config.py b/data/example-config.py index ce739e71c..0043aad12 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -45,7 +45,7 @@ "api_key": "BLU api key", "announce_url": "https://blutopia.cc/announce/customannounceurl", # "anon" : False, - # "modq" : False + # "modq" : False ## Not working yet }, "BHD": { "api_key": "BHD api key", @@ -73,7 +73,7 @@ "api_key": "AITHER api key", "announce_url": "https://aither.cc/announce/customannounceurl", # "anon" : False, - # "modq" : False + # "modq" : False ## Not working yet }, "R4E": { "api_key": "R4E api key", From decfa5e1668d63bd252973dc0a36e457fe388893 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 8 Sep 2024 17:35:22 +1000 Subject: [PATCH 184/741] Fix PTP miniseries images --- src/prep.py | 17 ++++++++++++----- src/trackers/PTP.py | 11 +++++++++-- 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/src/prep.py b/src/prep.py index bc0dabd6c..179855297 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1259,14 +1259,16 @@ def _is_vob_good(n, loops, num_screens): smallest = screens os.remove(smallest) - def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=None): + def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=None, force_screenshots=False): # Ensure the image list is initialized and preserve existing images if 'image_list' not in meta: meta['image_list'] = [] # Check if there are already at least 3 image links in the image list existing_images = [img for img in meta['image_list'] if isinstance(img, dict) and img.get('img_url', '').startswith('http')] - if len(existing_images) >= 3: + + # Skip taking screenshots if there are already 3 images and force_screenshots is False + if len(existing_images) >= 3 and not force_screenshots: console.print("[yellow]There are already at least 3 images in the image list. 
Skipping additional screenshots.") return @@ -1375,9 +1377,14 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non # Remove the smallest image if there are more than needed if len(meta['image_list']) > self.screens: - smallest = min(meta['image_list'], key=lambda x: os.path.getsize(x['img_url'])) - os.remove(smallest['img_url']) - meta['image_list'].remove(smallest) + local_images = [img for img in meta['image_list'] if not img['img_url'].startswith('http')] + + if local_images: + smallest = min(local_images, key=lambda x: os.path.getsize(x['img_url'])) + os.remove(smallest['img_url']) + meta['image_list'].remove(smallest) + else: + console.print("[yellow]No local images found to remove.") def valid_ss_time(self, ss_times, num_screens, length): valid_time = False diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 66d5d30c7..a2f992e98 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -665,6 +665,7 @@ async def edit_desc(self, meta): elif len(meta.get('filelist', [])) >= 1: for i in range(len(meta['filelist'])): file = meta['filelist'][i] + if i == 0: # Add This line for all web-dls if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '' and meta.get('description', None) is None and self.web_source is True: @@ -673,24 +674,30 @@ async def edit_desc(self, meta): else: # Export Mediainfo mi_dump = MediaInfo.parse(file, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) - # mi_dump = mi_dump.replace(file, os.path.basename(file)) with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/TEMP_PTP_MEDIAINFO.txt", "w", newline="", encoding="utf-8") as f: f.write(mi_dump) mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/TEMP_PTP_MEDIAINFO.txt", "r", encoding="utf-8").read() + # Generate and upload screens for other files - s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, 2)) + # Add force_screenshots=True to ensure screenshots are taken even if images exist + s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, 2, True)) s.start() while s.is_alive() is True: await asyncio.sleep(3) + + # Upload new screenshots new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") images, dummy = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, {}) + # Write MediaInfo and screenshots to the description desc.write(f"[mediainfo]{mi_dump}[/mediainfo]\n") + if i == 0: base2ptp = self.convert_bbcode(base) if base2ptp.strip() != "": desc.write(base2ptp) desc.write("\n\n") + if len(images) > 0: for each in range(len(images[:int(meta['screens'])])): raw_url = images[each]['raw_url'] From 1a9fd38d9eae980d74caab2aaa13ef92e578a860 Mon Sep 17 00:00:00 2001 From: SiskoUrso <91812199+SiskoUrso@users.noreply.github.com> Date: Tue, 10 Sep 2024 02:05:03 -0500 Subject: [PATCH 185/741] Added new tracker PSS --- README.md | 2 +- data/example-config.py | 9 ++- src/trackers/PSS.py | 179 +++++++++++++++++++++++++++++++++++++++++ upload.py | 5 +- 4 files changed, 190 insertions(+), 5 deletions(-) create mode 100644 src/trackers/PSS.py diff --git a/README.md b/README.md index 1a1b9b00f..d14f5b179 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ A simple tool to take the work out of uploading. 
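Circling back to the PTP miniseries fix above: prep.screenshots() now reuses existing hosted images unless the caller forces fresh ones, which the PTP per-file path does. The guard, reduced to a runnable sketch (sample URLs invented; the threshold of three images mirrors the hunk):

def should_take_screenshots(image_list, force_screenshots=False):
    existing = [img for img in image_list
                if isinstance(img, dict) and img.get('img_url', '').startswith('http')]
    return force_screenshots or len(existing) < 3

images = [{'img_url': f'https://ptpimg.me/{n}.png'} for n in range(3)]
print(should_take_screenshots(images))                          # False: reuse
print(should_take_screenshots(images, force_screenshots=True))  # True: PTP per-file shots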
- Can re-use existing torrents instead of hashing new - Generates proper name for your upload using Mediainfo/BDInfo and TMDb/IMDb conforming to site rules - Checks for existing releases already on site - - Uploads to PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN/RTF/OTW/FNP/CBR/UTP/HDB/AL/SHRI/OE/TL/BHDTV/HDT/JPTV/LT/MTV/PTER/TDC/TTG/UTP + - Uploads to PTP/BLU/BHD/Aither/THR/STC/PSS/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN/RTF/OTW/FNP/CBR/UTP/HDB/AL/SHRI/OE/TL/BHDTV/HDT/JPTV/LT/MTV/PTER/TDC/TTG/UTP - Adds to your client with fast resume, seeding instantly (rtorrent/qbittorrent/deluge/watch folder) - ALL WITH MINIMAL INPUT! - Currently works with .mkv/.mp4/Blu-ray/DVD/HD-DVDs diff --git a/data/example-config.py b/data/example-config.py index 0043aad12..27d938271 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -36,9 +36,9 @@ "TRACKERS": { # Which trackers do you want to upload to? - # Available tracker: BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL, HDB + # Available tracker: BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL, HDB, PSS # Remove the trackers from the default_trackers list that are not used, to save being asked everytime - "default_trackers": "BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL, HDB", + "default_trackers": "BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL, HDB, PSS", "BLU": { "useAPI": False, # Set to True if using BLU @@ -235,6 +235,11 @@ "announce_url": "https://shareisland.org/announce/customannounceurl", # "anon" : "False" }, + "PSS" : { + "api_key" : "PSS api key", + "announce_url" : "https://privatesilverscreen.cc/announce/customannounceurl", + # "anon" : False + }, "MANUAL": { # Uncomment and replace link with filebrowser (https://github.com/filebrowser/filebrowser) link to the Upload-Assistant directory, this will link to your filebrowser instead of uploading to uguu.se # "filebrowser" : "https://domain.tld/filebrowser/files/Upload-Assistant/" diff --git a/src/trackers/PSS.py b/src/trackers/PSS.py new file mode 100644 index 000000000..4ca9f9fe8 --- /dev/null +++ b/src/trackers/PSS.py @@ -0,0 +1,179 @@ +# -*- coding: utf-8 -*- +# import discord +import asyncio +import requests +import platform +from str2bool import str2bool + +from src.trackers.COMMON import COMMON +from src.console import console + + +class PSS(): + """ + Edit for Tracker: + Edit BASE.torrent with announce and source + Check for duplicates + Set type/category IDs + Upload + """ + + ############################################################### + ######## EDIT ME ######## noqa E266 + ############################################################### + + # ALSO EDIT CLASS NAME ABOVE + + def __init__(self, config): + self.config = config + self.tracker = 'PSS' + self.source_flag = 'PSS' + self.upload_url = 'https://privatesilverscreen.cc/api/torrents/upload' + self.search_url = 'https://privatesilverscreen.cc/api/torrents/filter' + self.signature = '\n[center][url=https://privatesilverscreen.cc/pages/1]Please Seed[/url][/center]' + self.banned_groups = ['4K4U', 'AROMA', 'd3g', 'edge2020', 
'EMBER', 'EVO', 'FGT', 'NeXus', 'ION10', 'iVy', 'Judas', 'LAMA', 'MeGusta', 'nikt0', 'OEPlus', 'OFT', 'OsC', 'PYC', + 'QxR', 'Ralphy', 'RARBG', 'RetroPeeps', 'SAMPA', 'Sicario', 'Silence', 'STUTTERSHIT', 'Tigole', 'TSP', 'TSPxL','Will1869', 'x0r', 'YIFY', 'core', 'ZMNT', + 'msd', 'nikt0', 'aXXo', 'BRrip', 'CM8', 'CrEwSaDe', 'DNL', 'FaNGDiNG0', 'FRDS', 'HD2DVD', 'HDTime', 'Leffe', 'mHD', 'mSD', 'nHD', 'nSD', 'NhaNc3', 'PRODJi', + 'RDN', 'SANTi', 'ViSION', 'WAF', 'YTS', 'FROZEN', 'UTR', 'Grym', 'GrymLegacy', 'CK4', 'ProRes', 'MezRips', 'GalaxyRG', 'RCDiVX', 'LycanHD'] + pass + + async def get_cat_id(self, category_name): + category_id = { + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '0') + return category_id + + async def get_type_id(self, type): + type_id = { + 'DISC': '1', + 'REMUX': '2', + 'ENCODE': '3', + 'WEBDL': '4', + 'WEBRIP': '5', + 'HDTV': '6', + }.get(type, '0') + return type_id + + async def get_res_id(self, resolution): + resolution_id = { + '4320p': '1', + '2160p': '2', + '1080p': '3', + '1080i': '4', + '720p': '5', + '576p': '6', + '576i': '7', + '480p': '8', + '480i': '9' + }.get(resolution, '10') + return resolution_id + + ############################################################### + ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### noqa E266 + ############################################################### + + async def upload(self, meta): + common = COMMON(config=self.config) + await common.edit_torrent(meta, self.tracker, self.source_flag) + cat_id = await self.get_cat_id(meta['category']) + type_id = await self.get_type_id(meta['type']) + resolution_id = await self.get_res_id(meta['resolution']) + await common.unit3d_edit_desc(meta, self.tracker, self.signature) + region_id = await common.unit3d_region_ids(meta.get('region')) + distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + anon = 0 + else: + anon = 1 + + if meta['bdinfo'] is not None: + mi_dump = None + bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + else: + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + bd_dump = None + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') + files = {'torrent': open_torrent} + data = { + 'name': meta['name'], + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, + } + # Internal + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: + if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + data['internal'] = 1 + + if region_id != 0: + data['region_id'] = region_id + if distributor_id != 0: + data['distributor_id'] = distributor_id + if meta.get('category') == "TV": + data['season_number'] = 
meta.get('season_int', '0') + data['episode_number'] = meta.get('episode_int', '0') + headers = { + 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + } + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + } + + if meta['debug'] is False: + response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + try: + console.print(response.json()) + except Exception: + console.print("It may have uploaded, go check") + return + else: + console.print("[cyan]Request Data:") + console.print(data) + open_torrent.close() + + async def search_existing(self, meta): + dupes = [] + console.print("[yellow]Searching for existing torrents on site...") + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" + } + if meta.get('edition', "") != "": + params['name'] = params['name'] + f" {meta['edition']}" + try: + response = requests.get(url=self.search_url, params=params) + response = response.json() + for each in response['data']: + result = [each][0]['attributes']['name'] + # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() + # if difference >= 0.05: + dupes.append(result) + except Exception: + console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + await asyncio.sleep(5) + + return dupes diff --git a/upload.py b/upload.py index 249e257b7..d3984f5f3 100644 --- a/upload.py +++ b/upload.py @@ -39,6 +39,7 @@ from src.trackers.UTP import UTP from src.trackers.AL import AL from src.trackers.SHRI import SHRI +from src.trackers.PSS import PSS import json from pathlib import Path import asyncio @@ -247,12 +248,12 @@ async def do_the_thing(base_dir): ####### Upload to Trackers ####### # noqa #F266 #################################### common = COMMON(config=config) - api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM', 'LCD', 'HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'SHRI', 'LST', 'BHD', 'TL'] + api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM', 'LCD', 'HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'SHRI', 'LST', 'BHD', 'TL', 'PSS'] http_trackers = ['HDB', 'TTG', 'FL', 'PTER', 'HDT', 'MTV'] tracker_class_map = { 'BLU': BLU, 'BHD': BHD, 'AITHER': AITHER, 'STC': STC, 'R4E': R4E, 'THR': THR, 'STT': STT, 'HP': HP, 'PTP': PTP, 'RF': RF, 'SN': SN, 'ACM': ACM, 'HDB': HDB, 'LCD': LCD, 'TTG': TTG, 'LST': LST, 'HUNO': HUNO, 'FL': FL, 'LT': LT, 'NBL': NBL, 'ANT': ANT, 'PTER': PTER, 'JPTV': JPTV, - 'TL': TL, 'TDC': TDC, 'HDT': HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF': RTF, 'OTW': OTW, 'FNP': FNP, 'CBR': CBR, 'UTP': UTP, 'AL': AL, 'SHRI': SHRI} + 'TL': TL, 'TDC': TDC, 'HDT': HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF': RTF, 'OTW': OTW, 'FNP': FNP, 'CBR': CBR, 'UTP': UTP, 'AL': AL, 'SHRI': SHRI, 'PSS': PSS} tracker_capabilities = { 'LST': {'mod_q': True, 'draft': True}, From 8a798cb281e2b6c3b3c1ed0cec825a75a113e759 Mon Sep 17 00:00:00 2001 From: SiskoUrso <91812199+SiskoUrso@users.noreply.github.com> Date: Tue, 10 Sep 2024 11:39:30 -0500 Subject: [PATCH 186/741] added "'8640p':'10'," back to resolution_id --- src/trackers/PSS.py | 1 + 1 
file changed, 1 insertion(+) diff --git a/src/trackers/PSS.py b/src/trackers/PSS.py index 4ca9f9fe8..01401b2b4 100644 --- a/src/trackers/PSS.py +++ b/src/trackers/PSS.py @@ -57,6 +57,7 @@ async def get_type_id(self, type): async def get_res_id(self, resolution): resolution_id = { + '8640p':'10', '4320p': '1', '2160p': '2', '1080p': '3', From 9513a29c12e60a042ce899d52ea9c77c121b1d65 Mon Sep 17 00:00:00 2001 From: LostRager Date: Tue, 10 Sep 2024 20:27:52 +0200 Subject: [PATCH 187/741] Update requirements.txt --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 241b76116..743fbd895 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ torf guessit ffmpeg-python -pymediainfo +pymediainfo==6.0.1 tmdbsimple anitopy cli-ui @@ -22,4 +22,4 @@ pyotp str2bool click aiohttp -Pillow \ No newline at end of file +Pillow From 2cff2eac0cb7150f77b355cd427668bd94265bd7 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 11 Sep 2024 17:49:48 +1000 Subject: [PATCH 188/741] Lint --- README.md | 2 +- data/example-config.py | 6 +++--- src/trackers/PSS.py | 16 +++------------- 3 files changed, 7 insertions(+), 17 deletions(-) diff --git a/README.md b/README.md index d14f5b179..544a57612 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ A simple tool to take the work out of uploading. - Can re-use existing torrents instead of hashing new - Generates proper name for your upload using Mediainfo/BDInfo and TMDb/IMDb conforming to site rules - Checks for existing releases already on site - - Uploads to PTP/BLU/BHD/Aither/THR/STC/PSS/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN/RTF/OTW/FNP/CBR/UTP/HDB/AL/SHRI/OE/TL/BHDTV/HDT/JPTV/LT/MTV/PTER/TDC/TTG/UTP + - Uploads to PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN/RTF/OTW/FNP/CBR/UTP/HDB/AL/SHRI/OE/TL/BHDTV/HDT/JPTV/LT/MTV/PTER/TDC/TTG/UTP/PSS - Adds to your client with fast resume, seeding instantly (rtorrent/qbittorrent/deluge/watch folder) - ALL WITH MINIMAL INPUT! 
- Currently works with .mkv/.mp4/Blu-ray/DVD/HD-DVDs diff --git a/data/example-config.py b/data/example-config.py index 27d938271..1f92cfb66 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -235,9 +235,9 @@ "announce_url": "https://shareisland.org/announce/customannounceurl", # "anon" : "False" }, - "PSS" : { - "api_key" : "PSS api key", - "announce_url" : "https://privatesilverscreen.cc/announce/customannounceurl", + "PSS": { + "api_key": "PSS api key", + "announce_url": "https://privatesilverscreen.cc/announce/customannounceurl", # "anon" : False }, "MANUAL": { diff --git a/src/trackers/PSS.py b/src/trackers/PSS.py index 01401b2b4..b19a594ae 100644 --- a/src/trackers/PSS.py +++ b/src/trackers/PSS.py @@ -18,12 +18,6 @@ class PSS(): Upload """ - ############################################################### - ######## EDIT ME ######## noqa E266 - ############################################################### - - # ALSO EDIT CLASS NAME ABOVE - def __init__(self, config): self.config = config self.tracker = 'PSS' @@ -32,7 +26,7 @@ def __init__(self, config): self.search_url = 'https://privatesilverscreen.cc/api/torrents/filter' self.signature = '\n[center][url=https://privatesilverscreen.cc/pages/1]Please Seed[/url][/center]' self.banned_groups = ['4K4U', 'AROMA', 'd3g', 'edge2020', 'EMBER', 'EVO', 'FGT', 'NeXus', 'ION10', 'iVy', 'Judas', 'LAMA', 'MeGusta', 'nikt0', 'OEPlus', 'OFT', 'OsC', 'PYC', - 'QxR', 'Ralphy', 'RARBG', 'RetroPeeps', 'SAMPA', 'Sicario', 'Silence', 'STUTTERSHIT', 'Tigole', 'TSP', 'TSPxL','Will1869', 'x0r', 'YIFY', 'core', 'ZMNT', + 'QxR', 'Ralphy', 'RARBG', 'RetroPeeps', 'SAMPA', 'Sicario', 'Silence', 'STUTTERSHIT', 'Tigole', 'TSP', 'TSPxL', 'Will1869', 'x0r', 'YIFY', 'core', 'ZMNT', 'msd', 'nikt0', 'aXXo', 'BRrip', 'CM8', 'CrEwSaDe', 'DNL', 'FaNGDiNG0', 'FRDS', 'HD2DVD', 'HDTime', 'Leffe', 'mHD', 'mSD', 'nHD', 'nSD', 'NhaNc3', 'PRODJi', 'RDN', 'SANTi', 'ViSION', 'WAF', 'YTS', 'FROZEN', 'UTR', 'Grym', 'GrymLegacy', 'CK4', 'ProRes', 'MezRips', 'GalaxyRG', 'RCDiVX', 'LycanHD'] pass @@ -51,13 +45,13 @@ async def get_type_id(self, type): 'ENCODE': '3', 'WEBDL': '4', 'WEBRIP': '5', - 'HDTV': '6', + 'HDTV': '6', }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', + '8640p': '10', '4320p': '1', '2160p': '2', '1080p': '3', @@ -70,10 +64,6 @@ async def get_res_id(self, resolution): }.get(resolution, '10') return resolution_id - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### noqa E266 - ############################################################### - async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) From 6c2e54a9f25a2f11b6ecc34bb7b8ed7237c315da Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 11 Sep 2024 18:21:16 +1000 Subject: [PATCH 189/741] Ensure docker has required mediainfo version --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 743714237..3c4ada561 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,7 +4,7 @@ FROM python:3.12 RUN apt-get update && \ apt-get install -y --no-install-recommends \ ffmpeg \ - mediainfo \ + mediainfo=23.04-1 \ git \ g++ \ cargo \ From edda726b61f4802cb107294511fff22fcfc89faf Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 11 Sep 2024 18:54:17 +1000 Subject: [PATCH 190/741] Multiple durations in DVD screenshots, use longest duration only --- src/prep.py | 3
++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index 179855297..8ba8531e2 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1125,7 +1125,8 @@ def dvd_screenshots(self, meta, disc_num, num_screens=None): sar = 1 for track in ifo_mi.tracks: if track.track_type == "Video": - length = float(track.duration)/1000 # noqa F841 + durations = [float(d) for d in track.duration.split(' / ')] + length = max(durations) / 1000 # noqa #F841 par = float(track.pixel_aspect_ratio) dar = float(track.display_aspect_ratio) width = float(track.width) From 98897e8bf9c3cbc904d36c1759c97a8ceb11b240 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 11 Sep 2024 18:54:59 +1000 Subject: [PATCH 191/741] Push a docker build --- .github/workflows/docker-image.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index 84e6adfe1..b1b1e33bd 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -5,6 +5,7 @@ on: branches: - master - develop + - dvd-seasons env: REGISTRY: ghcr.io From 302ef4795c037e4dc08867dc6eb5cb769517a433 Mon Sep 17 00:00:00 2001 From: azulu Date: Wed, 11 Sep 2024 18:48:18 +0000 Subject: [PATCH 192/741] Added AV1 codec as an option for encodes and WEBs. --- src/prep.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/prep.py b/src/prep.py index 179855297..69a3fea0a 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2243,11 +2243,15 @@ def get_video_encode(self, mi, type, bdinfo): codec = 'x264' elif format == 'HEVC': codec = 'x265' + elif format == 'AV1': + codec = 'AV1' elif type in ('WEBDL', 'HDTV'): # WEB-DL if format == 'AVC': codec = 'H.264' elif format == 'HEVC': codec = 'H.265' + elif format == 'AV1': + codec = 'AV1' if type == 'HDTV' and has_encode_settings is True: codec = codec.replace('H.', 'x') From 744e3d0dc6903413ff5d9e2b99594833293ebdcc Mon Sep 17 00:00:00 2001 From: azulu Date: Wed, 11 Sep 2024 19:00:34 +0000 Subject: [PATCH 193/741] Updated OnlyEncodes+ ban list to 2024-09-11. 
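For context, a minimal sketch of the kind of check this ban list feeds, assuming a hypothetical is_banned() helper; this is an illustration only, not the actual Upload-Assistant code. The leading '-' strip matches how meta['tag'] is carried elsewhere in the script (e.g. '-LAMA'):

    # Hypothetical helper for illustration; not part of this patch.
    def is_banned(tag: str, banned_groups: list) -> bool:
        # Normalise the release-group tag, then compare case-insensitively
        group = tag.lstrip('-').strip().lower()
        return group in {g.lower() for g in banned_groups}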
--- src/trackers/OE.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/OE.py b/src/trackers/OE.py index 8cb819518..e0b0e4e30 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -25,7 +25,7 @@ def __init__(self, config): self.upload_url = 'https://onlyencodes.cc/api/torrents/upload' self.torrent_url = 'https://onlyencodes.cc/api/torrents/' self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" - self.banned_groups = ['0neshot', '3LT0N', '4K4U', '4yEo', '$andra', '[Oj]', 'AFG', 'AkihitoSubs', 'AniHLS', 'Anime Time', 'AnimeRG', 'AniURL', 'AR', 'AROMA', 'ASW', 'aXXo', 'BakedFish', 'BiTOR', 'BHDStudio', 'BRrip', 'bonkai', 'Cleo', 'CM8', 'C4K', 'CrEwSaDe', 'core', 'd3g', 'DDR', 'DeadFish', 'DeeJayAhmed', 'DNL', 'ELiTE', 'EMBER', 'eSc', 'EVO', 'EZTV', 'FaNGDiNG0', 'FGT', 'fenix', 'FUM', 'FRDS', 'FROZEN', 'GalaxyTV', 'GalaxyRG', 'GERMini', 'Grym', 'GrymLegacy', 'HAiKU', 'HD2DVD', 'HDTime', 'Hi10', 'ION10', 'iPlanet', 'JacobSwaggedUp', 'JIVE', 'Judas', 'KiNGDOM', 'LAMA', 'Leffe', 'LiGaS', 'LOAD', 'LycanHD', 'MeGusta,' 'MezRips,' 'mHD,' 'Mr.Deadpool', 'mSD', 'NemDiggers', 'neoHEVC', 'NeXus', 'NhaNc3', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'NOIVTC', 'pahe.in', 'PlaySD', 'playXD', 'PRODJi', 'ProRes', 'project-gxs', 'PSA', 'QaS', 'Ranger', 'RAPiDCOWS', 'RARBG', 'Raze', 'RCDiVX', 'RDN', 'Reaktor', 'REsuRRecTioN', 'RMTeam', 'ROBOTS', 'rubix', 'SANTi', 'SHUTTERSHIT', 'SpaceFish', 'SPASM', 'SSA', 'TBS', 'Telly,' 'Tenrai-Sensei,' 'TERMiNAL,' 'TM', 'topaz', 'TSP', 'TSPxL', 'Trix', 'URANiME', 'UTR', 'VipapkSudios', 'ViSION', 'WAF', 'Wardevil', 'x0r', 'xRed', 'XS', 'YakuboEncodes', 'YIFY', 'YTS', 'YuiSubs', 'ZKBL', 'ZmN', 'ZMNT'] + self.banned_groups = ['0neshot', '3LT0N', '4K4U', '4yEo', '$andra', '[Oj]', 'AFG', 'AkihitoSubs', 'AniHLS', 'Anime Time', 'AnimeRG', 'AniURL', 'AOC', 'AR', 'AROMA', 'ASW', 'aXXo', 'BakedFish', 'BiTOR', 'BRrip', 'bonkai', 'Cleo', 'CM8', 'C4K', 'CrEwSaDe', 'core', 'd3g', 'DDR', 'DeadFish', 'DeeJayAhmed', 'DNL', 'ELiTE', 'EMBER', 'eSc', 'EVO', 'EZTV', 'FaNGDiNG0', 'FGT', 'fenix', 'FUM', 'FRDS', 'FROZEN', 'GalaxyTV', 'GalaxyRG', 'GalaxyRG265', 'GERMini', 'Grym', 'GrymLegacy', 'HAiKU', 'HD2DVD', 'HDTime', 'Hi10', 'HiQVE', 'ION10', 'iPlanet', 'JacobSwaggedUp', 'JIVE', 'Judas', 'KiNGDOM', 'LAMA', 'Leffe', 'LiGaS', 'LOAD', 'LycanHD', 'MeGusta', 'MezRips', 'mHD,' 'Mr.Deadpool', 'mSD', 'NemDiggers', 'neoHEVC', 'NeXus', 'NhaNc3', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'NOIVTC', 'pahe.in', 'PlaySD', 'playXD', 'PRODJi', 'ProRes', 'project-gxs', 'PSA', 'QaS', 'Ranger', 'RAPiDCOWS', 'RARBG', 'Raze', 'RCDiVX', 'RDN', 'Reaktor', 'REsuRRecTioN', 'RMTeam', 'ROBOTS', 'rubix', 'SANTi', 'SHUTTERSHIT', 'SpaceFish', 'SPASM', 'SSA', 'TBS', 'Telly', 'Tenrai-Sensei', 'TERMiNAL', 'TGx', 'TM', 'topaz', 'TSP', 'TSPxL', 'URANiME', 'UTR', 'VipapkSudios', 'ViSION', 'WAF', 'Wardevil', 'x0r', 'xRed', 'XS', 'YakuboEncodes', 'YIFY', 'YTS', 'YuiSubs', 'ZKBL', 'ZmN', 'ZMNT'] pass async def upload(self, meta): From 85132439df02bf9ebff64d7ada3fb1335666adc1 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 12 Sep 2024 08:12:04 +1000 Subject: [PATCH 194/741] Duplicate, comma, formatting --- src/trackers/OE.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/trackers/OE.py b/src/trackers/OE.py index e0b0e4e30..e1b7453ed 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -25,7 +25,17 @@ def __init__(self, config): self.upload_url = 'https://onlyencodes.cc/api/torrents/upload' 
self.torrent_url = 'https://onlyencodes.cc/api/torrents/' self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" - self.banned_groups = ['0neshot', '3LT0N', '4K4U', '4yEo', '$andra', '[Oj]', 'AFG', 'AkihitoSubs', 'AniHLS', 'Anime Time', 'AnimeRG', 'AniURL', 'AOC', 'AR', 'AROMA', 'ASW', 'aXXo', 'BakedFish', 'BiTOR', 'BRrip', 'bonkai', 'Cleo', 'CM8', 'C4K', 'CrEwSaDe', 'core', 'd3g', 'DDR', 'DeadFish', 'DeeJayAhmed', 'DNL', 'ELiTE', 'EMBER', 'eSc', 'EVO', 'EZTV', 'FaNGDiNG0', 'FGT', 'fenix', 'FUM', 'FRDS', 'FROZEN', 'GalaxyTV', 'GalaxyRG', 'GalaxyRG265', 'GERMini', 'Grym', 'GrymLegacy', 'HAiKU', 'HD2DVD', 'HDTime', 'Hi10', 'HiQVE', 'ION10', 'iPlanet', 'JacobSwaggedUp', 'JIVE', 'Judas', 'KiNGDOM', 'LAMA', 'Leffe', 'LiGaS', 'LOAD', 'LycanHD', 'MeGusta', 'MezRips', 'mHD,' 'Mr.Deadpool', 'mSD', 'NemDiggers', 'neoHEVC', 'NeXus', 'NhaNc3', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'NOIVTC', 'pahe.in', 'PlaySD', 'playXD', 'PRODJi', 'ProRes', 'project-gxs', 'PSA', 'QaS', 'Ranger', 'RAPiDCOWS', 'RARBG', 'Raze', 'RCDiVX', 'RDN', 'Reaktor', 'REsuRRecTioN', 'RMTeam', 'ROBOTS', 'rubix', 'SANTi', 'SHUTTERSHIT', 'SpaceFish', 'SPASM', 'SSA', 'TBS', 'Telly', 'Tenrai-Sensei', 'TERMiNAL', 'TGx', 'TM', 'topaz', 'TSP', 'TSPxL', 'URANiME', 'UTR', 'VipapkSudios', 'ViSION', 'WAF', 'Wardevil', 'x0r', 'xRed', 'XS', 'YakuboEncodes', 'YIFY', 'YTS', 'YuiSubs', 'ZKBL', 'ZmN', 'ZMNT'] + self.banned_groups = ['0neshot', '3LT0N', '4K4U', '4yEo', '$andra', '[Oj]', 'AFG', 'AkihitoSubs', 'AniHLS', 'Anime Time', 'AnimeRG', 'AniURL', + 'AOC', 'AR', 'AROMA', 'ASW', 'aXXo', 'BakedFish', 'BiTOR', 'BRrip', 'bonkai', 'Cleo', 'CM8', 'C4K', 'CrEwSaDe', 'core', + 'd3g', 'DDR', 'DeadFish', 'DeeJayAhmed', 'DNL', 'ELiTE', 'EMBER', 'eSc', 'EVO', 'EZTV', 'FaNGDiNG0', 'FGT', 'fenix', 'FUM', + 'FRDS', 'FROZEN', 'GalaxyTV', 'GalaxyRG', 'GalaxyRG265', 'GERMini', 'Grym', 'GrymLegacy', 'HAiKU', 'HD2DVD', 'HDTime', + 'Hi10', 'HiQVE', 'ION10', 'iPlanet', 'JacobSwaggedUp', 'JIVE', 'Judas', 'KiNGDOM', 'LAMA', 'Leffe', 'LiGaS', 'LOAD', + 'LycanHD', 'MeGusta', 'MezRips', 'mHD', 'Mr.Deadpool', 'mSD', 'NemDiggers', 'neoHEVC', 'NeXus', 'nHD', 'nikt0', + 'nSD', 'NhaNc3', 'NOIVTC', 'pahe.in', 'PlaySD', 'playXD', 'PRODJi', 'ProRes', 'project-gxs', 'PSA', 'QaS', 'Ranger', + 'RAPiDCOWS', 'RARBG', 'Raze', 'RCDiVX', 'RDN', 'Reaktor', 'REsuRRecTioN', 'RMTeam', 'ROBOTS', 'rubix', 'SANTi', + 'SHUTTERSHIT', 'SpaceFish', 'SPASM', 'SSA', 'TBS', 'Telly', 'Tenrai-Sensei', 'TERMiNAL', 'TGx', 'TM', 'topaz', 'TSP', + 'TSPxL', 'URANiME', 'UTR', 'VipapkSudios', 'ViSION', 'WAF', 'Wardevil', 'x0r', 'xRed', 'XS', 'YakuboEncodes', 'YIFY', + 'YTS', 'YuiSubs', 'ZKBL', 'ZmN', 'ZMNT'] pass async def upload(self, meta): From 7271d1ca4c630cbb875199f26e3a1a159f5500bc Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 12 Sep 2024 13:37:55 +1000 Subject: [PATCH 195/741] Use float directly --- src/prep.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index 8ba8531e2..3b9cf2c2e 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1125,8 +1125,15 @@ def dvd_screenshots(self, meta, disc_num, num_screens=None): sar = 1 for track in ifo_mi.tracks: if track.track_type == "Video": - durations = [float(d) for d in track.duration.split(' / ')] - length = max(durations) / 1000 # noqa #F841 + if isinstance(track.duration, str): + # If the duration is a string, split and find the longest duration + durations = [float(d) for d in track.duration.split(' / ')] + length = max(durations) / 
1000 # Use the longest duration + else: + # If the duration is already an int or float, use it directly + length = float(track.duration) / 1000 # noqa #F841 # Convert to seconds + + # Proceed as usual for other fields par = float(track.pixel_aspect_ratio) dar = float(track.display_aspect_ratio) width = float(track.width) From 95b58068ef8415f0c0c9a6b1abb010e278e27098 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 12 Sep 2024 15:27:36 +1000 Subject: [PATCH 196/741] Lint --- src/prep.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index 3b9cf2c2e..cff54730e 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1132,7 +1132,7 @@ def dvd_screenshots(self, meta, disc_num, num_screens=None): else: # If the duration is already an int or float, use it directly length = float(track.duration) / 1000 # noqa #F841 # Convert to seconds - + # Proceed as usual for other fields par = float(track.pixel_aspect_ratio) dar = float(track.display_aspect_ratio) From dccd28c970c22b687d253b72b91026ca018230ef Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 12 Sep 2024 16:49:19 +1000 Subject: [PATCH 197/741] Export cleanpath mediainfo with DVD, for MTV --- src/prep.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/prep.py b/src/prep.py index ae6987c34..14349660a 100644 --- a/src/prep.py +++ b/src/prep.py @@ -682,6 +682,9 @@ async def get_disc(self, meta): export = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'w', newline="", encoding='utf-8') export.write(discs[0]['ifo_mi']) export.close() + export_clean = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'w', newline="", encoding='utf-8') + export_clean.write(discs[0]['ifo_mi']) + export_clean.close() elif is_disc == "HDDVD": discs = await parse.get_hddvd_info(discs) export = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'w', newline="", encoding='utf-8') From 415ceab7b7cc22f5d7c1dbf68bc276eca4746809 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 12 Sep 2024 17:04:12 +1000 Subject: [PATCH 198/741] Look for audio tracks rather than a hardcoded track number --- .github/workflows/docker-image.yml | 2 +- src/trackers/AITHER.py | 11 ++++++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index b1b1e33bd..f3534c521 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -5,7 +5,7 @@ on: branches: - master - develop - - dvd-seasons + - aither_language env: REGISTRY: ghcr.io diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 7da76fe81..11973429f 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -130,8 +130,14 @@ def has_english_audio(tracks, is_bdmv=False): def get_audio_lang(tracks, is_bdmv=False): if is_bdmv: return tracks[0].get('language', '').upper() if tracks else "" - return tracks[2].get('Language_String', '').upper() if len(tracks) > 2 else "" + # For regular files, find the first audio track and return the language string + for track in tracks: + if track['@type'] == "Audio": + return track.get('Language', '').upper() # Correctly retrieve the language + return "" # Return an empty string if no audio track is found + + # Handle non-BDMV cases if meta['is_disc'] != "BDMV": try: with open(f"{meta.get('base_dir')}/tmp/{meta.get('uuid')}/MediaInfo.json", 'r', encoding='utf-8') as f: @@ -139,9 +145,12 @@ def get_audio_lang(tracks, is_bdmv=False): audio_tracks = mi['media']['track'] has_eng_audio = 
has_english_audio(audio_tracks) + + # If English audio is not present, get the audio language if not has_eng_audio: audio_lang = get_audio_lang(audio_tracks) if audio_lang: + # Insert the audio language before the resolution in the name aither_name = aither_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) except (FileNotFoundError, KeyError, IndexError) as e: print(f"Error processing MediaInfo: {e}") From 126d5113a8674c76a415b2f30a1758d0e702ef4b Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 13 Sep 2024 00:01:43 +1000 Subject: [PATCH 199/741] Add TIK --- data/example-config.py | 11 +- src/args.py | 22 ++ src/prep.py | 48 +++- src/trackers/AITHER.py | 7 +- src/trackers/PTP.py | 2 +- src/trackers/TIK.py | 595 +++++++++++++++++++++++++++++++++++++++++ upload.py | 14 +- 7 files changed, 677 insertions(+), 22 deletions(-) create mode 100644 src/trackers/TIK.py diff --git a/data/example-config.py b/data/example-config.py index 0043aad12..04d1b3a2a 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -36,9 +36,9 @@ "TRACKERS": { # Which trackers do you want to upload to? - # Available tracker: BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL, HDB + # Available tracker: BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL, HDB, TIK # Remove the trackers from the default_trackers list that are not used, to save being asked everytime - "default_trackers": "BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL, HDB", + "default_trackers": "BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL, HDB, TIK", "BLU": { "useAPI": False, # Set to True if using BLU @@ -235,6 +235,13 @@ "announce_url": "https://shareisland.org/announce/customannounceurl", # "anon" : "False" }, + "TIK": { + "useAPI": True, # Set to True if using TIK + "api_key": "", + "announce_url": "https://cinematik.net/announce/", + "anon": False, + "modq": True, + }, "MANUAL": { # Uncomment and replace link with filebrowser (https://github.com/filebrowser/filebrowser) link to the Upload-Assistant directory, this will link to your filebrowser instead of uploading to uguu.se # "filebrowser" : "https://domain.tld/filebrowser/files/Upload-Assistant/" diff --git a/src/args.py b/src/args.py index 988c22715..fd73009d6 100644 --- a/src/args.py +++ b/src/args.py @@ -47,8 +47,15 @@ def parse(self, args, meta): parser.add_argument('-aither', '--aither', nargs='*', required=False, help="Aither torrent id/link", type=str) parser.add_argument('-lst', '--lst', nargs='*', required=False, help="LST torrent id/link", type=str) parser.add_argument('-oe', '--oe', nargs='*', required=False, help="OE torrent id/link", type=str) + parser.add_argument('-tik', '--tik', nargs='*', required=False, help="TIK torrent id/link", type=str) parser.add_argument('-hdb', '--hdb', nargs='*', required=False, help="HDB torrent id/link", type=str) + parser.add_argument('--foreign', dest='foreign', action='store_true', required=False, help="Set for TIK Foreign category") + parser.add_argument('--opera', dest='opera', action='store_true', required=False, help="Set for TIK Opera & Musical category") + 
parser.add_argument('--asian', dest='asian', action='store_true', required=False, help="Set for TIK Asian category") + parser.add_argument('-disctype', '--disctype', nargs='*', required=False, help="Type of disc for TIK (BD100, BD66, BD50, BD25, NTSC DVD9, NTSC DVD5, PAL DVD9, PAL DVD5, Custom, 3D)", type=str) + parser.add_argument('--untouched', dest='untouched', action='store_true', required=False, help="Set for a completely untouched disc at TIK") parser.add_argument('-d', '--desc', nargs='*', required=False, help="Custom Description (string)") + parser.add_argument('-manual_dvds', '--manual_dvds', nargs='*', required=False, help="Override the default number of DVDs (e.g. use 2xDVD9+DVD5 instead)", type=str, dest='manual_dvds', default="") parser.add_argument('-pb', '--desclink', nargs='*', required=False, help="Custom Description (link to hastebin/pastebin)") parser.add_argument('-df', '--descfile', nargs='*', required=False, help="Custom Description (path to file)") parser.add_argument('-ih', '--imghost', nargs='*', required=False, help="Image Host", choices=['imgbb', 'ptpimg', 'imgbox', 'pixhost', 'lensdump', 'ptscreens']) @@ -179,6 +186,19 @@ def parse(self, args, meta): console.print('[red]Continuing without --oe') else: meta['oe'] = value2 + elif key == 'tik': + if value2.startswith('http'): + parsed = urllib.parse.urlparse(value2) + try: + tikpath = parsed.path + if tikpath.endswith('/'): + tikpath = tikpath[:-1] + meta['tik'] = tikpath.split('/')[-1] + except Exception: + console.print('[red]Unable to parse id from url') + console.print('[red]Continuing without --tik') + else: + meta['tik'] = value2 elif key == 'hdb': if value2.startswith('http'): parsed = urllib.parse.urlparse(value2) @@ -196,6 +216,8 @@ def parse(self, args, meta): meta[key] = value elif key in ("manual_edition"): meta[key] = value + elif key in ("manual_dvds"): + meta[key] = value elif key in ("freeleech"): meta[key] = 100 elif key in ("tag") and value == []: diff --git a/src/prep.py b/src/prep.py index 179855297..69af197ad 100644 --- a/src/prep.py +++ b/src/prep.py @@ -8,6 +8,7 @@ from src.trackers.LST import LST from src.trackers.OE import OE from src.trackers.HDB import HDB +from src.trackers.TIK import TIK from src.trackers.COMMON import COMMON try: @@ -156,7 +157,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met manual_key = f"{tracker_key}_manual" found_match = False - if tracker_name in ["BLU", "AITHER", "LST", "OE"]: + if tracker_name in ["BLU", "AITHER", "LST", "OE", "TIK"]: if meta.get(tracker_key) is not None: console.print(f"[cyan]{tracker_name} ID found in meta, reusing existing ID: {meta[tracker_key]}[/cyan]") tracker_data = await COMMON(self.config).unit3d_torrent_info( @@ -376,7 +377,7 @@ async def gather_prep(self, meta, mode): else: mi = meta['mediainfo'] - meta['dvd_size'] = await self.get_dvd_size(meta['discs']) + meta['dvd_size'] = await self.get_dvd_size(meta['discs'], meta.get('manual_dvds')) meta['resolution'] = self.get_resolution(guessit(video), meta['uuid'], base_dir) meta['sd'] = self.is_sd(meta['resolution']) @@ -452,6 +453,8 @@ async def gather_prep(self, meta, mode): specific_tracker = 'LST' elif meta.get('oe'): specific_tracker = 'OE' + elif meta.get('tik'): + specific_tracker = 'TIK' # If a specific tracker is found, only process that one if specific_tracker: @@ -487,6 +490,12 @@ async def gather_prep(self, meta, mode): if match: found_match = True + elif specific_tracker == 'TIK' and str(self.config['TRACKERS'].get('TIK',
{}).get('useAPI')).lower() == "true": + tik = TIK(config=self.config) + meta, match = await self.update_metadata_from_tracker('TIK', tik, meta, search_term, search_file_folder) + if match: + found_match = True + elif specific_tracker == 'HDB' and str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": hdb = HDB(config=self.config) meta, match = await self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder) @@ -682,6 +691,9 @@ async def get_disc(self, meta): export = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'w', newline="", encoding='utf-8') export.write(discs[0]['ifo_mi']) export.close() + export_clean = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'w', newline="", encoding='utf-8') + export_clean.write(discs[0]['ifo_mi']) + export_clean.close() elif is_disc == "HDDVD": discs = await parse.get_hddvd_info(discs) export = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'w', newline="", encoding='utf-8') @@ -872,7 +884,6 @@ def filter_mediainfo(data): "@type": track["@type"], "extra": track.get("extra"), }) - return filtered if not os.path.exists(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt") and export_text: @@ -1107,14 +1118,17 @@ def disc_screenshots(self, filename, bdinfo, folder_id, base_dir, use_vs, image_ time.sleep(1) progress.advance(screen_task) # remove smallest image - smallest = "" + smallest = None smallestsize = 99 ** 99 for screens in glob.glob1(f"{base_dir}/tmp/{folder_id}/", f"{filename}-*"): - screensize = os.path.getsize(screens) + screen_path = os.path.join(f"{base_dir}/tmp/{folder_id}/", screens) + screensize = os.path.getsize(screen_path) if screensize < smallestsize: smallestsize = screensize - smallest = screens - os.remove(smallest) + smallest = screen_path + + if smallest is not None: + os.remove(smallest) def dvd_screenshots(self, meta, disc_num, num_screens=None): if num_screens is None: @@ -1250,14 +1264,17 @@ def _is_vob_good(n, loops, num_screens): looped += 1 progress.advance(screen_task) # remove smallest image - smallest = "" + smallest = None smallestsize = 99**99 for screens in glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}/", f"{meta['discs'][disc_num]['name']}-*"): - screensize = os.path.getsize(screens) + screen_path = os.path.join(f"{meta['base_dir']}/tmp/{meta['uuid']}/", screens) + screensize = os.path.getsize(screen_path) if screensize < smallestsize: smallestsize = screensize - smallest = screens - os.remove(smallest) + smallest = screen_path + + if smallest is not None: + os.remove(smallest) def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=None, force_screenshots=False): # Ensure the image list is initialized and preserve existing images @@ -1589,6 +1606,7 @@ async def tmdb_other_meta(self, meta): if meta.get('anime', False) is False: meta['mal_id'], meta['aka'], meta['anime'] = self.get_anime(response, meta) meta['poster'] = response.get('poster_path', "") + meta['tmdb_poster'] = response.get('poster_path', "") meta['overview'] = response['overview'] meta['tmdb_type'] = 'Movie' meta['runtime'] = response.get('episode_run_time', 60) @@ -3299,7 +3317,7 @@ async def get_imdb_aka(self, imdb_id): aka = f" AKA {aka}" return aka, original_language - async def get_dvd_size(self, discs): + async def get_dvd_size(self, discs, manual_dvds): sizes = [] dvd_sizes = [] for each in discs: @@ -3312,6 +3330,10 @@ async def get_dvd_size(self, discs): dvd_sizes.append(each[0]) dvd_sizes.sort() compact = " ".join(dvd_sizes) + + if 
manual_dvds: + compact = str(manual_dvds) + return compact def get_tmdb_imdb_from_mediainfo(self, mediainfo, category, is_disc, tmdbid, imdbid): @@ -3362,6 +3384,7 @@ async def get_imdb_info(self, imdbID, meta): imdb_info['cover'] = info.get('full-size cover url', '').replace(".jpg", "._V1_FMjpg_UX750_.jpg") imdb_info['plot'] = info.get('plot', [''])[0] imdb_info['genres'] = ', '.join(info.get('genres', '')) + imdb_info['rating'] = info.get('rating', 'N/A') imdb_info['original_language'] = info.get('language codes') if isinstance(imdb_info['original_language'], list): if len(imdb_info['original_language']) > 1: @@ -3406,6 +3429,7 @@ async def imdb_other_meta(self, meta): meta['poster'] = imdb_info['cover'] meta['original_language'] = imdb_info['original_language'] meta['overview'] = imdb_info['plot'] + meta['imdb_rating'] = imdb_info['rating'] difference = SequenceMatcher(None, meta['title'].lower(), meta['aka'][5:].lower()).ratio() if difference >= 0.9 or meta['aka'][5:].strip() == "" or meta['aka'][5:].strip().lower() in meta['title'].lower(): diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 7da76fe81..b0d219aa3 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -40,6 +40,8 @@ async def upload(self, meta): resolution_id = await self.get_res_id(meta['resolution']) modq = await self.get_flag(meta, 'modq') name = await self.edit_name(meta) + region_id = await common.unit3d_region_ids(meta.get('region')) + distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: @@ -89,7 +91,10 @@ async def upload(self, meta): if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - + if region_id != 0: + data['region_id'] = region_id + if distributor_id != 0: + data['distributor_id'] = distributor_id if meta.get('category') == "TV": data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index a2f992e98..f13e5d39b 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -201,7 +201,7 @@ async def get_ptp_description(self, ptp_torrent_id, is_disc): await asyncio.sleep(1) ptp_desc = response.text - # console.print(f"[yellow]Raw description received:\n{ptp_desc[:3800]}...") # Show first 500 characters for brevity + # console.print(f"[yellow]Raw description received:\n{ptp_desc[:6800]}...") # Show first 500 characters for brevity bbcode = BBCODE() desc, imagelist = bbcode.clean_ptp_description(ptp_desc, is_disc) diff --git a/src/trackers/TIK.py b/src/trackers/TIK.py new file mode 100644 index 000000000..a3f2b36f0 --- /dev/null +++ b/src/trackers/TIK.py @@ -0,0 +1,595 @@ +# -*- coding: utf-8 -*- +# import discord +import asyncio +import requests +import os +import re +import platform +import sys +import cli_ui +import urllib.request +import click +from str2bool import str2bool + +from src.trackers.COMMON import COMMON +from src.console import console + + +class TIK(): + """ + Edit for Tracker: + Edit BASE.torrent with announce and source + Check for duplicates + Set type/category IDs + Upload + """ + + def __init__(self, config): + self.config = config + self.tracker = 'TIK' + self.source_flag = 'TIK' + self.search_url = 'https://cinematik.net/api/torrents/filter' + 
self.upload_url = 'https://cinematik.net/api/torrents/upload' + self.torrent_url = 'https://cinematik.net/api/torrents/' + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by testing 123, Audionuts Upload Assistant[/url][/center]" + self.banned_groups = [""] + pass + + async def upload(self, meta, disctype): + common = COMMON(config=self.config) + await common.edit_torrent(meta, self.tracker, self.source_flag) + await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True) + cat_id = await self.get_cat_id(meta['category'], meta.get('foreign'), meta.get('opera'), meta.get('asian')) + type_id = await self.get_type_id(disctype) + resolution_id = await self.get_res_id(meta['resolution']) + modq = await self.get_flag(meta, 'modq') + region_id = await common.unit3d_region_ids(meta.get('region')) + distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + anon = 0 + else: + anon = 1 + + if not meta['is_disc']: + console.print("[red]Only disc-based content allowed at TIK") + return + elif meta['bdinfo'] is not None: + mi_dump = None + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8') as bd_file: + bd_dump = bd_file.read() + else: + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8') as mi_file: + mi_dump = mi_file.read() + bd_dump = None + + if meta.get('desclink'): + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", "r").read() + print(f"Custom Description Link: {desc}") + + elif meta.get('descfile'): + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", "r").read() + print(f"Custom Description File Path: {desc}") + + else: + await self.edit_desc(meta) + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", "r").read() + + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') + files = {'torrent': open_torrent} + data = { + 'name': await self.get_name(meta, disctype), + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'region_id': region_id, + 'distributor_id': distributor_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': 0, + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, + 'mod_queue_opt_in': modq, + } + # Internal + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: + if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + data['internal'] = 1 + if self.config['TRACKERS'][self.tracker].get('personal', False) is True: + if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('personal_group', [])): + data['personal_release'] = 1 + + if region_id != 0: + data['region_id'] = region_id + if distributor_id != 0: + data['distributor_id'] = distributor_id + if meta.get('category') == "TV": + data['season_number'] = meta.get('season_int', '0') + data['episode_number'] = meta.get('episode_int', '0') + headers = { + 'User-Agent': 
f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + } + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + } + + if meta['debug'] is False: + response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + console.print(data) + console.print(f"TIK response: {response}") + try: + console.print(response.json()) + except Exception: + console.print("It may have uploaded, go check") + return + else: + console.print("[cyan]Request Data:") + console.print(data) + open_torrent.close() + + def get_basename(self, meta): + path = next(iter(meta['filelist']), meta['path']) + return os.path.basename(path) + + async def get_name(self, meta, disctype): + disctype = meta.get('disctype', None) + basename = self.get_basename(meta) + type = meta.get('type', "") + title = meta.get('title', "").replace('AKA', '/').strip() + alt_title = meta.get('aka', "").replace('AKA', '/').strip() + year = meta.get('year', "") + resolution = meta.get('resolution', "") + season = meta.get('season', "") + repack = meta.get('repack', "") + if repack.strip(): + repack = f"[{repack}]" + three_d = meta.get('3D', "") + three_d_tag = f"[{three_d}]" if three_d else "" + tag = meta.get('tag', "").replace("-", "- ") + if tag == "": + tag = "- NOGRP" + source = meta.get('source', "") + uhd = meta.get('uhd', "") # noqa F841 + hdr = meta.get('hdr', "") + if not hdr.strip(): + hdr = "SDR" + distributor = meta.get('distributor', "") # noqa F841 + video_codec = meta.get('video_codec', "") + video_encode = meta.get('video_encode', "").replace(".", "") + if 'x265' in basename: + video_encode = video_encode.replace('H', 'x') + dvd_size = meta.get('dvd_size', "") + search_year = meta.get('search_year', "") + if not str(search_year).strip(): + search_year = year + + category_name = meta.get('category', "") + foreign = meta.get('foreign') + opera = meta.get('opera') + asian = meta.get('asian') + meta['category_id'] = await self.get_cat_id(category_name, foreign, opera, asian) + + name = "" + alt_title_part = f" / {alt_title}" if alt_title else "" + if meta['category_id'] in ("1", "3", "5", "6"): + if meta['is_disc'] == 'BDMV': + name = f"{title}{alt_title_part} ({year}) {disctype} {resolution} {video_codec} {three_d_tag}" + elif meta['is_disc'] == 'DVD': + name = f"{title}{alt_title_part} ({year}) {source} {dvd_size}" + elif meta['category'] == "TV": # TV SPECIFIC + if type == "DISC": # Disk + if meta['is_disc'] == 'BDMV': + name = f"{title}{alt_title_part} ({search_year}) {season} {disctype} {resolution} {video_codec}" + if meta['is_disc'] == 'DVD': + name = f"{title}{alt_title_part} ({search_year}) {season} {source} {dvd_size}" + + # User confirmation + console.print(f"[yellow]Final generated name: [green]{name}") + confirmation = cli_ui.ask_yes_no("Do you want to use this name?", default=False) # Default is 'No' + + if confirmation: + return name + else: + console.print("[red]Sorry, this seems to be an edge case, please report at (insert_link)") + sys.exit(1) + + async def get_cat_id(self, category_name, foreign, opera, asian): + category_id = { + 'FILM': '1', + 'TV': '2', + 'Foreign Film': '3', + 'Foreign TV': '4', + 'Opera & Musical': '5', + 'Asian Film': '6', + }.get(category_name, '0') + + if category_name == 'MOVIE': + if foreign: + category_id = '3' + elif opera: + category_id = '5' + elif asian: + category_id = '6' + else: + category_id = '1' + elif category_name == 'TV': + if foreign: + category_id = '4' + elif opera: + category_id = '5' + else: +
category_id = '2' + + return category_id + + async def get_type_id(self, disctype): + type_id_map = { + 'Custom': '1', + 'BD100': '3', + 'BD66': '4', + 'BD50': '5', + 'BD25': '6', + 'NTSC DVD9': '7', + 'NTSC DVD5': '8', + 'PAL DVD9': '9', + 'PAL DVD5': '10', + '3D': '11' + } + + if not disctype: + console.print("[red]You must specify a --disctype") + return None + + disctype_value = disctype[0] if isinstance(disctype, list) else disctype + type_id = type_id_map.get(disctype_value, '1') # '1' is the default fallback + + return type_id + + async def get_res_id(self, resolution): + resolution_id = { + 'Other': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', + '1080p': '3', + '1080i': '4', + '720p': '5', + '576p': '6', + '576i': '7', + '480p': '8', + '480i': '9' + }.get(resolution, '10') + return resolution_id + + async def get_flag(self, meta, flag_name): + config_flag = self.config['TRACKERS'][self.tracker].get(flag_name) + if config_flag is not None: + return 1 if config_flag else 0 + + return 1 if meta.get(flag_name, False) else 0 + + async def edit_desc(self, meta): + from src.prep import Prep + prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) + + # Fetch additional IMDb metadata + meta_imdb = await prep.imdb_other_meta(meta) # noqa #F841 + + if len(meta.get('discs', [])) > 0: + summary = meta['discs'][0].get('summary', '') + else: + summary = None + + # Proceed with matching Total Bitrate if the summary exists + if summary: + match = re.search(r"Total Bitrate: ([\d.]+ Mbps)", summary) + if match: + total_bitrate = match.group(1) + else: + total_bitrate = "Unknown" + else: + total_bitrate = "Unknown" + + country_name = self.country_code_to_name(meta.get('region')) + + # Rehost poster if tmdb_poster is available + poster_url = f"https://image.tmdb.org/t/p/original{meta.get('tmdb_poster', '')}" + + # Define the paths for both jpg and png poster images + poster_jpg_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/poster.jpg" + poster_png_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/poster.png" + + # Check if either poster.jpg or poster.png already exists + if os.path.exists(poster_jpg_path): + poster_path = poster_jpg_path + console.print("[green]Poster already exists as poster.jpg, skipping download.[/green]") + elif os.path.exists(poster_png_path): + poster_path = poster_png_path + console.print("[green]Poster already exists as poster.png, skipping download.[/green]") + else: + # No poster file exists, download the poster image + poster_path = poster_jpg_path # Default to saving as poster.jpg + try: + urllib.request.urlretrieve(poster_url, poster_path) + console.print(f"[green]Poster downloaded to {poster_path}[/green]") + except Exception as e: + console.print(f"[red]Error downloading poster: {e}[/red]") + + # Upload the downloaded or existing poster image once + if os.path.exists(poster_path): + try: + console.print("Uploading standard poster to image host....") + new_poster_url, _ = prep.upload_screens(meta, 1, 1, 0, 1, [poster_path], {}) + + # Ensure that the new poster URL is assigned only once + if len(new_poster_url) > 0: + poster_url = new_poster_url[0]['raw_url'] + except Exception as e: + console.print(f"[red]Error uploading poster: {e}[/red]") + else: + console.print("[red]Poster file not found, cannot upload.[/red]") + + # Generate the description text + desc_text = [] + + images = meta['image_list'] + discs = meta.get('discs', []) # noqa #F841 + + if len(images) >= 6: + image_link_1 = images[0]['raw_url'] + image_link_2 = images[1]['raw_url'] + image_link_3 = images[2]['raw_url'] + image_link_4 = images[3]['raw_url'] + image_link_5 = images[4]['raw_url'] + image_link_6 = images[5]['raw_url'] + else: + image_link_1 = image_link_2 = image_link_3 = image_link_4 = image_link_5 = image_link_6 = "" + + # Write the cover section with rehosted poster URL + desc_text.append("[h3]Cover[/h3] [color=red]A stock poster has been automatically added, but you'll get more love if you include a proper cover, see rule 6.6[/color]\n") + desc_text.append("[center]\n") + desc_text.append(f"[IMG=500]{poster_url}[/IMG]\n") + desc_text.append("[/center]\n\n") + + # Write screenshots section + desc_text.append("[h3]Screenshots[/h3]\n") + desc_text.append("[center]\n") + desc_text.append(f"[URL={image_link_1}][IMG=300]{image_link_1}[/IMG][/URL] ") + desc_text.append(f"[URL={image_link_2}][IMG=300]{image_link_2}[/IMG][/URL] ") + desc_text.append(f"[URL={image_link_3}][IMG=300]{image_link_3}[/IMG][/URL]\n ") + desc_text.append(f"[URL={image_link_4}][IMG=300]{image_link_4}[/IMG][/URL] ") + desc_text.append(f"[URL={image_link_5}][IMG=300]{image_link_5}[/IMG][/URL] ") + desc_text.append(f"[URL={image_link_6}][IMG=300]{image_link_6}[/IMG][/URL]\n") + desc_text.append("[/center]\n\n") + + # Write synopsis section with the custom title + desc_text.append("[h3]Synopsis/Review/Personal Thoughts (edit as needed)[/h3]\n") + desc_text.append("[color=red]Default TMDB synopsis added, more love if you use a synopsis from credible film institutions such as the BFI or directly quoting well-known film critics, see rule 6.3[/color]\n") + desc_text.append("[quote]\n") + desc_text.append(f"{meta.get('overview', 'No synopsis available.')}\n") + desc_text.append("[/quote]\n\n") + + # Write technical info section + desc_text.append("[h3]Technical Info[/h3]\n") + desc_text.append("[code]\n") + if meta['is_disc'] == 'BDMV': + desc_text.append(f" Disc Label.........: {meta.get('bdinfo', {}).get('label', '')}\n") + desc_text.append(f" IMDb...............: [url=https://www.imdb.com/title/tt{meta.get('imdb_id')}]{meta.get('imdb_rating', '')}[/url]\n") + desc_text.append(f" Year...............: {meta.get('year', '')}\n") + desc_text.append(f" Country............: {country_name}\n") + if meta['is_disc'] == 'BDMV': + desc_text.append(f" Runtime............: {meta.get('bdinfo', {}).get('length', '')} hrs [color=red](double check this is actual runtime)[/color]\n") + else: + desc_text.append(" Runtime............: [color=red]Insert the actual runtime[/color]\n") + + if meta['is_disc'] == 'BDMV': + audio_languages = ', '.join([f"{track.get('language', 'Unknown')} {track.get('codec', 'Unknown')} {track.get('channels', 'Unknown')}" for track in meta.get('bdinfo', {}).get('audio', [])]) + desc_text.append(f" Audio..............: {audio_languages}\n") + desc_text.append(f" Subtitles..........: {', '.join(meta.get('bdinfo', {}).get('subtitles', []))}\n") + else: + # Process each disc's `vob_mi` or `ifo_mi` to extract audio and subtitles separately + for disc in meta.get('discs', []): + vob_mi = disc.get('vob_mi', '') + ifo_mi = disc.get('ifo_mi', '') + + unique_audio = set() # Store unique audio strings + + audio_section = vob_mi.split('\n\nAudio\n')[1].split('\n\n')[0] if 'Audio\n' in vob_mi else None + if audio_section: + if "AC-3" in audio_section: + codec = "AC-3" + elif "DTS" in audio_section: + codec = "DTS" + elif "MPEG Audio" in audio_section: + codec = "MPEG Audio" + elif "PCM" in audio_section: + codec = "PCM" + elif "AAC" in audio_section: + codec = "AAC" + else: + codec = "Unknown" + + channels = audio_section.split("Channel(s)")[1].split(":")[1].strip().split(" ")[0] if "Channel(s)" in audio_section else "Unknown" + # Convert 6 channels to 5.1, otherwise leave as is + channels = "5.1" if channels == "6" else channels + language = disc.get('ifo_mi_full', '').split('Language')[1].split(":")[1].strip().split('\n')[0] if "Language" in disc.get('ifo_mi_full', '') else "Unknown" + audio_info = f"{language} {codec} {channels}" + unique_audio.add(audio_info) + + # Append audio information to the description + if unique_audio: + desc_text.append(f" Audio..............: {', '.join(sorted(unique_audio))}\n") + + # Subtitle extraction using the helper function + unique_subtitles = self.parse_subtitles(ifo_mi) + + # Append subtitle information to the description + if unique_subtitles: + desc_text.append(f" Subtitles..........: {', '.join(sorted(unique_subtitles))}\n") + + if meta['is_disc'] == 'BDMV': + video_info = meta.get('bdinfo', {}).get('video', []) + video_codec = video_info[0].get('codec', 'Unknown') + video_bitrate = video_info[0].get('bitrate', 'Unknown') + desc_text.append(f" Video Format.......: {video_codec} / {video_bitrate}\n") + else: + desc_text.append(f" DVD Format.........: {meta.get('source', 'Unknown')}\n") + desc_text.append(" Film Aspect Ratio..: [color=red]The actual aspect ratio of the content, not including the black bars[/color]\n") + if meta['is_disc'] == 'BDMV': + desc_text.append(f" Source.............: {meta.get('disctype', 'Unknown')}\n") + else: + desc_text.append(f" Source.............: {meta.get('dvd_size', 'Unknown')}\n") + desc_text.append(f" Film Distributor...: [url={meta.get('distributor_link', '')}]{meta.get('distributor', 'Unknown')}[/url]\n") + desc_text.append(f" Average Bitrate....: {total_bitrate}\n") + desc_text.append(" Ripping Program....: [color=red]Specify - if it's your rip or custom version, otherwise 'Not my rip'[/color]\n") + desc_text.append("\n") + if meta.get('untouched') is True: + desc_text.append(" Menus......: [X] Untouched\n") + desc_text.append(" Video......: [X] Untouched\n") + desc_text.append(" Extras.....: [X] Untouched\n") + desc_text.append(" Audio......: [X] Untouched\n") + else: + desc_text.append(" Menus......: [ ] Untouched\n") + desc_text.append(" [ ] Stripped\n") + desc_text.append(" Video......: [ ] Untouched\n") + desc_text.append(" [ ] Re-encoded\n") + desc_text.append(" Extras.....: [ ] Untouched\n") + desc_text.append(" [ ] Stripped\n") + desc_text.append(" [ ] Re-encoded\n") + desc_text.append(" [ ] None\n") + desc_text.append(" Audio......: [ ] Untouched\n") + desc_text.append(" [ ] Stripped tracks\n") + + desc_text.append("[/code]\n\n") + + # Extras + desc_text.append("[h4]Extras[/h4]\n") + desc_text.append("[*] Insert special feature 1 here\n") + desc_text.append("[*] Insert special feature 2 here\n") + desc_text.append("...
(add more special features as needed)\n\n") + + # Uploader Comments + desc_text.append("[h4]Uploader Comments[/h4]\n") + desc_text.append(f" - {meta.get('uploader_comments', 'No comments.')}\n") + + # Convert the list to a single string for the description + description = ''.join(desc_text) + + # Ask user if they want to edit or keep the description + console.print(f"Current description: {description}", markup=False) + console.print("[cyan]Do you want to edit or keep the description?[/cyan]") + edit_choice = input("Enter 'e' to edit, or press Enter to keep it as is: ") + + if edit_choice.lower() == 'e': + edited_description = click.edit(description) + if edited_description: + description = edited_description.strip() + console.print(f"Final description after editing: {description}", markup=False) + else: + console.print("[green]Keeping the original description.[/green]") + + # Write the final description to the file + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding="utf-8") as desc_file: + desc_file.write(description) + + def parse_subtitles(self, disc_mi): + unique_subtitles = set() # Store unique subtitle strings + lines = disc_mi.splitlines() # Split the multiline text into individual lines + current_block = None + + for line in lines: + # Detect the start of a subtitle block (Text #) + if line.startswith("Text #"): + current_block = "subtitle" + continue + + # Extract language information for subtitles + if current_block == "subtitle" and "Language" in line: + language = line.split(":")[1].strip() + unique_subtitles.add(language) + + return unique_subtitles + + def country_code_to_name(self, code): + country_mapping = { + 'AFG': 'Afghanistan', 'ALB': 'Albania', 'DZA': 'Algeria', 'AND': 'Andorra', 'AGO': 'Angola', + 'ARG': 'Argentina', 'ARM': 'Armenia', 'AUS': 'Australia', 'AUT': 'Austria', 'AZE': 'Azerbaijan', + 'BHS': 'Bahamas', 'BHR': 'Bahrain', 'BGD': 'Bangladesh', 'BRB': 'Barbados', 'BLR': 'Belarus', + 'BEL': 'Belgium', 'BLZ': 'Belize', 'BEN': 'Benin', 'BTN': 'Bhutan', 'BOL': 'Bolivia', + 'BIH': 'Bosnia and Herzegovina', 'BWA': 'Botswana', 'BRA': 'Brazil', 'BRN': 'Brunei', + 'BGR': 'Bulgaria', 'BFA': 'Burkina Faso', 'BDI': 'Burundi', 'CPV': 'Cabo Verde', 'KHM': 'Cambodia', + 'CMR': 'Cameroon', 'CAN': 'Canada', 'CAF': 'Central African Republic', 'TCD': 'Chad', 'CHL': 'Chile', + 'CHN': 'China', 'COL': 'Colombia', 'COM': 'Comoros', 'COG': 'Congo', 'CRI': 'Costa Rica', + 'HRV': 'Croatia', 'CUB': 'Cuba', 'CYP': 'Cyprus', 'CZE': 'Czech Republic', 'DNK': 'Denmark', + 'DJI': 'Djibouti', 'DMA': 'Dominica', 'DOM': 'Dominican Republic', 'ECU': 'Ecuador', 'EGY': 'Egypt', + 'SLV': 'El Salvador', 'GNQ': 'Equatorial Guinea', 'ERI': 'Eritrea', 'EST': 'Estonia', + 'SWZ': 'Eswatini', 'ETH': 'Ethiopia', 'FJI': 'Fiji', 'FIN': 'Finland', 'FRA': 'France', + 'GAB': 'Gabon', 'GMB': 'Gambia', 'GEO': 'Georgia', 'DEU': 'Germany', 'GHA': 'Ghana', + 'GRC': 'Greece', 'GRD': 'Grenada', 'GTM': 'Guatemala', 'GIN': 'Guinea', 'GNB': 'Guinea-Bissau', + 'GUY': 'Guyana', 'HTI': 'Haiti', 'HND': 'Honduras', 'HUN': 'Hungary', 'ISL': 'Iceland', 'IND': 'India', + 'IDN': 'Indonesia', 'IRN': 'Iran', 'IRQ': 'Iraq', 'IRL': 'Ireland', 'ISR': 'Israel', 'ITA': 'Italy', + 'JAM': 'Jamaica', 'JPN': 'Japan', 'JOR': 'Jordan', 'KAZ': 'Kazakhstan', 'KEN': 'Kenya', + 'KIR': 'Kiribati', 'KOR': 'Korea', 'KWT': 'Kuwait', 'KGZ': 'Kyrgyzstan', 'LAO': 'Laos', 'LVA': 'Latvia', + 'LBN': 'Lebanon', 'LSO': 'Lesotho', 'LBR': 'Liberia', 'LBY': 'Libya', 'LIE': 'Liechtenstein', + 'LTU': 'Lithuania', 
'LUX': 'Luxembourg', 'MDG': 'Madagascar', 'MWI': 'Malawi', 'MYS': 'Malaysia', + 'MDV': 'Maldives', 'MLI': 'Mali', 'MLT': 'Malta', 'MHL': 'Marshall Islands', 'MRT': 'Mauritania', + 'MUS': 'Mauritius', 'MEX': 'Mexico', 'FSM': 'Micronesia', 'MDA': 'Moldova', 'MCO': 'Monaco', + 'MNG': 'Mongolia', 'MNE': 'Montenegro', 'MAR': 'Morocco', 'MOZ': 'Mozambique', 'MMR': 'Myanmar', + 'NAM': 'Namibia', 'NRU': 'Nauru', 'NPL': 'Nepal', 'NLD': 'Netherlands', 'NZL': 'New Zealand', + 'NIC': 'Nicaragua', 'NER': 'Niger', 'NGA': 'Nigeria', 'MKD': 'North Macedonia', 'NOR': 'Norway', + 'OMN': 'Oman', 'PAK': 'Pakistan', 'PLW': 'Palau', 'PAN': 'Panama', 'PNG': 'Papua New Guinea', + 'PRY': 'Paraguay', 'PER': 'Peru', 'PHL': 'Philippines', 'POL': 'Poland', 'PRT': 'Portugal', + 'QAT': 'Qatar', 'ROU': 'Romania', 'RUS': 'Russia', 'RWA': 'Rwanda', 'KNA': 'Saint Kitts and Nevis', + 'LCA': 'Saint Lucia', 'VCT': 'Saint Vincent and the Grenadines', 'WSM': 'Samoa', 'SMR': 'San Marino', + 'STP': 'Sao Tome and Principe', 'SAU': 'Saudi Arabia', 'SEN': 'Senegal', 'SRB': 'Serbia', + 'SYC': 'Seychelles', 'SLE': 'Sierra Leone', 'SGP': 'Singapore', 'SVK': 'Slovakia', 'SVN': 'Slovenia', + 'SLB': 'Solomon Islands', 'SOM': 'Somalia', 'ZAF': 'South Africa', 'SSD': 'South Sudan', + 'ESP': 'Spain', 'LKA': 'Sri Lanka', 'SDN': 'Sudan', 'SUR': 'Suriname', 'SWE': 'Sweden', + 'CHE': 'Switzerland', 'SYR': 'Syria', 'TWN': 'Taiwan', 'TJK': 'Tajikistan', 'TZA': 'Tanzania', + 'THA': 'Thailand', 'TLS': 'Timor-Leste', 'TGO': 'Togo', 'TON': 'Tonga', 'TTO': 'Trinidad and Tobago', + 'TUN': 'Tunisia', 'TUR': 'Turkey', 'TKM': 'Turkmenistan', 'TUV': 'Tuvalu', 'UGA': 'Uganda', + 'UKR': 'Ukraine', 'ARE': 'United Arab Emirates', 'GBR': 'United Kingdom', 'USA': 'United States', + 'URY': 'Uruguay', 'UZB': 'Uzbekistan', 'VUT': 'Vanuatu', 'VEN': 'Venezuela', 'VNM': 'Vietnam', + 'YEM': 'Yemen', 'ZMB': 'Zambia', 'ZWE': 'Zimbabwe' + } + return country_mapping.get(code.upper(), 'Unknown Country') + + async def search_existing(self, meta, disctype): + dupes = [] + console.print("[yellow]Searching for existing torrents on site...") + disctype = meta.get('disctype', None) + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category'], meta.get('foreign'), meta.get('opera'), meta.get('asian')), + 'types[]': await self.get_type_id(disctype), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" + } + if meta.get('edition', "") != "": + params['name'] = params['name'] + f" {meta['edition']}" + try: + response = requests.get(url=self.search_url, params=params) + response = response.json() + for each in response['data']: + result = [each][0]['attributes']['name'] + # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() + # if difference >= 0.05: + dupes.append(result) + except Exception: + console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') + await asyncio.sleep(5) + + return dupes diff --git a/upload.py b/upload.py index 249e257b7..d6ce8cde4 100644 --- a/upload.py +++ b/upload.py @@ -39,6 +39,7 @@ from src.trackers.UTP import UTP from src.trackers.AL import AL from src.trackers.SHRI import SHRI +from src.trackers.TIK import TIK import json from pathlib import Path import asyncio @@ -247,10 +248,10 @@ async def do_the_thing(base_dir): ####### Upload to Trackers ####### # noqa #F266 #################################### common = COMMON(config=config) - api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM', 'LCD', 'HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'SHRI', 'LST', 'BHD', 'TL'] + api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM', 'LCD', 'HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'SHRI', 'LST', 'BHD', 'TL', 'TIK'] http_trackers = ['HDB', 'TTG', 'FL', 'PTER', 'HDT', 'MTV'] tracker_class_map = { - 'BLU': BLU, 'BHD': BHD, 'AITHER': AITHER, 'STC': STC, 'R4E': R4E, 'THR': THR, 'STT': STT, 'HP': HP, 'PTP': PTP, 'RF': RF, 'SN': SN, + 'BLU': BLU, 'BHD': BHD, 'AITHER': AITHER, 'STC': STC, 'R4E': R4E, 'THR': THR, 'STT': STT, 'HP': HP, 'PTP': PTP, 'RF': RF, 'SN': SN, 'TIK': TIK, 'ACM': ACM, 'HDB': HDB, 'LCD': LCD, 'TTG': TTG, 'LST': LST, 'HUNO': HUNO, 'FL': FL, 'LT': LT, 'NBL': NBL, 'ANT': ANT, 'PTER': PTER, 'JPTV': JPTV, 'TL': TL, 'TDC': TDC, 'HDT': HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF': RTF, 'OTW': OTW, 'FNP': FNP, 'CBR': CBR, 'UTP': UTP, 'AL': AL, 'SHRI': SHRI} @@ -261,7 +262,7 @@ async def do_the_thing(base_dir): 'BHD': {'draft_live': True}, } - async def check_mod_q_and_draft(tracker_class, meta, debug): + async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): modq, draft = None, None tracker_caps = tracker_capabilities.get(tracker_class.tracker, {}) @@ -283,6 +284,7 @@ async def check_mod_q_and_draft(tracker_class, meta, debug): return modq, draft for tracker in trackers: + disctype = meta.get('disctype', None) tracker = tracker.replace(" ", "").upper().strip() if meta['name'].endswith('DUPE?'): meta['name'] = meta['name'].replace(' DUPE?', '') @@ -308,7 +310,7 @@ async def check_mod_q_and_draft(tracker_class, meta, debug): if upload_to_tracker: # Get mod_q, draft, or draft/live depending on the tracker - modq, draft = await check_mod_q_and_draft(tracker_class, meta, debug) + modq, draft = await check_mod_q_and_draft(tracker_class, meta, debug, disctype) # Print mod_q and draft info if relevant if modq is not None: @@ -327,13 +329,13 @@ async def check_mod_q_and_draft(tracker_class, meta, debug): if tracker == "RTF": await tracker_class.api_test(meta) - dupes = await tracker_class.search_existing(meta) + dupes = await tracker_class.search_existing(meta, disctype) dupes = await common.filter_dupes(dupes, meta) meta = dupe_check(dupes, meta) # Proceed with upload if the meta is set to upload if tracker == "TL" or meta.get('upload', False): - await tracker_class.upload(meta) + await tracker_class.upload(meta, disctype) if tracker == 'SN': await asyncio.sleep(16) await client.add_to_client(meta, tracker_class.tracker) From 7e9cec156f7a91e623d02a4f0ce02b2f4b5684ad Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 13 Sep 2024 00:07:30 +1000 Subject: [PATCH 200/741] Fix conflict --- data/example-config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/data/example-config.py 
b/data/example-config.py index fc38754b1..8d78bcdbf 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -36,9 +36,9 @@ "TRACKERS": { # Which trackers do you want to upload to? - # Available tracker: BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL, HDB, TIK + # Available tracker: BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL, HDB, TIK, PSS # Remove the trackers from the default_trackers list that are not used, to save being asked everytime - "default_trackers": "BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL, HDB, PSS", + "default_trackers": "BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL, HDB, TIK, PSS", "BLU": { "useAPI": False, # Set to True if using BLU From 38847d9193b9316eb298ec7bca87b112a06af994 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 13 Sep 2024 00:08:19 +1000 Subject: [PATCH 201/741] Add branch to docker --- .github/workflows/docker-image.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index b1b1e33bd..0a33e0f0e 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -5,7 +5,7 @@ on: branches: - master - develop - - dvd-seasons + - tik env: REGISTRY: ghcr.io From ca5b3d773c53f59983d696cd504ff8745214087b Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 13 Sep 2024 23:50:01 +1000 Subject: [PATCH 202/741] Use the text file with full language instead of meta with 2 digit --- src/trackers/AITHER.py | 67 ++++++++++++++++++------------------------ 1 file changed, 28 insertions(+), 39 deletions(-) diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 11973429f..4382e47d8 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -3,8 +3,8 @@ import asyncio import requests from str2bool import str2bool -import json import platform +import re from src.trackers.COMMON import COMMON from src.console import console @@ -40,6 +40,8 @@ async def upload(self, meta): resolution_id = await self.get_res_id(meta['resolution']) modq = await self.get_flag(meta, 'modq') name = await self.edit_name(meta) + region_id = await common.unit3d_region_ids(meta.get('region')) + distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: @@ -89,7 +91,10 @@ async def upload(self, meta): if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - + if region_id != 0: + data['region_id'] = region_id + if distributor_id != 0: + data['distributor_id'] = distributor_id if meta.get('category') == "TV": data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') @@ -115,57 +120,41 @@ async def get_flag(self, meta, flag_name): async def edit_name(self, meta): aither_name = meta['name'] - # Helper function to check if English audio is present - def 
has_english_audio(tracks, is_bdmv=False): - for track in tracks: - if is_bdmv and track.get('language') == 'English': + # Helper function to check if English audio is present in the text-based MediaInfo + def has_english_audio(media_info_text): + # Look for the audio section and extract the language line + audio_section = re.search(r'Audio[\s\S]+?Language\s+:\s+(\w+)', media_info_text) + if audio_section: + language = audio_section.group(1) + if language.lower().startswith('en'): # Check if it's English return True - if not is_bdmv and track['@type'] == "Audio": - # Ensure Language is not None and is a string before checking startswith - if isinstance(track.get('Language'), str) and track.get('Language').startswith('en'): - return True return False - # Helper function to get audio language - def get_audio_lang(tracks, is_bdmv=False): - if is_bdmv: - return tracks[0].get('language', '').upper() if tracks else "" - - # For regular files, find the first audio track and return the language string - for track in tracks: - if track['@type'] == "Audio": - return track.get('Language', '').upper() # Correctly retrieve the language - return "" # Return an empty string if no audio track is found + # Helper function to extract the audio language from the text-based MediaInfo + def get_audio_lang(media_info_text): + # Find the audio section and extract the language + audio_section = re.search(r'Audio[\s\S]+?Language\s+:\s+(\w+)', media_info_text) + if audio_section: + return audio_section.group(1).upper() # Return the language in uppercase + return "" # Return empty if not found # Handle non-BDMV cases if meta['is_disc'] != "BDMV": try: - with open(f"{meta.get('base_dir')}/tmp/{meta.get('uuid')}/MediaInfo.json", 'r', encoding='utf-8') as f: - mi = json.load(f) + with open(f"{meta.get('base_dir')}/tmp/{meta.get('uuid')}/MEDIAINFO.txt", 'r', encoding='utf-8') as f: + media_info_text = f.read() - audio_tracks = mi['media']['track'] - has_eng_audio = has_english_audio(audio_tracks) + # Check for English audio + has_eng_audio = has_english_audio(media_info_text) # If English audio is not present, get the audio language if not has_eng_audio: - audio_lang = get_audio_lang(audio_tracks) + audio_lang = get_audio_lang(media_info_text) if audio_lang: # Insert the audio language before the resolution in the name aither_name = aither_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) - except (FileNotFoundError, KeyError, IndexError) as e: - print(f"Error processing MediaInfo: {e}") - - else: - bdinfo_audio = meta.get('bdinfo', {}).get('audio', []) - has_eng_audio = has_english_audio(bdinfo_audio, is_bdmv=True) - if not has_eng_audio: - audio_lang = get_audio_lang(bdinfo_audio, is_bdmv=True) - if audio_lang: - aither_name = aither_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) - - # Handle TV show episode title inclusion - if meta['category'] == "TV" and meta.get('tv_pack', 0) == 0 and meta.get('episode_title_storage', '').strip() and meta['episode'].strip(): - aither_name = aither_name.replace(meta['episode'], f"{meta['episode']} {meta['episode_title_storage']}", 1) + except (FileNotFoundError, KeyError) as e: + print(f"Error processing MEDIAINFO.txt: {e}") return aither_name From b7bfcf1e0c5ed2702c4998efc9d7223a96e84563 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 14 Sep 2024 00:38:28 +1000 Subject: [PATCH 203/741] Correct BDMV handling --- src/trackers/AITHER.py | 65 +++++++++++++++++++++++++----------------- 1 file changed, 39 insertions(+), 26 
deletions(-) diff --git a/src/trackers/AITHER.py index 4382e47d8..39f58df65 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -120,38 +120,51 @@ async def get_flag(self, meta, flag_name): async def edit_name(self, meta): aither_name = meta['name'] - # Helper function to check if English audio is present in the text-based MediaInfo - def has_english_audio(media_info_text): - # Look for the audio section and extract the language line - audio_section = re.search(r'Audio[\s\S]+?Language\s+:\s+(\w+)', media_info_text) - if audio_section: - language = audio_section.group(1) - if language.lower().startswith('en'): # Check if it's English - return True + # Helper function to check if English audio is present + def has_english_audio(tracks=None, media_info_text=None): + if meta['is_disc'] == "BDMV" and tracks: + for track in tracks: + if track.get('language', '').lower() == 'english': + return True + elif media_info_text: + audio_section = re.search(r'Audio[\s\S]+?Language\s+:\s+(\w+)', media_info_text) + if audio_section: + language = audio_section.group(1) + if language.lower().startswith('en'): # Check if it's English + return True return False - # Helper function to extract the audio language from the text-based MediaInfo - def get_audio_lang(media_info_text): - # Find the audio section and extract the language - audio_section = re.search(r'Audio[\s\S]+?Language\s+:\s+(\w+)', media_info_text) - if audio_section: - return audio_section.group(1).upper() # Return the language in uppercase - return "" # Return empty if not found - - # Handle non-BDMV cases - if meta['is_disc'] != "BDMV": + # Helper function to extract the audio language from MediaInfo text or BDMV structure + def get_audio_lang(tracks=None, is_bdmv=False, media_info_text=None): + if meta['is_disc'] == "BDMV" and tracks: + return tracks[0].get('language', '').upper() if tracks else "" + elif media_info_text: + match = re.search(r'Audio[\s\S]+?Language\s+:\s+(\w+)', media_info_text) + if match: + return match.group(1).upper() + return "" # Return empty string if no audio track is found + + is_bdmv = meta['is_disc'] == "BDMV" # noqa #F841 + media_info_tracks = meta.get('media_info_tracks', []) # noqa #F841 + + if meta['is_disc'] == "BDMV": + bdinfo_audio = meta.get('bdinfo', {}).get('audio', []) + has_eng_audio = has_english_audio(tracks=bdinfo_audio) + if not has_eng_audio: + audio_lang = get_audio_lang(bdinfo_audio, is_bdmv=True) + if audio_lang: + aither_name = aither_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) + else: + # Handle non-BDMV content try: - with open(f"{meta.get('base_dir')}/tmp/{meta.get('uuid')}/MEDIAINFO.txt", 'r', encoding='utf-8') as f: + media_info_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt" + with open(media_info_path, 'r', encoding='utf-8') as f: media_info_text = f.read() - # Check for English audio - has_eng_audio = has_english_audio(media_info_text) - - # If English audio is not present, get the audio language - if not has_eng_audio: - audio_lang = get_audio_lang(media_info_text) + # Check for English audio in the text-based MediaInfo + if not has_english_audio(media_info_text=media_info_text): + audio_lang = get_audio_lang(media_info_text=media_info_text) if audio_lang: - # Insert the audio language before the resolution in the name aither_name = aither_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) except (FileNotFoundError, KeyError) as e: print(f"Error processing MEDIAINFO.txt: {e}")
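As context for the two AITHER commits above: the helper kept for the non-disc path leans entirely on one regex against the text-based MediaInfo dump. Below is a minimal standalone sketch of that behaviour, using an invented, abbreviated MediaInfo snippet (the sample text is an assumption for illustration, not output captured from the tool):

import re

# Invented, abbreviated MEDIAINFO.txt content; real dumps carry many more fields.
sample_mediainfo = """Video
Format                         : HEVC

Audio
Format                         : DTS
Language                       : Japanese
"""

# Same pattern as in edit_name: grab the first "Language : X" line after an "Audio" heading.
match = re.search(r'Audio[\s\S]+?Language\s+:\s+(\w+)', sample_mediainfo)
audio_lang = match.group(1).upper() if match else ""
print(audio_lang)  # -> JAPANESE; edit_name then inserts this before the resolution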
{e}") From e95ef30b2e57372656429ee2f362b16163bfa1fb Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 14 Sep 2024 01:20:46 +1000 Subject: [PATCH 204/741] Update description.txt with meta from unit3d trackers https://github.com/Audionut/Upload-Assistant/issues/45 --- src/prep.py | 138 +++++++++++++++++++++++++++++----------------------- 1 file changed, 77 insertions(+), 61 deletions(-) diff --git a/src/prep.py b/src/prep.py index 14349660a..e4ec5ebd4 100644 --- a/src/prep.py +++ b/src/prep.py @@ -126,7 +126,7 @@ async def update_meta_with_unit3d_data(self, meta, tracker_data, tracker_name): tmdb, imdb, tvdb, mal, desc, category, infohash, imagelist, filename, *rest = tracker_data if tmdb not in [None, '0']: - meta['tmdb'] = tmdb + meta['tmdb_manual'] = tmdb if imdb not in [None, '0']: meta['imdb'] = str(imdb).zfill(7) if tvdb not in [None, '0']: @@ -3112,87 +3112,103 @@ def clean_filename(self, name): return name async def gen_desc(self, meta): + def clean_text(text): + return text.replace('\r\n', '').replace('\n', '').strip() - desclink = meta.get('desclink', None) - descfile = meta.get('descfile', None) + desclink = meta.get('desclink') + descfile = meta.get('descfile') ptp_desc = "" - desc_source = [] imagelist = [] + + desc_sources = ['ptp', 'blu', 'aither', 'lst', 'oe'] + desc_source = [source.upper() for source in desc_sources if meta.get(source)] + desc_source = desc_source[0] if len(desc_source) == 1 else None + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: description.seek(0) - if (desclink, descfile, meta['desc']) == (None, None, None): - if meta.get('ptp_manual') is not None: - desc_source.append('PTP') - if meta.get('blu_manual') is not None: - desc_source.append('BLU') - if len(desc_source) != 1: - desc_source = None - else: - desc_source = desc_source[0] - if meta.get('ptp', None) is not None and str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true" and desc_source in ['PTP', None]: + if (desclink, descfile, meta['desc']) == (None, None, None): + if meta.get('ptp') and str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true" and desc_source in ['PTP', None]: if meta.get('skip_gen_desc', False): console.print("[cyan]Something went wrong with PTP description.") return meta + ptp = PTP(config=self.config) ptp_desc, imagelist = await ptp.get_ptp_description(meta['ptp'], meta['is_disc']) - if ptp_desc.replace('\r\n', '').replace('\n', '').strip() != "": - description.write(ptp_desc) - description.write("\n") + if clean_text(ptp_desc): + description.write(ptp_desc + "\n") meta['description'] = 'PTP' - meta['imagelist'] = imagelist # Save the imagelist to meta if needed + meta['imagelist'] = imagelist - if ptp_desc == "" and meta.get('blu_desc', '').rstrip() not in [None, ''] and desc_source in ['BLU', None]: - if meta.get('blu_desc', '').strip().replace('\r\n', '').replace('\n', '') != '': - description.write(meta['blu_desc']) - meta['description'] = 'BLU' + # Handle BLU description + if not ptp_desc and clean_text(meta.get('blu_desc', '')) and desc_source in ['BLU', None]: + description.write(meta['blu_desc'] + "\n") + meta['description'] = 'BLU' - if meta.get('desc_template', None) is not None: - from jinja2 import Template - with open(f"{meta['base_dir']}/data/templates/{meta['desc_template']}.txt", 'r') as f: - desc_templater = Template(f.read()) - template_desc = desc_templater.render(meta) - if template_desc.strip() != "": - description.write(template_desc) - 
description.write("\n") - console.print(f"[INFO] Description from template '{meta['desc_template']}' used:\n{template_desc}") - - if meta['nfo'] is not False: - description.write("[code]") - nfo = glob.glob("*.nfo")[0] - description.write(open(nfo, 'r', encoding="utf-8").read()) - description.write("[/code]") - description.write("\n") - meta['description'] = "CUSTOM" - console.print(f"[INFO] Description from NFO file '{nfo}' used:\n{nfo_content}") # noqa: F405 + # Handle LST description + if not ptp_desc and clean_text(meta.get('lst_desc', '')) and desc_source in ['LST', None]: + description.write(meta['lst_desc'] + "\n") + meta['description'] = 'LST' - if desclink is not None: - parsed = urllib.parse.urlparse(desclink.replace('/raw/', '/')) - split = os.path.split(parsed.path) - if split[0] != '/': - raw = parsed._replace(path=f"{split[0]}/raw/{split[1]}") - else: - raw = parsed._replace(path=f"/raw{parsed.path}") - raw = urllib.parse.urlunparse(raw) - description.write(requests.get(raw).text) - description.write("\n") - meta['description'] = "CUSTOM" - console.print(f"[INFO] Description from link '{desclink}' used:\n{desclink_content}") # noqa: F405 + # Handle AITHER description + if not ptp_desc and clean_text(meta.get('aither_desc', '')) and desc_source in ['AITHER', None]: + description.write(meta['aither_desc'] + "\n") + meta['description'] = 'AITHER' + + # Handle OE description + if not ptp_desc and clean_text(meta.get('oe_desc', '')) and desc_source in ['OE', None]: + description.write(meta['oe_desc'] + "\n") + meta['description'] = 'OE' - if descfile is not None: - if os.path.isfile(descfile): - text = open(descfile, 'r').read() - description.write(text) + if meta.get('desc_template'): + from jinja2 import Template + try: + with open(f"{meta['base_dir']}/data/templates/{meta['desc_template']}.txt", 'r') as f: + template = Template(f.read()) + template_desc = template.render(meta) + if clean_text(template_desc): + description.write(template_desc + "\n") + console.print(f"[INFO] Description from template '{meta['desc_template']}' used.") + except FileNotFoundError: + console.print(f"[ERROR] Template '{meta['desc_template']}' not found.") + + if meta.get('nfo'): + nfo_files = glob.glob("*.nfo") + if nfo_files: + nfo = nfo_files[0] + with open(nfo, 'r', encoding="utf-8") as nfo_file: + nfo_content = nfo_file.read() + description.write(f"[code]{nfo_content}[/code]\n") meta['description'] = "CUSTOM" - console.print(f"[INFO] Description from file '{descfile}' used:\n{text}") + console.print(f"[INFO] NFO file '{nfo}' used.") - if meta['desc'] is not None: - description.write(meta['desc']) - description.write("\n") + if desclink: + try: + parsed = urllib.parse.urlparse(desclink.replace('/raw/', '/')) + split = os.path.split(parsed.path) + raw = parsed._replace(path=f"{split[0]}/raw/{split[1]}" if split[0] != '/' else f"/raw{parsed.path}") + raw_url = urllib.parse.urlunparse(raw) + desclink_content = requests.get(raw_url).text + description.write(desclink_content + "\n") + meta['description'] = "CUSTOM" + console.print(f"[INFO] Description from link '{desclink}' used.") + except Exception as e: + console.print(f"[ERROR] Failed to fetch description from link: {e}") + + if descfile and os.path.isfile(descfile): + with open(descfile, 'r') as f: + file_content = f.read() + description.write(file_content) + meta['description'] = "CUSTOM" + console.print(f"[INFO] Description from file '{descfile}' used.") + + if meta.get('desc'): + description.write(meta['desc'] + "\n") meta['description'] = 
"CUSTOM" - console.print(f"[INFO] Custom description used:\n{meta['desc']}") + console.print("[INFO] Custom description used.") description.write("\n") + return meta async def tag_override(self, meta): From be294afbc3c773a015be21a78b94e3b86bc7d6d7 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 14 Sep 2024 12:36:23 +1000 Subject: [PATCH 205/741] Pass disctype --- src/trackers/ACM.py | 4 ++-- src/trackers/AITHER.py | 4 ++-- src/trackers/AL.py | 4 ++-- src/trackers/ANT.py | 4 ++-- src/trackers/BHD.py | 4 ++-- src/trackers/BHDTV.py | 4 ++-- src/trackers/BLU.py | 4 ++-- src/trackers/CBR.py | 4 ++-- src/trackers/FL.py | 8 ++------ src/trackers/FNP.py | 4 ++-- src/trackers/HDB.py | 4 ++-- src/trackers/HDT.py | 8 ++------ src/trackers/HP.py | 4 ++-- src/trackers/HUNO.py | 4 ++-- src/trackers/JPTV.py | 4 ++-- src/trackers/LCD.py | 4 ++-- src/trackers/LST.py | 4 ++-- src/trackers/LT.py | 4 ++-- src/trackers/MTV.py | 4 ++-- src/trackers/NBL.py | 4 ++-- src/trackers/OE.py | 4 ++-- src/trackers/OTW.py | 4 ++-- src/trackers/PSS.py | 4 ++-- src/trackers/PTER.py | 4 ++-- src/trackers/PTP.py | 4 ++-- src/trackers/R4E.py | 4 ++-- src/trackers/RF.py | 4 ++-- src/trackers/RTF.py | 4 ++-- src/trackers/SHRI.py | 14 ++------------ src/trackers/SN.py | 4 ++-- src/trackers/STC.py | 4 ++-- src/trackers/STT.py | 4 ++-- src/trackers/TDC.py | 4 ++-- src/trackers/THR.py | 4 ++-- src/trackers/TIK.py | 2 +- src/trackers/TL.py | 2 +- src/trackers/TTG.py | 4 ++-- src/trackers/UNIT3D_TEMPLATE.py | 4 ++-- src/trackers/UTP.py | 4 ++-- 39 files changed, 76 insertions(+), 94 deletions(-) diff --git a/src/trackers/ACM.py b/src/trackers/ACM.py index cbf191477..67ba285f1 100644 --- a/src/trackers/ACM.py +++ b/src/trackers/ACM.py @@ -186,7 +186,7 @@ def get_subs_tag(self, subs): return ' [No Eng subs]' return f" [{subs[0]} subs only]" - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta['category']) @@ -266,7 +266,7 @@ async def upload(self, meta): console.print(data) open_torrent.close() - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index b0d219aa3..ba954b83b 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -31,7 +31,7 @@ def __init__(self, config): 'Will1869', 'x0r', 'YIFY'] pass - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True) @@ -199,7 +199,7 @@ async def get_res_id(self, resolution): }.get(resolution, '10') return resolution_id - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { diff --git a/src/trackers/AL.py b/src/trackers/AL.py index cadd0dbca..28f4f76bf 100644 --- a/src/trackers/AL.py +++ b/src/trackers/AL.py @@ -69,7 +69,7 @@ async def get_res_id(self, resolution): }.get(resolution, '10') return resolution_id - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, 
self.tracker, self.signature) @@ -148,7 +148,7 @@ async def upload(self, meta): console.print(data) open_torrent.close() - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index 1929172a8..9e06f931f 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -63,7 +63,7 @@ async def get_flags(self, meta): flags.append('Remux') return flags - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) torrent_filename = "BASE" torrent = Torrent.read(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") @@ -173,7 +173,7 @@ def calculate_pieces_and_file_size(total_size, pathname_bytes, piece_size): async def edit_desc(self, meta): return - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index 25bea57c3..77c8efd6b 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -28,7 +28,7 @@ def __init__(self, config): self.banned_groups = ['Sicario', 'TOMMY', 'x0r', 'nikt0', 'FGT', 'd3g', 'MeGusta', 'YIFY', 'tigole', 'TEKNO3D', 'C4K', 'RARBG', '4K4U', 'EASports', 'ReaLHD'] pass - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta['category']) @@ -213,7 +213,7 @@ async def edit_desc(self, meta): desc.close() return - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") category = meta['category'] diff --git a/src/trackers/BHDTV.py b/src/trackers/BHDTV.py index d675fdaa5..e53788fb0 100644 --- a/src/trackers/BHDTV.py +++ b/src/trackers/BHDTV.py @@ -30,7 +30,7 @@ def __init__(self, config): self.banned_groups = [] pass - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await self.edit_desc(meta) @@ -198,7 +198,7 @@ async def edit_desc(self, meta): desc.close() return - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): console.print("[red]Dupes must be checked Manually") return ['Dupes must be checked Manually'] # hopefully someone else has the time to implement this. 
diff --git a/src/trackers/BLU.py b/src/trackers/BLU.py index 9af559cf5..6a7d1ba04 100644 --- a/src/trackers/BLU.py +++ b/src/trackers/BLU.py @@ -35,7 +35,7 @@ def __init__(self, config): pass - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) blu_name = meta['name'] desc_header = "" @@ -190,7 +190,7 @@ async def derived_dv_layer(self, meta): name = name.replace(meta['resolution'], f"Hybrid {meta['resolution']}") return name, desc_header - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { diff --git a/src/trackers/CBR.py b/src/trackers/CBR.py index 8a58fd0b8..b6d8938a2 100644 --- a/src/trackers/CBR.py +++ b/src/trackers/CBR.py @@ -28,7 +28,7 @@ def __init__(self, config): self.banned_groups = [""] pass - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.signature) @@ -143,7 +143,7 @@ async def get_res_id(self, resolution): }.get(resolution, '10') return resolution_id - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Buscando por duplicatas no tracker...") params = { diff --git a/src/trackers/FL.py b/src/trackers/FL.py index a7c67d028..f3356dfda 100644 --- a/src/trackers/FL.py +++ b/src/trackers/FL.py @@ -100,11 +100,7 @@ async def edit_name(self, meta): fl_name = fl_name.replace(' ', '.').replace('..', '.') return fl_name - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### # noqa E266 - ############################################################### - - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await self.edit_desc(meta) @@ -194,7 +190,7 @@ async def upload(self, meta): raise UploadException(f"Upload to FL Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') # noqa F405 return - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] with requests.Session() as session: cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/FL.pkl") diff --git a/src/trackers/FNP.py b/src/trackers/FNP.py index 4608925ce..1a16141c0 100644 --- a/src/trackers/FNP.py +++ b/src/trackers/FNP.py @@ -62,7 +62,7 @@ async def get_res_id(self, resolution): }.get(resolution, '10') return resolution_id - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta['category']) @@ -140,7 +140,7 @@ async def upload(self, meta): console.print(data) open_torrent.close() - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py index 9a4777a94..7e454f1d9 100644 --- a/src/trackers/HDB.py +++ b/src/trackers/HDB.py @@ -196,7 +196,7 @@ async def edit_name(self, meta): return hdb_name - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await 
common.edit_torrent(meta, self.tracker, self.source_flag) await self.edit_desc(meta) @@ -317,7 +317,7 @@ async def upload(self, meta): raise UploadException(f"Upload to HDB Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') # noqa F405 return - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") url = "https://hdbits.org/api/torrents" diff --git a/src/trackers/HDT.py b/src/trackers/HDT.py index a46abb831..a30975c12 100644 --- a/src/trackers/HDT.py +++ b/src/trackers/HDT.py @@ -102,11 +102,7 @@ async def edit_name(self, meta): hdt_name = hdt_name.replace(':', '').replace('..', ' ').replace(' ', ' ') return hdt_name - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### # noqa E266 - ############################################################### - - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await self.edit_desc(meta) @@ -196,7 +192,7 @@ async def upload(self, meta): raise UploadException(f"Upload to HDT Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') # noqa F405 return - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] with requests.Session() as session: common = COMMON(config=self.config) diff --git a/src/trackers/HP.py b/src/trackers/HP.py index 3250213be..32b62236b 100644 --- a/src/trackers/HP.py +++ b/src/trackers/HP.py @@ -62,7 +62,7 @@ async def get_res_id(self, resolution): }.get(resolution, '10') return resolution_id - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta['category']) @@ -140,7 +140,7 @@ async def upload(self, meta): console.print(data) open_torrent.close() - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index 9d80b2c27..77057e5b2 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -29,7 +29,7 @@ def __init__(self, config): self.banned_groups = ["4K4U, Bearfish, BiTOR, BONE, D3FiL3R, d3g, DTR, ELiTE, EVO, eztv, EzzRips, FGT, HashMiner, HETeam, HEVCBay, HiQVE, HR-DR, iFT, ION265, iVy, JATT, Joy, LAMA, m3th, MeGusta, MRN, Musafirboy, OEPlus, Pahe.in, PHOCiS, PSA, RARBG, RMTeam, ShieldBearer, SiQ, TBD, Telly, TSP, VXT, WKS, YAWNiX, YIFY, YTS"] pass - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.unit3d_edit_desc(meta, self.tracker, self.signature) await common.edit_torrent(meta, self.tracker, self.source_flag) @@ -256,7 +256,7 @@ async def is_plex_friendly(self, meta): return 0 - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") diff --git a/src/trackers/JPTV.py b/src/trackers/JPTV.py index 1502d8787..9252aa088 100644 --- a/src/trackers/JPTV.py +++ b/src/trackers/JPTV.py @@ -67,7 +67,7 @@ async def get_res_id(self, resolution): }.get(resolution, '10') return resolution_id - async def upload(self, meta): + async def 
upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta) @@ -147,7 +147,7 @@ async def upload(self, meta): console.print(data) open_torrent.close() - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { diff --git a/src/trackers/LCD.py b/src/trackers/LCD.py index 6ca5cac61..3ddec7fbf 100644 --- a/src/trackers/LCD.py +++ b/src/trackers/LCD.py @@ -28,7 +28,7 @@ def __init__(self, config): self.banned_groups = [""] pass - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.signature) @@ -145,7 +145,7 @@ async def get_res_id(self, resolution): }.get(resolution, '10') return resolution_id - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Buscando por duplicatas no tracker...") params = { diff --git a/src/trackers/LST.py b/src/trackers/LST.py index 7278f7ed4..e71457490 100644 --- a/src/trackers/LST.py +++ b/src/trackers/LST.py @@ -70,7 +70,7 @@ async def get_res_id(self, resolution): }.get(resolution, '10') return resolution_id - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta['category'], meta.get('keywords', ''), meta.get('service', '')) @@ -164,7 +164,7 @@ async def get_flag(self, meta, flag_name): return 1 if meta.get(flag_name, False) else 0 - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { diff --git a/src/trackers/LT.py b/src/trackers/LT.py index 367428636..fdabb9176 100644 --- a/src/trackers/LT.py +++ b/src/trackers/LT.py @@ -86,7 +86,7 @@ async def edit_name(self, meta): lt_name = lt_name.replace(meta['tag'], f" [SUBS]{meta['tag']}") return lt_name - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta['category'], meta) @@ -163,7 +163,7 @@ async def upload(self, meta): console.print(data) open_torrent.close() - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 656a59f0c..07d2133c6 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -37,7 +37,7 @@ def __init__(self, config): ] pass - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/MTV.pkl") @@ -540,7 +540,7 @@ async def login(self, cookiefile): console.print(resp.url) return - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { diff --git a/src/trackers/NBL.py b/src/trackers/NBL.py index 35dd0fc50..3711c54c2 100644 --- a/src/trackers/NBL.py +++ b/src/trackers/NBL.py @@ -43,7 
+43,7 @@ async def edit_desc(self, meta): # Leave this in so manual works return - async def upload(self, meta): + async def upload(self, meta, disctype): if meta['category'] != 'TV': console.print("[red]Only TV Is allowed at NBL") return @@ -82,7 +82,7 @@ async def upload(self, meta): console.print(data) open_torrent.close() - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") if int(meta.get('tvmaze_id', 0)) != 0: diff --git a/src/trackers/OE.py b/src/trackers/OE.py index e1b7453ed..055cab4db 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -38,7 +38,7 @@ def __init__(self, config): 'YTS', 'YuiSubs', 'ZKBL', 'ZmN', 'ZMNT'] pass - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.signature) @@ -167,7 +167,7 @@ async def get_res_id(self, resolution): }.get(resolution, '10') return resolution_id - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { diff --git a/src/trackers/OTW.py b/src/trackers/OTW.py index 8c12ccd22..b0219dfc0 100644 --- a/src/trackers/OTW.py +++ b/src/trackers/OTW.py @@ -62,7 +62,7 @@ async def get_res_id(self, resolution): }.get(resolution, '10') return resolution_id - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta['category']) @@ -140,7 +140,7 @@ async def upload(self, meta): console.print(data) open_torrent.close() - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { diff --git a/src/trackers/PSS.py b/src/trackers/PSS.py index b19a594ae..f0a2ade02 100644 --- a/src/trackers/PSS.py +++ b/src/trackers/PSS.py @@ -64,7 +64,7 @@ async def get_res_id(self, resolution): }.get(resolution, '10') return resolution_id - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta['category']) @@ -142,7 +142,7 @@ async def upload(self, meta): console.print(data) open_torrent.close() - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { diff --git a/src/trackers/PTER.py b/src/trackers/PTER.py index 50e44367a..cc4868a36 100644 --- a/src/trackers/PTER.py +++ b/src/trackers/PTER.py @@ -59,7 +59,7 @@ async def validate_cookies(self, meta): console.print("[bold red]Missing Cookie File. 
(data/cookies/PTER.txt)") return False - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] common = COMMON(config=self.config) cookiefile = f"{meta['base_dir']}/data/cookies/PTER.txt" @@ -317,7 +317,7 @@ async def is_zhongzi(self, meta): return 'yes' return None - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index f13e5d39b..da6290770 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -306,7 +306,7 @@ async def get_tags(self, check_against): tags.append(each) return tags - async def search_existing(self, groupID, meta): + async def search_existing(self, groupID, meta, disctype): # Map resolutions to SD / HD / UHD quality = None if meta.get('sd', 0) == 1: # 1 is SD @@ -841,7 +841,7 @@ async def fill_upload_form(self, groupID, meta): return url, data - async def upload(self, meta, url, data): + async def upload(self, meta, url, data, disctype): torrent_filename = f"[{self.tracker}]{meta['clean_name']}.torrent" torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/{torrent_filename}" torrent = Torrent.read(torrent_path) diff --git a/src/trackers/R4E.py b/src/trackers/R4E.py index c3ba5abe5..c57f18a48 100644 --- a/src/trackers/R4E.py +++ b/src/trackers/R4E.py @@ -27,7 +27,7 @@ def __init__(self, config): self.banned_groups = [""] pass - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta['category'], meta['tmdb']) @@ -136,7 +136,7 @@ async def is_docu(self, genres): is_docu = True return is_docu - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") url = "https://racing4everyone.eu/api/torrents/filter" diff --git a/src/trackers/RF.py b/src/trackers/RF.py index 1019c2253..c6fedad83 100644 --- a/src/trackers/RF.py +++ b/src/trackers/RF.py @@ -28,7 +28,7 @@ def __init__(self, config): self.banned_groups = [""] pass - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.forum_link) @@ -142,7 +142,7 @@ async def get_res_id(self, resolution): }.get(resolution, '10') return resolution_id - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { diff --git a/src/trackers/RTF.py b/src/trackers/RTF.py index 07b78d7e8..b5ddf485f 100644 --- a/src/trackers/RTF.py +++ b/src/trackers/RTF.py @@ -29,7 +29,7 @@ def __init__(self, config): self.banned_groups = [] pass - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.forum_link) @@ -93,7 +93,7 @@ async def upload(self, meta): console.print("[cyan]Request Data:") console.print(json_data) - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") headers = { diff --git 
a/src/trackers/SHRI.py b/src/trackers/SHRI.py index 689bfc62e..ff9d7cd92 100644 --- a/src/trackers/SHRI.py +++ b/src/trackers/SHRI.py @@ -18,12 +18,6 @@ class SHRI(): Upload """ - ############################################################### - ######## EDIT ME ######## # noqa #E266 - ############################################################### - - # ALSO EDIT CLASS NAME ABOVE - def __init__(self, config): self.config = config self.tracker = 'SHRI' @@ -68,11 +62,7 @@ async def get_res_id(self, resolution): }.get(resolution, '10') return resolution_id - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### # noqa #E266 - ############################################################### - - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta['category']) @@ -150,7 +140,7 @@ async def upload(self, meta): console.print(data) open_torrent.close() - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { diff --git a/src/trackers/SN.py b/src/trackers/SN.py index 04547ce89..74484b6f9 100644 --- a/src/trackers/SN.py +++ b/src/trackers/SN.py @@ -34,7 +34,7 @@ async def get_type_id(self, type): }.get(type, '0') return type_id - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) # await common.unit3d_edit_desc(meta, self.tracker, self.forum_link) @@ -121,7 +121,7 @@ async def edit_desc(self, meta): desc.close() return - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") diff --git a/src/trackers/STC.py b/src/trackers/STC.py index 8e8c9ef52..e93bb263d 100644 --- a/src/trackers/STC.py +++ b/src/trackers/STC.py @@ -26,7 +26,7 @@ def __init__(self, config): self.banned_groups = [""] pass - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.signature) @@ -156,7 +156,7 @@ async def get_res_id(self, resolution): }.get(resolution, '10') return resolution_id - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { diff --git a/src/trackers/STT.py b/src/trackers/STT.py index 8117e1440..2029c4cb2 100644 --- a/src/trackers/STT.py +++ b/src/trackers/STT.py @@ -27,7 +27,7 @@ def __init__(self, config): self.banned_groups = [""] pass - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.signature) @@ -134,7 +134,7 @@ async def get_res_id(self, resolution): }.get(resolution, '11') return resolution_id - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { diff --git a/src/trackers/TDC.py b/src/trackers/TDC.py index cd795249e..6c68ca4df 100644 
--- a/src/trackers/TDC.py +++ b/src/trackers/TDC.py @@ -61,7 +61,7 @@ async def get_res_id(self, resolution): }.get(resolution, '10') return resolution_id - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta['category']) @@ -139,7 +139,7 @@ async def upload(self, meta): console.print(data) open_torrent.close() - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { diff --git a/src/trackers/THR.py index 84a97ad3f..9ff295069 100644 --- a/src/trackers/THR.py +++ b/src/trackers/THR.py @@ -28,7 +28,7 @@ def __init__(self, config): self.banned_groups = [""] pass - async def upload(self, session, meta): + async def upload(self, session, meta, disctype): await self.edit_torrent(meta) cat_id = await self.get_cat_id(meta) subs = self.get_subtitles(meta) @@ -257,7 +257,7 @@ async def edit_desc(self, meta): desc.close() return pronfo - def search_existing(self, session, imdb_id): + def search_existing(self, session, imdb_id, disctype): from bs4 import BeautifulSoup imdb_id = imdb_id.replace('tt', '') search_url = f"https://www.torrenthr.org/browse.php?search={imdb_id}&blah=2&incldead=1" diff --git a/src/trackers/TIK.py index a3f2b36f0..2702233a7 100644 --- a/src/trackers/TIK.py +++ b/src/trackers/TIK.py @@ -451,7 +451,7 @@ async def edit_desc(self, meta): desc_text.append(f" Source.............: {meta.get('disctype', 'Unknown')}\n") else: desc_text.append(f" Source.............: {meta.get('dvd_size', 'Unknown')}\n") - desc_text.append(f" Film Distributor...: [url={meta.get('distributor_link', '')}]{meta.get('distributor', 'Unknown')}[url]\n") + desc_text.append(f" Film Distributor...: [url={meta.get('distributor_link', '')}]{meta.get('distributor', 'Unknown')}[/url] [color=red]Don't forget the actual distributor link[/color]\n") desc_text.append(f" Average Bitrate....: {total_bitrate}\n") desc_text.append(" Ripping Program....: [color=red]Specify - if it's your rip or custom version, otherwise 'Not my rip'[/color]\n") desc_text.append("\n") diff --git a/src/trackers/TL.py index 15d6935b3..67e66298e 100644 --- a/src/trackers/TL.py +++ b/src/trackers/TL.py @@ -75,7 +75,7 @@ async def get_cat_id(self, common, meta): raise NotImplementedError('Failed to determine TL category!') - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(common, meta) diff --git a/src/trackers/TTG.py index 94b27bc7d..6ed31de13 100644 --- a/src/trackers/TTG.py +++ b/src/trackers/TTG.py @@ -105,7 +105,7 @@ async def get_anon(self, anon): anon = 'yes' return anon - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await self.edit_desc(meta) @@ -177,7 +177,7 @@ async def upload(self, meta): raise UploadException(f"Upload to TTG Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') # noqa #F405 return - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] with requests.Session() as session: cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/TTG.pkl")
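The UNIT3D_TEMPLATE.py hunk that follows patches the boilerplate new trackers are copied from; the ULCX module introduced in patch 208 below follows this same shape. A rough sketch of the minimum such a constructor defines, with a placeholder tracker name and URLs (assumptions, not a real site):

# Placeholder constructor in the UNIT3D template style; values are invented.
class NEWTRK():
    def __init__(self, config):
        self.config = config
        self.tracker = 'NEWTRK'        # short tag used for config/TRACKERS lookups
        self.source_flag = 'NEWTRK'    # stamped into the rebuilt .torrent's source field
        self.upload_url = 'https://example.tld/api/torrents/upload'
        self.search_url = 'https://example.tld/api/torrents/filter'
        self.signature = None
        self.banned_groups = []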
os.path.abspath(f"{meta['base_dir']}/data/cookies/TTG.pkl") diff --git a/src/trackers/UNIT3D_TEMPLATE.py b/src/trackers/UNIT3D_TEMPLATE.py index e778b6df0..435b30bd6 100644 --- a/src/trackers/UNIT3D_TEMPLATE.py +++ b/src/trackers/UNIT3D_TEMPLATE.py @@ -72,7 +72,7 @@ async def get_res_id(self, resolution): ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### noqa E266 ############################################################### - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta['category']) @@ -150,7 +150,7 @@ async def upload(self, meta): console.print(data) open_torrent.close() - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { diff --git a/src/trackers/UTP.py b/src/trackers/UTP.py index d6bf86d65..eeaf552ff 100644 --- a/src/trackers/UTP.py +++ b/src/trackers/UTP.py @@ -28,7 +28,7 @@ def __init__(self, config): self.banned_groups = [] pass - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True) @@ -137,7 +137,7 @@ async def get_res_id(self, resolution): }.get(resolution, '1') return resolution_id - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { From 114facc4135084372b042b141bc533df8716c667 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 14 Sep 2024 12:43:37 +1000 Subject: [PATCH 206/741] Push TIK description to file --- src/prep.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/prep.py b/src/prep.py index 6a666e98e..e32d6bd41 100644 --- a/src/prep.py +++ b/src/prep.py @@ -3135,7 +3135,7 @@ def clean_text(text): ptp_desc = "" imagelist = [] - desc_sources = ['ptp', 'blu', 'aither', 'lst', 'oe'] + desc_sources = ['ptp', 'blu', 'aither', 'lst', 'oe', 'tik'] desc_source = [source.upper() for source in desc_sources if meta.get(source)] desc_source = desc_source[0] if len(desc_source) == 1 else None @@ -3155,26 +3155,26 @@ def clean_text(text): meta['description'] = 'PTP' meta['imagelist'] = imagelist - # Handle BLU description if not ptp_desc and clean_text(meta.get('blu_desc', '')) and desc_source in ['BLU', None]: description.write(meta['blu_desc'] + "\n") meta['description'] = 'BLU' - # Handle LST description if not ptp_desc and clean_text(meta.get('lst_desc', '')) and desc_source in ['LST', None]: description.write(meta['lst_desc'] + "\n") meta['description'] = 'LST' - # Handle AITHER description if not ptp_desc and clean_text(meta.get('aither_desc', '')) and desc_source in ['AITHER', None]: description.write(meta['aither_desc'] + "\n") meta['description'] = 'AITHER' - # Handle OE description if not ptp_desc and clean_text(meta.get('oe_desc', '')) and desc_source in ['OE', None]: description.write(meta['oe_desc'] + "\n") meta['description'] = 'OE' + if not ptp_desc and clean_text(meta.get('tike_desc', '')) and desc_source in ['TIK', None]: + description.write(meta['tik_desc'] + "\n") + meta['description'] = 'TIK' + if meta.get('desc_template'): from jinja2 import Template try: From 19a193039e57cd7d85741e4f47f057ce1dc97506 Mon Sep 
From 19a193039e57cd7d85741e4f47f057ce1dc97506 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 18 Sep 2024 21:09:50 +1000 Subject: [PATCH 207/741] Pass disctype outside of API --- upload.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/upload.py b/upload.py index 16f0f65f4..ffba33211 100644 --- a/upload.py +++ b/upload.py @@ -360,11 +360,11 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): if check_banned_group(tracker_class.tracker, tracker_class.banned_groups, meta): continue if await tracker_class.validate_credentials(meta) is True: - dupes = await tracker_class.search_existing(meta) + dupes = await tracker_class.search_existing(meta, disctype) dupes = await common.filter_dupes(dupes, meta) meta = dupe_check(dupes, meta) if meta['upload'] is True: - await tracker_class.upload(meta) + await tracker_class.upload(meta, disctype) await client.add_to_client(meta, tracker_class.tracker) if tracker == "MANUAL": @@ -414,11 +414,11 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): console.print("[yellow]Logging in to THR") session = thr.login(session) console.print("[yellow]Searching for Dupes") - dupes = thr.search_existing(session, meta.get('imdb_id')) + dupes = thr.search_existing(session, meta.get('imdb_id'), disctype) dupes = await common.filter_dupes(dupes, meta) meta = dupe_check(dupes, meta) if meta['upload'] is True: - await thr.upload(session, meta) + await thr.upload(session, meta, disctype) await client.add_to_client(meta, "THR") except Exception: console.print(traceback.print_exc()) @@ -454,14 +454,14 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): meta['upload'] = True else: console.print("[yellow]Searching for Existing Releases") - dupes = await ptp.search_existing(groupID, meta) + dupes = await ptp.search_existing(groupID, meta, disctype) dupes = await common.filter_dupes(dupes, meta) meta = dupe_check(dupes, meta) if meta.get('imdb_info', {}) == {}: meta['imdb_info'] = await prep.get_imdb_info(meta['imdb_id'], meta) if meta['upload'] is True: ptpUrl, ptpData = await ptp.fill_upload_form(groupID, meta) - await ptp.upload(meta, ptpUrl, ptpData) + await ptp.upload(meta, ptpUrl, ptpData, disctype) await asyncio.sleep(5) await client.add_to_client(meta, "PTP") except Exception: From 8c542f4c92b491030e0b71fe9260a35ab7e1c077 Mon Sep 17 00:00:00 2001 From: l3mon4id <170472008+l3mon4id@users.noreply.github.com> Date: Thu, 19 Sep 2024 19:24:06 +0200 Subject: [PATCH 208/741] Simple ULCX implementation --- README.md | 2 +- src/trackers/ULCX.py | 256 +++++++++++++++++++++++++++++++++++++++++++ upload.py | 6 +- 3 files changed, 261 insertions(+), 3 deletions(-) create mode 100644 src/trackers/ULCX.py diff --git a/README.md b/README.md index 544a57612..ec02dd2b8 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ A simple tool to take the work out of uploading.
- Can re-use existing torrents instead of hashing new - Generates proper name for your upload using Mediainfo/BDInfo and TMDb/IMDb conforming to site rules - Checks for existing releases already on site - - Uploads to PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN/RTF/OTW/FNP/CBR/UTP/HDB/AL/SHRI/OE/TL/BHDTV/HDT/JPTV/LT/MTV/PTER/TDC/TTG/UTP/PSS + - Uploads to PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN/RTF/OTW/FNP/CBR/UTP/HDB/AL/SHRI/OE/TL/BHDTV/HDT/JPTV/LT/MTV/PTER/TDC/TTG/UTP/PSS/ULCX - Adds to your client with fast resume, seeding instantly (rtorrent/qbittorrent/deluge/watch folder) - ALL WITH MINIMAL INPUT! - Currently works with .mkv/.mp4/Blu-ray/DVD/HD-DVDs diff --git a/src/trackers/ULCX.py b/src/trackers/ULCX.py new file mode 100644 index 000000000..8999a6721 --- /dev/null +++ b/src/trackers/ULCX.py @@ -0,0 +1,256 @@ +# -*- coding: utf-8 -*- +# import discord +import asyncio +import platform + +import requests +from src.console import console +from src.trackers.COMMON import COMMON + + +class ULCX: + def __init__(self, config): + self.config = config + self.tracker = "ULCX" + self.source_flag = "ULCX" + self.upload_url = "https://upload.cx/api/torrents/upload" + self.search_url = "https://upload.cx/api/torrents/filter" + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" + self.banned_groups = [ + "Tigole", + "x0r", + "Judas", + "SPDVD", + "MeGusta", + "YIFY", + "SWTYBLZ", + "TAoE", + "TSP", + "TSPxL", + "LAMA", + "4K4U", + "ION10", + "Will1869", + "TGx", + "Sicario", + "QxR", + "Hi10", + "EMBER", + "FGT", + "AROMA", + "d3g", + "nikt0", + "Grym", + "RARBG", + "iVy", + "NuBz", + "NAHOM", + "EDGE2020", + "FnP", + ] + + async def get_cat_id(self, category_name): + category_id = { + "MOVIE": "1", + "TV": "2", + }.get(category_name, "0") + return category_id + + async def get_type_id(self, type): + type_id = { + "DISC": "1", + "REMUX": "2", + "WEBDL": "4", + "WEBRIP": "5", + "HDTV": "6", + "ENCODE": "3", + }.get(type, "0") + return type_id + + async def get_res_id(self, resolution): + resolution_id = { + "8640p": "10", + "4320p": "1", + "2160p": "2", + "1440p": "3", + "1080p": "3", + "1080i": "4", + "720p": "5", + "576p": "6", + "576i": "7", + "480p": "8", + "480i": "9", + }.get(resolution, "10") + return resolution_id + + async def upload(self, meta): + common = COMMON(config=self.config) + await common.edit_torrent(meta, self.tracker, self.source_flag) + cat_id = await self.get_cat_id(meta["category"]) + type_id = await self.get_type_id(meta["type"]) + resolution_id = await self.get_res_id(meta["resolution"]) + await common.unit3d_edit_desc(meta, self.tracker, signature=self.signature) + region_id = await common.unit3d_region_ids(meta.get("region")) + distributor_id = await common.unit3d_distributor_ids(meta.get("distributor")) + if meta["anon"] != 0 or self.config["TRACKERS"][self.tracker].get( + "anon", False + ): + anon = 1 + else: + anon = 0 + + modq = await self.get_flag(meta, "modq") + + if meta["bdinfo"] is not None: + mi_dump = None + bd_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", + "r", + encoding="utf-8", + ).read() + else: + mi_dump = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", + "r", + encoding="utf-8", + ).read() + bd_dump = None + desc = open( + f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", + "r", + ).read() + open_torrent = open( + 
f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + "rb", + ) + files = {"torrent": open_torrent} + data = { + "name": meta["name"], + "description": desc, + "mediainfo": mi_dump, + "bdinfo": bd_dump, + "category_id": cat_id, + "type_id": type_id, + "resolution_id": resolution_id, + "tmdb": meta["tmdb"], + "imdb": meta["imdb_id"].replace("tt", ""), + "tvdb": meta["tvdb_id"], + "mal": meta["mal_id"], + "igdb": 0, + "anonymous": anon, + "stream": meta["stream"], + "sd": meta["sd"], + "keywords": meta["keywords"], + "personal_release": int(meta.get("personalrelease", False)), + "internal": 0, + "featured": 0, + "free": 0, + "doubleup": 0, + "sticky": 0, + "mod_queue_opt_in": modq, + } + # Internal + if self.config["TRACKERS"][self.tracker].get("internal", False): + if meta["tag"] != "" and ( + meta["tag"][1:] + in self.config["TRACKERS"][self.tracker].get("internal_groups", []) + ): + data["internal"] = 1 + + if region_id != 0: + data["region_id"] = region_id + if distributor_id != 0: + data["distributor_id"] = distributor_id + if meta.get("category") == "TV": + data["season_number"] = meta.get("season_int", "0") + data["episode_number"] = meta.get("episode_int", "0") + headers = { + "User-Agent": f"Upload Assistant/2.1 ({platform.system()} {platform.release()})" + } + params = {"api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip()} + + if not meta["debug"]: + success = "Unknown" + try: + response = requests.post( + url=self.upload_url, + files=files, + data=data, + headers=headers, + params=params, + ) + response.raise_for_status() + response_json = response.json() + success = response_json.get("success", False) + data = response_json.get("data", {}) + except Exception as e: + console.print( + f"[red]Encountered Error: {e}[/red]\n[bold yellow]May have uploaded, please go check.." + ) + + if success == "Unknown": + console.print( + "[bold yellow]Status of upload is unknown, please go check.." + ) + success = False + elif success: + console.print("[bold green]Torrent uploaded successfully!") + else: + console.print("[bold red]Torrent upload failed.") + + if data: + if ( + "name" in data + and "The name has already been taken." in data["name"] + ): + console.print("[red]Name has already been taken.") + if ( + "info_hash" in data + and "The info hash has already been taken." 
in data["info_hash"] + ): + console.print("[red]Info hash has already been taken.") + else: + console.print("[cyan]Request Data:") + console.print(data) + + try: + open_torrent.close() + except Exception as e: + console.print(f"[red]Failed to close torrent file: {e}[/red]") + + return success + + async def get_flag(self, meta, flag_name): + config_flag = self.config["TRACKERS"][self.tracker].get(flag_name) + if config_flag is not None: + return 1 if config_flag else 0 + + return 1 if meta.get(flag_name, False) else 0 + + async def search_existing(self, meta): + dupes = {} + console.print("[yellow]Searching for existing torrents on site...") + params = { + "api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip(), + "tmdbId": meta["tmdb"], + "categories[]": await self.get_cat_id(meta["category"]), + "types[]": await self.get_type_id(meta["type"]), + "resolutions[]": await self.get_res_id(meta["resolution"]), + "name": "", + } + if meta.get("edition", "") != "": + params["name"] = params["name"] + f" {meta['edition']}" + try: + response = requests.get(url=self.search_url, params=params) + response = response.json() + for each in response["data"]: + result = each["attributes"]["name"] + size = each["attributes"]["size"] + dupes[result] = size + except Exception as e: + console.print( + f"[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect. Error: {e}" + ) + await asyncio.sleep(5) + + return dupes diff --git a/upload.py b/upload.py index d3984f5f3..5efd25d38 100644 --- a/upload.py +++ b/upload.py @@ -40,6 +40,7 @@ from src.trackers.AL import AL from src.trackers.SHRI import SHRI from src.trackers.PSS import PSS +from src.trackers.ULCX import ULCX import json from pathlib import Path import asyncio @@ -248,18 +249,19 @@ async def do_the_thing(base_dir): ####### Upload to Trackers ####### # noqa #F266 #################################### common = COMMON(config=config) - api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM', 'LCD', 'HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'SHRI', 'LST', 'BHD', 'TL', 'PSS'] + api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM', 'LCD', 'HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'SHRI', 'LST', 'BHD', 'TL', 'PSS', 'ULCX'] http_trackers = ['HDB', 'TTG', 'FL', 'PTER', 'HDT', 'MTV'] tracker_class_map = { 'BLU': BLU, 'BHD': BHD, 'AITHER': AITHER, 'STC': STC, 'R4E': R4E, 'THR': THR, 'STT': STT, 'HP': HP, 'PTP': PTP, 'RF': RF, 'SN': SN, 'ACM': ACM, 'HDB': HDB, 'LCD': LCD, 'TTG': TTG, 'LST': LST, 'HUNO': HUNO, 'FL': FL, 'LT': LT, 'NBL': NBL, 'ANT': ANT, 'PTER': PTER, 'JPTV': JPTV, - 'TL': TL, 'TDC': TDC, 'HDT': HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF': RTF, 'OTW': OTW, 'FNP': FNP, 'CBR': CBR, 'UTP': UTP, 'AL': AL, 'SHRI': SHRI, 'PSS': PSS} + 'TL': TL, 'TDC': TDC, 'HDT': HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF': RTF, 'OTW': OTW, 'FNP': FNP, 'CBR': CBR, 'UTP': UTP, 'AL': AL, 'SHRI': SHRI, 'PSS': PSS, 'ULCX': ULCX} tracker_capabilities = { 'LST': {'mod_q': True, 'draft': True}, 'BLU': {'mod_q': True, 'draft': False}, 'AITHER': {'mod_q': True, 'draft': False}, 'BHD': {'draft_live': True}, + 'ULCX': {'mod_q': True} } async def check_mod_q_and_draft(tracker_class, meta, debug): From 05add3cf05c31d147475b5559ac0553fa87dd4a2 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 20 Sep 2024 09:57:27 +1000 Subject: [PATCH 209/741] Add to example --- 
data/example-config.py | 10 ++++++++-- src/trackers/ULCX.py | 33 +++------------------------------ 2 files changed, 11 insertions(+), 32 deletions(-) diff --git a/data/example-config.py b/data/example-config.py index 1f92cfb66..50fa868a4 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -36,9 +36,9 @@ "TRACKERS": { # Which trackers do you want to upload to? - # Available tracker: BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL, HDB, PSS + # Available tracker: BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL, HDB, PSS, ULCX # Remove the trackers from the default_trackers list that are not used, to save being asked everytime - "default_trackers": "BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL, HDB, PSS", + "default_trackers": "BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL, HDB, PSS, ULCX", "BLU": { "useAPI": False, # Set to True if using BLU @@ -240,6 +240,12 @@ "announce_url": "https://privatesilverscreen.cc/announce/customannounceurl", # "anon" : False }, + "ULCX": { + "api_key": "ULCX api key", + "announce_url": "https://upload.cx/announce/customannounceurl", + # "anon" : False, + # "modq" : False ## Not working yet + }, "MANUAL": { # Uncomment and replace link with filebrowser (https://github.com/filebrowser/filebrowser) link to the Upload-Assistant directory, this will link to your filebrowser instead of uploading to uguu.se # "filebrowser" : "https://domain.tld/filebrowser/files/Upload-Assistant/" diff --git a/src/trackers/ULCX.py b/src/trackers/ULCX.py index 8999a6721..de5c146f7 100644 --- a/src/trackers/ULCX.py +++ b/src/trackers/ULCX.py @@ -17,36 +17,9 @@ def __init__(self, config): self.search_url = "https://upload.cx/api/torrents/filter" self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = [ - "Tigole", - "x0r", - "Judas", - "SPDVD", - "MeGusta", - "YIFY", - "SWTYBLZ", - "TAoE", - "TSP", - "TSPxL", - "LAMA", - "4K4U", - "ION10", - "Will1869", - "TGx", - "Sicario", - "QxR", - "Hi10", - "EMBER", - "FGT", - "AROMA", - "d3g", - "nikt0", - "Grym", - "RARBG", - "iVy", - "NuBz", - "NAHOM", - "EDGE2020", - "FnP", + "Tigole", "x0r", "Judas", "SPDVD", "MeGusta", "YIFY", "SWTYBLZ", "TAoE", "TSP", "TSPxL", "LAMA", "4K4U", "ION10", + "Will1869", "TGx", "Sicario", "QxR", "Hi10", "EMBER", "FGT", "AROMA", "d3g", "nikt0", "Grym", "RARBG", "iVy", "NuBz", + "NAHOM", "EDGE2020", "FnP", ] async def get_cat_id(self, category_name): From 6fcb59c02d622825f4e023840c230637813fefd1 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 20 Sep 2024 10:25:00 +1000 Subject: [PATCH 210/741] Remove unused argument --- src/args.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/args.py b/src/args.py index 988c22715..0db8fbe3a 100644 --- a/src/args.py +++ b/src/args.py @@ -67,12 +67,11 @@ def parse(self, args, meta): parser.add_argument('-debug', '--debug', action='store_true', required=False, help="Debug Mode, will run through all the motions providing extra info, but will not upload to trackers.") parser.add_argument('-ffdebug', 
'--ffdebug', action='store_true', required=False, help="Will show info from ffmpeg while taking screenshots.") parser.add_argument('-m', '--manual', action='store_true', required=False, help="Manual Mode. Returns link to ddl screens/base.torrent") + parser.add_argument('-mps', '--max-piece-size', nargs='*', required=False, help="Set max piece size allowed in MiB for default torrent creation (default 64 MiB)", choices=['2', '4', '8', '16', '32', '64']) parser.add_argument('-nh', '--nohash', action='store_true', required=False, help="Don't hash .torrent") parser.add_argument('-rh', '--rehash', action='store_true', required=False, help="DO hash .torrent") - parser.add_argument('-ps', '--piece-size-max', dest='piece_size_max', nargs='*', required=False, help="Maximum piece size in MiB", choices=[1, 2, 4, 8, 16], type=int) parser.add_argument('-dr', '--draft', action='store_true', required=False, help="Send to drafts (BHD, LST)") parser.add_argument('-mq', '--modq', action='store_true', required=False, help="Send to modQ") - parser.add_argument('-mps', '--max-piece-size', nargs='*', required=False, help="Set max piece size allowed in MiB for default torrent creation (default 64 MiB)", choices=['2', '4', '8', '16', '32', '64', '128']) parser.add_argument('-client', '--client', nargs='*', required=False, help="Use this torrent client instead of default") parser.add_argument('-qbt', '--qbit-tag', dest='qbit_tag', nargs='*', required=False, help="Add to qbit with this tag") parser.add_argument('-qbc', '--qbit-cat', dest='qbit_cat', nargs='*', required=False, help="Add to qbit with this category") From 525d7f1594625333b0ab1b3456e80cbeeb2181be Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 20 Sep 2024 11:17:29 +1000 Subject: [PATCH 211/741] Change .torrent limit to warning --- src/prep.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index e4ec5ebd4..8ae30b4a7 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2400,7 +2400,7 @@ def calculate_piece_size(cls, total_size, min_size, max_size, files): piece_size = our_max_size break elif torrent_file_size > 102400: - cli_ui.error('WARNING: .torrent size will exceed 100 KiB!') + cli_ui.warning('WARNING: .torrent size will exceed 100 KiB!') piece_size *= 2 if piece_size > our_max_size: piece_size = our_max_size From 6e95a24daaa0c6eb0d17389d4530f892bc8494c3 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 20 Sep 2024 11:34:42 +1000 Subject: [PATCH 212/741] BHD - UTF-8 description handling --- src/trackers/BHD.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index 25bea57c3..9fc62d311 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -49,7 +49,7 @@ async def upload(self, meta): else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8') - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() torrent_file = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" files = { 'mediainfo': mi_dump, @@ -180,8 +180,8 @@ async def get_type(self, meta): return type_id async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as desc: + base = 
open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as desc: if meta.get('discs', []) != []: discs = meta['discs'] if discs[0]['type'] == "DVD": From 2f42eb81ff9e98e275b0a1ed9954408c128a3cf0 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 20 Sep 2024 11:53:52 +1000 Subject: [PATCH 213/741] BHD - use imdb_id for dupe checking --- src/trackers/BHD.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index 9fc62d311..cff21b2eb 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -220,9 +220,9 @@ async def search_existing(self, meta): if category == 'MOVIE': category = "Movies" data = { - 'tmdb_id': meta['tmdb'], + 'action': 'search', + 'imdb_id': meta['imdb_id'], 'categories': category, - 'types': await self.get_type(meta), } # Search all releases if SD if meta['sd'] == 1: @@ -232,7 +232,7 @@ async def search_existing(self, meta): if meta.get('tv_pack', 0) == 1: data['pack'] = 1 data['search'] = f"{meta.get('season', '')}{meta.get('episode', '')}" - url = f"https://beyond-hd.me/api/torrents/{self.config['TRACKERS']['BHD']['api_key'].strip()}?action=search" + url = f"https://beyond-hd.me/api/torrents/{self.config['TRACKERS']['BHD']['api_key'].strip()}" try: response = requests.post(url=url, data=data) response = response.json() From ec68b64c51b7a823439eeea3b5dbef2c6a90a81a Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 20 Sep 2024 13:11:44 +1000 Subject: [PATCH 214/741] BHD - fix tmdb searching instead --- src/trackers/BHD.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index cff21b2eb..7723934d3 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -24,7 +24,7 @@ def __init__(self, config): self.tracker = 'BHD' self.source_flag = 'BHD' self.upload_url = 'https://beyond-hd.me/api/upload/' - self.signature = "\n[center][url=https://beyond-hd.me/forums/topic/toolpython-l4gs-upload-assistant.5456/post/138087#post-138087]Created by L4G's Upload Assistant[/url][/center]" + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = ['Sicario', 'TOMMY', 'x0r', 'nikt0', 'FGT', 'd3g', 'MeGusta', 'YIFY', 'tigole', 'TEKNO3D', 'C4K', 'RARBG', '4K4U', 'EASports', 'ReaLHD'] pass @@ -218,11 +218,15 @@ async def search_existing(self, meta): console.print("[yellow]Searching for existing torrents on site...") category = meta['category'] if category == 'MOVIE': + tmdbID = "movie" category = "Movies" + if category == "TV": + tmdbID = "tv" data = { 'action': 'search', - 'imdb_id': meta['imdb_id'], + 'tmdb_id': f"{tmdbID}/{meta['tmdb']}", 'categories': category, + 'types': await self.get_type(meta), } # Search all releases if SD if meta['sd'] == 1: From 01fba6ef07861b2304ded95ee0d6777faa2379c9 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 21 Sep 2024 09:39:30 +1000 Subject: [PATCH 215/741] Update example-config.py Fixes https://github.com/Audionut/Upload-Assistant/issues/50 and https://github.com/Audionut/Upload-Assistant/issues/45 --- data/example-config.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/data/example-config.py b/data/example-config.py index 50fa868a4..7ba72a873 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -70,6 +70,7 @@ "announce_url": "" }, "AITHER": { + "useAPI": False, # 
Set to True if using Aither
        "api_key": "AITHER api key",
        "announce_url": "https://aither.cc/announce/customannounceurl",
        # "anon" : False,
@@ -151,6 +152,7 @@
        # "anon" : False
    },
    "LST": {
+        "useAPI": False,  # Set to True if using LST
        "api_key": "LST api key",
        "announce_url": "https://lst.gg/announce/customannounceurl",
        # "anon" : False,
@@ -186,6 +188,7 @@
        "announce_url": "https://hdts-announce.ru/announce.php",  # DO NOT EDIT THIS LINE
    },
    "OE": {
+        "useAPI": False,  # Set to True if using OE
        "api_key": "OE api key",
        "announce_url": "https://onlyencodes.cc/announce/customannounceurl",
        # "anon" : False

From 91020578cdf417612f5f53372036f23a7fcf29a5 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Tue, 24 Sep 2024 17:17:09 +1000
Subject: [PATCH 216/741] Remove unneeded call

Should fix https://github.com/Audionut/Upload-Assistant/issues/52
---
 src/trackers/AITHER.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py
index 39f58df65..28f61697e 100644
--- a/src/trackers/AITHER.py
+++ b/src/trackers/AITHER.py
@@ -149,9 +149,9 @@ def get_audio_lang(tracks=None, is_bdmv=False, media_info_text=None):
         if meta['is_disc'] == "BDMV":
             bdinfo_audio = meta.get('bdinfo', {}).get('audio', [])
-            has_eng_audio = has_english_audio(bdinfo_audio, is_bdmv=True)
+            has_eng_audio = has_english_audio(bdinfo_audio)
             if not has_eng_audio:
-                audio_lang = get_audio_lang(bdinfo_audio, is_bdmv=True)
+                audio_lang = get_audio_lang(bdinfo_audio)
                 if audio_lang:
                     aither_name = aither_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1)
         else:

From ed709419f9c86c466ed7effb1146aebf4116345b Mon Sep 17 00:00:00 2001
From: Khakis
Date: Tue, 24 Sep 2024 13:27:16 -0500
Subject: [PATCH 217/741] Update AITHER.py

Corrects an issue where, for dual audio releases, only the first audio
track was checked when determining the Aither release name.
---
 src/trackers/AITHER.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py
index 28f61697e..d2c2f290b 100644
--- a/src/trackers/AITHER.py
+++ b/src/trackers/AITHER.py
@@ -120,16 +120,15 @@ async def get_flag(self, meta, flag_name):
     async def edit_name(self, meta):
         aither_name = meta['name']
-        # Helper function to check if English audio is present
         def has_english_audio(tracks=None, media_info_text=None):
             if meta['is_disc'] == "BDMV" and tracks:
                 for track in tracks:
                     if track.get('language', '').lower() == 'english':
                         return True
             elif media_info_text:
-                audio_section = re.search(r'Audio[\s\S]+?Language\s+:\s+(\w+)', media_info_text)
-                if audio_section:
-                    language = audio_section.group(1)
+                audio_section = re.findall(r'Audio[\s\S]+?Language\s+:\s+(\w+)', media_info_text)
+                for i, language in enumerate(audio_section):
+                    language = language.lower().strip()
                     if language.lower().startswith('en'):  # Check if it's English
                         return True
             return False

From 7f813bbdfdb1432809dfa69034e24ffd5755d20f Mon Sep 17 00:00:00 2001
From: Audionut
Date: Wed, 25 Sep 2024 19:40:39 +1000
Subject: [PATCH 218/741] OTW - update source flag

---
 src/trackers/OTW.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/trackers/OTW.py b/src/trackers/OTW.py
index 8c12ccd22..71f85276c 100644
--- a/src/trackers/OTW.py
+++ b/src/trackers/OTW.py
@@ -21,7 +21,7 @@ class OTW():
     def __init__(self, config):
         self.config = config
         self.tracker = 'OTW'
-        self.source_flag = 'OTW'
+        self.source_flag = 'OLD'
         self.upload_url = 'https://oldtoons.world/api/torrents/upload'
         self.search_url =
'https://oldtoons.world/api/torrents/filter' self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" From 69c0bce2a025444a538eddeccb281c3e5b4529ba Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 25 Sep 2024 21:33:22 +1000 Subject: [PATCH 219/741] Update PSS banned groups --- src/trackers/PSS.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/PSS.py b/src/trackers/PSS.py index b19a594ae..66ca5b0ae 100644 --- a/src/trackers/PSS.py +++ b/src/trackers/PSS.py @@ -26,7 +26,7 @@ def __init__(self, config): self.search_url = 'https://privatesilverscreen.cc/api/torrents/filter' self.signature = '\n[center][url=https://privatesilverscreen.cc/pages/1]Please Seed[/url][/center]' self.banned_groups = ['4K4U', 'AROMA', 'd3g', 'edge2020', 'EMBER', 'EVO', 'FGT', 'NeXus', 'ION10', 'iVy', 'Judas', 'LAMA', 'MeGusta', 'nikt0', 'OEPlus', 'OFT', 'OsC', 'PYC', - 'QxR', 'Ralphy', 'RARBG', 'RetroPeeps', 'SAMPA', 'Sicario', 'Silence', 'STUTTERSHIT', 'Tigole', 'TSP', 'TSPxL', 'Will1869', 'x0r', 'YIFY', 'core', 'ZMNT', + 'QxR', 'Ralphy', 'RARBG', 'SAMPA', 'Sicario', 'Silence', 'STUTTERSHIT', 'Tigole', 'TSP', 'TSPxL', 'Will1869', 'x0r', 'YIFY', 'core', 'ZMNT', 'msd', 'nikt0', 'aXXo', 'BRrip', 'CM8', 'CrEwSaDe', 'DNL', 'FaNGDiNG0', 'FRDS', 'HD2DVD', 'HDTime', 'Leffe', 'mHD', 'mSD', 'nHD', 'nSD', 'NhaNc3', 'PRODJi', 'RDN', 'SANTi', 'ViSION', 'WAF', 'YTS', 'FROZEN', 'UTR', 'Grym', 'GrymLegacy', 'CK4', 'ProRes', 'MezRips', 'GalaxyRG', 'RCDiVX', 'LycanHD'] pass From 3a4bebe646fbbbf802d1ad5672b9cfc4bab4a21c Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 26 Sep 2024 17:24:47 +1000 Subject: [PATCH 220/741] define service_longname when service specified --- src/prep.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index 8ae30b4a7..5c9170b90 100644 --- a/src/prep.py +++ b/src/prep.py @@ -606,6 +606,9 @@ async def gather_prep(self, meta, mode): meta['source'], meta['type'] = self.get_source(meta['type'], video, meta['path'], meta['is_disc'], meta) if meta.get('service', None) in (None, ''): meta['service'], meta['service_longname'] = self.get_service(video, meta.get('tag', ''), meta['audio'], meta['filename']) + elif meta.get('service'): + services = self.get_service(get_services_only=True) + meta['service_longname'] = max((k for k, v in services.items() if v == meta['service']), key=len, default=meta['service']) meta['uhd'] = self.get_uhd(meta['type'], guessit(meta['path']), meta['resolution'], meta['path']) meta['hdr'] = self.get_hdr(mi, bdinfo) meta['distributor'] = self.get_distributor(meta['distributor']) @@ -3004,8 +3007,7 @@ async def get_season_episode(self, video, meta): return meta - def get_service(self, video, tag, audio, guess_title): - service = guessit(video).get('streaming_service', "") + def get_service(self, video=None, tag=None, audio=None, guess_title=None, get_services_only=False): services = { '9NOW': '9NOW', '9Now': '9NOW', 'AE': 'AE', 'A&E': 'AE', 'AJAZ': 'AJAZ', 'Al Jazeera English': 'AJAZ', 'ALL4': 'ALL4', 'Channel 4': 'ALL4', 'AMBC': 'AMBC', 'ABC': 'AMBC', 'AMC': 'AMC', 'AMZN': 'AMZN', @@ -3053,6 +3055,10 @@ def get_service(self, video, tag, audio, guess_title): 'YT': 'YT', 'ZDF': 'ZDF', 'iP': 'iP', 'BBC iPlayer': 'iP', 'iQIYI': 'iQIYI', 'iT': 'iT', 'iTunes': 'iT' } + if get_services_only: + return services + service = guessit(video).get('streaming_service', "") + video_name = re.sub(r"[.()]", " ", video.replace(tag, 
'').replace(guess_title, '')) if "DTS-HD MA" in audio: video_name = video_name.replace("DTS-HD.MA.", "").replace("DTS-HD MA ", "") From 32db464e4f7f9bf3cb7629cec34f3f2ea8149d57 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 26 Sep 2024 19:59:40 +1000 Subject: [PATCH 221/741] description encoding --- src/trackers/ACM.py | 6 +++--- src/trackers/AITHER.py | 2 +- src/trackers/AL.py | 2 +- src/trackers/BHDTV.py | 6 +++--- src/trackers/BLU.py | 2 +- src/trackers/CBR.py | 2 +- src/trackers/FL.py | 6 +++--- src/trackers/FNP.py | 2 +- src/trackers/HDB.py | 6 +++--- src/trackers/HDT.py | 4 ++-- src/trackers/HP.py | 2 +- src/trackers/HUNO.py | 2 +- src/trackers/JPTV.py | 2 +- src/trackers/LCD.py | 2 +- src/trackers/LST.py | 2 +- src/trackers/LT.py | 2 +- src/trackers/MTV.py | 6 +++--- src/trackers/OE.py | 2 +- src/trackers/OTW.py | 2 +- src/trackers/PSS.py | 2 +- src/trackers/PTER.py | 4 ++-- src/trackers/PTP.py | 2 +- src/trackers/R4E.py | 2 +- src/trackers/RF.py | 2 +- src/trackers/SHRI.py | 2 +- src/trackers/SN.py | 6 +++--- src/trackers/STC.py | 2 +- src/trackers/STT.py | 2 +- src/trackers/TDC.py | 2 +- src/trackers/THR.py | 4 ++-- src/trackers/TIK.py | 6 +++--- src/trackers/TL.py | 2 +- src/trackers/TTG.py | 6 +++--- src/trackers/ULCX.py | 2 +- src/trackers/UNIT3D_TEMPLATE.py | 2 +- src/trackers/UTP.py | 2 +- 36 files changed, 55 insertions(+), 55 deletions(-) diff --git a/src/trackers/ACM.py b/src/trackers/ACM.py index 67ba285f1..1970a8e14 100644 --- a/src/trackers/ACM.py +++ b/src/trackers/ACM.py @@ -210,7 +210,7 @@ async def upload(self, meta, disctype): else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { @@ -334,8 +334,8 @@ async def edit_name(self, meta): return name async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as descfile: + base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as descfile: from src.bbcode import BBCODE # Add This line for all web-dls if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '': diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index eb7e3d1a9..693379d03 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -52,7 +52,7 @@ async def upload(self, meta, disctype): else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { diff --git a/src/trackers/AL.py b/src/trackers/AL.py index 28f4f76bf..dcc7b6774 100644 --- a/src/trackers/AL.py +++ 
b/src/trackers/AL.py @@ -90,7 +90,7 @@ async def upload(self, meta, disctype): else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { diff --git a/src/trackers/BHDTV.py b/src/trackers/BHDTV.py index e53788fb0..7dd05ed7d 100644 --- a/src/trackers/BHDTV.py +++ b/src/trackers/BHDTV.py @@ -59,7 +59,7 @@ async def upload(self, meta, disctype): else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'file': open_torrent} @@ -185,8 +185,8 @@ async def get_res_id(self, resolution): return resolution_id async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as desc: + base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as desc: desc.write(base.replace("[img=250]", "[img=250x250]")) images = meta['image_list'] if len(images) > 0: diff --git a/src/trackers/BLU.py b/src/trackers/BLU.py index 6a7d1ba04..6ce0dba5d 100644 --- a/src/trackers/BLU.py +++ b/src/trackers/BLU.py @@ -60,7 +60,7 @@ async def upload(self, meta, disctype): else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[BLU]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[BLU]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[BLU]{meta['clean_name']}.torrent", 'rb') files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")} data = { diff --git a/src/trackers/CBR.py b/src/trackers/CBR.py index b6d8938a2..c090e80af 100644 --- a/src/trackers/CBR.py +++ b/src/trackers/CBR.py @@ -49,7 +49,7 @@ async def upload(self, meta, disctype): else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[CBR]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[CBR]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[CBR]{meta['clean_name']}.torrent", 'rb') files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")} data = { diff --git a/src/trackers/FL.py b/src/trackers/FL.py index f3356dfda..5d376c917 100644 --- a/src/trackers/FL.py +++ b/src/trackers/FL.py @@ -133,7 +133,7 @@ async def upload(self, meta, disctype): torrentFileName = 
meta.get('uuid') # Download new .torrent from site - fl_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', newline='').read() + fl_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', newline='', encoding='utf-8').read() torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" if meta['bdinfo'] is not None: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() @@ -295,8 +295,8 @@ async def download_new_torrent(self, session, id, torrent_path): return async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', newline='') as descfile: + base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', newline='', encoding='utf-8') as descfile: from src.bbcode import BBCODE bbcode = BBCODE() diff --git a/src/trackers/FNP.py b/src/trackers/FNP.py index 1a16141c0..eb6ebaa42 100644 --- a/src/trackers/FNP.py +++ b/src/trackers/FNP.py @@ -82,7 +82,7 @@ async def upload(self, meta, disctype): else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py index 7e454f1d9..a59c42e84 100644 --- a/src/trackers/HDB.py +++ b/src/trackers/HDB.py @@ -215,7 +215,7 @@ async def upload(self, meta, disctype): return # Download new .torrent from site - hdb_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + hdb_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" torrent = Torrent.read(torrent_path) @@ -415,8 +415,8 @@ async def download_new_torrent(self, id, torrent_path): return async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as descfile: + base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as descfile: from src.bbcode import BBCODE # Add This line for all web-dls if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '' and meta.get('description', None) is None: diff --git a/src/trackers/HDT.py b/src/trackers/HDT.py index a30975c12..a1117b67d 100644 --- a/src/trackers/HDT.py +++ b/src/trackers/HDT.py @@ -123,7 +123,7 @@ async def upload(self, meta, disctype): hdt_name = hdt_name_manually # Upload - hdt_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', newline='').read() + hdt_desc = 
open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', newline='', encoding='utf-8').read() torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" with open(torrent_path, 'rb') as torrentFile: @@ -292,7 +292,7 @@ async def get_csrfToken(self, session, url): async def edit_desc(self, meta): # base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', newline='') as descfile: + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', newline='', encoding='utf-8') as descfile: if meta['is_disc'] != 'BDMV': # Beautify MediaInfo for HDT using custom template video = meta['filelist'][0] diff --git a/src/trackers/HP.py b/src/trackers/HP.py index 32b62236b..76acbb837 100644 --- a/src/trackers/HP.py +++ b/src/trackers/HP.py @@ -82,7 +82,7 @@ async def upload(self, meta, disctype): else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index 77057e5b2..c1a5298f2 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -52,7 +52,7 @@ async def upload(self, meta, disctype): else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[HUNO]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[HUNO]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[HUNO]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { diff --git a/src/trackers/JPTV.py b/src/trackers/JPTV.py index 9252aa088..76e8e78f9 100644 --- a/src/trackers/JPTV.py +++ b/src/trackers/JPTV.py @@ -89,7 +89,7 @@ async def upload(self, meta, disctype): else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() # bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { diff --git a/src/trackers/LCD.py b/src/trackers/LCD.py index 3ddec7fbf..5b7397d3d 100644 --- a/src/trackers/LCD.py +++ b/src/trackers/LCD.py @@ -49,7 +49,7 @@ async def upload(self, meta, disctype): else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[LCD]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[LCD]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[LCD]{meta['clean_name']}.torrent", 'rb') files = {'torrent': ("placeholder.torrent", open_torrent, 
"application/x-bittorrent")} data = { diff --git a/src/trackers/LST.py b/src/trackers/LST.py index e71457490..83fc5e1b3 100644 --- a/src/trackers/LST.py +++ b/src/trackers/LST.py @@ -93,7 +93,7 @@ async def upload(self, meta, disctype): mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() if meta.get('service') == "hentai": desc = "[center]" + "[img]" + str(meta['poster']) + "[/img][/center]" + "\n[center]" + "https://www.themoviedb.org/tv/" + str(meta['tmdb']) + "\nhttps://myanimelist.net/anime/" + str(meta['mal']) + "[/center]" + desc diff --git a/src/trackers/LT.py b/src/trackers/LT.py index fdabb9176..c6e0e4be1 100644 --- a/src/trackers/LT.py +++ b/src/trackers/LT.py @@ -107,7 +107,7 @@ async def upload(self, meta, disctype): else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 07d2133c6..9e6ea24f8 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -130,7 +130,7 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): anon = 1 if meta['anon'] != 0 or bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) else 0 desc_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" - desc = open(desc_path, 'r').read() + desc = open(desc_path, 'r', encoding='utf-8').read() torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" with open(torrent_file_path, 'rb') as f: @@ -226,8 +226,8 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts return meta['image_list'], False # No need to retry, successful upload async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as desc: + base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as desc: # adding bd_dump to description if it exits and adding empty string to mediainfo if meta['bdinfo'] is not None: mi_dump = None diff --git a/src/trackers/OE.py b/src/trackers/OE.py index 055cab4db..ec332dc97 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -56,7 +56,7 @@ async def upload(self, meta, disctype): else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = 
open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { diff --git a/src/trackers/OTW.py b/src/trackers/OTW.py index 7e70370ca..766ebd767 100644 --- a/src/trackers/OTW.py +++ b/src/trackers/OTW.py @@ -82,7 +82,7 @@ async def upload(self, meta, disctype): else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { diff --git a/src/trackers/PSS.py b/src/trackers/PSS.py index 036e28be1..0f0fde007 100644 --- a/src/trackers/PSS.py +++ b/src/trackers/PSS.py @@ -84,7 +84,7 @@ async def upload(self, meta, disctype): else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { diff --git a/src/trackers/PTER.py b/src/trackers/PTER.py index cc4868a36..88fea3a80 100644 --- a/src/trackers/PTER.py +++ b/src/trackers/PTER.py @@ -149,8 +149,8 @@ async def get_type_medium_id(self, meta): return medium_id async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as descfile: + base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as descfile: from src.bbcode import BBCODE from src.trackers.COMMON import COMMON common = COMMON(config=self.config) diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index da6290770..dee15cc87 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -765,7 +765,7 @@ async def fill_upload_form(self, groupID, meta): await common.edit_torrent(meta, self.tracker, self.source_flag) resolution, other_resolution = self.get_resolution(meta) await self.edit_desc(meta) - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", "r").read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", "r", encoding='utf-8').read() ptp_subtitles = self.get_subtitles(meta) ptp_trumpable = None if not any(x in [3, 50] for x in ptp_subtitles) or meta['hardcoded-subs']: diff --git a/src/trackers/R4E.py b/src/trackers/R4E.py index c57f18a48..82a8c8080 100644 --- a/src/trackers/R4E.py +++ b/src/trackers/R4E.py @@ -44,7 +44,7 @@ async def upload(self, meta, disctype): else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[R4E]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[R4E]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = 
open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[R4E]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { diff --git a/src/trackers/RF.py b/src/trackers/RF.py index c6fedad83..c90f8b4db 100644 --- a/src/trackers/RF.py +++ b/src/trackers/RF.py @@ -48,7 +48,7 @@ async def upload(self, meta, disctype): else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { diff --git a/src/trackers/SHRI.py b/src/trackers/SHRI.py index ff9d7cd92..6862b431f 100644 --- a/src/trackers/SHRI.py +++ b/src/trackers/SHRI.py @@ -82,7 +82,7 @@ async def upload(self, meta, disctype): else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { diff --git a/src/trackers/SN.py b/src/trackers/SN.py index 74484b6f9..199ff68e0 100644 --- a/src/trackers/SN.py +++ b/src/trackers/SN.py @@ -60,7 +60,7 @@ async def upload(self, meta, disctype): else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') as f: tfile = f.read() @@ -106,8 +106,8 @@ async def upload(self, meta, disctype): console.print(data) async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as desc: + base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as desc: desc.write(base) images = meta['image_list'] if len(images) > 0: diff --git a/src/trackers/STC.py b/src/trackers/STC.py index e93bb263d..fb17b2c0a 100644 --- a/src/trackers/STC.py +++ b/src/trackers/STC.py @@ -44,7 +44,7 @@ async def upload(self, meta, disctype): else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { diff --git a/src/trackers/STT.py b/src/trackers/STT.py index 2029c4cb2..2f8ee800d 100644 --- 
a/src/trackers/STT.py +++ b/src/trackers/STT.py @@ -45,7 +45,7 @@ async def upload(self, meta, disctype): else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { diff --git a/src/trackers/TDC.py b/src/trackers/TDC.py index 6c68ca4df..b2dd45c8e 100644 --- a/src/trackers/TDC.py +++ b/src/trackers/TDC.py @@ -81,7 +81,7 @@ async def upload(self, meta, disctype): else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { diff --git a/src/trackers/THR.py b/src/trackers/THR.py index 9ff295069..4a91b66e7 100644 --- a/src/trackers/THR.py +++ b/src/trackers/THR.py @@ -59,7 +59,7 @@ async def upload(self, session, meta, disctype): f.close() # bd_file = None - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[THR]DESCRIPTION.txt", 'r') as f: + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[THR]DESCRIPTION.txt", 'r', encoding='utf-8') as f: desc = f.read() f.close() @@ -169,7 +169,7 @@ async def edit_torrent(self, meta): async def edit_desc(self, meta): pronfo = False - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() + base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[THR]DESCRIPTION.txt", 'w', encoding='utf-8') as desc: if meta['tag'] == "": tag = "" diff --git a/src/trackers/TIK.py b/src/trackers/TIK.py index 2702233a7..6a8b9d982 100644 --- a/src/trackers/TIK.py +++ b/src/trackers/TIK.py @@ -63,16 +63,16 @@ async def upload(self, meta, disctype): bd_dump = None if meta.get('desclink'): - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", "r").read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", "r", encoding='utf-8').read() print(f"Custom Description Link: {desc}") elif meta.get('descfile'): - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", "r").read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", "r", encoding='utf-8').read() print(f"Custom Description File Path: {desc}") else: await self.edit_desc(meta) - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", "r").read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", "r", encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} diff --git a/src/trackers/TL.py b/src/trackers/TL.py index 67e66298e..f563a6839 100644 --- a/src/trackers/TL.py +++ b/src/trackers/TL.py @@ -81,7 +81,7 @@ async def upload(self, meta, 
disctype): cat_id = await self.get_cat_id(common, meta) await common.unit3d_edit_desc(meta, self.tracker, self.signature) - open_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'a+') + open_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'a+', encoding='utf-8') info_filename = 'BD_SUMMARY_00' if meta['bdinfo'] is not None else 'MEDIAINFO_CLEANPATH' open_info = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/{info_filename}.txt", 'r', encoding='utf-8') diff --git a/src/trackers/TTG.py b/src/trackers/TTG.py index 6ed31de13..9337e8a83 100644 --- a/src/trackers/TTG.py +++ b/src/trackers/TTG.py @@ -126,7 +126,7 @@ async def upload(self, meta, disctype): else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8') - ttg_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + ttg_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" with open(torrent_path, 'rb') as torrentFile: if len(meta['filelist']) == 1: @@ -277,8 +277,8 @@ async def login(self, cookiefile): return async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as descfile: + base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as descfile: from src.bbcode import BBCODE from src.trackers.COMMON import COMMON common = COMMON(config=self.config) diff --git a/src/trackers/ULCX.py b/src/trackers/ULCX.py index de5c146f7..39555dfd0 100644 --- a/src/trackers/ULCX.py +++ b/src/trackers/ULCX.py @@ -90,7 +90,7 @@ async def upload(self, meta): bd_dump = None desc = open( f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", - "r", + "r", encoding='utf-8', ).read() open_torrent = open( f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", diff --git a/src/trackers/UNIT3D_TEMPLATE.py b/src/trackers/UNIT3D_TEMPLATE.py index 435b30bd6..d3bc06777 100644 --- a/src/trackers/UNIT3D_TEMPLATE.py +++ b/src/trackers/UNIT3D_TEMPLATE.py @@ -92,7 +92,7 @@ async def upload(self, meta, disctype): else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { diff --git a/src/trackers/UTP.py b/src/trackers/UTP.py index eeaf552ff..30d16fea3 100644 --- a/src/trackers/UTP.py +++ b/src/trackers/UTP.py @@ -48,7 +48,7 @@ async def upload(self, meta, disctype): else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[UTOPIA]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[UTOPIA]DESCRIPTION.txt", 'r', 
encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[UTOPIA]{meta['clean_name']}.torrent", 'rb') files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")} data = { From 18249025c3175d4fac251d65243d24a4510ceec8 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 26 Sep 2024 22:26:11 +1000 Subject: [PATCH 222/741] Skip prompts when ID or unattended --- src/prep.py | 55 +++++++++++++++++++++++------------------- src/trackers/COMMON.py | 29 +++++++++++----------- src/trackers/PTP.py | 31 ++++++++++++------------ 3 files changed, 61 insertions(+), 54 deletions(-) diff --git a/src/prep.py b/src/prep.py index e5b53b3b3..1a141ae60 100644 --- a/src/prep.py +++ b/src/prep.py @@ -145,7 +145,8 @@ async def update_meta_with_unit3d_data(self, meta, tracker_data, tracker_name): if valid_images: meta['image_list'] = valid_images if meta.get('image_list'): # Double-check if image_list is set before handling it - await self.handle_image_list(meta, tracker_name) + if not (meta.get('blu') or meta.get('aither') or meta.get('lst') or meta.get('oe') or meta.get('tik')) or meta['unattended']: + await self.handle_image_list(meta, tracker_name) if filename: meta[f'{tracker_name.lower()}_filename'] = filename @@ -164,6 +165,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met tracker_name, tracker_instance.torrent_url, tracker_instance.search_url, + meta, id=meta[tracker_key] ) else: @@ -194,27 +196,28 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met meta['imdb'] = str(imdb_id).zfill(7) if imdb_id else None console.print(f"[green]{tracker_name} IMDb ID found: tt{meta['imdb']}[/green]") - if await self.prompt_user_for_confirmation("Do you want to use this ID data from PTP?"): - meta['skip_gen_desc'] = True - found_match = True + if not meta['unattended']: + if await self.prompt_user_for_confirmation("Do you want to use this ID data from PTP?"): + meta['skip_gen_desc'] = True + found_match = True - # Retrieve PTP description and image list - ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(ptp_torrent_id, meta.get('is_disc', False)) - meta['description'] = ptp_desc + # Retrieve PTP description and image list + ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(ptp_torrent_id, meta, meta.get('is_disc', False)) + meta['description'] = ptp_desc - if not meta.get('image_list'): # Only handle images if image_list is not already populated - valid_images = await self.check_images_concurrently(ptp_imagelist) - if valid_images: - meta['image_list'] = valid_images - await self.handle_image_list(meta, tracker_name) + if not meta.get('image_list'): # Only handle images if image_list is not already populated + valid_images = await self.check_images_concurrently(ptp_imagelist) + if valid_images: + meta['image_list'] = valid_images + await self.handle_image_list(meta, tracker_name) - meta['skip_gen_desc'] = True - console.print("[green]PTP images added to metadata.[/green]") + meta['skip_gen_desc'] = True + console.print("[green]PTP images added to metadata.[/green]") - else: - found_match = False - meta['skip_gen_desc'] = True - meta['description'] = None + else: + found_match = False + meta['skip_gen_desc'] = True + meta['description'] = None else: console.print("[yellow]Skipping PTP as no match found[/yellow]") @@ -233,14 +236,13 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met found_match = False # Retrieve PTP description and 
image list - ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(meta['ptp'], meta.get('is_disc', False)) + ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(meta['ptp'], meta, meta.get('is_disc', False)) meta['description'] = ptp_desc if not meta.get('image_list'): # Only handle images if image_list is not already populated valid_images = await self.check_images_concurrently(ptp_imagelist) if valid_images: meta['image_list'] = valid_images - await self.handle_image_list(meta, tracker_name) meta['skip_gen_desc'] = True console.print("[green]PTP images added to metadata.[/green]") @@ -304,12 +306,15 @@ async def handle_image_list(self, meta, tracker_name): if 'MTV' in trackers_list or 'MTV' in meta.get('trackers', ''): console.print("[red]Warning: Some images are not hosted on an MTV approved image host. MTV will fail if you keep these images.") - keep_images = await self.prompt_user_for_confirmation(f"Do you want to keep the images found on {tracker_name}?") - if not keep_images: - meta['image_list'] = [] - console.print(f"[yellow]Images discarded from {tracker_name}.") + if meta['unattended']: + keep_images = True else: - console.print(f"[green]Images retained from {tracker_name}.") + keep_images = await self.prompt_user_for_confirmation(f"Do you want to keep the images found on {tracker_name}?") + if not keep_images: + meta['image_list'] = [] + console.print(f"[yellow]Images discarded from {tracker_name}.") + else: + console.print(f"[green]Images retained from {tracker_name}.") async def gather_prep(self, meta, mode): meta['mode'] = mode diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 19cc873ce..a22af01a3 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -178,7 +178,7 @@ async def prompt_user_for_confirmation(self, message): return True return False - async def unit3d_torrent_info(self, tracker, torrent_url, search_url, id=None, file_name=None): + async def unit3d_torrent_info(self, tracker, torrent_url, search_url, meta, id=None, file_name=None): tmdb = imdb = tvdb = description = category = infohash = mal = files = None # noqa F841 imagelist = [] @@ -265,19 +265,20 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, id=None, f console.print(f"[blue]Extracted description: [yellow]{description}", markup=False) # Allow user to edit or discard the description - console.print("[cyan]Do you want to edit, discard or keep the description?[/cyan]") - edit_choice = input("[cyan]Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: [/cyan]") - - if edit_choice.lower() == 'e': - edited_description = click.edit(description) - if edited_description: - description = edited_description.strip() - console.print(f"[green]Final description after editing:[/green] {description}", markup=False) - elif edit_choice.lower() == 'd': - description = None - console.print("[yellow]Description discarded.[/yellow]") - else: - console.print("[green]Keeping the original description.[/green]") + if not (meta.get('blu') or meta.get('aither') or meta.get('lst') or meta.get('oe') or meta.get('tik')) or meta.get('unattended'): + console.print("[cyan]Do you want to edit, discard or keep the description?[/cyan]") + edit_choice = input("[cyan]Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: [/cyan]") + + if edit_choice.lower() == 'e': + edited_description = click.edit(description) + if edited_description: + description = edited_description.strip() + console.print(f"[green]Final description after 
editing:[/green] {description}", markup=False) + elif edit_choice.lower() == 'd': + description = None + console.print("[yellow]Description discarded.[/yellow]") + else: + console.print("[green]Keeping the original description.[/green]") return tmdb, imdb, tvdb, mal, description, category, infohash, imagelist, file_name diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index dee15cc87..b9e58760f 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -185,7 +185,7 @@ async def get_imdb_from_torrent_id(self, ptp_torrent_id): except Exception: return None, None, None - async def get_ptp_description(self, ptp_torrent_id, is_disc): + async def get_ptp_description(self, ptp_torrent_id, meta, is_disc): params = { 'id': ptp_torrent_id, 'action': 'get_description' @@ -209,20 +209,21 @@ async def get_ptp_description(self, ptp_torrent_id, is_disc): console.print("[bold green]Successfully grabbed description from PTP") console.print(f"[cyan]Description after cleaning:[yellow]\n{desc[:1000]}...") # Show first 1000 characters for brevity - # Allow user to edit or discard the description - console.print("[cyan]Do you want to edit, discard or keep the description?[/cyan]") - edit_choice = input("Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: ") - - if edit_choice.lower() == 'e': - edited_description = click.edit(desc) - if edited_description: - desc = edited_description.strip() - console.print(f"[green]Final description after editing:[/green] {desc}") - elif edit_choice.lower() == 'd': - desc = None - console.print("[yellow]Description discarded.[/yellow]") - else: - console.print("[green]Keeping the original description.[/green]") + if not meta.get('ptp') or meta['unattended']: + # Allow user to edit or discard the description + console.print("[cyan]Do you want to edit, discard or keep the description?[/cyan]") + edit_choice = input("Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: ") + + if edit_choice.lower() == 'e': + edited_description = click.edit(desc) + if edited_description: + desc = edited_description.strip() + console.print(f"[green]Final description after editing:[/green] {desc}") + elif edit_choice.lower() == 'd': + desc = None + console.print("[yellow]Description discarded.[/yellow]") + else: + console.print("[green]Keeping the original description.[/green]") return desc, imagelist From 6d08a9387ccc9de9981d2bfb8f3e535e9a427cb1 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 26 Sep 2024 22:38:13 +1000 Subject: [PATCH 223/741] Fix PTP found_match and meta when unattended --- src/prep.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index 1a141ae60..2df9bc8a1 100644 --- a/src/prep.py +++ b/src/prep.py @@ -218,7 +218,15 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met found_match = False meta['skip_gen_desc'] = True meta['description'] = None - + else: + found_match = True + ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(ptp_torrent_id, meta, meta.get('is_disc', False)) + meta['description'] = ptp_desc + + if not meta.get('image_list'): # Only handle images if image_list is not already populated + valid_images = await self.check_images_concurrently(ptp_imagelist) + if valid_images: + meta['image_list'] = valid_images else: console.print("[yellow]Skipping PTP as no match found[/yellow]") found_match = False @@ -231,6 +239,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met if imdb_id: 
meta['imdb'] = str(imdb_id).zfill(7)
                    console.print(f"[green]IMDb ID found: tt{meta['imdb']}[/green]")
+                   found_match = True
                else:
                    console.print(f"[yellow]Could not find IMDb ID using PTP ID: {ptp_torrent_id}[/yellow]")
                    found_match = False

From 08db864ae72d0bd01a833e1dee4677c519d6bfad Mon Sep 17 00:00:00 2001
From: Audionut
Date: Sat, 28 Sep 2024 21:18:17 +1000
Subject: [PATCH 224/741] Fixed torrent creation with keep folder

---
 src/prep.py | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)

diff --git a/src/prep.py b/src/prep.py
index 2df9bc8a1..4c8e3a540 100644
--- a/src/prep.py
+++ b/src/prep.py
@@ -2458,17 +2458,14 @@ def validate_piece_size(self):
     def create_torrent(self, meta, path, output_filename):
         # Handle directories and file inclusion logic
         if meta['isdir']:
-            if meta['keep_folder']:
-                cli_ui.info('--keep-folder was specified. Using complete folder for torrent creation.')
-            else:
-                os.chdir(path)
-                globs = glob.glob1(path, "*.mkv") + glob.glob1(path, "*.mp4") + glob.glob1(path, "*.ts")
-                no_sample_globs = []
-                for file in globs:
-                    if not file.lower().endswith('sample.mkv') or "!sample" in file.lower():
-                        no_sample_globs.append(os.path.abspath(f"{path}{os.sep}{file}"))
-                if len(no_sample_globs) == 1:
-                    path = meta['filelist'][0]
+            os.chdir(path)
+            globs = glob.glob1(path, "*.mkv") + glob.glob1(path, "*.mp4") + glob.glob1(path, "*.ts")
+            no_sample_globs = []
+            for file in globs:
+                if not file.lower().endswith('sample.mkv') or "!sample" in file.lower():
+                    no_sample_globs.append(os.path.abspath(f"{path}{os.sep}{file}"))
+            if len(no_sample_globs) == 1:
+                path = meta['filelist'][0]
         if meta['is_disc']:
             include, exclude = "", ""
         else:

From 04f4074e75408ce4bb6f6451e02d9352bc275291 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Sun, 29 Sep 2024 13:58:51 +1000
Subject: [PATCH 225/741] Refactor description handling

Description arguments do not work when a tracker argument is specified.
With automated tracker searching (i.e., no tracker specified), discarding
the found description allows a custom description to be used.
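In rough terms, the intended precedence looks like the sketch below. This is
simplified and illustrative only, not the actual gen_desc() implementation;
describe() and the placeholder rendering are made up for this note:

    # A kept or edited tracker description short-circuits generation entirely;
    # otherwise any user-supplied sources are rendered in a fixed order.
    def describe(meta):
        if meta.get('skip_gen_desc'):
            return meta.get('description') or ""
        parts = []
        for key in ('desc_template', 'nfo', 'desclink', 'descfile', 'desc'):
            if meta.get(key):
                parts.append(f"[{key} content]")  # stand-in for the real rendering
        return "\n".join(parts)

    assert describe({'skip_gen_desc': True, 'description': 'kept'}) == 'kept'
    assert describe({'desc': 'user text'}) == '[desc content]'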
--- src/prep.py | 183 ++++++++++++++++------------------------- src/trackers/COMMON.py | 7 ++ src/trackers/PTP.py | 6 +- 3 files changed, 83 insertions(+), 113 deletions(-) diff --git a/src/prep.py b/src/prep.py index 4c8e3a540..b9c660e9f 100644 --- a/src/prep.py +++ b/src/prep.py @@ -174,6 +174,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met tracker_name, tracker_instance.torrent_url, tracker_instance.search_url, + meta, file_name=search_term ) @@ -198,12 +199,10 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met console.print(f"[green]{tracker_name} IMDb ID found: tt{meta['imdb']}[/green]") if not meta['unattended']: if await self.prompt_user_for_confirmation("Do you want to use this ID data from PTP?"): - meta['skip_gen_desc'] = True found_match = True # Retrieve PTP description and image list ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(ptp_torrent_id, meta, meta.get('is_disc', False)) - meta['description'] = ptp_desc if not meta.get('image_list'): # Only handle images if image_list is not already populated valid_images = await self.check_images_concurrently(ptp_imagelist) @@ -211,17 +210,14 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met meta['image_list'] = valid_images await self.handle_image_list(meta, tracker_name) - meta['skip_gen_desc'] = True - console.print("[green]PTP images added to metadata.[/green]") - else: found_match = False - meta['skip_gen_desc'] = True - meta['description'] = None + else: found_match = True ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(ptp_torrent_id, meta, meta.get('is_disc', False)) meta['description'] = ptp_desc + meta['skip_gen_desc'] = True if not meta.get('image_list'): # Only handle images if image_list is not already populated valid_images = await self.check_images_concurrently(ptp_imagelist) @@ -230,32 +226,28 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met else: console.print("[yellow]Skipping PTP as no match found[/yellow]") found_match = False - meta['skip_gen_desc'] = True - meta['description'] = None + else: ptp_torrent_id = meta['ptp'] - console.print(f"[cyan]PTP ID found in meta: {ptp_torrent_id}, using it to get IMDb ID[/cyan]") + console.print("[cyan]Using specified PTP ID to get IMDb ID[/cyan]") imdb_id, _, meta['ext_torrenthash'] = await tracker_instance.get_imdb_from_torrent_id(ptp_torrent_id) if imdb_id: meta['imdb'] = str(imdb_id).zfill(7) console.print(f"[green]IMDb ID found: tt{meta['imdb']}[/green]") found_match = True + meta['skipit'] = True + ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(meta['ptp'], meta, meta.get('is_disc', False)) + meta['description'] = ptp_desc + meta['skip_gen_desc'] = True + if not meta.get('image_list'): # Only handle images if image_list is not already populated + valid_images = await self.check_images_concurrently(ptp_imagelist) + if valid_images: + meta['image_list'] = valid_images + console.print("[green]PTP images added to metadata.[/green]") else: console.print(f"[yellow]Could not find IMDb ID using PTP ID: {ptp_torrent_id}[/yellow]") found_match = False - # Retrieve PTP description and image list - ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(meta['ptp'], meta, meta.get('is_disc', False)) - meta['description'] = ptp_desc - - if not meta.get('image_list'): # Only handle images if image_list is not already populated - valid_images = await 
self.check_images_concurrently(ptp_imagelist) - if valid_images: - meta['image_list'] = valid_images - - meta['skip_gen_desc'] = True - console.print("[green]PTP images added to metadata.[/green]") - elif tracker_name == "HDB": if meta.get('hdb') is not None: meta[manual_key] = meta[tracker_key] @@ -3144,104 +3136,71 @@ def clean_filename(self, name): return name async def gen_desc(self, meta): - def clean_text(text): - return text.replace('\r\n', '').replace('\n', '').strip() - - desclink = meta.get('desclink') - descfile = meta.get('descfile') - ptp_desc = "" - imagelist = [] + if not meta.get('skip_gen_desc', False): + def clean_text(text): + return text.replace('\r\n', '').replace('\n', '').strip() - desc_sources = ['ptp', 'blu', 'aither', 'lst', 'oe', 'tik'] - desc_source = [source.upper() for source in desc_sources if meta.get(source)] - desc_source = desc_source[0] if len(desc_source) == 1 else None - - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: - description.seek(0) - - if (desclink, descfile, meta['desc']) == (None, None, None): - if meta.get('ptp') and str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true" and desc_source in ['PTP', None]: - if meta.get('skip_gen_desc', False): - console.print("[cyan]Something went wrong with PTP description.") - return meta + desclink = meta.get('desclink') + descfile = meta.get('descfile') - ptp = PTP(config=self.config) - ptp_desc, imagelist = await ptp.get_ptp_description(meta['ptp'], meta['is_disc']) - if clean_text(ptp_desc): - description.write(ptp_desc + "\n") - meta['description'] = 'PTP' - meta['imagelist'] = imagelist + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: + description.seek(0) - if not ptp_desc and clean_text(meta.get('blu_desc', '')) and desc_source in ['BLU', None]: - description.write(meta['blu_desc'] + "\n") - meta['description'] = 'BLU' - - if not ptp_desc and clean_text(meta.get('lst_desc', '')) and desc_source in ['LST', None]: - description.write(meta['lst_desc'] + "\n") - meta['description'] = 'LST' - - if not ptp_desc and clean_text(meta.get('aither_desc', '')) and desc_source in ['AITHER', None]: - description.write(meta['aither_desc'] + "\n") - meta['description'] = 'AITHER' - - if not ptp_desc and clean_text(meta.get('oe_desc', '')) and desc_source in ['OE', None]: - description.write(meta['oe_desc'] + "\n") - meta['description'] = 'OE' - - if not ptp_desc and clean_text(meta.get('tike_desc', '')) and desc_source in ['TIK', None]: - description.write(meta['tik_desc'] + "\n") - meta['description'] = 'TIK' - - if meta.get('desc_template'): - from jinja2 import Template - try: - with open(f"{meta['base_dir']}/data/templates/{meta['desc_template']}.txt", 'r') as f: - template = Template(f.read()) - template_desc = template.render(meta) - if clean_text(template_desc): - description.write(template_desc + "\n") - console.print(f"[INFO] Description from template '{meta['desc_template']}' used.") - except FileNotFoundError: - console.print(f"[ERROR] Template '{meta['desc_template']}' not found.") - - if meta.get('nfo'): - nfo_files = glob.glob("*.nfo") - if nfo_files: - nfo = nfo_files[0] - with open(nfo, 'r', encoding="utf-8") as nfo_file: - nfo_content = nfo_file.read() - description.write(f"[code]{nfo_content}[/code]\n") + if meta.get('desc_template'): + from jinja2 import Template + try: + with 
open(f"{meta['base_dir']}/data/templates/{meta['desc_template']}.txt", 'r') as f: + template = Template(f.read()) + template_desc = template.render(meta) + if clean_text(template_desc): + description.write(template_desc + "\n") + console.print(f"[INFO] Description from template '{meta['desc_template']}' used.") + except FileNotFoundError: + console.print(f"[ERROR] Template '{meta['desc_template']}' not found.") + + if meta.get('nfo'): + nfo_files = glob.glob("*.nfo") + if nfo_files: + nfo = nfo_files[0] + with open(nfo, 'r', encoding="utf-8") as nfo_file: + nfo_content = nfo_file.read() + description.write(f"[code]{nfo_content}[/code]\n") + meta['description'] = "CUSTOM" + console.print(f"[INFO] NFO file '{nfo}' used.") + + if desclink: + try: + parsed = urllib.parse.urlparse(desclink.replace('/raw/', '/')) + split = os.path.split(parsed.path) + raw = parsed._replace(path=f"{split[0]}/raw/{split[1]}" if split[0] != '/' else f"/raw{parsed.path}") + raw_url = urllib.parse.urlunparse(raw) + desclink_content = requests.get(raw_url).text + description.write(desclink_content + "\n") + meta['description'] = "CUSTOM" + console.print(f"[INFO] Description from link '{desclink}' used.") + except Exception as e: + console.print(f"[ERROR] Failed to fetch description from link: {e}") + + if descfile and os.path.isfile(descfile): + with open(descfile, 'r') as f: + file_content = f.read() + description.write(file_content) meta['description'] = "CUSTOM" - console.print(f"[INFO] NFO file '{nfo}' used.") + console.print(f"[INFO] Description from file '{descfile}' used.") - if desclink: - try: - parsed = urllib.parse.urlparse(desclink.replace('/raw/', '/')) - split = os.path.split(parsed.path) - raw = parsed._replace(path=f"{split[0]}/raw/{split[1]}" if split[0] != '/' else f"/raw{parsed.path}") - raw_url = urllib.parse.urlunparse(raw) - desclink_content = requests.get(raw_url).text - description.write(desclink_content + "\n") + if meta.get('desc'): + description.write(meta['desc'] + "\n") meta['description'] = "CUSTOM" - console.print(f"[INFO] Description from link '{desclink}' used.") - except Exception as e: - console.print(f"[ERROR] Failed to fetch description from link: {e}") - - if descfile and os.path.isfile(descfile): - with open(descfile, 'r') as f: - file_content = f.read() - description.write(file_content) - meta['description'] = "CUSTOM" - console.print(f"[INFO] Description from file '{descfile}' used.") - - if meta.get('desc'): - description.write(meta['desc'] + "\n") - meta['description'] = "CUSTOM" - console.print("[INFO] Custom description used.") + console.print("[INFO] Custom description used.") - description.write("\n") + description.write("\n") + return meta + else: + description_text = meta.get('description') if meta.get('description') else "" + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: + description.write(description_text + "\n") - return meta + return meta async def tag_override(self, meta): with open(f"{meta['base_dir']}/data/tags.json", 'r', encoding="utf-8") as f: diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index a22af01a3..5d564186a 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -273,12 +273,19 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, meta, id=N edited_description = click.edit(description) if edited_description: description = edited_description.strip() + meta['description'] = description + meta['skip_gen_desc'] = True 
console.print(f"[green]Final description after editing:[/green] {description}", markup=False) elif edit_choice.lower() == 'd': description = None console.print("[yellow]Description discarded.[/yellow]") else: console.print("[green]Keeping the original description.[/green]") + meta['description'] = description + meta['skip_gen_desc'] = True + else: + meta['description'] = description + meta['skip_gen_desc'] = True return tmdb, imdb, tvdb, mal, description, category, infohash, imagelist, file_name diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index b9e58760f..44f2e696e 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -209,7 +209,7 @@ async def get_ptp_description(self, ptp_torrent_id, meta, is_disc): console.print("[bold green]Successfully grabbed description from PTP") console.print(f"[cyan]Description after cleaning:[yellow]\n{desc[:1000]}...") # Show first 1000 characters for brevity - if not meta.get('ptp') or meta['unattended']: + if not meta.get('skipit') or meta['unattended']: # Allow user to edit or discard the description console.print("[cyan]Do you want to edit, discard or keep the description?[/cyan]") edit_choice = input("Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: ") @@ -218,12 +218,16 @@ async def get_ptp_description(self, ptp_torrent_id, meta, is_disc): edited_description = click.edit(desc) if edited_description: desc = edited_description.strip() + meta['description'] = desc + meta['skip_gen_desc'] = True console.print(f"[green]Final description after editing:[/green] {desc}") elif edit_choice.lower() == 'd': desc = None console.print("[yellow]Description discarded.[/yellow]") else: console.print("[green]Keeping the original description.[/green]") + meta['description'] = ptp_desc + meta['skip_gen_desc'] = True return desc, imagelist From 2a41189210b2891abe2b5fa4ff69d7f0176a9297 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 29 Sep 2024 14:24:42 +1000 Subject: [PATCH 226/741] Update image host retry logic --- src/prep.py | 209 +++++++++++++++++++++++++++------------------------- 1 file changed, 107 insertions(+), 102 deletions(-) diff --git a/src/prep.py b/src/prep.py index b9c660e9f..f2df7ef3b 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2521,7 +2521,7 @@ def create_base_from_existing_torrent(self, torrentpath, base_dir, uuid): """ Upload Screenshots """ - def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=False): + def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=False, max_retries=3): import nest_asyncio nest_asyncio.apply() os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") @@ -2544,7 +2544,6 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i return existing_images, total_screens if img_host == "imgbox": - # Handle Imgbox uploads without the main progress bar console.print("[green]Uploading Screens to Imgbox...") image_list = asyncio.run(self.imgbox_upload(f"{meta['base_dir']}/tmp/{meta['uuid']}", image_glob)) if not image_list: @@ -2565,114 +2564,120 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i ) as progress: while True: upload_task = progress.add_task(f"[green]Uploading Screens to {img_host}...", total=len(image_glob[-screens:])) - + for image in image_glob[-screens:]: - try: - timeout = 60 - if img_host == "ptpimg": - payload = { - 'format': 'json', - 'api_key': self.config['DEFAULT']['ptpimg_api'] - } - files = 
[('file-upload[0]', open(image, 'rb'))] - headers = {'referer': 'https://ptpimg.me/index.php'} - response = requests.post("https://ptpimg.me/upload.php", headers=headers, data=payload, files=files) - response = response.json() - ptpimg_code = response[0]['code'] - ptpimg_ext = response[0]['ext'] - img_url = f"https://ptpimg.me/{ptpimg_code}.{ptpimg_ext}" - raw_url = img_url - web_url = img_url - elif img_host == "imgbb": - url = "https://api.imgbb.com/1/upload" - data = { - 'key': self.config['DEFAULT']['imgbb_api'], - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - response = requests.post(url, data=data, timeout=timeout) - response = response.json() - img_url = response['data']['image']['url'] - raw_url = img_url - web_url = img_url - elif img_host == "ptscreens": - url = "https://ptscreens.com/api/1/upload" - data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - headers = { - 'X-API-Key': self.config['DEFAULT']['ptscreens_api'], - } - response = requests.post(url, data=data, headers=headers, timeout=timeout) - response = response.json() - if response.get('status_code') != 200: - console.print("[yellow]PT Screens failed, trying next image host") - break - img_url = response['data']['image']['url'] - raw_url = img_url - web_url = img_url - elif img_host == "pixhost": - url = "https://api.pixhost.to/images" - data = { - 'content_type': '0', - 'max_th_size': 350, - } - files = { - 'img': ('file-upload[0]', open(image, 'rb')), - } - response = requests.post(url, data=data, files=files, timeout=timeout) - if response.status_code != 200: - console.print("[yellow]Pixhost failed, trying next image host") - break - response = response.json() - raw_url = response['th_url'].replace('https://t', 'https://img').replace('/thumbs/', '/images/') - img_url = response['th_url'] - web_url = response['show_url'] - elif img_host == "lensdump": - url = "https://lensdump.com/api/1/upload" - data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - headers = { - 'X-API-Key': self.config['DEFAULT']['lensdump_api'], - } - response = requests.post(url, data=data, headers=headers, timeout=timeout) - response = response.json() - if response.get('status_code') != 200: - console.print("[yellow]Lensdump failed, trying next image host") + retry_count = 0 + while retry_count < max_retries: + try: + timeout = 10 + if img_host == "ptpimg": + payload = { + 'format': 'json', + 'api_key': self.config['DEFAULT']['ptpimg_api'] + } + files = [('file-upload[0]', open(image, 'rb'))] + headers = {'referer': 'https://ptpimg.me/index.php'} + response = requests.post("https://ptpimg.me/upload.php", headers=headers, data=payload, files=files) + response = response.json() + ptpimg_code = response[0]['code'] + ptpimg_ext = response[0]['ext'] + img_url = f"https://ptpimg.me/{ptpimg_code}.{ptpimg_ext}" + raw_url = img_url + web_url = img_url + elif img_host == "imgbb": + url = "https://api.imgbb.com/1/upload" + data = { + 'key': self.config['DEFAULT']['imgbb_api'], + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + response = requests.post(url, data=data, timeout=timeout) + response = response.json() + img_url = response['data']['image']['url'] + raw_url = img_url + web_url = img_url + elif img_host == "ptscreens": + url = "https://ptscreens.com/api/1/upload" + data = { + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + headers = { + 'X-API-Key': self.config['DEFAULT']['ptscreens_api'], + } + response = 
requests.post(url, data=data, headers=headers, timeout=timeout) + response = response.json() + if response.get('status_code') != 200: + console.print("[yellow]PT Screens failed, trying next image host") + break + img_url = response['data']['image']['url'] + raw_url = img_url + web_url = img_url + elif img_host == "pixhost": + url = "https://api.pixhost.to/images" + data = { + 'content_type': '0', + 'max_th_size': 350, + } + files = { + 'img': ('file-upload[0]', open(image, 'rb')), + } + response = requests.post(url, data=data, files=files, timeout=timeout) + if response.status_code != 200: + console.print("[yellow]Pixhost failed, trying next image host") + break + response = response.json() + raw_url = response['th_url'].replace('https://t', 'https://img').replace('/thumbs/', '/images/') + img_url = response['th_url'] + web_url = response['show_url'] + elif img_host == "lensdump": + url = "https://lensdump.com/api/1/upload" + data = { + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + headers = { + 'X-API-Key': self.config['DEFAULT']['lensdump_api'], + } + response = requests.post(url, data=data, headers=headers, timeout=timeout) + response = response.json() + if response.get('status_code') != 200: + console.print("[yellow]Lensdump failed, trying next image host") + break + img_url = response['data']['image']['url'] + raw_url = img_url + web_url = response['data']['url_viewer'] + else: + console.print(f"[red]Unsupported image host: {img_host}") break - img_url = response['data']['image']['url'] - raw_url = img_url - web_url = response['data']['url_viewer'] - else: - console.print(f"[red]Unsupported image host: {img_host}") - break - # Update progress bar and print the result on the same line - progress.console.print(f"[cyan]Uploaded image {i + 1}/{total_screens}: {raw_url}", end='\r') + # Update progress bar and print the result on the same line + progress.console.print(f"[cyan]Uploaded image {i + 1}/{total_screens}: {raw_url}", end='\r') - # Add the image details to the list - image_dict = {'img_url': img_url, 'raw_url': raw_url, 'web_url': web_url} - image_list.append(image_dict) - progress.advance(upload_task) - i += 1 + # Add the image details to the list + image_dict = {'img_url': img_url, 'raw_url': raw_url, 'web_url': web_url} + image_list.append(image_dict) + progress.advance(upload_task) + i += 1 + break # Break the retry loop if successful - except Exception as e: - console.print(f"[yellow]Failed to upload {image} to {img_host}. Exception: {str(e)}") - break + except Exception as e: + retry_count += 1 + console.print(f"[yellow]Failed to upload {image} to {img_host}. Attempt {retry_count}/{max_retries}. Exception: {str(e)}") + if retry_count >= max_retries: + console.print(f"[red]Max retries reached for {img_host}. Moving to next image host.") + break - time.sleep(0.5) + time.sleep(0.5) - if i >= total_screens: - return_dict['image_list'] = image_list - console.print(f"\n[cyan]Completed uploading images. Total uploaded: {len(image_list)}") - return image_list, i + if i >= total_screens: + return_dict['image_list'] = image_list + console.print(f"\n[cyan]Completed uploading images. Total uploaded: {len(image_list)}") + return image_list, i - # If we broke out of the loop due to a failure, switch to the next host and retry - img_host_num += 1 - img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num}') - if not img_host: - console.print("[red]All image hosts failed. 
Unable to complete uploads.")
+                return image_list, i

+        # Ensure that if all attempts fail, a valid tuple is returned
+        return image_list, i

From 336ee62e52d4b052690f845c2f1fcfb99662dfc6 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Sun, 29 Sep 2024 16:54:47 +1000
Subject: [PATCH 227/741] Track successfully uploaded images

---
 src/prep.py | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/src/prep.py b/src/prep.py
index f2df7ef3b..4eeae29f7 100644
--- a/src/prep.py
+++ b/src/prep.py
@@ -2529,6 +2529,7 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i
         img_host = meta['imghost']  # Use the correctly updated image host from meta

         image_list = []
+        successfully_uploaded = set()  # Keep track of uploaded images across hosts

         if custom_img_list:
             image_glob = custom_img_list
@@ -2563,9 +2564,10 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i
                     TimeRemainingColumn()
                 ) as progress:
                     while True:
-                        upload_task = progress.add_task(f"[green]Uploading Screens to {img_host}...", total=len(image_glob[-screens:]))
-
-                        for image in image_glob[-screens:]:
+                        remaining_images = [img for img in image_glob[-screens:] if img not in successfully_uploaded]
+                        upload_task = progress.add_task(f"[green]Uploading Screens to {img_host}...", total=len(remaining_images))
+
+                        for image in remaining_images:
                             retry_count = 0
                             while retry_count < max_retries:
                                 try:
@@ -2654,6 +2656,7 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i
                                     # Add the image details to the list
                                     image_dict = {'img_url': img_url, 'raw_url': raw_url, 'web_url': web_url}
                                     image_list.append(image_dict)
+                                    successfully_uploaded.add(image)  # Track successfully uploaded images
                                     progress.advance(upload_task)
                                     i += 1
                                     break  # Break the retry loop if successful
@@ -2672,7 +2675,7 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i
                             console.print(f"\n[cyan]Completed uploading images. 
Total uploaded: {len(image_list)}") return image_list, i - # If we broke out of the loop due to a failure, switch to the next host and retry + # If we broke out of the loop due to a failure, switch to the next host and retry only remaining images img_host_num += 1 img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num}') if not img_host: From 6b6a2d3637481afea578acc01539912e15e668c1 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 29 Sep 2024 19:22:52 +1000 Subject: [PATCH 228/741] exponential_backoff Correct handling to next image host and successful upload marking --- src/prep.py | 264 ++++++++++++++++++++++++++++------------------------ 1 file changed, 143 insertions(+), 121 deletions(-) diff --git a/src/prep.py b/src/prep.py index 4eeae29f7..925274bb5 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2529,7 +2529,8 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i img_host = meta['imghost'] # Use the correctly updated image host from meta image_list = [] - successfully_uploaded = set() # Keep track of uploaded images across hosts + successfully_uploaded = set() # Track successfully uploaded images + initial_timeout = 5 # Set the initial timeout for backoff if custom_img_list: image_glob = custom_img_list @@ -2544,146 +2545,167 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i console.print(f"[yellow]Skipping upload because images are already uploaded to {img_host}. Existing images: {len(existing_images)}, Required: {total_screens}") return existing_images, total_screens - if img_host == "imgbox": - console.print("[green]Uploading Screens to Imgbox...") - image_list = asyncio.run(self.imgbox_upload(f"{meta['base_dir']}/tmp/{meta['uuid']}", image_glob)) - if not image_list: - console.print("[yellow]Imgbox failed, trying next image host") - img_host_num += 1 - img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num}') - if not img_host: - console.print("[red]All image hosts failed. 
Unable to complete uploads.") - return image_list, i + def exponential_backoff(retry_count, initial_timeout): + # First retry uses the initial timeout + if retry_count == 1: + backoff_time = initial_timeout else: - return image_list, i # Return after successful Imgbox upload - else: + # Each subsequent retry increases the timeout by 50% + backoff_time = initial_timeout * (1.5 ** (retry_count - 1)) + + # Add a small random jitter to avoid synchronization + backoff_time += random.uniform(0, 1) + + # Sleep for the calculated backoff time + time.sleep(backoff_time) + + return backoff_time + + while True: + # Get remaining images that have not been uploaded yet + remaining_images = [img for img in image_glob[-screens:] if img not in successfully_uploaded] + with Progress( TextColumn("[bold green]Uploading Screens..."), BarColumn(), "[cyan]{task.completed}/{task.total}", TimeRemainingColumn() ) as progress: - while True: - remaining_images = [img for img in image_glob[-screens:] if img not in successfully_uploaded] - upload_task = progress.add_task(f"[green]Uploading Screens to {img_host}...", total=len(remaining_images)) + upload_task = progress.add_task(f"[green]Uploading Screens to {img_host}...", total=len(remaining_images)) - for image in remaining_images: - retry_count = 0 - while retry_count < max_retries: - try: - timeout = 10 - if img_host == "ptpimg": - payload = { - 'format': 'json', - 'api_key': self.config['DEFAULT']['ptpimg_api'] - } - files = [('file-upload[0]', open(image, 'rb'))] - headers = {'referer': 'https://ptpimg.me/index.php'} - response = requests.post("https://ptpimg.me/upload.php", headers=headers, data=payload, files=files) - response = response.json() - ptpimg_code = response[0]['code'] - ptpimg_ext = response[0]['ext'] - img_url = f"https://ptpimg.me/{ptpimg_code}.{ptpimg_ext}" - raw_url = img_url - web_url = img_url - elif img_host == "imgbb": - url = "https://api.imgbb.com/1/upload" - data = { - 'key': self.config['DEFAULT']['imgbb_api'], - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - response = requests.post(url, data=data, timeout=timeout) - response = response.json() - img_url = response['data']['image']['url'] - raw_url = img_url - web_url = img_url - elif img_host == "ptscreens": - url = "https://ptscreens.com/api/1/upload" - data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - headers = { - 'X-API-Key': self.config['DEFAULT']['ptscreens_api'], - } - response = requests.post(url, data=data, headers=headers, timeout=timeout) - response = response.json() - if response.get('status_code') != 200: - console.print("[yellow]PT Screens failed, trying next image host") - break - img_url = response['data']['image']['url'] - raw_url = img_url - web_url = img_url - elif img_host == "pixhost": - url = "https://api.pixhost.to/images" - data = { - 'content_type': '0', - 'max_th_size': 350, - } - files = { - 'img': ('file-upload[0]', open(image, 'rb')), - } - response = requests.post(url, data=data, files=files, timeout=timeout) - if response.status_code != 200: - console.print("[yellow]Pixhost failed, trying next image host") - break - response = response.json() - raw_url = response['th_url'].replace('https://t', 'https://img').replace('/thumbs/', '/images/') - img_url = response['th_url'] - web_url = response['show_url'] - elif img_host == "lensdump": - url = "https://lensdump.com/api/1/upload" - data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - headers = { - 'X-API-Key': 
self.config['DEFAULT']['lensdump_api'], - } - response = requests.post(url, data=data, headers=headers, timeout=timeout) - response = response.json() - if response.get('status_code') != 200: - console.print("[yellow]Lensdump failed, trying next image host") - break - img_url = response['data']['image']['url'] - raw_url = img_url - web_url = response['data']['url_viewer'] - else: - console.print(f"[red]Unsupported image host: {img_host}") + for image in remaining_images: + retry_count = 0 # Reset retry count for each image + upload_success = False # Track if the image was successfully uploaded + + while retry_count < max_retries and not upload_success: + try: + timeout = exponential_backoff(retry_count + 1, initial_timeout) # Backoff increases both delay and timeout + if img_host == "ptpimg": + payload = { + 'format': 'json', + 'api_key': self.config['DEFAULT']['ptpimg_api'] + } + files = [('file-upload[0]', open(image, 'rb'))] + headers = {'referer': 'https://ptpimg.me/index.php'} + response = requests.post("https://ptpimg.me/upload.php", headers=headers, data=payload, files=files, timeout=timeout) + response = response.json() + ptpimg_code = response[0]['code'] + ptpimg_ext = response[0]['ext'] + img_url = f"https://ptpimg.me/{ptpimg_code}.{ptpimg_ext}" + raw_url = img_url + web_url = img_url + upload_success = True # Mark the upload as successful + + elif img_host == "imgbb": + url = "https://api.imgbb.com/1/upload" + data = { + 'key': self.config['DEFAULT']['imgbb_api'], + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + response = requests.post(url, data=data, timeout=timeout) + response = response.json() + img_url = response['data']['image']['url'] + raw_url = img_url + web_url = img_url + upload_success = True # Mark the upload as successful + + elif img_host == "ptscreens": + url = "https://ptscreens.com/api/1/upload" + data = { + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + headers = { + 'X-API-Key': self.config['DEFAULT']['ptscreens_api'], + } + response = requests.post(url, data=data, headers=headers, timeout=timeout) + response = response.json() + if response.get('status_code') != 200: + console.print("[yellow]PT Screens failed, trying next image host") break + img_url = response['data']['image']['url'] + raw_url = img_url + web_url = img_url + upload_success = True # Mark the upload as successful + + elif img_host == "pixhost": + url = "https://api.pixhost.to/images" + data = { + 'content_type': '0', + 'max_th_size': 350, + } + files = { + 'img': ('file-upload[0]', open(image, 'rb')), + } + response = requests.post(url, data=data, files=files, timeout=timeout) + if response.status_code != 200: + console.print("[yellow]Pixhost failed, trying next image host") + break + response = response.json() + raw_url = response['th_url'].replace('https://t', 'https://img').replace('/thumbs/', '/images/') + img_url = response['th_url'] + web_url = response['show_url'] + upload_success = True # Mark the upload as successful + + elif img_host == "lensdump": + url = "https://lensdump.com/api/1/upload" + data = { + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + headers = { + 'X-API-Key': self.config['DEFAULT']['lensdump_api'], + } + response = requests.post(url, data=data, headers=headers, timeout=timeout) + response = response.json() + if response.get('status_code') != 200: + console.print("[yellow]Lensdump failed, trying next image host") + break + img_url = response['data']['image']['url'] + raw_url = img_url + 
web_url = response['data']['url_viewer'] + upload_success = True # Mark the upload as successful - # Update progress bar and print the result on the same line - progress.console.print(f"[cyan]Uploaded image {i + 1}/{total_screens}: {raw_url}", end='\r') + else: + console.print(f"[red]Unsupported image host: {img_host}") + break - # Add the image details to the list + # Add the image details to the list after a successful upload + if upload_success: image_dict = {'img_url': img_url, 'raw_url': raw_url, 'web_url': web_url} image_list.append(image_dict) - successfully_uploaded.add(image) # Track successfully uploaded images + successfully_uploaded.add(image) # Track the uploaded image progress.advance(upload_task) i += 1 - break # Break the retry loop if successful + break # Break retry loop after a successful upload - except Exception as e: - retry_count += 1 - console.print(f"[yellow]Failed to upload {image} to {img_host}. Attempt {retry_count}/{max_retries}. Exception: {str(e)}") - if retry_count >= max_retries: - console.print(f"[red]Max retries reached for {img_host}. Moving to next image host.") - break + except Exception as e: + retry_count += 1 + console.print(f"[yellow]Failed to upload {image} to {img_host}. Attempt {retry_count}/{max_retries}. Exception: {str(e)}") - time.sleep(0.5) + # Backoff strategy + exponential_backoff(retry_count, initial_timeout) - if i >= total_screens: - return_dict['image_list'] = image_list - console.print(f"\n[cyan]Completed uploading images. Total uploaded: {len(image_list)}") - return image_list, i + if retry_count >= max_retries: + console.print(f"[red]Max retries reached for {img_host}. Moving to next image host.") + break # Break out of retry loop after max retries - # If we broke out of the loop due to a failure, switch to the next host and retry only remaining images - img_host_num += 1 - img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num}') - if not img_host: - console.print("[red]All image hosts failed. Unable to complete uploads.") - return image_list, i + # If max retries are reached, break out of the image loop and move to the next host + if not upload_success: + break + + # Switch to the next host if retries fail for any image + if not upload_success: + img_host_num += 1 + img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num}') + + if not img_host: + console.print("[red]All image hosts failed. Unable to complete uploads.") + return image_list, i + else: + # If successful, stop switching hosts + break - # Ensure that if all attempts fail, a valid tuple is returned - return image_list, i + # Ensure that if all attempts fail, a valid tuple is returned + return image_list, i async def imgbox_upload(self, chdir, image_glob): os.chdir(chdir) From cf81e4faa48e231354cd17c92504d00c14fca625 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 29 Sep 2024 20:01:00 +1000 Subject: [PATCH 229/741] Catch slower connections 5 seconds worked here with a 4K image set, but slower connections would probably struggle. 
--- src/prep.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/prep.py b/src/prep.py index 925274bb5..588a30068 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2530,7 +2530,7 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i image_list = [] successfully_uploaded = set() # Track successfully uploaded images - initial_timeout = 5 # Set the initial timeout for backoff + initial_timeout = 10 # Set the initial timeout for backoff if custom_img_list: image_glob = custom_img_list @@ -2550,8 +2550,8 @@ def exponential_backoff(retry_count, initial_timeout): if retry_count == 1: backoff_time = initial_timeout else: - # Each subsequent retry increases the timeout by 50% - backoff_time = initial_timeout * (1.5 ** (retry_count - 1)) + # Each subsequent retry increases the timeout by 70% + backoff_time = initial_timeout * (1.7 ** (retry_count - 1)) # Add a small random jitter to avoid synchronization backoff_time += random.uniform(0, 1) From 9c5e7847c10c95a977e5a7731363ec1967f0b14e Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 30 Sep 2024 07:04:15 +1000 Subject: [PATCH 230/741] Squashed commit of the following: commit 1cfc2d81acc136ac0c4067b8dec1957137ecf4c7 Author: Audionut Date: Mon Sep 30 07:03:55 2024 +1000 Also remove sky, as it seems to be only skyshowtime commit 916cbde79c7c00331c7852d8d7c4911c559430db Author: Audionut Date: Mon Sep 30 07:02:25 2024 +1000 Remove unknown services and duplicate shortnames commit df962b1a9839a15f2a3e6e8602bc2d63700b5d18 Author: tiberio87 Date: Sun Sep 29 19:39:15 2024 +0200 correct update prep.py commit f691920f003524664ef10adcf056e9bcb4d229db Merge: 8320e73 cf81e4f Author: Tiberio <90521592+tiberio87@users.noreply.github.com> Date: Sun Sep 29 18:10:22 2024 +0200 Merge branch 'Audionut:master' into master commit 8320e73980b13b9046443dc9443681dc38c7950a Author: tiberio87 Date: Sun Sep 29 11:40:52 2024 +0200 edit TIMvision commit 37a895331d8d4e511186e302be063d2d265d3dc1 Author: tiberio87 Date: Sun Sep 29 11:39:09 2024 +0200 edit TIMV commit dfb9c7afd3de5da347692820420d02ef89ab6d78 Author: tiberio87 Date: Sun Sep 29 10:06:08 2024 +0200 edit and add italian streaming service commit 8864a10333ba108a96f61f6c9317848cf58aa338 Merge: 92826a9 04f4074 Author: Tiberio <90521592+tiberio87@users.noreply.github.com> Date: Sun Sep 29 08:06:00 2024 +0200 Merge branch 'Audionut:master' into master commit 92826a9d98fcdad4af92a4136a3aad2baf6f3969 Merge: de3f285 08db864 Author: Tiberio <90521592+tiberio87@users.noreply.github.com> Date: Sat Sep 28 17:56:22 2024 +0200 Merge branch 'Audionut:master' into master commit de3f2859062dc4ff91be757ebd0e9a3df36e83a8 Author: tiberio87 Date: Sat Sep 28 08:10:32 2024 +0200 add italian streaming service --- src/prep.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index 588a30068..32dff7ede 100644 --- a/src/prep.py +++ b/src/prep.py @@ -3085,7 +3085,7 @@ def get_service(self, video=None, tag=None, audio=None, guess_title=None, get_se 'KAYO': 'KAYO', 'KNOW': 'KNOW', 'Knowledge Network': 'KNOW', 'KNPY': 'KNPY', 'Kanopy': 'KNPY', 'LIFE': 'LIFE', 'Lifetime': 'LIFE', 'LN': 'LN', 'MA': 'MA', 'Movies Anywhere': 'MA', 'MAX': 'MAX', 'MBC': 'MBC', 'MNBC': 'MNBC', 'MSNBC': 'MNBC', 'MTOD': 'MTOD', 'Motor Trend OnDemand': 'MTOD', 'MTV': 'MTV', 'MUBI': 'MUBI', 'NATG': 'NATG', 'National Geographic': 'NATG', 'NBA': 'NBA', 'NBA TV': 'NBA', 'NBC': 'NBC', 'NF': 'NF', 'Netflix': 'NF', - 'National Film Board': 'NFB', 'NFL': 'NFL', 'NFLN': 'NFLN', 'NFL 
Now': 'NFLN', 'NICK': 'NICK', 'Nickelodeon': 'NICK', 'NRK': 'NRK', + 'National Film Board': 'NFB', 'NFL': 'NFL', 'NFLN': 'NFLN', 'NFL Now': 'NFLN', 'NICK': 'NICK', 'Nickelodeon': 'NICK', 'NOW': 'NOW', 'NRK': 'NRK', 'Norsk Rikskringkasting': 'NRK', 'OnDemandKorea': 'ODK', 'Opto': 'OPTO', 'Oprah Winfrey Network': 'OWN', 'PA': 'PA', 'PBS': 'PBS', 'PBSK': 'PBSK', 'PBS Kids': 'PBSK', 'PCOK': 'PCOK', 'Peacock': 'PCOK', 'PLAY': 'PLAY', 'PLUZ': 'PLUZ', 'Pluzz': 'PLUZ', 'PMNP': 'PMNP', 'PMNT': 'PMNT', 'PMTP': 'PMTP', 'POGO': 'POGO', 'PokerGO': 'POGO', 'PSN': 'PSN', 'Playstation Network': 'PSN', 'PUHU': 'PUHU', 'QIBI': 'QIBI', @@ -3093,7 +3093,7 @@ def get_service(self, video=None, tag=None, audio=None, guess_title=None, get_se 'RTE One': 'RTE', 'RUUTU': 'RUUTU', 'SBS': 'SBS', 'Science Channel': 'SCI', 'SESO': 'SESO', 'SeeSo': 'SESO', 'SHMI': 'SHMI', 'Shomi': 'SHMI', 'SKST': 'SKST', 'SkyShowtime': 'SKST', 'SHO': 'SHO', 'Showtime': 'SHO', 'SNET': 'SNET', 'Sportsnet': 'SNET', 'Sony': 'SONY', 'SPIK': 'SPIK', 'Spike': 'SPIK', 'Spike TV': 'SPKE', 'SPRT': 'SPRT', 'Sprout': 'SPRT', 'STAN': 'STAN', 'Stan': 'STAN', 'STARZ': 'STARZ', 'STRP': 'STRP', 'Star+': 'STRP', 'STZ': 'STZ', 'Starz': 'STZ', 'SVT': 'SVT', - 'Sveriges Television': 'SVT', 'SWER': 'SWER', 'SwearNet': 'SWER', 'SYFY': 'SYFY', 'Syfy': 'SYFY', 'TBS': 'TBS', 'TEN': 'TEN', + 'Sveriges Television': 'SVT', 'SWER': 'SWER', 'SwearNet': 'SWER', 'SYFY': 'SYFY', 'Syfy': 'SYFY', 'TBS': 'TBS', 'TEN': 'TEN', 'TIMV': 'TIMV', 'TIMvision': 'TIMV', 'TFOU': 'TFOU', 'TFou': 'TFOU', 'TIMV': 'TIMV', 'TLC': 'TLC', 'TOU': 'TOU', 'TRVL': 'TRVL', 'TUBI': 'TUBI', 'TubiTV': 'TUBI', 'TV3': 'TV3', 'TV3 Ireland': 'TV3', 'TV4': 'TV4', 'TV4 Sweeden': 'TV4', 'TVING': 'TVING', 'TVL': 'TVL', 'TV Land': 'TVL', 'TVNZ': 'TVNZ', 'UFC': 'UFC', 'UKTV': 'UKTV', 'UNIV': 'UNIV', 'Univision': 'UNIV', 'USAN': 'USAN', 'USA Network': 'USAN', From e51d577fcaacae78c67d5fa8c893821aece22e35 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 30 Sep 2024 14:42:32 +1000 Subject: [PATCH 231/741] imgbox, imgbox, imgbox Further commit to come --- src/prep.py | 137 ++++++++++++++++++++++++---------------------------- 1 file changed, 64 insertions(+), 73 deletions(-) diff --git a/src/prep.py b/src/prep.py index 32dff7ede..7d85fc8fd 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2546,23 +2546,16 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i return existing_images, total_screens def exponential_backoff(retry_count, initial_timeout): - # First retry uses the initial timeout + # Exponential backoff logic with jitter if retry_count == 1: backoff_time = initial_timeout else: - # Each subsequent retry increases the timeout by 70% - backoff_time = initial_timeout * (1.7 ** (retry_count - 1)) - - # Add a small random jitter to avoid synchronization + backoff_time = initial_timeout * (1.4 ** (retry_count - 1)) backoff_time += random.uniform(0, 1) - - # Sleep for the calculated backoff time time.sleep(backoff_time) - return backoff_time while True: - # Get remaining images that have not been uploaded yet remaining_images = [img for img in image_glob[-screens:] if img not in successfully_uploaded] with Progress( @@ -2571,16 +2564,57 @@ def exponential_backoff(retry_count, initial_timeout): "[cyan]{task.completed}/{task.total}", TimeRemainingColumn() ) as progress: - upload_task = progress.add_task(f"[green]Uploading Screens to {img_host}...", total=len(remaining_images)) + upload_task = progress.add_task("[green]Uploading Screens...", total=len(remaining_images)) + 
console.print(f"[cyan]Uploading screens to {img_host}...") for image in remaining_images: - retry_count = 0 # Reset retry count for each image - upload_success = False # Track if the image was successfully uploaded + retry_count = 0 + upload_success = False while retry_count < max_retries and not upload_success: try: - timeout = exponential_backoff(retry_count + 1, initial_timeout) # Backoff increases both delay and timeout - if img_host == "ptpimg": + timeout = exponential_backoff(retry_count + 1, initial_timeout) + if img_host == "imgbox": + try: + async def imgbox_upload(image): + gallery = pyimgbox.Gallery(thumb_width=350, square_thumbs=False) + async for submission in gallery.add(image): + return submission + + submission = asyncio.run(imgbox_upload(image)) + + if not submission['success']: + console.print(f"[yellow]Imgbox upload failed: {submission['error']}, trying next image host") + retry_count += 1 + if retry_count >= max_retries: + console.print("[red]Max retries reached for imgbox. Moving to next image host.") + img_host_num += 1 + img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num}') + if not img_host: + console.print("[red]All image hosts failed. Unable to complete uploads.") + return image_list, i + break # Move to the next host + continue # Retry imgbox until max_retries + + img_url = submission['thumbnail_url'] + raw_url = submission['image_url'] + web_url = submission['web_url'] + upload_success = True + + except Exception as e: + console.print(f"[yellow]Failed to upload {image} to imgbox. Exception: {str(e)}") + retry_count += 1 + if retry_count >= max_retries: + console.print("[red]Max retries reached for imgbox. Moving to next image host.") + img_host_num += 1 + img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num}') + if not img_host: + console.print("[red]All image hosts failed. 
Unable to complete uploads.") + return image_list, i + break + continue + + elif img_host == "ptpimg": payload = { 'format': 'json', 'api_key': self.config['DEFAULT']['ptpimg_api'] @@ -2594,7 +2628,7 @@ def exponential_backoff(retry_count, initial_timeout): img_url = f"https://ptpimg.me/{ptpimg_code}.{ptpimg_ext}" raw_url = img_url web_url = img_url - upload_success = True # Mark the upload as successful + upload_success = True elif img_host == "imgbb": url = "https://api.imgbb.com/1/upload" @@ -2607,7 +2641,7 @@ def exponential_backoff(retry_count, initial_timeout): img_url = response['data']['image']['url'] raw_url = img_url web_url = img_url - upload_success = True # Mark the upload as successful + upload_success = True elif img_host == "ptscreens": url = "https://ptscreens.com/api/1/upload" @@ -2625,7 +2659,7 @@ def exponential_backoff(retry_count, initial_timeout): img_url = response['data']['image']['url'] raw_url = img_url web_url = img_url - upload_success = True # Mark the upload as successful + upload_success = True elif img_host == "pixhost": url = "https://api.pixhost.to/images" @@ -2644,7 +2678,7 @@ def exponential_backoff(retry_count, initial_timeout): raw_url = response['th_url'].replace('https://t', 'https://img').replace('/thumbs/', '/images/') img_url = response['th_url'] web_url = response['show_url'] - upload_success = True # Mark the upload as successful + upload_success = True elif img_host == "lensdump": url = "https://lensdump.com/api/1/upload" @@ -2662,13 +2696,8 @@ def exponential_backoff(retry_count, initial_timeout): img_url = response['data']['image']['url'] raw_url = img_url web_url = response['data']['url_viewer'] - upload_success = True # Mark the upload as successful + upload_success = True - else: - console.print(f"[red]Unsupported image host: {img_host}") - break - - # Add the image details to the list after a successful upload if upload_success: image_dict = {'img_url': img_url, 'raw_url': raw_url, 'web_url': web_url} image_list.append(image_dict) @@ -2680,63 +2709,25 @@ def exponential_backoff(retry_count, initial_timeout): except Exception as e: retry_count += 1 console.print(f"[yellow]Failed to upload {image} to {img_host}. Attempt {retry_count}/{max_retries}. Exception: {str(e)}") - - # Backoff strategy exponential_backoff(retry_count, initial_timeout) if retry_count >= max_retries: console.print(f"[red]Max retries reached for {img_host}. Moving to next image host.") - break # Break out of retry loop after max retries - - # If max retries are reached, break out of the image loop and move to the next host - if not upload_success: - break - - # Switch to the next host if retries fail for any image - if not upload_success: - img_host_num += 1 - img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num}') - - if not img_host: - console.print("[red]All image hosts failed. Unable to complete uploads.") - return image_list, i + img_host_num += 1 + img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num}') + if not img_host: + console.print("[red]All image hosts failed. 
Unable to complete uploads.") + return image_list, i + break # Move to the next host after max retries + + # Exit the loop after switching hosts + if img_host_num > 1 and not upload_success: + continue # Continue to the next host else: - # If successful, stop switching hosts - break + break # Break if upload was successful - # Ensure that if all attempts fail, a valid tuple is returned return image_list, i - async def imgbox_upload(self, chdir, image_glob): - os.chdir(chdir) - image_list = [] - - # Initialize the progress bar - with Progress( - TextColumn("[bold green]Uploading Screens to Imgbox..."), - BarColumn(), - "[cyan]{task.completed}/{task.total}", - TimeRemainingColumn() - ) as progress: - upload_task = progress.add_task("Uploading...", total=len(image_glob)) - - async with pyimgbox.Gallery(thumb_width=350, square_thumbs=False) as gallery: - async for submission in gallery.add(image_glob): - if not submission['success']: - console.print(f"[red]There was an error uploading to imgbox: [yellow]{submission['error']}[/yellow][/red]") - return [] - else: - image_dict = {} - image_dict['web_url'] = submission['web_url'] - image_dict['img_url'] = submission['thumbnail_url'] - image_dict['raw_url'] = submission['image_url'] - image_list.append(image_dict) - - # Update the progress bar - progress.advance(upload_task) - - return image_list - async def get_name(self, meta): type = meta.get('type', "") title = meta.get('title', "") From 2b8d973e0c7ae498051f60afa25b86acaf5c66e8 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 30 Sep 2024 15:07:05 +1000 Subject: [PATCH 232/741] Working image hosts todo: Catch hard site errors and just skip Fix image_glob progression with host skip --- src/prep.py | 43 ++++++++++++++++++++++++------------------- 1 file changed, 24 insertions(+), 19 deletions(-) diff --git a/src/prep.py b/src/prep.py index 7d85fc8fd..89e392062 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2550,7 +2550,7 @@ def exponential_backoff(retry_count, initial_timeout): if retry_count == 1: backoff_time = initial_timeout else: - backoff_time = initial_timeout * (1.4 ** (retry_count - 1)) + backoff_time = initial_timeout * (1.5 ** (retry_count - 1)) backoff_time += random.uniform(0, 1) time.sleep(backoff_time) return backoff_time @@ -2574,27 +2574,29 @@ def exponential_backoff(retry_count, initial_timeout): while retry_count < max_retries and not upload_success: try: timeout = exponential_backoff(retry_count + 1, initial_timeout) + + # Add imgbox handling here if img_host == "imgbox": try: - async def imgbox_upload(image): + async def imgbox_upload(image_glob): gallery = pyimgbox.Gallery(thumb_width=350, square_thumbs=False) - async for submission in gallery.add(image): + async for submission in gallery.add(image_glob): return submission - submission = asyncio.run(imgbox_upload(image)) + submission = asyncio.run(imgbox_upload(image_glob)) if not submission['success']: - console.print(f"[yellow]Imgbox upload failed: {submission['error']}, trying next image host") - retry_count += 1 - if retry_count >= max_retries: - console.print("[red]Max retries reached for imgbox. Moving to next image host.") - img_host_num += 1 - img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num}') - if not img_host: - console.print("[red]All image hosts failed. 
Unable to complete uploads.") - return image_list, i - break # Move to the next host - continue # Retry imgbox until max_retries + console.print(f"[red]Imgbox upload failed: {submission['error']}") + img_host_num += 1 + next_img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num + 1}', 'No more hosts') + console.print(f"[blue]Moving to next image host: {next_img_host}.") + + img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num}') + + if not img_host: + console.print("[red]All image hosts failed. Unable to complete uploads.") + return image_list, i + break img_url = submission['thumbnail_url'] raw_url = submission['image_url'] @@ -2605,7 +2607,8 @@ async def imgbox_upload(image): console.print(f"[yellow]Failed to upload {image} to imgbox. Exception: {str(e)}") retry_count += 1 if retry_count >= max_retries: - console.print("[red]Max retries reached for imgbox. Moving to next image host.") + next_img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num + 1}', 'No more hosts') + console.print(f"[red]Max retries reached for imgbox. Moving to next image host: {next_img_host}.") img_host_num += 1 img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num}') if not img_host: @@ -2698,12 +2701,13 @@ async def imgbox_upload(image): web_url = response['data']['url_viewer'] upload_success = True + # Only increment `i` after a successful upload if upload_success: image_dict = {'img_url': img_url, 'raw_url': raw_url, 'web_url': web_url} image_list.append(image_dict) successfully_uploaded.add(image) # Track the uploaded image progress.advance(upload_task) - i += 1 + i += 1 # Increment the image counter only after success break # Break retry loop after a successful upload except Exception as e: @@ -2712,13 +2716,14 @@ async def imgbox_upload(image): exponential_backoff(retry_count, initial_timeout) if retry_count >= max_retries: - console.print(f"[red]Max retries reached for {img_host}. Moving to next image host.") + next_img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num + 1}', 'No more hosts') + console.print(f"[red]Max retries reached for {img_host}. Moving to next image host: {next_img_host}.") img_host_num += 1 img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num}') if not img_host: console.print("[red]All image hosts failed. Unable to complete uploads.") return image_list, i - break # Move to the next host after max retries + break # Exit the loop after switching hosts if img_host_num > 1 and not upload_success: From af5fd1eba71e95ec9a29eb9e2283dfd94204b763 Mon Sep 17 00:00:00 2001 From: Zips-sipZ Date: Mon, 30 Sep 2024 22:41:01 +0200 Subject: [PATCH 233/741] Update ULCX.py Updating ULCX.py to use the default UNIT3D base template to resolve current issues. 
--- src/trackers/ULCX.py | 290 +++++++++++++++++-------------------------- 1 file changed, 112 insertions(+), 178 deletions(-) diff --git a/src/trackers/ULCX.py b/src/trackers/ULCX.py index 39555dfd0..2b1b816c8 100644 --- a/src/trackers/ULCX.py +++ b/src/trackers/ULCX.py @@ -1,229 +1,163 @@ # -*- coding: utf-8 -*- # import discord import asyncio +import requests import platform +from str2bool import str2bool -import requests -from src.console import console from src.trackers.COMMON import COMMON +from src.console import console + +class ULCX(): -class ULCX: def __init__(self, config): self.config = config - self.tracker = "ULCX" - self.source_flag = "ULCX" - self.upload_url = "https://upload.cx/api/torrents/upload" - self.search_url = "https://upload.cx/api/torrents/filter" + self.tracker = 'ULCX' + self.source_flag = 'ULCX' + self.upload_url = 'https://upload.cx/api/torrents/upload' + self.search_url = 'https://upload.cx/api/torrents/filter' self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" - self.banned_groups = [ - "Tigole", "x0r", "Judas", "SPDVD", "MeGusta", "YIFY", "SWTYBLZ", "TAoE", "TSP", "TSPxL", "LAMA", "4K4U", "ION10", - "Will1869", "TGx", "Sicario", "QxR", "Hi10", "EMBER", "FGT", "AROMA", "d3g", "nikt0", "Grym", "RARBG", "iVy", "NuBz", - "NAHOM", "EDGE2020", "FnP", - ] + self.banned_groups = ['Tigole', 'x0r', 'Judas', 'SPDVD', 'MeGusta', 'YIFY', 'SWTYBLZ', 'TAoE', 'TSP', 'TSPxL', 'LAMA', '4K4U', + 'ION10', 'Will1869', 'TGx', 'Sicario', 'QxR', 'Hi10', 'EMBER', 'FGT', 'AROMA', 'd3g', 'nikt0', 'Grym', + 'RARBG', 'iVy', 'FnP', 'EDGE2020', 'NuBz', 'NAHOM'] + pass async def get_cat_id(self, category_name): category_id = { - "MOVIE": "1", - "TV": "2", - }.get(category_name, "0") + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '0') return category_id async def get_type_id(self, type): type_id = { - "DISC": "1", - "REMUX": "2", - "WEBDL": "4", - "WEBRIP": "5", - "HDTV": "6", - "ENCODE": "3", - }.get(type, "0") + 'DISC': '1', + 'REMUX': '2', + 'WEBDL': '4', + 'WEBRIP': '5', + 'HDTV': '6', + 'ENCODE': '3' + }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { - "8640p": "10", - "4320p": "1", - "2160p": "2", - "1440p": "3", - "1080p": "3", - "1080i": "4", - "720p": "5", - "576p": "6", - "576i": "7", - "480p": "8", - "480i": "9", - }.get(resolution, "10") + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', + '1080p': '3', + '1080i': '4', + '720p': '5', + '576p': '6', + '576i': '7', + '480p': '8', + '480i': '9' + }.get(resolution, '10') return resolution_id - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta["category"]) - type_id = await self.get_type_id(meta["type"]) - resolution_id = await self.get_res_id(meta["resolution"]) - await common.unit3d_edit_desc(meta, self.tracker, signature=self.signature) - region_id = await common.unit3d_region_ids(meta.get("region")) - distributor_id = await common.unit3d_distributor_ids(meta.get("distributor")) - if meta["anon"] != 0 or self.config["TRACKERS"][self.tracker].get( - "anon", False - ): - anon = 1 - else: + cat_id = await self.get_cat_id(meta['category']) + type_id = await self.get_type_id(meta['type']) + resolution_id = await self.get_res_id(meta['resolution']) + await common.unit3d_edit_desc(meta, self.tracker, self.signature) + region_id = await 
common.unit3d_region_ids(meta.get('region')) + distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 + else: + anon = 1 - modq = await self.get_flag(meta, "modq") - - if meta["bdinfo"] is not None: + if meta['bdinfo'] is not None: mi_dump = None - bd_dump = open( - f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", - "r", - encoding="utf-8", - ).read() + bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: - mi_dump = open( - f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", - "r", - encoding="utf-8", - ).read() + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open( - f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", - "r", encoding='utf-8', - ).read() - open_torrent = open( - f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", - "rb", - ) - files = {"torrent": open_torrent} + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') + files = {'torrent': open_torrent} data = { - "name": meta["name"], - "description": desc, - "mediainfo": mi_dump, - "bdinfo": bd_dump, - "category_id": cat_id, - "type_id": type_id, - "resolution_id": resolution_id, - "tmdb": meta["tmdb"], - "imdb": meta["imdb_id"].replace("tt", ""), - "tvdb": meta["tvdb_id"], - "mal": meta["mal_id"], - "igdb": 0, - "anonymous": anon, - "stream": meta["stream"], - "sd": meta["sd"], - "keywords": meta["keywords"], - "personal_release": int(meta.get("personalrelease", False)), - "internal": 0, - "featured": 0, - "free": 0, - "doubleup": 0, - "sticky": 0, - "mod_queue_opt_in": modq, + 'name': meta['name'], + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config["TRACKERS"][self.tracker].get("internal", False): - if meta["tag"] != "" and ( - meta["tag"][1:] - in self.config["TRACKERS"][self.tracker].get("internal_groups", []) - ): - data["internal"] = 1 + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: + if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + data['internal'] = 1 if region_id != 0: - data["region_id"] = region_id + data['region_id'] = region_id if distributor_id != 0: - data["distributor_id"] = distributor_id - if meta.get("category") == "TV": - data["season_number"] = meta.get("season_int", "0") - data["episode_number"] = meta.get("episode_int", "0") + data['distributor_id'] = distributor_id + if meta.get('category') == "TV": + data['season_number'] = meta.get('season_int', '0') + data['episode_number'] = meta.get('episode_int', '0') headers = { - "User-Agent": f"Upload Assistant/2.1 ({platform.system()} 
{platform.release()})" + 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + } + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - params = {"api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip()} - - if not meta["debug"]: - success = "Unknown" - try: - response = requests.post( - url=self.upload_url, - files=files, - data=data, - headers=headers, - params=params, - ) - response.raise_for_status() - response_json = response.json() - success = response_json.get("success", False) - data = response_json.get("data", {}) - except Exception as e: - console.print( - f"[red]Encountered Error: {e}[/red]\n[bold yellow]May have uploaded, please go check.." - ) - - if success == "Unknown": - console.print( - "[bold yellow]Status of upload is unknown, please go check.." - ) - success = False - elif success: - console.print("[bold green]Torrent uploaded successfully!") - else: - console.print("[bold red]Torrent upload failed.") - - if data: - if ( - "name" in data - and "The name has already been taken." in data["name"] - ): - console.print("[red]Name has already been taken.") - if ( - "info_hash" in data - and "The info hash has already been taken." in data["info_hash"] - ): - console.print("[red]Info hash has already been taken.") - else: - console.print("[cyan]Request Data:") - console.print(data) + if meta['debug'] is False: + response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: - open_torrent.close() - except Exception as e: - console.print(f"[red]Failed to close torrent file: {e}[/red]") - - return success - - async def get_flag(self, meta, flag_name): - config_flag = self.config["TRACKERS"][self.tracker].get(flag_name) - if config_flag is not None: - return 1 if config_flag else 0 - - return 1 if meta.get(flag_name, False) else 0 + console.print(response.json()) + except Exception: + console.print("It may have uploaded, go check") + return + else: + console.print("[cyan]Request Data:") + console.print(data) + open_torrent.close() - async def search_existing(self, meta): - dupes = {} + async def search_existing(self, meta, disctype): + dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - "api_token": self.config["TRACKERS"][self.tracker]["api_key"].strip(), - "tmdbId": meta["tmdb"], - "categories[]": await self.get_cat_id(meta["category"]), - "types[]": await self.get_type_id(meta["type"]), - "resolutions[]": await self.get_res_id(meta["resolution"]), - "name": "", + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } - if meta.get("edition", "") != "": - params["name"] = params["name"] + f" {meta['edition']}" + if meta.get('edition', "") != "": + params['name'] = params['name'] + f" {meta['edition']}" try: response = requests.get(url=self.search_url, params=params) response = response.json() - for each in response["data"]: - result = each["attributes"]["name"] - size = each["attributes"]["size"] - dupes[result] = size - except Exception as e: - console.print( - f"[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect. 
Error: {e}" - ) + for each in response['data']: + result = [each][0]['attributes']['name'] + # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() + # if difference >= 0.05: + dupes.append(result) + except Exception: + console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) return dupes From c76c04bd7e6e8197fbc6cbc66a98296e628c551e Mon Sep 17 00:00:00 2001 From: edge20200 <126633394+edge20200@users.noreply.github.com> Date: Mon, 30 Sep 2024 21:06:58 -0400 Subject: [PATCH 234/741] Add OnlyImg Support Add support to only image --- data/example-config.py | 2 ++ src/args.py | 2 +- src/prep.py | 22 +++++++++++++++++++++- 3 files changed, 24 insertions(+), 2 deletions(-) diff --git a/data/example-config.py b/data/example-config.py index ca13194b5..dea74e195 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -11,6 +11,7 @@ "ptpimg_api": "ptpimg api key", "lensdump_api": "lensdump api key", "ptscreens_api": "ptscreens api key", + "oeimg_api": "oeimg api key" # Order of image hosts, and backup image hosts "img_host_1": "imgbb", @@ -19,6 +20,7 @@ "img_host_4": "pixhost", "img_host_5": "lensdump", "img_host_6": "ptscreens", + "img_host_7": "oeimg" "screens": "6", diff --git a/src/args.py b/src/args.py index 66286fd85..a4456c1d5 100644 --- a/src/args.py +++ b/src/args.py @@ -58,7 +58,7 @@ def parse(self, args, meta): parser.add_argument('-manual_dvds', '--manual_dvds', nargs='*', required=False, help="Override the default number of DVD's (eg: use 2xDVD9+DVD5 instead)", type=str, dest='manual_dvds', default="") parser.add_argument('-pb', '--desclink', nargs='*', required=False, help="Custom Description (link to hastebin/pastebin)") parser.add_argument('-df', '--descfile', nargs='*', required=False, help="Custom Description (path to file)") - parser.add_argument('-ih', '--imghost', nargs='*', required=False, help="Image Host", choices=['imgbb', 'ptpimg', 'imgbox', 'pixhost', 'lensdump', 'ptscreens']) + parser.add_argument('-ih', '--imghost', nargs='*', required=False, help="Image Host", choices=['imgbb', 'ptpimg', 'imgbox', 'pixhost', 'lensdump', 'ptscreens', 'oeimg']) parser.add_argument('-siu', '--skip-imagehost-upload', dest='skip_imghost_upload', action='store_true', required=False, help="Skip Uploading to an image host") parser.add_argument('-th', '--torrenthash', nargs='*', required=False, help="Torrent Hash to re-use from your client's session directory") parser.add_argument('-nfo', '--nfo', action='store_true', required=False, help="Use .nfo in directory for description") diff --git a/src/prep.py b/src/prep.py index 89e392062..2ef625ed3 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1269,6 +1269,8 @@ def _is_vob_good(n, loops, num_screens): i += 1 elif self.img_host == "ptscreens": i += 1 + elif self.img_host == "oeimg" + i += 1 else: console.print("[red]Image too large for your image host, retaking") retake = True @@ -1384,7 +1386,7 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non i += 1 elif os.path.getsize(Path(image_path)) <= 10000000 and self.img_host in ["imgbox", 'pixhost'] and retake is False: i += 1 - elif self.img_host in ["ptpimg", "lensdump", "ptscreens"] and retake is False: + elif self.img_host in ["ptpimg", "lensdump", "ptscreens", "oeimg"] and retake is False: i += 1 elif self.img_host == "freeimage.host": console.print("[bold red]Support for freeimage.host has been removed. 
Please remove from your config") @@ -2664,6 +2666,24 @@ async def imgbox_upload(image_glob): web_url = img_url upload_success = True + elif img_host == "oeimg": + url = "https://imgoe.download/api/1/upload" + data = { + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + headers = { + 'X-API-Key': self.config['DEFAULT']['oeimg_api'], + } + response = requests.post(url, data=data, headers=headers, timeout=timeout) + response = response.json() + if response.get('status_code') != 200: + console.print("[yellow]OEimg failed, trying next image host") + break + img_url = response['data']['image']['url'] + raw_url = response['data']['image']['url'] + web_url = response['data']['url_viewer'] + upload_success = True + elif img_host == "pixhost": url = "https://api.pixhost.to/images" data = { From edede08f2ea12bff757d9094546f48d37656c217 Mon Sep 17 00:00:00 2001 From: edge20200 <126633394+edge20200@users.noreply.github.com> Date: Tue, 1 Oct 2024 02:23:54 -0400 Subject: [PATCH 235/741] Update prep.py --- src/prep.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index 2ef625ed3..6dadca9e8 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1269,7 +1269,7 @@ def _is_vob_good(n, loops, num_screens): i += 1 elif self.img_host == "ptscreens": i += 1 - elif self.img_host == "oeimg" + elif self.img_host == "oeimg": i += 1 else: console.print("[red]Image too large for your image host, retaking") From 9b57d8cd3caf25b82a76b4df8ad74c0f83cf8c23 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 1 Oct 2024 21:09:27 +1000 Subject: [PATCH 236/741] Lint example config --- data/example-config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/data/example-config.py b/data/example-config.py index dea74e195..2fdde409c 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -11,7 +11,7 @@ "ptpimg_api": "ptpimg api key", "lensdump_api": "lensdump api key", "ptscreens_api": "ptscreens api key", - "oeimg_api": "oeimg api key" + "oeimg_api": "oeimg api key", # Order of image hosts, and backup image hosts "img_host_1": "imgbb", @@ -20,7 +20,7 @@ "img_host_4": "pixhost", "img_host_5": "lensdump", "img_host_6": "ptscreens", - "img_host_7": "oeimg" + "img_host_7": "oeimg", "screens": "6", From 105c17f1d64ad57ca7ac9899818652e491c4c105 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 2 Oct 2024 07:55:57 +1000 Subject: [PATCH 237/741] format_version needs to be defined in video Fixes https://github.com/Audionut/Upload-Assistant/issues/64 --- src/prep.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/prep.py b/src/prep.py index 6dadca9e8..3d0d0635a 100644 --- a/src/prep.py +++ b/src/prep.py @@ -782,6 +782,7 @@ def filter_mediainfo(data): "UniqueID": track.get("UniqueID"), "Format": track.get("Format"), "Format_Profile": track.get("Format_Profile"), + "Format_Version": track.get("Format_Version"), "Format_Level": track.get("Format_Level"), "Format_Tier": track.get("Format_Tier"), "HDR_Format": track.get("HDR_Format"), From 8904fb455fe793674b32033490ffcd42fafdc4c7 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 3 Oct 2024 15:31:40 +1000 Subject: [PATCH 238/741] Move and fix MTV image host check fixes https://github.com/Audionut/Upload-Assistant/issues/68 --- src/prep.py | 49 ++++++++++++++++++++++++++++++++++--------------- 1 file changed, 34 insertions(+), 15 deletions(-) diff --git a/src/prep.py b/src/prep.py index 3d0d0635a..099885003 100644 --- a/src/prep.py +++ b/src/prep.py @@ -82,18 +82,48 @@ async def 
prompt_user_for_confirmation(self, message: str) -> bool: except EOFError: sys.exit(1) - async def check_images_concurrently(self, imagelist): + async def check_images_concurrently(self, imagelist, meta): + approved_image_hosts = ['ptpimg', 'imgbox'] + invalid_host_found = False # Track if any image is on a non-approved host + + # Function to check each image's URL and host async def check_and_collect(image_dict): img_url = image_dict.get('img_url') or image_dict.get('raw_url') - if img_url and await self.check_image_link(img_url): + if not img_url: + return None + + # Verify the image link + if await self.check_image_link(img_url): + # Check if the image is hosted on an approved image host + if not any(host in img_url for host in approved_image_hosts): + nonlocal invalid_host_found + invalid_host_found = True # Mark that we found an invalid host + return image_dict else: - console.print(f"[yellow]Image link failed verification and will be skipped: {img_url}[/yellow]") return None + # Run image verification concurrently tasks = [check_and_collect(image_dict) for image_dict in imagelist] results = await asyncio.gather(*tasks) - return [image for image in results if image is not None] + + # Collect valid images + valid_images = [image for image in results if image is not None] + + # Convert default_trackers string into a list + default_trackers = self.config['TRACKERS'].get('default_trackers', '') + trackers_list = [tracker.strip() for tracker in default_trackers.split(',')] + + # Ensure meta['trackers'] is a list + if isinstance(meta.get('trackers', ''), str): + meta['trackers'] = [tracker.strip() for tracker in meta['trackers'].split(',')] + + # Issue warning if any valid image is on an unapproved host and MTV is in the trackers list + if 'MTV' in trackers_list or 'MTV' in meta.get('trackers', []): + if invalid_host_found: + console.print("[yellow]Warning: Some images are not hosted on an MTV-approved image host. MTV will fail if you keep these images.[/yellow]") + + return valid_images async def check_image_link(self, url): async with aiohttp.ClientSession() as session: @@ -296,17 +326,6 @@ async def handle_image_list(self, meta, tracker_name): for img in meta['image_list']: console.print(f"[blue]{img}[/blue]") - approved_image_hosts = ['ptpimg', 'imgbox'] - - # Check if the images are already hosted on an approved image host - if all(any(host in image['raw_url'] for host in approved_image_hosts) for image in meta['image_list']): - image_list = meta['image_list'] # noqa #F841 - else: - default_trackers = self.config['TRACKERS'].get('default_trackers', '') - trackers_list = [tracker.strip() for tracker in default_trackers.split(',')] - if 'MTV' in trackers_list or 'MTV' in meta.get('trackers', ''): - console.print("[red]Warning: Some images are not hosted on an MTV approved image host. 
MTV will fail if you keep these images.") - if meta['unattended']: keep_images = True else: From 2dadab5d6a5f8097498dbe93d2d20bbe6b402f7a Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 3 Oct 2024 15:33:21 +1000 Subject: [PATCH 239/741] add metas --- src/prep.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/prep.py b/src/prep.py index 099885003..b0e8f87d0 100644 --- a/src/prep.py +++ b/src/prep.py @@ -171,7 +171,7 @@ async def update_meta_with_unit3d_data(self, meta, tracker_data, tracker_name): if not meta.get('image_list'): # Only handle images if image_list is not already populated if imagelist: # Ensure imagelist is not empty before setting - valid_images = await self.check_images_concurrently(imagelist) + valid_images = await self.check_images_concurrently(imagelist, meta) if valid_images: meta['image_list'] = valid_images if meta.get('image_list'): # Double-check if image_list is set before handling it @@ -235,7 +235,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(ptp_torrent_id, meta, meta.get('is_disc', False)) if not meta.get('image_list'): # Only handle images if image_list is not already populated - valid_images = await self.check_images_concurrently(ptp_imagelist) + valid_images = await self.check_images_concurrently(ptp_imagelist, meta) if valid_images: meta['image_list'] = valid_images await self.handle_image_list(meta, tracker_name) @@ -270,7 +270,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met meta['description'] = ptp_desc meta['skip_gen_desc'] = True if not meta.get('image_list'): # Only handle images if image_list is not already populated - valid_images = await self.check_images_concurrently(ptp_imagelist) + valid_images = await self.check_images_concurrently(ptp_imagelist, meta) if valid_images: meta['image_list'] = valid_images console.print("[green]PTP images added to metadata.[/green]") From 6e6fb0547c83a831b9ba5b9caac5646bfcbd7a03 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 3 Oct 2024 18:14:00 +1000 Subject: [PATCH 240/741] Save proper description when no ID specified --- src/prep.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/prep.py b/src/prep.py index b0e8f87d0..59c260260 100644 --- a/src/prep.py +++ b/src/prep.py @@ -233,6 +233,8 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met # Retrieve PTP description and image list ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(ptp_torrent_id, meta, meta.get('is_disc', False)) + meta['description'] = ptp_desc + meta['skip_gen_desc'] = True if not meta.get('image_list'): # Only handle images if image_list is not already populated valid_images = await self.check_images_concurrently(ptp_imagelist, meta) From 28209747ea56a4dddb515a81ada8c6302d4f0e00 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 4 Oct 2024 09:14:34 +1000 Subject: [PATCH 241/741] Aither DVD naming --- .github/workflows/docker-image.yml | 2 +- src/trackers/AITHER.py | 49 +++++++++++------------------- 2 files changed, 19 insertions(+), 32 deletions(-) diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index 0a33e0f0e..068e29525 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -5,7 +5,7 @@ on: branches: - master - develop - - tik + - aither-naming env: REGISTRY: ghcr.io diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 
693379d03..ad5a50396 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -119,13 +119,12 @@ async def get_flag(self, meta, flag_name): async def edit_name(self, meta): aither_name = meta['name'] + media_info_tracks = meta.get('media_info_tracks', []) # noqa #F841 + resolution = meta.get('resolution') + video_codec = meta.get('video_codec') def has_english_audio(tracks=None, media_info_text=None): - if meta['is_disc'] == "BDMV" and tracks: - for track in tracks: - if track.get('language', '').lower() == 'english': - return True - elif media_info_text: + if media_info_text: audio_section = re.findall(r'Audio[\s\S]+?Language\s+:\s+(\w+)', media_info_text) for i, language in enumerate(audio_section): language = language.lower().strip() @@ -133,40 +132,28 @@ def has_english_audio(tracks=None, media_info_text=None): return True return False - # Helper function to extract the audio language from MediaInfo text or BDMV structure def get_audio_lang(tracks=None, is_bdmv=False, media_info_text=None): - if meta['is_disc'] == "BDMV" and tracks: - return tracks[0].get('language', '').upper() if tracks else "" - elif media_info_text: + if media_info_text: match = re.search(r'Audio[\s\S]+?Language\s+:\s+(\w+)', media_info_text) if match: return match.group(1).upper() - return "" # Return empty string if no audio track is found + return "" - is_bdmv = meta['is_disc'] == "BDMV" # noqa #F841 - media_info_tracks = meta.get('media_info_tracks', []) # noqa #F841 + try: + media_info_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt" + with open(media_info_path, 'r', encoding='utf-8') as f: + media_info_text = f.read() - if meta['is_disc'] == "BDMV": - bdinfo_audio = meta.get('bdinfo', {}).get('audio', []) - has_eng_audio = has_english_audio(bdinfo_audio) - if not has_eng_audio: - audio_lang = get_audio_lang(bdinfo_audio) + if not has_english_audio(media_info_text=media_info_text): + audio_lang = get_audio_lang(media_info_text=media_info_text) if audio_lang: aither_name = aither_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) - else: - # Handle non-BDMV content - try: - media_info_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt" - with open(media_info_path, 'r', encoding='utf-8') as f: - media_info_text = f.read() - - # Check for English audio in the text-based MediaInfo - if not has_english_audio(media_info_text=media_info_text): - audio_lang = get_audio_lang(media_info_text=media_info_text) - if audio_lang: - aither_name = aither_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) - except (FileNotFoundError, KeyError) as e: - print(f"Error processing MEDIAINFO.txt: {e}") + except (FileNotFoundError, KeyError) as e: + print(f"Error processing MEDIAINFO.txt: {e}") + + if meta['is_disc'] == "DVD": + aither_name = aither_name.replace(str(meta['year']), f"{meta['year']} {resolution}", 1) + aither_name = aither_name.replace((meta['audio']), f"{video_codec} {meta['audio']}", 1) return aither_name From 35d7cb66a6f6585a7352ca2443d599c10193332a Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 4 Oct 2024 09:23:48 +1000 Subject: [PATCH 242/741] Aither TV catch --- src/trackers/AITHER.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index ad5a50396..02183805b 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -155,6 +155,9 @@ def get_audio_lang(tracks=None, is_bdmv=False, media_info_text=None): aither_name = aither_name.replace(str(meta['year']), f"{meta['year']} 
{resolution}", 1) aither_name = aither_name.replace((meta['audio']), f"{video_codec} {meta['audio']}", 1) + if meta['category'] == "TV" and meta.get('tv_pack', 0) == 0 and meta.get('episode_title_storage', '').strip() != '' and meta['episode'].strip() != '': + aither_name = aither_name.replace(meta['episode'], f"{meta['episode']} {meta['episode_title_storage']}", 1) + return aither_name async def get_cat_id(self, category_name): From 148820140c018c70169ae51d5974ee546ded0f19 Mon Sep 17 00:00:00 2001 From: Zips-sipZ Date: Fri, 4 Oct 2024 20:17:54 +0200 Subject: [PATCH 243/741] Removing ULCX as modq capable Site doesn't support it (yet) and it's causing issues for users using this currently --- upload.py | 1 - 1 file changed, 1 deletion(-) diff --git a/upload.py b/upload.py index 2338d6f73..6c4e72bd2 100644 --- a/upload.py +++ b/upload.py @@ -264,7 +264,6 @@ async def do_the_thing(base_dir): 'BLU': {'mod_q': True, 'draft': False}, 'AITHER': {'mod_q': True, 'draft': False}, 'BHD': {'draft_live': True}, - 'ULCX': {'mod_q': True} } async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): From f7c1decbaf125a8a3ebc654829daec34c879bbe4 Mon Sep 17 00:00:00 2001 From: Zips-sipZ Date: Fri, 4 Oct 2024 20:23:01 +0200 Subject: [PATCH 244/741] Update example-config.py Setting defaults for API to False where they were true to prevent issues with API calls when not on those sites. Removed a line for ULCX of a function not in use that shouldn't be in the example-config --- data/example-config.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/data/example-config.py b/data/example-config.py index 2fdde409c..9cf6684a8 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -229,7 +229,7 @@ # "anon" : False }, "HDB": { - "useAPI": True, + "useAPI": False, "username": "HDB username", "passkey": "HDB passkey", "announce_url": "https://hdbits.org/announce/Custom_Announce_URL", @@ -241,7 +241,7 @@ # "anon" : "False" }, "TIK": { - "useAPI": True, # Set to True if using TIK + "useAPI": False, # Set to True if using TIK "api_key": "", "announce_url": "https://cinematik.net/announce/", "anon": False, @@ -256,7 +256,6 @@ "api_key": "ULCX api key", "announce_url": "https://upload.cx/announce/customannounceurl", # "anon" : False, - # "modq" : False ## Not working yet }, "MANUAL": { # Uncomment and replace link with filebrowser (https://github.com/filebrowser/filebrowser) link to the Upload-Assistant directory, this will link to your filebrowser instead of uploading to uguu.se From 6abb08ad214b29784004b4d93785aed93f4296ae Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 5 Oct 2024 11:02:59 +1000 Subject: [PATCH 245/741] Services --- src/prep.py | 53 +++++++++++++++++++++++++++++------------------------ 1 file changed, 29 insertions(+), 24 deletions(-) diff --git a/src/prep.py b/src/prep.py index 59c260260..412ec9838 100644 --- a/src/prep.py +++ b/src/prep.py @@ -3104,34 +3104,39 @@ def get_service(self, video=None, tag=None, audio=None, guess_title=None, get_se 'BravoTV': 'BRAV', 'CBC': 'CBC', 'CBS': 'CBS', 'CC': 'CC', 'Comedy Central': 'CC', 'CCGC': 'CCGC', 'Comedians in Cars Getting Coffee': 'CCGC', 'CHGD': 'CHGD', 'CHRGD': 'CHGD', 'CMAX': 'CMAX', 'Cinemax': 'CMAX', 'CMOR': 'CMOR', 'CMT': 'CMT', 'Country Music Television': 'CMT', 'CN': 'CN', 'Cartoon Network': 'CN', 'CNBC': 'CNBC', - 'CNLP': 'CNLP', 'Canal+': 'CNLP', 'COOK': 'COOK', 'CORE': 'CORE', 'CR': 'CR', 'Crunchy Roll': 'CR', 'Crave': 'CRAV', - 'CRIT': 'CRIT', 'Criterion': 'CRIT', 'CRKL': 'CRKL', 'Crackle': 'CRKL', 
'CSPN': 'CSPN', 'CSpan': 'CSPN', 'CTV': 'CTV', 'CUR': 'CUR', - 'CuriosityStream': 'CUR', 'CW': 'CW', 'The CW': 'CW', 'CWS': 'CWS', 'CWSeed': 'CWS', 'DAZN': 'DAZN', 'DCU': 'DCU', - 'DC Universe': 'DCU', 'DDY': 'DDY', 'Digiturk Diledigin Yerde': 'DDY', 'DEST': 'DEST', 'DramaFever': 'DF', 'DHF': 'DHF', - 'Deadhouse Films': 'DHF', 'DISC': 'DISC', 'Discovery': 'DISC', 'DIY': 'DIY', 'DIY Network': 'DIY', 'DOCC': 'DOCC', - 'Doc Club': 'DOCC', 'DPLY': 'DPLY', 'DPlay': 'DPLY', 'DRPO': 'DRPO', 'Discovery Plus': 'DSCP', 'DSKI': 'DSKI', - 'Daisuki': 'DSKI', 'DSNP': 'DSNP', 'Disney+': 'DSNP', 'DSNY': 'DSNY', 'Disney': 'DSNY', 'DTV': 'DTV', - 'EPIX': 'EPIX', 'ePix': 'EPIX', 'ESPN': 'ESPN', 'ESQ': 'ESQ', 'Esquire': 'ESQ', 'ETTV': 'ETTV', 'El Trece': 'ETTV', - 'ETV': 'ETV', 'E!': 'ETV', 'FAM': 'FAM', 'Fandor': 'FANDOR', 'Facebook Watch': 'FBWatch', 'FJR': 'FJR', - 'Family Jr': 'FJR', 'FOOD': 'FOOD', 'Food Network': 'FOOD', 'FOX': 'FOX', 'Fox': 'FOX', 'Fox Premium': 'FOXP', + 'CNLP': 'CNLP', 'Canal+': 'CNLP', 'CNGO': 'CNGO', 'Cinego': 'CNGO', 'COOK': 'COOK', 'CORE': 'CORE', 'CR': 'CR', + 'Crunchy Roll': 'CR', 'Crave': 'CRAV', 'CRIT': 'CRIT', 'Criterion': 'CRIT', 'CRKL': 'CRKL', 'Crackle': 'CRKL', + 'CSPN': 'CSPN', 'CSpan': 'CSPN', 'CTV': 'CTV', 'CUR': 'CUR', 'CuriosityStream': 'CUR', 'CW': 'CW', 'The CW': 'CW', + 'CWS': 'CWS', 'CWSeed': 'CWS', 'DAZN': 'DAZN', 'DCU': 'DCU', 'DC Universe': 'DCU', 'DDY': 'DDY', + 'Digiturk Diledigin Yerde': 'DDY', 'DEST': 'DEST', 'DramaFever': 'DF', 'DHF': 'DHF', 'Deadhouse Films': 'DHF', + 'DISC': 'DISC', 'Discovery': 'DISC', 'DIY': 'DIY', 'DIY Network': 'DIY', 'DOCC': 'DOCC', 'Doc Club': 'DOCC', + 'DPLY': 'DPLY', 'DPlay': 'DPLY', 'DRPO': 'DRPO', 'Discovery Plus': 'DSCP', 'DSKI': 'DSKI', 'Daisuki': 'DSKI', + 'DSNP': 'DSNP', 'Disney+': 'DSNP', 'DSNY': 'DSNY', 'Disney': 'DSNY', 'DTV': 'DTV', 'EPIX': 'EPIX', 'ePix': 'EPIX', + 'ESPN': 'ESPN', 'ESQ': 'ESQ', 'Esquire': 'ESQ', 'ETTV': 'ETTV', 'El Trece': 'ETTV', 'ETV': 'ETV', 'E!': 'ETV', + 'FAM': 'FAM', 'Fandor': 'FANDOR', 'Facebook Watch': 'FBWatch', 'FJR': 'FJR', 'Family Jr': 'FJR', 'FMIO': 'FMIO', + 'Filmio': 'FMIO', 'FOOD': 'FOOD', 'Food Network': 'FOOD', 'FOX': 'FOX', 'Fox': 'FOX', 'Fox Premium': 'FOXP', 'UFC Fight Pass': 'FP', 'FPT': 'FPT', 'FREE': 'FREE', 'Freeform': 'FREE', 'FTV': 'FTV', 'FUNI': 'FUNI', 'FUNi': 'FUNI', 'Foxtel': 'FXTL', 'FYI': 'FYI', 'FYI Network': 'FYI', 'GC': 'GC', 'NHL GameCenter': 'GC', 'GLBL': 'GLBL', 'Global': 'GLBL', 'GLOB': 'GLOB', 'GloboSat Play': 'GLOB', 'GO90': 'GO90', 'GagaOOLala': 'Gaga', 'HBO': 'HBO', 'HBO Go': 'HBO', 'HGTV': 'HGTV', 'HIDI': 'HIDI', 'HIST': 'HIST', 'History': 'HIST', 'HLMK': 'HLMK', 'Hallmark': 'HLMK', - 'HMAX': 'HMAX', 'HBO Max': 'HMAX', 'HS': 'HTSR', 'HTSR': 'HTSR', 'HSTR': 'Hotstar', 'HULU': 'HULU', 'Hulu': 'HULU', 'hoichoi': 'HoiChoi', 'ID': 'ID', - 'Investigation Discovery': 'ID', 'IFC': 'IFC', 'iflix': 'IFX', 'National Audiovisual Institute': 'INA', 'ITV': 'ITV', - 'KAYO': 'KAYO', 'KNOW': 'KNOW', 'Knowledge Network': 'KNOW', 'KNPY': 'KNPY', 'Kanopy': 'KNPY', 'LIFE': 'LIFE', 'Lifetime': 'LIFE', 'LN': 'LN', - 'MA': 'MA', 'Movies Anywhere': 'MA', 'MAX': 'MAX', 'MBC': 'MBC', 'MNBC': 'MNBC', 'MSNBC': 'MNBC', 'MTOD': 'MTOD', 'Motor Trend OnDemand': 'MTOD', 'MTV': 'MTV', 'MUBI': 'MUBI', - 'NATG': 'NATG', 'National Geographic': 'NATG', 'NBA': 'NBA', 'NBA TV': 'NBA', 'NBC': 'NBC', 'NF': 'NF', 'Netflix': 'NF', - 'National Film Board': 'NFB', 'NFL': 'NFL', 'NFLN': 'NFLN', 'NFL Now': 'NFLN', 'NICK': 'NICK', 'Nickelodeon': 'NICK', 'NOW': 'NOW', 'NRK': 'NRK', - 'Norsk 
Rikskringkasting': 'NRK', 'OnDemandKorea': 'ODK', 'Opto': 'OPTO', 'Oprah Winfrey Network': 'OWN', 'PA': 'PA', 'PBS': 'PBS', - 'PBSK': 'PBSK', 'PBS Kids': 'PBSK', 'PCOK': 'PCOK', 'Peacock': 'PCOK', 'PLAY': 'PLAY', 'PLUZ': 'PLUZ', 'Pluzz': 'PLUZ', 'PMNP': 'PMNP', - 'PMNT': 'PMNT', 'PMTP': 'PMTP', 'POGO': 'POGO', 'PokerGO': 'POGO', 'PSN': 'PSN', 'Playstation Network': 'PSN', 'PUHU': 'PUHU', 'QIBI': 'QIBI', - 'RED': 'RED', 'YouTube Red': 'RED', 'RKTN': 'RKTN', 'Rakuten TV': 'RKTN', 'The Roku Channel': 'ROKU', 'RSTR': 'RSTR', 'RTE': 'RTE', - 'RTE One': 'RTE', 'RUUTU': 'RUUTU', 'SBS': 'SBS', 'Science Channel': 'SCI', 'SESO': 'SESO', 'SeeSo': 'SESO', 'SHMI': 'SHMI', 'Shomi': 'SHMI', 'SKST': 'SKST', 'SkyShowtime': 'SKST', - 'SHO': 'SHO', 'Showtime': 'SHO', 'SNET': 'SNET', 'Sportsnet': 'SNET', 'Sony': 'SONY', 'SPIK': 'SPIK', 'Spike': 'SPIK', 'Spike TV': 'SPKE', - 'SPRT': 'SPRT', 'Sprout': 'SPRT', 'STAN': 'STAN', 'Stan': 'STAN', 'STARZ': 'STARZ', 'STRP': 'STRP', 'Star+': 'STRP', 'STZ': 'STZ', 'Starz': 'STZ', 'SVT': 'SVT', - 'Sveriges Television': 'SVT', 'SWER': 'SWER', 'SwearNet': 'SWER', 'SYFY': 'SYFY', 'Syfy': 'SYFY', 'TBS': 'TBS', 'TEN': 'TEN', 'TIMV': 'TIMV', 'TIMvision': 'TIMV', + 'HMAX': 'HMAX', 'HBO Max': 'HMAX', 'HS': 'HTSR', 'HTSR': 'HTSR', 'HSTR': 'Hotstar', 'HULU': 'HULU', 'Hulu': 'HULU', + 'hoichoi': 'HoiChoi', 'ID': 'ID', 'Investigation Discovery': 'ID', 'IFC': 'IFC', 'iflix': 'IFX', + 'National Audiovisual Institute': 'INA', 'ITV': 'ITV', 'JOYN': 'JOYN', 'KAYO': 'KAYO', 'KNOW': 'KNOW', 'Knowledge Network': 'KNOW', + 'KNPY': 'KNPY', 'Kanopy': 'KNPY', 'LIFE': 'LIFE', 'Lifetime': 'LIFE', 'LN': 'LN', 'MA': 'MA', 'Movies Anywhere': 'MA', + 'MAX': 'MAX', 'MBC': 'MBC', 'MNBC': 'MNBC', 'MSNBC': 'MNBC', 'MTOD': 'MTOD', 'Motor Trend OnDemand': 'MTOD', 'MTV': 'MTV', + 'MUBI': 'MUBI', 'NATG': 'NATG', 'National Geographic': 'NATG', 'NBA': 'NBA', 'NBA TV': 'NBA', 'NBC': 'NBC', 'NF': 'NF', + 'Netflix': 'NF', 'National Film Board': 'NFB', 'NFL': 'NFL', 'NFLN': 'NFLN', 'NFL Now': 'NFLN', 'NICK': 'NICK', + 'Nickelodeon': 'NICK', 'NOW': 'NOW', 'NRK': 'NRK', 'Norsk Rikskringkasting': 'NRK', 'OnDemandKorea': 'ODK', 'Opto': 'OPTO', + 'ORF': 'ORF', 'ORF ON': 'ORF', 'Oprah Winfrey Network': 'OWN', 'PA': 'PA', 'PBS': 'PBS', 'PBSK': 'PBSK', 'PBS Kids': 'PBSK', + 'PCOK': 'PCOK', 'Peacock': 'PCOK', 'PLAY': 'PLAY', 'PLUZ': 'PLUZ', 'Pluzz': 'PLUZ', 'PMNP': 'PMNP', 'PMNT': 'PMNT', + 'PMTP': 'PMTP', 'POGO': 'POGO', 'PokerGO': 'POGO', 'PSN': 'PSN', 'Playstation Network': 'PSN', 'PUHU': 'PUHU', 'QIBI': 'QIBI', + 'RED': 'RED', 'YouTube Red': 'RED', 'RKTN': 'RKTN', 'Rakuten TV': 'RKTN', 'The Roku Channel': 'ROKU', 'RNET': 'RNET', + 'OBB Railnet': 'RNET', 'RSTR': 'RSTR', 'RTE': 'RTE', 'RTE One': 'RTE', 'RTLP': 'RTLP', 'RTL+': 'RTLP', 'RUUTU': 'RUUTU', + 'SBS': 'SBS', 'Science Channel': 'SCI', 'SESO': 'SESO', 'SeeSo': 'SESO', 'SHMI': 'SHMI', 'Shomi': 'SHMI', 'SKST': 'SKST', + 'SkyShowtime': 'SKST', 'SHO': 'SHO', 'Showtime': 'SHO', 'SNET': 'SNET', 'Sportsnet': 'SNET', 'Sony': 'SONY', 'SPIK': 'SPIK', + 'Spike': 'SPIK', 'Spike TV': 'SPKE', 'SPRT': 'SPRT', 'Sprout': 'SPRT', 'STAN': 'STAN', 'Stan': 'STAN', 'STARZ': 'STARZ', + 'STRP': 'STRP', 'Star+': 'STRP', 'STZ': 'STZ', 'Starz': 'STZ', 'SVT': 'SVT', 'Sveriges Television': 'SVT', 'SWER': 'SWER', + 'SwearNet': 'SWER', 'SYFY': 'SYFY', 'Syfy': 'SYFY', 'TBS': 'TBS', 'TEN': 'TEN', 'TIMV': 'TIMV', 'TIMvision': 'TIMV', 'TFOU': 'TFOU', 'TFou': 'TFOU', 'TIMV': 'TIMV', 'TLC': 'TLC', 'TOU': 'TOU', 'TRVL': 'TRVL', 'TUBI': 'TUBI', 'TubiTV': 'TUBI', 'TV3': 'TV3', 'TV3 Ireland': 'TV3', 
'TV4': 'TV4', 'TV4 Sweeden': 'TV4', 'TVING': 'TVING', 'TVL': 'TVL', 'TV Land': 'TVL', 'TVNZ': 'TVNZ', 'UFC': 'UFC', 'UKTV': 'UKTV', 'UNIV': 'UNIV', 'Univision': 'UNIV', 'USAN': 'USAN', 'USA Network': 'USAN', From 1acf016e3931f90956fa4c9776b75ef1b0e0e004 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 5 Oct 2024 11:06:57 +1000 Subject: [PATCH 246/741] lint --- src/prep.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index 412ec9838..8d972a5b6 100644 --- a/src/prep.py +++ b/src/prep.py @@ -3113,7 +3113,7 @@ def get_service(self, video=None, tag=None, audio=None, guess_title=None, get_se 'DPLY': 'DPLY', 'DPlay': 'DPLY', 'DRPO': 'DRPO', 'Discovery Plus': 'DSCP', 'DSKI': 'DSKI', 'Daisuki': 'DSKI', 'DSNP': 'DSNP', 'Disney+': 'DSNP', 'DSNY': 'DSNY', 'Disney': 'DSNY', 'DTV': 'DTV', 'EPIX': 'EPIX', 'ePix': 'EPIX', 'ESPN': 'ESPN', 'ESQ': 'ESQ', 'Esquire': 'ESQ', 'ETTV': 'ETTV', 'El Trece': 'ETTV', 'ETV': 'ETV', 'E!': 'ETV', - 'FAM': 'FAM', 'Fandor': 'FANDOR', 'Facebook Watch': 'FBWatch', 'FJR': 'FJR', 'Family Jr': 'FJR', 'FMIO': 'FMIO', + 'FAM': 'FAM', 'Fandor': 'FANDOR', 'Facebook Watch': 'FBWatch', 'FJR': 'FJR', 'Family Jr': 'FJR', 'FMIO': 'FMIO', 'Filmio': 'FMIO', 'FOOD': 'FOOD', 'Food Network': 'FOOD', 'FOX': 'FOX', 'Fox': 'FOX', 'Fox Premium': 'FOXP', 'UFC Fight Pass': 'FP', 'FPT': 'FPT', 'FREE': 'FREE', 'Freeform': 'FREE', 'FTV': 'FTV', 'FUNI': 'FUNI', 'FUNi': 'FUNI', 'Foxtel': 'FXTL', 'FYI': 'FYI', 'FYI Network': 'FYI', 'GC': 'GC', 'NHL GameCenter': 'GC', 'GLBL': 'GLBL', @@ -3130,7 +3130,7 @@ def get_service(self, video=None, tag=None, audio=None, guess_title=None, get_se 'ORF': 'ORF', 'ORF ON': 'ORF', 'Oprah Winfrey Network': 'OWN', 'PA': 'PA', 'PBS': 'PBS', 'PBSK': 'PBSK', 'PBS Kids': 'PBSK', 'PCOK': 'PCOK', 'Peacock': 'PCOK', 'PLAY': 'PLAY', 'PLUZ': 'PLUZ', 'Pluzz': 'PLUZ', 'PMNP': 'PMNP', 'PMNT': 'PMNT', 'PMTP': 'PMTP', 'POGO': 'POGO', 'PokerGO': 'POGO', 'PSN': 'PSN', 'Playstation Network': 'PSN', 'PUHU': 'PUHU', 'QIBI': 'QIBI', - 'RED': 'RED', 'YouTube Red': 'RED', 'RKTN': 'RKTN', 'Rakuten TV': 'RKTN', 'The Roku Channel': 'ROKU', 'RNET': 'RNET', + 'RED': 'RED', 'YouTube Red': 'RED', 'RKTN': 'RKTN', 'Rakuten TV': 'RKTN', 'The Roku Channel': 'ROKU', 'RNET': 'RNET', 'OBB Railnet': 'RNET', 'RSTR': 'RSTR', 'RTE': 'RTE', 'RTE One': 'RTE', 'RTLP': 'RTLP', 'RTL+': 'RTLP', 'RUUTU': 'RUUTU', 'SBS': 'SBS', 'Science Channel': 'SCI', 'SESO': 'SESO', 'SeeSo': 'SESO', 'SHMI': 'SHMI', 'Shomi': 'SHMI', 'SKST': 'SKST', 'SkyShowtime': 'SKST', 'SHO': 'SHO', 'Showtime': 'SHO', 'SNET': 'SNET', 'Sportsnet': 'SNET', 'Sony': 'SONY', 'SPIK': 'SPIK', From 9ee3e145a25b7ac3b8bf7b91aee2932457da9134 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 5 Oct 2024 11:22:57 +1000 Subject: [PATCH 247/741] Update HDT Fixes from https://github.com/L4GSP1KE/Upload-Assistant/issues/310#issuecomment-1951382027 Requires manual cookie management, see instructions in the linked issue --- src/trackers/HDT.py | 37 ++++--------------------------------- 1 file changed, 4 insertions(+), 33 deletions(-) diff --git a/src/trackers/HDT.py b/src/trackers/HDT.py index a1117b67d..01f034a05 100644 --- a/src/trackers/HDT.py +++ b/src/trackers/HDT.py @@ -168,7 +168,7 @@ async def upload(self, meta, disctype): data['anonymous'] = 'true' # Send - url = "https://hd-torrents.org/upload.php" + url = "https://hd-torrents.net/upload.php" if meta['debug']: console.print(url) console.print(data) @@ -184,7 +184,7 @@ async def upload(self, meta, disctype): search = 
re.search(r"download\.php\?id\=([a-z0-9]+)", up.text).group(1) if search: # modding existing torrent for adding to client instead of downloading torrent from site. - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS']['HDT'].get('my_announce_url'), "https://hd-torrents.org/details.php?id=" + search) + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS']['HDT'].get('my_announce_url'), "") else: console.print(data) console.print("\n\n") @@ -199,7 +199,7 @@ async def search_existing(self, meta, disctype): cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/HDT.txt") session.cookies.update(await common.parseCookieFile(cookiefile)) - search_url = "https://hd-torrents.org/torrents.php" + search_url = "https://hd-torrents.net/torrents.php" csrfToken = await self.get_csrfToken(session, search_url) if int(meta['imdb_id'].replace('tt', '')) != 0: params = { @@ -237,7 +237,7 @@ async def validate_credentials(self, meta): async def validate_cookies(self, meta, cookiefile): common = COMMON(config=self.config) - url = "https://hd-torrents.org/index.php" + url = "https://hd-torrents.net/index.php" cookiefile = f"{meta['base_dir']}/data/cookies/HDT.txt" if os.path.exists(cookiefile): with requests.Session() as session: @@ -254,35 +254,6 @@ async def validate_cookies(self, meta, cookiefile): else: return False - """ - Old login method, disabled because of site's DDOS protection. Better to use exported cookies. - - - async def login(self, cookiefile): - with requests.Session() as session: - url = "https://hd-torrents.org/login.php" - csrfToken = await self.get_csrfToken(session, url) - data = { - 'csrfToken' : csrfToken, - 'uid' : self.username, - 'pwd' : self.password, - 'submit' : 'Confirm' - } - response = session.post('https://hd-torrents.org/login.php', data=data) - await asyncio.sleep(0.5) - index = 'https://hd-torrents.org/index.php' - response = session.get(index) - if response.text.find("Logout") != -1: - console.print('[green]Successfully logged into HDT') - with open(cookiefile, 'wb') as cf: - pickle.dump(session.cookies, cf) - else: - console.print('[bold red]Something went wrong while trying to log into HDT. Make sure your username and password are correct') - await asyncio.sleep(1) - console.print(response.url) - return - """ - async def get_csrfToken(self, session, url): r = session.get(url) await asyncio.sleep(0.5) From e2c65907b4baa0a1ea2c37294f2d84aedcc4b491 Mon Sep 17 00:00:00 2001 From: azulu Date: Sat, 5 Oct 2024 15:55:22 +0000 Subject: [PATCH 248/741] WEB encodes are now labeled correctly. --- src/trackers/HUNO.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index c1a5298f2..74857b6c9 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -221,11 +221,12 @@ async def get_cat_id(self, category_name): async def get_type_id(self, meta): basename = self.get_basename(meta) type = meta['type'] + video_encode = meta['video_encode'] if type == 'REMUX': return '2' elif type in ('WEBDL', 'WEBRIP'): - return '15' if 'x265' in basename else '3' + return '15' if 'x265' in video_encode else '3' elif type in ('ENCODE', 'HDTV'): return '15' elif type == 'DISC': From 95c73a0eeb3b0ae47042bb5da534d6f1022fc57a Mon Sep 17 00:00:00 2001 From: azulu Date: Sat, 5 Oct 2024 16:11:42 +0000 Subject: [PATCH 249/741] Removed unused decleration. 
--- src/trackers/HUNO.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index 74857b6c9..558d4b23a 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -219,7 +219,6 @@ async def get_cat_id(self, category_name): return category_id async def get_type_id(self, meta): - basename = self.get_basename(meta) type = meta['type'] video_encode = meta['video_encode'] From 6358abe58bb33d104194a12584635e3bed32d0c1 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 6 Oct 2024 20:33:34 +1000 Subject: [PATCH 250/741] Proper commercial check --- src/prep.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index 8d972a5b6..d66ca912f 100644 --- a/src/prep.py +++ b/src/prep.py @@ -874,6 +874,7 @@ def filter_mediainfo(data): "Channels": track.get("Channels"), "ChannelPositions": track.get("ChannelPositions"), "ChannelLayout": track.get("ChannelLayout"), + "ChannelLayout_Original": track.get("ChannelLayout_Original"), "SamplesPerFrame": track.get("SamplesPerFrame"), "SamplingRate": track.get("SamplingRate"), "SamplingCount": track.get("SamplingCount"), @@ -1890,7 +1891,7 @@ def get_audio_v2(self, mi, meta, bdinfo): track = tracks[track_num] if len(tracks) > track_num else {} format = track.get('Format', '') - commercial = track.get('Format_Commercial', '') + commercial = track.get('Format_Commercial', '') or track.get('Format_Commercial_IfAny', '') if track.get('Language', '') == "zxx": meta['silent'] = True From b8d3267d94dca71794192ee8bbb330b5bcba73b6 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 8 Oct 2024 15:35:17 +1000 Subject: [PATCH 251/741] graceful handling unit3d API change --- src/trackers/COMMON.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 5d564186a..fe7ec5424 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -210,7 +210,11 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, meta, id=N try: # Handle response when searching by file name (which might return a 'data' array) data = json_response.get('data', []) - if data: + if data == "404": + console.print("[yellow]No data found (404). 
Returning None.[/yellow]") + return None, None, None, None, None, None, None, None, None + + if data and isinstance(data, list): # Ensure data is a list before accessing it attributes = data[0].get('attributes', {}) # Extract data from the attributes @@ -244,7 +248,7 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, meta, id=N file_name = [file['name'] for file in files[:5]] # Return up to 5 filenames console.print(f"[blue]Extracted filename(s): {file_name}[/blue]") # Print the extracted filename(s) - + # Skip the ID selection prompt if searching by ID console.print(f"[green]Valid IDs found: TMDb: {tmdb}, IMDb: {imdb}, TVDb: {tvdb}[/green]") From b07a4f370f22ecae6c5918a917add3a70a00b40e Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 8 Oct 2024 15:37:46 +1000 Subject: [PATCH 252/741] skip discs in aither language check --- src/trackers/AITHER.py | 53 +++++++++++++++++++++--------------------- src/trackers/COMMON.py | 2 +- 2 files changed, 28 insertions(+), 27 deletions(-) diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 02183805b..d58d14286 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -123,33 +123,34 @@ async def edit_name(self, meta): resolution = meta.get('resolution') video_codec = meta.get('video_codec') - def has_english_audio(tracks=None, media_info_text=None): - if media_info_text: - audio_section = re.findall(r'Audio[\s\S]+?Language\s+:\s+(\w+)', media_info_text) - for i, language in enumerate(audio_section): - language = language.lower().strip() - if language.lower().startswith('en'): # Check if it's English - return True - return False - - def get_audio_lang(tracks=None, is_bdmv=False, media_info_text=None): - if media_info_text: - match = re.search(r'Audio[\s\S]+?Language\s+:\s+(\w+)', media_info_text) - if match: - return match.group(1).upper() - return "" + if not meta['is_disc']: + def has_english_audio(tracks=None, media_info_text=None): + if media_info_text: + audio_section = re.findall(r'Audio[\s\S]+?Language\s+:\s+(\w+)', media_info_text) + for i, language in enumerate(audio_section): + language = language.lower().strip() + if language.lower().startswith('en'): # Check if it's English + return True + return False + + def get_audio_lang(tracks=None, is_bdmv=False, media_info_text=None): + if media_info_text: + match = re.search(r'Audio[\s\S]+?Language\s+:\s+(\w+)', media_info_text) + if match: + return match.group(1).upper() + return "" - try: - media_info_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt" - with open(media_info_path, 'r', encoding='utf-8') as f: - media_info_text = f.read() - - if not has_english_audio(media_info_text=media_info_text): - audio_lang = get_audio_lang(media_info_text=media_info_text) - if audio_lang: - aither_name = aither_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) - except (FileNotFoundError, KeyError) as e: - print(f"Error processing MEDIAINFO.txt: {e}") + try: + media_info_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt" + with open(media_info_path, 'r', encoding='utf-8') as f: + media_info_text = f.read() + + if not has_english_audio(media_info_text=media_info_text): + audio_lang = get_audio_lang(media_info_text=media_info_text) + if audio_lang: + aither_name = aither_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) + except (FileNotFoundError, KeyError) as e: + print(f"Error processing MEDIAINFO.txt: {e}") if meta['is_disc'] == "DVD": aither_name = aither_name.replace(str(meta['year']), 
f"{meta['year']} {resolution}", 1) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index fe7ec5424..f452ee3dd 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -248,7 +248,7 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, meta, id=N file_name = [file['name'] for file in files[:5]] # Return up to 5 filenames console.print(f"[blue]Extracted filename(s): {file_name}[/blue]") # Print the extracted filename(s) - + # Skip the ID selection prompt if searching by ID console.print(f"[green]Valid IDs found: TMDb: {tmdb}, IMDb: {imdb}, TVDb: {tvdb}[/green]") From 09f514833f3f9e713fbe207fe4fc96fa582326e5 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 8 Oct 2024 19:42:35 +1000 Subject: [PATCH 253/741] imgbox blows Fixes https://github.com/Audionut/Upload-Assistant/issues/77 --- src/prep.py | 97 ++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 70 insertions(+), 27 deletions(-) diff --git a/src/prep.py b/src/prep.py index d66ca912f..850337f90 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2591,11 +2591,15 @@ def exponential_backoff(retry_count, initial_timeout): ) as progress: upload_task = progress.add_task("[green]Uploading Screens...", total=len(remaining_images)) console.print(f"[cyan]Uploading screens to {img_host}...") - + # console.print(f"[debug] Remaining images to upload: {remaining_images}") for image in remaining_images: retry_count = 0 upload_success = False + # Ensure the correct image path is assigned here + image_path = os.path.normpath(os.path.join(os.getcwd(), image)) # noqa F841 + # console.print(f"[debug] Normalized image path: {image_path}") + while retry_count < max_retries and not upload_success: try: timeout = exponential_backoff(retry_count + 1, initial_timeout) @@ -2603,44 +2607,44 @@ def exponential_backoff(retry_count, initial_timeout): # Add imgbox handling here if img_host == "imgbox": try: - async def imgbox_upload(image_glob): - gallery = pyimgbox.Gallery(thumb_width=350, square_thumbs=False) - async for submission in gallery.add(image_glob): - return submission + console.print("[blue]Uploading images to imgbox...") + + # Use the current event loop to run imgbox_upload + loop = asyncio.get_event_loop() + + # Run the imgbox upload in the current event loop + image_list = loop.run_until_complete(self.imgbox_upload(os.getcwd(), image_glob)) # Pass all images - submission = asyncio.run(imgbox_upload(image_glob)) + # Ensure the image_list contains valid URLs before continuing + if image_list and all('img_url' in img and 'raw_url' in img and 'web_url' in img for img in image_list): + # console.print(f"[green]Successfully uploaded all images to imgbox.") + upload_success = True - if not submission['success']: - console.print(f"[red]Imgbox upload failed: {submission['error']}") + # Track the successfully uploaded images without appending again to image_list + for img in image_glob: + successfully_uploaded.add(img) # Track the uploaded images + + # Exit the loop after a successful upload + return image_list, i + + else: + console.print("[red]Imgbox upload failed, moving to the next image host.") img_host_num += 1 next_img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num + 1}', 'No more hosts') console.print(f"[blue]Moving to next image host: {next_img_host}.") - img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num}') if not img_host: console.print("[red]All image hosts failed. 
Unable to complete uploads.") return image_list, i - break - - img_url = submission['thumbnail_url'] - raw_url = submission['image_url'] - web_url = submission['web_url'] - upload_success = True except Exception as e: - console.print(f"[yellow]Failed to upload {image} to imgbox. Exception: {str(e)}") - retry_count += 1 - if retry_count >= max_retries: - next_img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num + 1}', 'No more hosts') - console.print(f"[red]Max retries reached for imgbox. Moving to next image host: {next_img_host}.") - img_host_num += 1 - img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num}') - if not img_host: - console.print("[red]All image hosts failed. Unable to complete uploads.") - return image_list, i - break - continue + console.print(f"[yellow]Failed to upload images to imgbox. Exception: {str(e)}") + img_host_num += 1 + img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num}') + if not img_host: + console.print("[red]All image hosts failed. Unable to complete uploads.") + return image_list, i elif img_host == "ptpimg": payload = { @@ -2776,6 +2780,45 @@ async def imgbox_upload(image_glob): return image_list, i + async def imgbox_upload(self, chdir, image_glob): + try: + os.chdir(chdir) + image_list = [] + + console.print(f"[debug] Starting upload of {len(image_glob)} images to imgbox...") + + # Start a gallery context + async with pyimgbox.Gallery(thumb_width=350, square_thumbs=False) as gallery: + for image in image_glob: + console.print(f"[blue]Uploading image: {image}") + + try: + # Add the image to the gallery and await the response + async for submission in gallery.add([image]): + if not submission['success']: + console.print(f"[red]There was an error uploading to imgbox: [yellow]{submission['error']}[/yellow][/red]") + return [] # Return empty list in case of failure + else: + # Append the successful result to the image list + image_dict = { + 'web_url': submission['web_url'], + 'img_url': submission['thumbnail_url'], + 'raw_url': submission['image_url'] + } + image_list.append(image_dict) + # console.print(f"[green]Successfully uploaded image: {image}") + + except Exception as e: + console.print(f"[red]Error during upload for {image}: {str(e)}") + return [] # Return empty list in case of error + + console.print(f"[green]Successfully uploaded all {len(image_list)} images to imgbox.") + return image_list # Return the complete list when all images are done + + except Exception as e: + console.print(f"[red]An error occurred while uploading images to imgbox: {str(e)}") + return [] # Return empty list in case of an unexpected failure + async def get_name(self, meta): type = meta.get('type', "") title = meta.get('title', "") From 0b51bc289a7685113adad3d62b573d22f32fd06f Mon Sep 17 00:00:00 2001 From: Khakis Date: Tue, 8 Oct 2024 08:54:55 -0500 Subject: [PATCH 254/741] Dual audio Fixes for issue 53 --- src/args.py | 1 + src/prep.py | 57 ++++++++++++++++++++++++++++------------------------- upload.py | 2 +- 3 files changed, 32 insertions(+), 28 deletions(-) diff --git a/src/args.py b/src/args.py index a4456c1d5..117a38daf 100644 --- a/src/args.py +++ b/src/args.py @@ -40,6 +40,7 @@ def parse(self, args, meta): parser.add_argument('--no-aka', dest='no_aka', action='store_true', required=False, help="Remove AKA from title") parser.add_argument('--no-dub', dest='no_dub', action='store_true', required=False, help="Remove Dubbed from title") parser.add_argument('--no-tag', dest='no_tag', action='store_true', required=False, help="Remove Group 
Tag from title") + parser.add_argument('--dual-audio', dest='dual_audio', action='store_true', required=False, help="Add Dual-Audio to the title") parser.add_argument('-ns', '--no-seed', action='store_true', required=False, help="Do not add torrent to the client") parser.add_argument('-year', '--year', dest='manual_year', nargs='?', required=False, help="Year", type=int, default=0) parser.add_argument('-ptp', '--ptp', nargs='*', required=False, help="PTP torrent id/permalink", type=str) diff --git a/src/prep.py b/src/prep.py index 850337f90..faf8b4e30 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1924,42 +1924,45 @@ def get_audio_v2(self, mi, meta, bdinfo): else: chan = f"{channels}.0" - if meta.get('original_language', '') != 'en': - eng, orig = False, False - try: - for t in mi.get('media', {}).get('track', []): - if t.get('@type') != "Audio": - continue + if meta.get('dual_audio', False): # If dual_audio flag is set, skip other checks + dual = "Dual-Audio" + else: + if meta.get('original_language', '') != 'en': + eng, orig = False, False + try: + for t in mi.get('media', {}).get('track', []): + if t.get('@type') != "Audio": + continue - audio_language = t.get('Language', '') + audio_language = t.get('Language', '') - # Check for English Language Track - if audio_language == "en" and "commentary" not in t.get('Title', '').lower(): - eng = True + # Check for English Language Track + if audio_language.startswith("en") and "commentary" not in t.get('Title', '').lower(): + eng = True - # Check for original Language Track - if audio_language == meta['original_language'] and "commentary" not in t.get('Title', '').lower(): - orig = True + # Check for original Language Track + if not audio_language.startswith("en") and audio_language.startswith(meta['original_language']) and "commentary" not in t.get('Title', '').lower(): + orig = True - # Catch Chinese / Norwegian / Spanish variants - variants = ['zh', 'cn', 'cmn', 'no', 'nb', 'es-419', 'es-ES', 'es'] - if audio_language in variants and meta['original_language'] in variants: - orig = True + # Catch Chinese / Norwegian Variants + variants = ['zh', 'cn', 'cmn', 'no', 'nb'] + if any(audio_language.startswith(var) for var in variants) and any(meta['original_language'].startswith(var) for var in variants): + orig = True - # Check for additional, bloated Tracks - if audio_language != meta['original_language'] and audio_language != "en": - if meta['original_language'] not in variants and audio_language not in variants: + # Check for additional, bloated Tracks + if audio_language != meta['original_language'] and not audio_language.startswith("en"): + # If audio_language is empty, set to 'und' (undefined) audio_language = "und" if audio_language == "" else audio_language console.print(f"[bold red]This release has a(n) {audio_language} audio track, and may be considered bloated") time.sleep(5) - if eng and orig: - dual = "Dual-Audio" - elif eng and not orig and meta['original_language'] not in ['zxx', 'xx', None] and not meta.get('no_dub', False): - dual = "Dubbed" - except Exception: - console.print(traceback.format_exc()) - pass + if eng and orig: + dual = "Dual-Audio" + elif eng and not orig and meta['original_language'] not in ['zxx', 'xx', None] and not meta.get('no_dub', False): + dual = "Dubbed" + except Exception: + console.print(traceback.format_exc()) + pass for t in mi.get('media', {}).get('track', []): if t.get('@type') != "Audio": diff --git a/upload.py b/upload.py index 6c4e72bd2..696c3a3e0 100644 --- a/upload.py +++ b/upload.py @@ -173,7 
+173,7 @@ async def do_the_thing(base_dir): overwrite_list = [ 'trackers', 'dupe', 'debug', 'anon', 'category', 'type', 'screens', 'nohash', 'manual_edition', 'imdb', 'tmdb_manual', 'mal', 'manual', 'hdb', 'ptp', 'blu', 'no_season', 'no_aka', 'no_year', 'no_dub', 'no_tag', 'no_seed', 'client', 'desclink', 'descfile', 'desc', 'draft', 'modq', 'region', 'freeleech', - 'personalrelease', 'unattended', 'season', 'episode', 'torrent_creation', 'qbit_tag', 'qbit_cat', 'skip_imghost_upload', 'imghost', 'manual_source', 'webdv', 'hardcoded-subs' + 'personalrelease', 'unattended', 'season', 'episode', 'torrent_creation', 'qbit_tag', 'qbit_cat', 'skip_imghost_upload', 'imghost', 'manual_source', 'webdv', 'hardcoded-subs', 'dual_audio' ] if meta.get(key, None) != value and key in overwrite_list: saved_meta[key] = meta[key] From 2e420a5e8e6df4553a02ec2880316dbee03f8d69 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 9 Oct 2024 20:39:11 +1000 Subject: [PATCH 255/741] Don't forget video file when checking edition --- src/prep.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/prep.py b/src/prep.py index faf8b4e30..7f093ec06 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2370,15 +2370,15 @@ def get_edition(self, video, bdinfo, filelist, manual_edition): print(f"Edition After Manual Edition: {edition}") - if "REPACK" in edition.upper() or "V2" in video: + if "REPACK" in (video or edition.upper()) or "V2" in video: repack = "REPACK" - if "REPACK2" in edition.upper() or "V3" in video: + if "REPACK2" in (video or edition.upper()) or "V3" in video: repack = "REPACK2" - if "REPACK3" in edition.upper() or "V4" in video: + if "REPACK3" in (video or edition.upper()) or "V4" in video: repack = "REPACK3" - if "PROPER" in edition.upper(): + if "PROPER" in (video or edition.upper()): repack = "PROPER" - if "RERIP" in edition.upper(): + if "RERIP" in (video or edition.upper()): repack = "RERIP" print(f"Repack after Checks: {repack}") From 5ab02741d601cafc8ac7a8051fb083bded6832b8 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 9 Oct 2024 20:56:02 +1000 Subject: [PATCH 256/741] Don't use images containing thumbs Can be pulled from existing descriptions, and don't work here. 
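Roughly, the new filter behaves like this (illustrative sketch only — the sample URLs are made up; imagelist and bot_image_urls mirror the structures used in clean_unit3d_description):

    import re

    bot_image_urls = ["https://i.ibb.co/2NVWb0c/uploadrr.webp"]
    imagelist = [
        {'img_url': 'https://img.example/abc/full.png'},        # kept
        {'img_url': 'https://img.example/thumbs/abc.png'},      # dropped: thumbnail URL
        {'img_url': 'https://i.ibb.co/2NVWb0c/uploadrr.webp'},  # dropped: known bot image
    ]

    imagelist = [
        img for img in imagelist
        if img['img_url'] not in bot_image_urls
        and not re.search(r'thumbs', img['img_url'], re.IGNORECASE)
    ]
    # imagelist now holds only the first, full-size entry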
--- src/bbcode.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/bbcode.py b/src/bbcode.py index 983469c4e..fcc5f3e6e 100644 --- a/src/bbcode.py +++ b/src/bbcode.py @@ -210,7 +210,10 @@ def clean_unit3d_description(self, desc, site): "https://i.ibb.co/2NVWb0c/uploadrr.webp", # Add any other known bot image URLs here ] - imagelist = [img for img in imagelist if img['img_url'] not in bot_image_urls] + imagelist = [ + img for img in imagelist + if img['img_url'] not in bot_image_urls and not re.search(r'thumbs', img['img_url'], re.IGNORECASE) + ] # Restore spoiler tags if spoiler_placeholders: From 11ccec6395c1bf0b3d63369d69f6f9af39594913 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 10 Oct 2024 08:24:50 +1000 Subject: [PATCH 257/741] Opus correct capitalization --- src/bbcode.py | 2 +- src/prep.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/bbcode.py b/src/bbcode.py index fcc5f3e6e..8702befc0 100644 --- a/src/bbcode.py +++ b/src/bbcode.py @@ -211,7 +211,7 @@ def clean_unit3d_description(self, desc, site): # Add any other known bot image URLs here ] imagelist = [ - img for img in imagelist + img for img in imagelist if img['img_url'] not in bot_image_urls and not re.search(r'thumbs', img['img_url'], re.IGNORECASE) ] diff --git a/src/prep.py b/src/prep.py index 7f093ec06..6ac06c88c 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1981,7 +1981,7 @@ def get_audio_v2(self, mi, meta, bdinfo): "E-AC-3": "DD+", "MLP FBA": "TrueHD", "FLAC": "FLAC", - "Opus": "OPUS", + "Opus": "Opus", "Vorbis": "VORBIS", "PCM": "LPCM", From 4043bd3d4f05b6db764e84aee7f867ac38986ad9 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 10 Oct 2024 08:38:32 +1000 Subject: [PATCH 258/741] Remove color codes Using markup=False so that the console didn't error with funky bbcode means color console printing no longer worked. 
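In other words (a minimal sketch, assuming src.console wraps Rich's Console, as the markup=False keyword suggests): once a string is printed with markup=False, any [green]...[/green] style tags in it are emitted as literal text rather than styled, so they have to go:

    from rich.console import Console

    console = Console()
    desc = "[b]description grabbed from a tracker[/b]"  # may hold bbcode that
                                                        # Rich would choke on
    # markup=False keeps Rich from parsing the bbcode, but it also prints
    # the colour tag as literal text:
    console.print(f"[blue]Extracted description: [yellow]{desc}", markup=False)
    # what this patch switches to instead:
    console.print(f"Extracted description: {desc}", markup=False)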
--- src/trackers/COMMON.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index f452ee3dd..6616f10f8 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -202,7 +202,7 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, meta, id=N try: json_response = response.json() - # console.print(f"[blue]Raw API Response: {json_response}[/blue]", markup=False) + # console.print(f"Raw API Response: {json_response}", markup=False) except ValueError: return None, None, None, None, None, None, None, None, None @@ -266,7 +266,7 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, meta, id=N bbcode = BBCODE() description, imagelist = bbcode.clean_unit3d_description(description, torrent_url) console.print(f"[green]Successfully grabbed description from {tracker}") - console.print(f"[blue]Extracted description: [yellow]{description}", markup=False) + console.print(f"Extracted description: {description}", markup=False) # Allow user to edit or discard the description if not (meta.get('blu') or meta.get('aither') or meta.get('lst') or meta.get('oe') or meta.get('tik')) or meta.get('unattended'): @@ -279,7 +279,7 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, meta, id=N description = edited_description.strip() meta['description'] = description meta['skip_gen_desc'] = True - console.print(f"[green]Final description after editing:[/green] {description}", markup=False) + console.print(f"Final description after editing: {description}", markup=False) elif edit_choice.lower() == 'd': description = None console.print("[yellow]Description discarded.[/yellow]") From 1aaa681914197cde79ef58547660384ad36a8aa5 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 10 Oct 2024 09:02:18 +1000 Subject: [PATCH 259/741] Add mal_id to print Seems API only returns 0 though --- src/trackers/COMMON.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 6616f10f8..9639d2b01 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -143,15 +143,15 @@ async def unit3d_distributor_ids(self, distributor): }.get(distributor, 0) return distributor_id - async def prompt_user_for_id_selection(self, tmdb=None, imdb=None, tvdb=None, filename=None, tracker_name=None): + async def prompt_user_for_id_selection(self, tmdb=None, imdb=None, tvdb=None, mal=None, filename=None, tracker_name=None): if not tracker_name: tracker_name = "Tracker" # Fallback if tracker_name is not provided if imdb: imdb = str(imdb).zfill(7) # Convert to string and ensure IMDb ID is 7 characters long by adding leading zeros - console.print(f"[cyan]Found IMDb ID: https://www.imdb.com/title/tt{imdb}[/cyan]") + # console.print(f"[cyan]Found IMDb ID: https://www.imdb.com/title/tt{imdb}[/cyan]") - if any([tmdb, imdb, tvdb]): + if any([tmdb, imdb, tvdb, mal]): console.print(f"[cyan]Found the following IDs on {tracker_name}:") if tmdb: console.print(f"TMDb ID: {tmdb}") @@ -159,6 +159,8 @@ async def prompt_user_for_id_selection(self, tmdb=None, imdb=None, tvdb=None, fi console.print(f"IMDb ID: https://www.imdb.com/title/tt{imdb}") if tvdb: console.print(f"TVDb ID: {tvdb}") + if mal: + console.print(f"MAL ID: {mal}") if filename: console.print(f"Filename: {filename}") # Ensure filename is printed if available @@ -250,13 +252,13 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, meta, id=N console.print(f"[blue]Extracted 
filename(s): {file_name}[/blue]") # Print the extracted filename(s) # Skip the ID selection prompt if searching by ID - console.print(f"[green]Valid IDs found: TMDb: {tmdb}, IMDb: {imdb}, TVDb: {tvdb}[/green]") + console.print(f"[green]Valid IDs found: TMDb: {tmdb}, IMDb: {imdb}, TVDb: {tvdb}, MAL: {mal}[/green]") if tmdb or imdb or tvdb: if not id: # Only prompt the user for ID selection if not searching by ID try: - if not await self.prompt_user_for_id_selection(tmdb, imdb, tvdb, file_name): + if not await self.prompt_user_for_id_selection(tmdb, imdb, tvdb, mal, file_name): console.print("[yellow]User chose to skip based on IDs.[/yellow]") return None, None, None, None, None, None, None, None, None except (KeyboardInterrupt, EOFError): From c11b7a127997748e1781bea50731302118057a1c Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 10 Oct 2024 09:48:46 +1000 Subject: [PATCH 260/741] mal should override mal_id It was working when set initially, but mal was not overriding mal_id when trying to correct --- src/prep.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index 6ac06c88c..fc4926e12 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1647,6 +1647,8 @@ async def tmdb_other_meta(self, meta): meta['tmdb_directors'] = self.get_directors(movie) if meta.get('anime', False) is False: meta['mal_id'], meta['aka'], meta['anime'] = self.get_anime(response, meta) + if meta.get('mal') is not None: + meta['mal_id'] = meta['mal'] meta['poster'] = response.get('poster_path', "") meta['tmdb_poster'] = response.get('poster_path', "") meta['overview'] = response['overview'] @@ -1694,6 +1696,8 @@ async def tmdb_other_meta(self, meta): meta['genres'] = self.get_genres(response) meta['tmdb_directors'] = self.get_directors(tv) meta['mal_id'], meta['aka'], meta['anime'] = self.get_anime(response, meta) + if meta.get('mal') is not None: + meta['mal_id'] = meta['mal'] meta['poster'] = response.get('poster_path', '') meta['overview'] = response['overview'] @@ -1767,8 +1771,8 @@ def get_anime(self, response, meta): mal_id = 0 if meta.get('mal_id', 0) != 0: mal_id = meta.get('mal_id') - if meta.get('mal') not in ('0', 0, None): - mal_id = meta.get('mal', 0) + if meta.get('mal') is not None: + mal_id = meta.get('mal') return mal_id, alt_name, anime def get_romaji(self, tmdb_name, mal): @@ -3010,6 +3014,8 @@ async def get_season_episode(self, video, meta): romaji, mal_id, eng_title, seasonYear, anilist_episodes = self.get_romaji(parsed['anime_title'], meta.get('mal', None)) if mal_id: meta['mal_id'] = mal_id + if meta.get('mal') is not None: + mal_id = meta.get('mal') if meta.get('tmdb_manual', None) is None: year = parsed.get('anime_year', str(seasonYear)) meta = await self.get_tmdb_id(guessit(parsed['anime_title'], {"excludes": ["country", "language"]})['title'], year, meta, meta['category']) From 8f2790558d212d003d8c9cd1322a78625d8f73b3 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 10 Oct 2024 17:00:20 +1000 Subject: [PATCH 261/741] MTV - srrDB trumps --- src/trackers/MTV.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 9e6ea24f8..5aef4e990 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -267,7 +267,10 @@ async def edit_group_desc(self, meta): return description async def edit_name(self, meta): - mtv_name = meta['uuid'] + if meta['scene'] is True: + mtv_name = meta['video'] + else: + mtv_name = meta['uuid'] # Try to use original filename if possible if 
meta['source'].lower().replace('-', '') in mtv_name.replace('-', '').lower(): if not meta['isdir']: From 40746624f400c2317157cbb2c5979004841363a1 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 11 Oct 2024 09:28:38 +1000 Subject: [PATCH 262/741] Remove old json config handling and update feedback --- upload.py | 25 +++++-------------------- 1 file changed, 5 insertions(+), 20 deletions(-) diff --git a/upload.py b/upload.py index 696c3a3e0..023a4dddf 100644 --- a/upload.py +++ b/upload.py @@ -66,28 +66,13 @@ from data.config import config except Exception: if not os.path.exists(os.path.abspath(f"{base_dir}/data/config.py")): - try: - if os.path.exists(os.path.abspath(f"{base_dir}/data/config.json")): - with open(f"{base_dir}/data/config.json", 'r', encoding='utf-8-sig') as f: - json_config = json.load(f) - f.close() - with open(f"{base_dir}/data/config.py", 'w') as f: - f.write(f"config = {json.dumps(json_config, indent=4)}") - f.close() - cli_ui.info(cli_ui.green, "Successfully updated config from .json to .py") - cli_ui.info(cli_ui.green, "It is now safe for you to delete", cli_ui.yellow, "data/config.json", "if you wish") - from data.config import config - else: - raise NotImplementedError - except Exception: - cli_ui.info(cli_ui.red, "We have switched from .json to .py for config to have a much more lenient experience") - cli_ui.info(cli_ui.red, "Looks like the auto updater didnt work though") - cli_ui.info(cli_ui.red, "Updating is just 2 easy steps:") - cli_ui.info(cli_ui.red, "1: Rename", cli_ui.yellow, os.path.abspath(f"{base_dir}/data/config.json"), cli_ui.red, "to", cli_ui.green, os.path.abspath(f"{base_dir}/data/config.py")) - cli_ui.info(cli_ui.red, "2: Add", cli_ui.green, "config = ", cli_ui.red, "to the beginning of", cli_ui.green, os.path.abspath(f"{base_dir}/data/config.py")) - exit() + cli_ui.info(cli_ui.red, "Configuration file 'config.py' not found.") + cli_ui.info(cli_ui.red, "Please ensure the file is located at:", cli_ui.yellow, os.path.abspath(f"{base_dir}/data/config.py")) + cli_ui.info(cli_ui.red, "Follow the setup instructions: https://github.com/Audionut/Upload-Assistant") + exit() else: console.print(traceback.print_exc()) + client = Clients(config=config) parser = Args(config) From 1e191b2712eb1c900e866bed46f2e0302c21d2e1 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 11 Oct 2024 09:40:07 +1000 Subject: [PATCH 263/741] Manual docker dispatch --- .github/workflows/docker-image.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index 068e29525..48a1fbf9e 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -5,7 +5,7 @@ on: branches: - master - develop - - aither-naming + workflow_dispatch: env: REGISTRY: ghcr.io From a8ac3357a088950f8e7e6627fcc7e54a07b9fa8a Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 11 Oct 2024 17:15:16 +1000 Subject: [PATCH 264/741] Print feedback for MTV bug See https://github.com/Audionut/Upload-Assistant/issues/47 --- src/trackers/MTV.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 5aef4e990..b4c1c8c69 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -172,7 +172,8 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): if "authkey.php" in response.url: console.print("[red]No DL link in response, It may have uploaded, check manually.") else: - console.print("[red]Upload Failed. 
It doesn't look like you are logged in.") + console.print("[red]Upload Failed. Either you are not logged in......") + console.print("[red]or you are hitting this site bug: https://www.morethantv.me/forum/thread/3338?") except Exception: console.print("[red]It may have uploaded, check manually.") print(traceback.print_exc()) From 49b6f11e1f74050e045e99be2ce9d64475e2f2f7 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 11 Oct 2024 20:14:37 +1000 Subject: [PATCH 265/741] OE banned groups --- src/trackers/OE.py | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/src/trackers/OE.py b/src/trackers/OE.py index ec332dc97..4a23d068a 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -25,17 +25,21 @@ def __init__(self, config): self.upload_url = 'https://onlyencodes.cc/api/torrents/upload' self.torrent_url = 'https://onlyencodes.cc/api/torrents/' self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" - self.banned_groups = ['0neshot', '3LT0N', '4K4U', '4yEo', '$andra', '[Oj]', 'AFG', 'AkihitoSubs', 'AniHLS', 'Anime Time', 'AnimeRG', 'AniURL', - 'AOC', 'AR', 'AROMA', 'ASW', 'aXXo', 'BakedFish', 'BiTOR', 'BRrip', 'bonkai', 'Cleo', 'CM8', 'C4K', 'CrEwSaDe', 'core', - 'd3g', 'DDR', 'DeadFish', 'DeeJayAhmed', 'DNL', 'ELiTE', 'EMBER', 'eSc', 'EVO', 'EZTV', 'FaNGDiNG0', 'FGT', 'fenix', 'FUM', - 'FRDS', 'FROZEN', 'GalaxyTV', 'GalaxyRG', 'GalaxyRG265', 'GERMini', 'Grym', 'GrymLegacy', 'HAiKU', 'HD2DVD', 'HDTime', - 'Hi10', 'HiQVE', 'ION10', 'iPlanet', 'JacobSwaggedUp', 'JIVE', 'Judas', 'KiNGDOM', 'LAMA', 'Leffe', 'LiGaS', 'LOAD', - 'LycanHD', 'MeGusta', 'MezRips', 'mHD', 'Mr.Deadpool', 'mSD', 'NemDiggers', 'neoHEVC', 'NeXus', 'nHD', 'nikt0', - 'nSD', 'NhaNc3', 'NOIVTC', 'pahe.in', 'PlaySD', 'playXD', 'PRODJi', 'ProRes', 'project-gxs', 'PSA', 'QaS', 'Ranger', - 'RAPiDCOWS', 'RARBG', 'Raze', 'RCDiVX', 'RDN', 'Reaktor', 'REsuRRecTioN', 'RMTeam', 'ROBOTS', 'rubix', 'SANTi', - 'SHUTTERSHIT', 'SpaceFish', 'SPASM', 'SSA', 'TBS', 'Telly', 'Tenrai-Sensei', 'TERMiNAL', 'TGx', 'TM', 'topaz', 'TSP', - 'TSPxL', 'URANiME', 'UTR', 'VipapkSudios', 'ViSION', 'WAF', 'Wardevil', 'x0r', 'xRed', 'XS', 'YakuboEncodes', 'YIFY', - 'YTS', 'YuiSubs', 'ZKBL', 'ZmN', 'ZMNT'] + self.banned_groups = [ + '0neshot', '3LT0N', '4K4U', '4yEo', '$andra', '[Oj]', 'AFG', 'AkihitoSubs', 'AniHLS', 'Anime Time', + 'AnimeRG', 'AniURL', 'AOC', 'AR', 'AROMA', 'ASW', 'aXXo', 'BakedFish', 'BiTOR', 'BRrip', 'bonkai', + 'Cleo', 'CM8', 'C4K', 'CrEwSaDe', 'core', 'd3g', 'DDR', 'DeadFish', 'DeeJayAhmed', 'DNL', 'ELiTE', + 'EMBER', 'eSc', 'EVO', 'EZTV', 'FaNGDiNG0', 'FGT', 'fenix', 'FUM', 'FRDS', 'FROZEN', 'GalaxyTV', + 'GalaxyRG', 'GalaxyRG265', 'GERMini', 'Grym', 'GrymLegacy', 'HAiKU', 'HD2DVD', 'HDTime', 'Hi10', + 'HiQVE', 'ION10', 'iPlanet', 'JacobSwaggedUp', 'JIVE', 'Judas', 'KiNGDOM', 'LAMA', 'Leffe', 'LiGaS', + 'LOAD', 'LycanHD', 'MeGusta', 'MezRips', 'mHD', 'Mr.Deadpool', 'mSD', 'NemDiggers', 'neoHEVC', 'NeXus', + 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'NOIVTC', 'pahe.in', 'PlaySD', 'playXD', 'PRODJi', 'ProRes', + 'project-gxs', 'PSA', 'QaS', 'Ranger', 'RAPiDCOWS', 'RARBG', 'Raze', 'RCDiVX', 'RDN', 'Reaktor', + 'REsuRRecTioN', 'RMTeam', 'ROBOTS', 'rubix', 'SANTi', 'SHUTTERSHIT', 'SpaceFish', 'SPASM', 'SSA', + 'TBS', 'Telly', 'Tenrai-Sensei', 'TERMiNAL', 'TGx', 'TM', 'topaz', 'TSP', 'TSPxL', 'URANiME', 'UTR', + 'VipapkSudios', 'ViSION', 'WAF', 'Wardevil', 'x0r', 'xRed', 'XS', 'YakuboEncodes', 'YIFY', 'YTS', + 'YuiSubs', 'ZKBL', 
'ZmN', 'ZMNT' + ] pass async def upload(self, meta, disctype): From d85ae967d96a7979ecfcfad8d84ca6af14dda495 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 11 Oct 2024 20:24:58 +1000 Subject: [PATCH 266/741] Missed some color codes --- src/trackers/COMMON.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 9639d2b01..c0fe369e5 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -273,7 +273,7 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, meta, id=N # Allow user to edit or discard the description if not (meta.get('blu') or meta.get('aither') or meta.get('lst') or meta.get('oe') or meta.get('tik')) or meta.get('unattended'): console.print("[cyan]Do you want to edit, discard or keep the description?[/cyan]") - edit_choice = input("[cyan]Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: [/cyan]") + edit_choice = input("Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is:") if edit_choice.lower() == 'e': edited_description = click.edit(description) From 65b7938609269523e4ddcfc7536748280a9d8603 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 11 Oct 2024 20:45:50 +1000 Subject: [PATCH 267/741] Modify MTV naming --- src/trackers/MTV.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index b4c1c8c69..fd89da844 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -270,12 +270,6 @@ async def edit_group_desc(self, meta): async def edit_name(self, meta): if meta['scene'] is True: mtv_name = meta['video'] - else: - mtv_name = meta['uuid'] - # Try to use original filename if possible - if meta['source'].lower().replace('-', '') in mtv_name.replace('-', '').lower(): - if not meta['isdir']: - mtv_name = os.path.splitext(mtv_name)[0] else: mtv_name = meta['name'] if meta.get('type') in ('WEBDL', 'WEBRIP', 'ENCODE') and "DD" in meta['audio']: @@ -286,6 +280,9 @@ async def edit_name(self, meta): if 'DD+' in meta.get('audio', '') and 'DDP' in meta['uuid']: mtv_name = mtv_name.replace('DD+', 'DDP') mtv_name = mtv_name.replace('Dubbed', '').replace('Dual-Audio', 'DUAL') + if meta['source'].lower().replace('-', '') in mtv_name.replace('-', '').lower(): + if not meta['isdir']: + mtv_name = os.path.splitext(mtv_name)[0] # Add -NoGrp if missing tag if meta['tag'] == "": mtv_name = f"{mtv_name}-NoGrp" From 4730bff59e530226e225f6cbbad79f9aa8ac11b4 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 11 Oct 2024 21:00:07 +1000 Subject: [PATCH 268/741] MTV full sized screens --- src/trackers/MTV.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index fd89da844..dd0c8c0b3 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -242,12 +242,9 @@ async def edit_desc(self, meta): desc.write("[mediainfo]" + mi_dump + "[/mediainfo]\n\n") images = meta['image_list'] if len(images) > 0: - desc.write("[spoiler=Screenshots]") for each in range(len(images)): raw_url = images[each]['raw_url'] - img_url = images[each]['img_url'] - desc.write(f"[url={raw_url}][img=250]{img_url}[/img][/url]") - desc.write("[/spoiler]") + desc.write(f"[img={raw_url}][/img]\n") desc.write(f"\n\n{base}") desc.close() return From 03ca251a33656e692df7e5dacd512e7c8523c825 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 11 Oct 2024 21:01:13 +1000 Subject: [PATCH 269/741] Lint --- src/trackers/OE.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff 
--git a/src/trackers/OE.py b/src/trackers/OE.py index 4a23d068a..9d0b3b0e9 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -33,9 +33,9 @@ def __init__(self, config): 'GalaxyRG', 'GalaxyRG265', 'GERMini', 'Grym', 'GrymLegacy', 'HAiKU', 'HD2DVD', 'HDTime', 'Hi10', 'HiQVE', 'ION10', 'iPlanet', 'JacobSwaggedUp', 'JIVE', 'Judas', 'KiNGDOM', 'LAMA', 'Leffe', 'LiGaS', 'LOAD', 'LycanHD', 'MeGusta', 'MezRips', 'mHD', 'Mr.Deadpool', 'mSD', 'NemDiggers', 'neoHEVC', 'NeXus', - 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'NOIVTC', 'pahe.in', 'PlaySD', 'playXD', 'PRODJi', 'ProRes', + 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'NOIVTC', 'pahe.in', 'PlaySD', 'playXD', 'PRODJi', 'ProRes', 'project-gxs', 'PSA', 'QaS', 'Ranger', 'RAPiDCOWS', 'RARBG', 'Raze', 'RCDiVX', 'RDN', 'Reaktor', - 'REsuRRecTioN', 'RMTeam', 'ROBOTS', 'rubix', 'SANTi', 'SHUTTERSHIT', 'SpaceFish', 'SPASM', 'SSA', + 'REsuRRecTioN', 'RMTeam', 'ROBOTS', 'rubix', 'SANTi', 'SHUTTERSHIT', 'SpaceFish', 'SPASM', 'SSA', 'TBS', 'Telly', 'Tenrai-Sensei', 'TERMiNAL', 'TGx', 'TM', 'topaz', 'TSP', 'TSPxL', 'URANiME', 'UTR', 'VipapkSudios', 'ViSION', 'WAF', 'Wardevil', 'x0r', 'xRed', 'XS', 'YakuboEncodes', 'YIFY', 'YTS', 'YuiSubs', 'ZKBL', 'ZmN', 'ZMNT' From 5ffe065d7c9578dc01d84d65a9cde9179ff3fd89 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 11 Oct 2024 21:21:32 +1000 Subject: [PATCH 270/741] MTV fix extension check Stops truncation on "2.0-Rmp4L" --- src/trackers/MTV.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index dd0c8c0b3..abb6eccd8 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -279,13 +279,16 @@ async def edit_name(self, meta): mtv_name = mtv_name.replace('Dubbed', '').replace('Dual-Audio', 'DUAL') if meta['source'].lower().replace('-', '') in mtv_name.replace('-', '').lower(): if not meta['isdir']: - mtv_name = os.path.splitext(mtv_name)[0] + # Check if there is a valid file extension, otherwise, skip the split + if '.' in mtv_name and mtv_name.split('.')[-1].isalpha() and len(mtv_name.split('.')[-1]) <= 4: + mtv_name = os.path.splitext(mtv_name)[0] # Add -NoGrp if missing tag if meta['tag'] == "": mtv_name = f"{mtv_name}-NoGrp" mtv_name = ' '.join(mtv_name.split()) mtv_name = re.sub(r"[^0-9a-zA-ZÀ-ÿ. 
&+'\-\[\]]+", "", mtv_name) mtv_name = mtv_name.replace(' ', '.').replace('..', '.') + console.print(f"[yellow]Sent this name: {mtv_name}[/yellow]") return mtv_name async def get_res_id(self, resolution): From 70610a193eecd1a38d3f336da30672f67ec0330b Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 12 Oct 2024 13:27:46 +1000 Subject: [PATCH 271/741] OE rules compliance --- src/trackers/OE.py | 129 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 127 insertions(+), 2 deletions(-) diff --git a/src/trackers/OE.py b/src/trackers/OE.py index 9d0b3b0e9..05d3d988e 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -4,7 +4,10 @@ import requests from str2bool import str2bool import platform - +import re +import os +import cli_ui +from src.bbcode import BBCODE from src.trackers.COMMON import COMMON from src.console import console @@ -45,11 +48,13 @@ def __init__(self, config): async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) + await self.edit_desc(meta, self.tracker, self.signature) cat_id = await self.get_cat_id(meta['category']) type_id = await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('video_codec'), meta.get('category', "")) resolution_id = await self.get_res_id(meta['resolution']) oe_name = await self.edit_name(meta) + region_id = await common.unit3d_region_ids(meta.get('region')) + distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: @@ -92,6 +97,11 @@ async def upload(self, meta, disctype): if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 + if region_id != 0: + data['region_id'] = region_id + if distributor_id != 0: + data['distributor_id'] = distributor_id + if meta.get('category') == "TV": data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') @@ -118,6 +128,37 @@ async def upload(self, meta, disctype): async def edit_name(self, meta): oe_name = meta.get('name') + media_info_tracks = meta.get('media_info_tracks', []) # noqa #F841 + + if not meta['is_disc']: + def has_english_audio(media_info_text=None): + if media_info_text: + audio_section = re.findall(r'Audio[\s\S]+?Language\s+:\s+(\w+)', media_info_text) + for i, language in enumerate(audio_section): + language = language.lower().strip() + if language.lower().startswith('en'): # Check if it's English + return True + return False + + def get_audio_lang(media_info_text=None): + if media_info_text: + match = re.search(r'Audio[\s\S]+?Language\s+:\s+(\w+)', media_info_text) + if match: + return match.group(1).upper() + return "" + + try: + media_info_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt" + with open(media_info_path, 'r', encoding='utf-8') as f: + media_info_text = f.read() + + if not has_english_audio(media_info_text=media_info_text): + audio_lang = get_audio_lang(media_info_text=media_info_text) + if audio_lang: + oe_name = oe_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) + except (FileNotFoundError, KeyError) as e: + print(f"Error processing MEDIAINFO.txt: {e}") + return oe_name async def get_cat_id(self, category_name): @@ -171,6 +212,90 @@ async def get_res_id(self, resolution): }.get(resolution, '10') 
return resolution_id + async def edit_desc(self, meta, tracker, signature, comparison=False, desc_header=""): + base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf8').read() + + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]DESCRIPTION.txt", 'w', encoding='utf8') as descfile: + if desc_header != "": + descfile.write(desc_header) + + if not meta['is_disc']: + def process_languages(tracks): + audio_languages = [] + subtitle_languages = [] + + for track in tracks: + if track.get('@type') == 'Audio': + language = track.get('Language') + if not language or language is None: + audio_lang = cli_ui.ask_string('No audio language present, you must enter one:') + if audio_lang: + audio_languages.append(audio_lang) + else: + audio_languages.append("") + elif track.get('@type') == 'Text': + language = track.get('Language') + if not language or language is None: + subtitle_lang = cli_ui.ask_string('No subtitle language present, you must enter one:') + if subtitle_lang: + subtitle_languages.append(subtitle_lang) + else: + subtitle_languages.append("") + + return audio_languages, subtitle_languages + + media_data = meta.get('mediainfo', {}) + if media_data: + tracks = media_data.get('media', {}).get('track', []) + if tracks: + audio_languages, subtitle_languages = process_languages(tracks) + if audio_languages: + descfile.write(f"Audio Language: {', '.join(audio_languages)}\n") + + subtitle_tracks = [track for track in tracks if track.get('@type') == 'Text'] + if subtitle_tracks and subtitle_languages: + descfile.write(f"Subtitles: {', '.join(subtitle_languages)}\n") + else: + console.print("[red]No media information available in meta.[/red]") + + # Existing disc metadata handling + bbcode = BBCODE() + if meta.get('discs', []) != []: + discs = meta['discs'] + if discs[0]['type'] == "DVD": + descfile.write(f"[spoiler=VOB MediaInfo][code]{discs[0]['vob_mi']}[/code][/spoiler]\n\n") + if len(discs) >= 2: + for each in discs[1:]: + if each['type'] == "BDMV": + descfile.write(f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n\n") + elif each['type'] == "DVD": + descfile.write(f"{each['name']}:\n") + descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code][{each['vob_mi']}[/code][/spoiler] [spoiler={os.path.basename(each['ifo'])}][code][{each['ifo_mi']}[/code][/spoiler]\n\n") + elif each['type'] == "HDDVD": + descfile.write(f"{each['name']}:\n") + descfile.write(f"[spoiler={os.path.basename(each['largest_evo'])}][code][{each['evo_mi']}[/code][/spoiler]\n\n") + + desc = base + desc = bbcode.convert_pre_to_code(desc) + desc = bbcode.convert_hide_to_spoiler(desc) + if comparison is False: + desc = bbcode.convert_comparison_to_collapse(desc, 1000) + + desc = desc.replace('[img]', '[img=300]') + descfile.write(desc) + images = meta['image_list'] + if len(images) > 0: + descfile.write("[center]") + for each in range(len(images[:int(meta['screens'])])): + web_url = images[each]['web_url'] + raw_url = images[each]['raw_url'] + descfile.write(f"[url={web_url}][img=350]{raw_url}[/img][/url]") + descfile.write("[/center]") + + if signature is not None: + descfile.write(signature) + return + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") From 51ca4fb1d26fca73dd962e946021eb2c04b2d659 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 12 Oct 2024 14:59:46 +1000 Subject: [PATCH 272/741] Sanitize disc screenshot names Errored on title such as "Title: Blah 
Blah " --- src/prep.py | 100 +++++++++++++++++++++++---------------------- src/trackers/OE.py | 2 +- 2 files changed, 53 insertions(+), 49 deletions(-) diff --git a/src/prep.py b/src/prep.py index fc4926e12..1f7699bf2 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1071,12 +1071,19 @@ def is_scene(self, video, imdb=None): """ Generate Screenshots """ + def sanitize_filename(self, filename): + # Replace invalid characters like colons with an underscore + return re.sub(r'[<>:"/\\|?*]', '_', filename) def disc_screenshots(self, filename, bdinfo, folder_id, base_dir, use_vs, image_list, ffdebug, num_screens=None): if num_screens is None: num_screens = self.screens if num_screens == 0 or len(image_list) >= num_screens: return + + # Sanitize the filename + sanitized_filename = self.sanitize_filename(filename) + # Get longest m2ts length = 0 for each in bdinfo['files']: @@ -1094,7 +1101,7 @@ def disc_screenshots(self, filename, bdinfo, folder_id, base_dir, use_vs, image_ keyframe = 'none' os.chdir(f"{base_dir}/tmp/{folder_id}") - i = len(glob.glob(f"{filename}-*.png")) + i = len(glob.glob(f"{sanitized_filename}-*.png")) if i >= num_screens: i = num_screens console.print('[bold green]Reusing screenshots') @@ -1104,55 +1111,52 @@ def disc_screenshots(self, filename, bdinfo, folder_id, base_dir, use_vs, image_ from src.vs import vs_screengn vs_screengn(source=file, encode=None, filter_b_frames=False, num=num_screens, dir=f"{base_dir}/tmp/{folder_id}/") else: - if bool(ffdebug) is True: - loglevel = 'verbose' - debug = False - else: - loglevel = 'quiet' - debug = True - with Progress( - TextColumn("[bold green]Saving Screens..."), - BarColumn(), - "[cyan]{task.completed}/{task.total}", - TimeRemainingColumn() - ) as progress: - screen_task = progress.add_task("[green]Saving Screens...", total=num_screens + 1) - ss_times = [] - for i in range(num_screens + 1): - image = f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png" - try: - ss_times = self.valid_ss_time(ss_times, num_screens + 1, length) - ( - ffmpeg - .input(file, ss=ss_times[-1], skip_frame=keyframe) - .output(image, vframes=1, pix_fmt="rgb24") - .overwrite_output() - .global_args('-loglevel', loglevel) - .run(quiet=debug) - ) - except Exception: - console.print(traceback.format_exc()) + loglevel = 'verbose' if bool(ffdebug) else 'quiet' + debug = not ffdebug + with Progress( + TextColumn("[bold green]Saving Screens..."), + BarColumn(), + "[cyan]{task.completed}/{task.total}", + TimeRemainingColumn() + ) as progress: + screen_task = progress.add_task("[green]Saving Screens...", total=num_screens + 1) + ss_times = [] + for i in range(num_screens + 1): + image = f"{base_dir}/tmp/{folder_id}/{sanitized_filename}-{i}.png" + try: + ss_times = self.valid_ss_time(ss_times, num_screens + 1, length) + ( + ffmpeg + .input(file, ss=ss_times[-1], skip_frame=keyframe) + .output(image, vframes=1, pix_fmt="rgb24") + .overwrite_output() + .global_args('-loglevel', loglevel) + .run(quiet=debug) + ) + except Exception: + console.print(traceback.format_exc()) - self.optimize_images(image) - if os.path.getsize(Path(image)) <= 31000000 and self.img_host == "imgbb": - i += 1 - elif os.path.getsize(Path(image)) <= 10000000 and self.img_host in ["imgbox", 'pixhost']: - i += 1 - elif os.path.getsize(Path(image)) <= 75000: - console.print("[bold yellow]Image is incredibly small, retaking") - time.sleep(1) - elif self.img_host == "ptpimg": - i += 1 - elif self.img_host == "lensdump": - i += 1 - else: - console.print("[red]Image too large for your image host, retaking") - 
time.sleep(1) - progress.advance(screen_task) - # remove smallest image + self.optimize_images(image) + if os.path.getsize(Path(image)) <= 31000000 and self.img_host == "imgbb": + i += 1 + elif os.path.getsize(Path(image)) <= 10000000 and self.img_host in ["imgbox", 'pixhost']: + i += 1 + elif os.path.getsize(Path(image)) <= 75000: + console.print("[bold yellow]Image is incredibly small, retaking") + time.sleep(1) + elif self.img_host == "ptpimg": + i += 1 + elif self.img_host == "lensdump": + i += 1 + else: + console.print("[red]Image too large for your image host, retaking") + time.sleep(1) + progress.advance(screen_task) + + # Remove the smallest image smallest = None - smallestsize = 99 ** 99 - for screens in glob.glob1(f"{base_dir}/tmp/{folder_id}/", f"{filename}-*"): + smallestsize = float('inf') + for screens in glob.glob1(f"{base_dir}/tmp/{folder_id}/", f"{sanitized_filename}-*"): screen_path = os.path.join(f"{base_dir}/tmp/{folder_id}/", screens) screensize = os.path.getsize(screen_path) if screensize < smallestsize: diff --git a/src/trackers/OE.py b/src/trackers/OE.py index 05d3d988e..e536179fc 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -254,7 +254,7 @@ def process_languages(tracks): subtitle_tracks = [track for track in tracks if track.get('@type') == 'Text'] if subtitle_tracks and subtitle_languages: - descfile.write(f"Subtitles: {', '.join(subtitle_languages)}\n") + descfile.write(f"Subtitle Language: {', '.join(subtitle_languages)}\n") else: console.print("[red]No media information available in meta.[/red]") From 61415b7ed7d0442f3475dae6fb9b828d0958182a Mon Sep 17 00:00:00 2001 From: Khakis Date: Sat, 12 Oct 2024 01:06:58 -0500 Subject: [PATCH 273/741] Update PTP.py allow for subtitles that use en-US to be identified as english subtitles --- src/trackers/PTP.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 44f2e696e..133178be7 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -46,9 +46,9 @@ def __init__(self, config): ("Czech", "cze", "cz", "cs"): 30, ("Danish", "dan", "da"): 10, ("Dutch", "dut", "nl"): 9, - ("English", "eng", "en", "English (CC)", "English - SDH"): 3, - ("English - Forced", "English (Forced)", "en (Forced)"): 50, - ("English Intertitles", "English (Intertitles)", "English - Intertitles", "en (Intertitles)"): 51, + ("English", "eng", "en", "en-US", "English (CC)", "English - SDH"): 3, + ("English - Forced", "English (Forced)", "en (Forced)", "en-US (Forced)"): 50, + ("English Intertitles", "English (Intertitles)", "English - Intertitles", "en (Intertitles)", "en-US (Intertitles)"): 51, ("Estonian", "est", "et"): 38, ("Finnish", "fin", "fi"): 15, ("French", "fre", "fr"): 5, From 83ff01c2b915fbc00c86a9caf5c18e3647faf3e1 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 12 Oct 2024 17:31:57 +1000 Subject: [PATCH 274/741] ANT - prettier blu-ray details --- src/trackers/ANT.py | 39 +++++++++++++++++++++++++++++++++++++-- src/trackers/OE.py | 4 ++-- 2 files changed, 39 insertions(+), 4 deletions(-) diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index 9e06f931f..71078ecb0 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -12,6 +12,7 @@ from pathlib import Path from src.trackers.COMMON import COMMON from src.console import console +import re class ANT(): @@ -119,14 +120,48 @@ def calculate_pieces_and_file_size(total_size, pathname_bytes, piece_size): else: anon = 1 + def extract_audio_languages(bd_summary): + audio_pattern = re.compile(r'Audio: 
([\w\s]+) /') + audio_languages = audio_pattern.findall(bd_summary) + return audio_languages + + def extract_subtitle_languages(bd_summary): + subtitle_pattern = re.compile(r'Subtitle: ([\w\s]+) /') + subtitle_languages = subtitle_pattern.findall(bd_summary) + return subtitle_languages + + def insert_languages_into_mediainfo(mi_dump, audio_languages, subtitle_languages): + mi_lines = mi_dump.splitlines() + + audio_index = 0 + subtitle_index = 0 + + for i, line in enumerate(mi_lines): + if "Audio" in line: + if audio_index < len(audio_languages): + mi_lines.insert(i + 1, f"Language : {audio_languages[audio_index]}") + audio_index += 1 + + elif "Text" in line: + if subtitle_index < len(subtitle_languages): + mi_lines.insert(i + 1, f"Language : {subtitle_languages[subtitle_index]}") + subtitle_index += 1 + + return "\n".join(mi_lines) + if meta['bdinfo'] is not None: - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + bd_summary_file = f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt" + bd_dump = open(bd_summary_file, 'r', encoding='utf-8').read() + audio_languages = extract_audio_languages(bd_dump) + subtitle_languages = extract_subtitle_languages(bd_dump) bd_dump = f'[spoiler=BDInfo][pre]{bd_dump}[/pre][/spoiler]' path = os.path.join(meta['bdinfo']['path'], 'STREAM') file_name = meta['bdinfo']['files'][0]['file'].lower() m2ts = os.path.join(path, file_name) media_info_output = str(MediaInfo.parse(m2ts, output="text", full=False)) - mi_dump = media_info_output.replace('\r\n', '\n') + mi_dump_replace = media_info_output.replace('\r\n', '\n') + mi_dump_with_languages = insert_languages_into_mediainfo(mi_dump_replace, audio_languages, subtitle_languages) + mi_dump = mi_dump_with_languages else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') diff --git a/src/trackers/OE.py b/src/trackers/OE.py index e536179fc..0f5ad62cf 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -228,7 +228,7 @@ def process_languages(tracks): if track.get('@type') == 'Audio': language = track.get('Language') if not language or language is None: - audio_lang = cli_ui.ask_string('No audio language present, you must enter one:') + audio_lang = cli_ui.ask_string('No audio language present, you must enter one [English, French, Whatever]:') if audio_lang: audio_languages.append(audio_lang) else: @@ -236,7 +236,7 @@ def process_languages(tracks): elif track.get('@type') == 'Text': language = track.get('Language') if not language or language is None: - subtitle_lang = cli_ui.ask_string('No subtitle language present, you must enter one:') + subtitle_lang = cli_ui.ask_string('No subtitle language present, you must enter one [English, French, Whatever]:') if subtitle_lang: subtitle_languages.append(subtitle_lang) else: From 3fe4b9a31e6202b5e83890e5ea14aa119d0ea26a Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 12 Oct 2024 17:33:17 +1000 Subject: [PATCH 275/741] Revert "ANT - prettier blu-ray details" This reverts commit 83ff01c2b915fbc00c86a9caf5c18e3647faf3e1. 
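For the record, the reverted extraction worked roughly like this (a sketch with made-up BDInfo summary lines; the patterns are the ones shown in the patch above):

    import re

    bd_summary = (
        "Audio: English / DTS-HD Master Audio / 5.1 / 48 kHz / 3456 kbps\n"
        "Subtitle: English / 38.347 kbps\n"
        "Subtitle: French / 32.119 kbps\n"
    )
    audio_languages = re.findall(r'Audio: ([\w\s]+) /', bd_summary)
    subtitle_languages = re.findall(r'Subtitle: ([\w\s]+) /', bd_summary)
    # -> ['English'] and ['English', 'French'], which
    #    insert_languages_into_mediainfo() then spliced into the text
    #    MediaInfo dump as "Language : ..." lines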
--- src/trackers/ANT.py | 39 ++------------------------------------- src/trackers/OE.py | 4 ++-- 2 files changed, 4 insertions(+), 39 deletions(-) diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index 71078ecb0..9e06f931f 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -12,7 +12,6 @@ from pathlib import Path from src.trackers.COMMON import COMMON from src.console import console -import re class ANT(): @@ -120,48 +119,14 @@ def calculate_pieces_and_file_size(total_size, pathname_bytes, piece_size): else: anon = 1 - def extract_audio_languages(bd_summary): - audio_pattern = re.compile(r'Audio: ([\w\s]+) /') - audio_languages = audio_pattern.findall(bd_summary) - return audio_languages - - def extract_subtitle_languages(bd_summary): - subtitle_pattern = re.compile(r'Subtitle: ([\w\s]+) /') - subtitle_languages = subtitle_pattern.findall(bd_summary) - return subtitle_languages - - def insert_languages_into_mediainfo(mi_dump, audio_languages, subtitle_languages): - mi_lines = mi_dump.splitlines() - - audio_index = 0 - subtitle_index = 0 - - for i, line in enumerate(mi_lines): - if "Audio" in line: - if audio_index < len(audio_languages): - mi_lines.insert(i + 1, f"Language : {audio_languages[audio_index]}") - audio_index += 1 - - elif "Text" in line: - if subtitle_index < len(subtitle_languages): - mi_lines.insert(i + 1, f"Language : {subtitle_languages[subtitle_index]}") - subtitle_index += 1 - - return "\n".join(mi_lines) - if meta['bdinfo'] is not None: - bd_summary_file = f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt" - bd_dump = open(bd_summary_file, 'r', encoding='utf-8').read() - audio_languages = extract_audio_languages(bd_dump) - subtitle_languages = extract_subtitle_languages(bd_dump) + bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() bd_dump = f'[spoiler=BDInfo][pre]{bd_dump}[/pre][/spoiler]' path = os.path.join(meta['bdinfo']['path'], 'STREAM') file_name = meta['bdinfo']['files'][0]['file'].lower() m2ts = os.path.join(path, file_name) media_info_output = str(MediaInfo.parse(m2ts, output="text", full=False)) - mi_dump_replace = media_info_output.replace('\r\n', '\n') - mi_dump_with_languages = insert_languages_into_mediainfo(mi_dump_replace, audio_languages, subtitle_languages) - mi_dump = mi_dump_with_languages + mi_dump = media_info_output.replace('\r\n', '\n') else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') diff --git a/src/trackers/OE.py b/src/trackers/OE.py index 0f5ad62cf..e536179fc 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -228,7 +228,7 @@ def process_languages(tracks): if track.get('@type') == 'Audio': language = track.get('Language') if not language or language is None: - audio_lang = cli_ui.ask_string('No audio language present, you must enter one [English, French, Whatever]:') + audio_lang = cli_ui.ask_string('No audio language present, you must enter one:') if audio_lang: audio_languages.append(audio_lang) else: @@ -236,7 +236,7 @@ def process_languages(tracks): elif track.get('@type') == 'Text': language = track.get('Language') if not language or language is None: - subtitle_lang = cli_ui.ask_string('No subtitle language present, you must enter one [English, French, Whatever]:') + subtitle_lang = cli_ui.ask_string('No subtitle language present, you must enter one:') if subtitle_lang: 
subtitle_languages.append(subtitle_lang) else: From dd68d1e5a9e010d59899f03839758f3b19897fd0 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 12 Oct 2024 21:20:24 +1000 Subject: [PATCH 276/741] Fix PTP images when is_disc --- src/bbcode.py | 62 +++++++++++++++++++++++++-------------------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/src/bbcode.py b/src/bbcode.py index 8702befc0..0c69fa9b7 100644 --- a/src/bbcode.py +++ b/src/bbcode.py @@ -61,6 +61,36 @@ def clean_ptp_description(self, desc, is_disc): desc = desc.replace('http://passthepopcorn.me', 'PTP').replace('https://passthepopcorn.me', 'PTP') desc = desc.replace('http://hdbits.org', 'HDB').replace('https://hdbits.org', 'HDB') + # Catch Stray Images and Prepare Image List + imagelist = [] + comps = re.findall(r"\[comparison=[\s\S]*?\[\/comparison\]", desc) + hides = re.findall(r"\[hide[\s\S]*?\[\/hide\]", desc) + comps.extend(hides) + nocomp = desc + comp_placeholders = [] + + # Replace comparison/hide tags with placeholder because sometimes uploaders use comp images as loose images + for i, comp in enumerate(comps): + nocomp = nocomp.replace(comp, '') + desc = desc.replace(comp, f"COMPARISON_PLACEHOLDER-{i} ") + comp_placeholders.append(comp) + + # Remove Images in IMG tags: + desc = re.sub(r"\[img\][\s\S]*?\[\/img\]", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"\[img=[\s\S]*?\]", "", desc, flags=re.IGNORECASE) + + # Extract loose images and add to imagelist as dictionaries + loose_images = re.findall(r"(https?:\/\/[^\s\[\]]+\.(?:png|jpg))", nocomp, flags=re.IGNORECASE) + if loose_images: + for img_url in loose_images: + image_dict = { + 'img_url': img_url, + 'raw_url': img_url, + 'web_url': img_url # Since there is no distinction here, use the same URL for all + } + imagelist.append(image_dict) + desc = desc.replace(img_url, '') + # Remove Mediainfo Tags / Attempt to regex out mediainfo mediainfo_tags = re.findall(r"\[mediainfo\][\s\S]*?\[\/mediainfo\]", desc) if mediainfo_tags: @@ -72,7 +102,7 @@ def clean_ptp_description(self, desc, is_disc): desc = re.sub(r"(^(video|audio|text)( #\d+)?\nid)(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) desc = re.sub(r"(^(menu)( #\d+)?\n)(.*?)^$", "", f"{desc}\n\n", flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) elif any(x in is_disc for x in ["BDMV", "DVD"]): - return "", [] + return "", imagelist # Convert Quote tags: desc = re.sub(r"\[quote.*?\]", "[code]", desc) @@ -104,36 +134,6 @@ def clean_ptp_description(self, desc, is_disc): for each in remove_list: desc = desc.replace(each, '') - # Catch Stray Images and Prepare Image List - imagelist = [] - comps = re.findall(r"\[comparison=[\s\S]*?\[\/comparison\]", desc) - hides = re.findall(r"\[hide[\s\S]*?\[\/hide\]", desc) - comps.extend(hides) - nocomp = desc - comp_placeholders = [] - - # Replace comparison/hide tags with placeholder because sometimes uploaders use comp images as loose images - for i, comp in enumerate(comps): - nocomp = nocomp.replace(comp, '') - desc = desc.replace(comp, f"COMPARISON_PLACEHOLDER-{i} ") - comp_placeholders.append(comp) - - # Remove Images in IMG tags: - desc = re.sub(r"\[img\][\s\S]*?\[\/img\]", "", desc, flags=re.IGNORECASE) - desc = re.sub(r"\[img=[\s\S]*?\]", "", desc, flags=re.IGNORECASE) - - # Extract loose images and add to imagelist as dictionaries - loose_images = re.findall(r"(https?:\/\/[^\s\[\]]+\.(?:png|jpg))", nocomp, flags=re.IGNORECASE) - if loose_images: - for img_url in loose_images: - image_dict = { - 'img_url': img_url, - 'raw_url': img_url, 
- 'web_url': img_url # Since there is no distinction here, use the same URL for all - } - imagelist.append(image_dict) - desc = desc.replace(img_url, '') - # Re-place comparisons for i, comp in enumerate(comp_placeholders): comp = re.sub(r"\[\/?img[\s\S]*?\]", "", comp, flags=re.IGNORECASE) From 37ada83602defbe340a6632f3dca628162a9dee9 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 12 Oct 2024 21:32:43 +1000 Subject: [PATCH 277/741] Find and store image sizes fixes: https://github.com/Audionut/Upload-Assistant/issues/82 Stops MTV from borking if the size of all images > 25 MiB --- src/prep.py | 33 +++++++++++++++++++++++++++++++-- src/trackers/MTV.py | 33 ++++++++++++++++++++++++++++++++- 2 files changed, 63 insertions(+), 3 deletions(-) diff --git a/src/prep.py b/src/prep.py index 1f7699bf2..e5186c39f 100644 --- a/src/prep.py +++ b/src/prep.py @@ -86,7 +86,11 @@ async def check_images_concurrently(self, imagelist, meta): approved_image_hosts = ['ptpimg', 'imgbox'] invalid_host_found = False # Track if any image is on a non-approved host - # Function to check each image's URL and host + # Ensure meta['image_sizes'] exists + if 'image_sizes' not in meta: + meta['image_sizes'] = {} + + # Function to check each image's URL, host, and log size async def check_and_collect(image_dict): img_url = image_dict.get('img_url') or image_dict.get('raw_url') if not img_url: @@ -99,6 +103,18 @@ async def check_and_collect(image_dict): nonlocal invalid_host_found invalid_host_found = True # Mark that we found an invalid host + # Download the image to check its size + async with aiohttp.ClientSession() as session: + async with session.get(img_url) as response: + if response.status == 200: + image_content = await response.read() # Download the entire image content + image_size = len(image_content) # Calculate the size in bytes + # Store the image size in meta['image_sizes'] + meta['image_sizes'][img_url] = image_size + console.print(f"Size of {img_url}: {image_size / 1024:.2f} KiB") + else: + console.print(f"[red]Failed to get size for {img_url}. 
Skipping.") + return image_dict else: return None @@ -2568,6 +2584,10 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i successfully_uploaded = set() # Track successfully uploaded images initial_timeout = 10 # Set the initial timeout for backoff + # Initialize the meta key for image sizes if not already present + if 'image_sizes' not in meta: + meta['image_sizes'] = {} + if custom_img_list: image_glob = custom_img_list existing_images = [] @@ -2761,9 +2781,18 @@ def exponential_backoff(retry_count, initial_timeout): # Only increment `i` after a successful upload if upload_success: - image_dict = {'img_url': img_url, 'raw_url': raw_url, 'web_url': web_url} + image_size = os.path.getsize(image) # Get the image size in bytes + image_dict = { + 'img_url': img_url, + 'raw_url': raw_url, + 'web_url': web_url + } image_list.append(image_dict) successfully_uploaded.add(image) # Track the uploaded image + + # Store size in meta, indexed by the img_url + meta['image_sizes'][img_url] = image_size # Keep sizes separate in meta['image_sizes'] + progress.advance(upload_task) i += 1 # Increment the image counter only after success break # Break retry loop after a successful upload diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index abb6eccd8..253249252 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -46,12 +46,41 @@ async def upload(self, meta, disctype): async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): approved_image_hosts = ['ptpimg', 'imgbox'] + total_size_limit = 25 * 1024 * 1024 # 25 MiB in bytes + + # Helper function to calculate total size of the images + def calculate_total_size(image_list, image_sizes): + total_size = 0 + for image in image_list: + img_url = image['img_url'] + size = image_sizes.get(img_url, 0) # Get size from meta['image_sizes'], default to 0 if not found + total_size += size + return total_size + + # Helper function to remove images until the total size is under the limit + def enforce_size_limit(image_list, image_sizes): + total_size = calculate_total_size(image_list, image_sizes) + valid_images = [] + + for image in image_list: + if total_size <= total_size_limit: + valid_images.append(image) + else: + img_url = image['img_url'] + size = image_sizes.get(img_url, 0) + total_size -= size # Subtract size of the removed image + console.print(f"[red]Removed {img_url} to stay within the 25 MiB limit.") + + return valid_images # Check if the images are already hosted on an approved image host if all(any(host in image['raw_url'] for host in approved_image_hosts) for image in meta['image_list']): console.print("[green]Images are already hosted on an approved image host. 
Skipping re-upload.") image_list = meta['image_list'] # Use the existing images + # Enforce the total size limit on the existing image list + image_list = enforce_size_limit(image_list, meta['image_sizes']) + else: # Proceed with the retry logic if images are not hosted on an approved image host while img_host_index <= len(approved_image_hosts): @@ -64,8 +93,10 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): img_host_index += 1 continue - # If we successfully uploaded images, break out of the loop + # If we successfully uploaded images, enforce the size limit and break out of the loop if image_list is not None: + # Enforce the total size limit on the newly uploaded images + image_list = enforce_size_limit(image_list, meta['image_sizes']) break if image_list is None: From 6801e2ae6a3d59000f025aeb9a7242aa2526056a Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 12 Oct 2024 21:58:45 +1000 Subject: [PATCH 278/741] Add title to audio tracks --- src/prep.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/prep.py b/src/prep.py index e5186c39f..d838d517a 100644 --- a/src/prep.py +++ b/src/prep.py @@ -901,6 +901,7 @@ def filter_mediainfo(data): "Delay_Source": track.get("Delay_Source"), "Video_Delay": track.get("Video_Delay"), "StreamSize": track.get("StreamSize"), + "Title": track.get("Title"), "Language": track.get("Language"), "ServiceKind": track.get("ServiceKind"), "Default": track.get("Default"), From 4da0184294fdd859202266f9dd65ec173424fabb Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 13 Oct 2024 08:28:48 +1000 Subject: [PATCH 279/741] Check if title string is present before applying lower --- src/prep.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/prep.py b/src/prep.py index d838d517a..678786f7f 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1910,7 +1910,7 @@ def get_audio_v2(self, mi, meta, bdinfo): for i, t in enumerate(tracks): if t.get('@type') != "Audio": continue - if t.get('Language', '') == meta.get('original_language', '') and "commentary" not in t.get('Title', '').lower(): + if t.get('Language', '') == meta.get('original_language', '') and "commentary" not in (t.get('Title') or '').lower(): track_num = i break @@ -1962,11 +1962,11 @@ def get_audio_v2(self, mi, meta, bdinfo): audio_language = t.get('Language', '') # Check for English Language Track - if audio_language.startswith("en") and "commentary" not in t.get('Title', '').lower(): + if audio_language.startswith("en") and "commentary" not in (t.get('Title') or '').lower(): eng = True # Check for original Language Track - if not audio_language.startswith("en") and audio_language.startswith(meta['original_language']) and "commentary" not in t.get('Title', '').lower(): + if not audio_language.startswith("en") and audio_language.startswith(meta['original_language']) and "commentary" not in (t.get('Title') or '').lower(): orig = True # Catch Chinese / Norwegian Variants @@ -1993,7 +1993,7 @@ def get_audio_v2(self, mi, meta, bdinfo): if t.get('@type') != "Audio": continue - if "commentary" in t.get('Title', '').lower(): + if "commentary" in (t.get('Title') or '').lower(): has_commentary = True # Convert commercial name to naming conventions From 7f5e7da36159401516038f8bd8050459c8dcb252 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 13 Oct 2024 09:06:45 +1000 Subject: [PATCH 280/741] Break out of piece size loop Iteration 26431: piece_size=33554432, num_pieces=1783, torrent_file_size=115.65 KiB Iteration 26432: piece_size=67108864, num_pieces=892, 
torrent_file_size=98.25 KiB --- src/prep.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index 678786f7f..7dad430ec 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2473,9 +2473,10 @@ def calculate_piece_size(cls, total_size, min_size, max_size, files): elif num_pieces > 2000: piece_size *= 2 if piece_size > our_max_size: - cli_ui.warning(f"Warning: Piece size exceeded 2000 pieces and .torrent will be approximately {torrent_file_size / 1024:.2f} KiB! Using ({num_pieces}) pieces.") piece_size = our_max_size break + elif torrent_file_size > 92160: # Break if .torrent size exceeds 90 KiB + break elif torrent_file_size > 102400: cli_ui.warning('WARNING: .torrent size will exceed 100 KiB!') piece_size *= 2 From 69911e59e51ad9933feebc268b51b37adad8c688 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 13 Oct 2024 09:28:39 +1000 Subject: [PATCH 281/741] Break with lesser pieces, not more Only print the size warning if we settled there. --- src/prep.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/prep.py b/src/prep.py index 7dad430ec..ae8f73b23 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2470,18 +2470,18 @@ def calculate_piece_size(cls, total_size, min_size, max_size, files): if piece_size < our_min_size: piece_size = our_min_size break + elif torrent_file_size > 92160: # Break if .torrent size exceeds 90 KiB + break elif num_pieces > 2000: piece_size *= 2 if piece_size > our_max_size: piece_size = our_max_size break - elif torrent_file_size > 92160: # Break if .torrent size exceeds 90 KiB - break elif torrent_file_size > 102400: - cli_ui.warning('WARNING: .torrent size will exceed 100 KiB!') piece_size *= 2 if piece_size > our_max_size: piece_size = our_max_size + cli_ui.warning('WARNING: .torrent size will exceed 100 KiB!') break num_pieces = math.ceil(total_size / piece_size) torrent_file_size = 20 + (num_pieces * 20) + cls._calculate_pathname_bytes(files) From dd25674dd57ea9e25c16a36758fa948887843929 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 13 Oct 2024 10:10:36 +1000 Subject: [PATCH 282/741] Revert "Fix PTP images when is_disc" This reverts commit dd68d1e5a9e010d59899f03839758f3b19897fd0. 
--- src/bbcode.py | 62 +++++++++++++++++++++++++-------------------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/src/bbcode.py b/src/bbcode.py index 0c69fa9b7..8702befc0 100644 --- a/src/bbcode.py +++ b/src/bbcode.py @@ -61,36 +61,6 @@ def clean_ptp_description(self, desc, is_disc): desc = desc.replace('http://passthepopcorn.me', 'PTP').replace('https://passthepopcorn.me', 'PTP') desc = desc.replace('http://hdbits.org', 'HDB').replace('https://hdbits.org', 'HDB') - # Catch Stray Images and Prepare Image List - imagelist = [] - comps = re.findall(r"\[comparison=[\s\S]*?\[\/comparison\]", desc) - hides = re.findall(r"\[hide[\s\S]*?\[\/hide\]", desc) - comps.extend(hides) - nocomp = desc - comp_placeholders = [] - - # Replace comparison/hide tags with placeholder because sometimes uploaders use comp images as loose images - for i, comp in enumerate(comps): - nocomp = nocomp.replace(comp, '') - desc = desc.replace(comp, f"COMPARISON_PLACEHOLDER-{i} ") - comp_placeholders.append(comp) - - # Remove Images in IMG tags: - desc = re.sub(r"\[img\][\s\S]*?\[\/img\]", "", desc, flags=re.IGNORECASE) - desc = re.sub(r"\[img=[\s\S]*?\]", "", desc, flags=re.IGNORECASE) - - # Extract loose images and add to imagelist as dictionaries - loose_images = re.findall(r"(https?:\/\/[^\s\[\]]+\.(?:png|jpg))", nocomp, flags=re.IGNORECASE) - if loose_images: - for img_url in loose_images: - image_dict = { - 'img_url': img_url, - 'raw_url': img_url, - 'web_url': img_url # Since there is no distinction here, use the same URL for all - } - imagelist.append(image_dict) - desc = desc.replace(img_url, '') - # Remove Mediainfo Tags / Attempt to regex out mediainfo mediainfo_tags = re.findall(r"\[mediainfo\][\s\S]*?\[\/mediainfo\]", desc) if mediainfo_tags: @@ -102,7 +72,7 @@ def clean_ptp_description(self, desc, is_disc): desc = re.sub(r"(^(video|audio|text)( #\d+)?\nid)(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) desc = re.sub(r"(^(menu)( #\d+)?\n)(.*?)^$", "", f"{desc}\n\n", flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) elif any(x in is_disc for x in ["BDMV", "DVD"]): - return "", imagelist + return "", [] # Convert Quote tags: desc = re.sub(r"\[quote.*?\]", "[code]", desc) @@ -134,6 +104,36 @@ def clean_ptp_description(self, desc, is_disc): for each in remove_list: desc = desc.replace(each, '') + # Catch Stray Images and Prepare Image List + imagelist = [] + comps = re.findall(r"\[comparison=[\s\S]*?\[\/comparison\]", desc) + hides = re.findall(r"\[hide[\s\S]*?\[\/hide\]", desc) + comps.extend(hides) + nocomp = desc + comp_placeholders = [] + + # Replace comparison/hide tags with placeholder because sometimes uploaders use comp images as loose images + for i, comp in enumerate(comps): + nocomp = nocomp.replace(comp, '') + desc = desc.replace(comp, f"COMPARISON_PLACEHOLDER-{i} ") + comp_placeholders.append(comp) + + # Remove Images in IMG tags: + desc = re.sub(r"\[img\][\s\S]*?\[\/img\]", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"\[img=[\s\S]*?\]", "", desc, flags=re.IGNORECASE) + + # Extract loose images and add to imagelist as dictionaries + loose_images = re.findall(r"(https?:\/\/[^\s\[\]]+\.(?:png|jpg))", nocomp, flags=re.IGNORECASE) + if loose_images: + for img_url in loose_images: + image_dict = { + 'img_url': img_url, + 'raw_url': img_url, + 'web_url': img_url # Since there is no distinction here, use the same URL for all + } + imagelist.append(image_dict) + desc = desc.replace(img_url, '') + # Re-place comparisons for i, comp in enumerate(comp_placeholders): 
comp = re.sub(r"\[\/?img[\s\S]*?\]", "", comp, flags=re.IGNORECASE) From 86adc87b397f6b32f1cb0fb9562c6ac9ddf4fea4 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 13 Oct 2024 10:16:11 +1000 Subject: [PATCH 283/741] Don't process disc screens from PTP I couldn't get the image list meta into disc_screenshots without starting to make some wholesale changes. Will revisit later. --- src/prep.py | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/src/prep.py b/src/prep.py index ae8f73b23..af0bde8e4 100644 --- a/src/prep.py +++ b/src/prep.py @@ -252,11 +252,12 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met meta['description'] = ptp_desc meta['skip_gen_desc'] = True - if not meta.get('image_list'): # Only handle images if image_list is not already populated - valid_images = await self.check_images_concurrently(ptp_imagelist, meta) - if valid_images: - meta['image_list'] = valid_images - await self.handle_image_list(meta, tracker_name) + if not meta['is_disc']: + if not meta.get('image_list'): # Only handle images if image_list is not already populated + valid_images = await self.check_images_concurrently(ptp_imagelist, meta) + if valid_images: + meta['image_list'] = valid_images + await self.handle_image_list(meta, tracker_name) else: found_match = False @@ -267,10 +268,11 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met meta['description'] = ptp_desc meta['skip_gen_desc'] = True - if not meta.get('image_list'): # Only handle images if image_list is not already populated - valid_images = await self.check_images_concurrently(ptp_imagelist) - if valid_images: - meta['image_list'] = valid_images + if not meta['is_disc']: + if not meta.get('image_list'): # Only handle images if image_list is not already populated + valid_images = await self.check_images_concurrently(ptp_imagelist) + if valid_images: + meta['image_list'] = valid_images else: console.print("[yellow]Skipping PTP as no match found[/yellow]") found_match = False @@ -287,11 +289,12 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(meta['ptp'], meta, meta.get('is_disc', False)) meta['description'] = ptp_desc meta['skip_gen_desc'] = True - if not meta.get('image_list'): # Only handle images if image_list is not already populated - valid_images = await self.check_images_concurrently(ptp_imagelist, meta) - if valid_images: - meta['image_list'] = valid_images - console.print("[green]PTP images added to metadata.[/green]") + if not meta['is_disc']: + if not meta.get('image_list'): # Only handle images if image_list is not already populated + valid_images = await self.check_images_concurrently(ptp_imagelist, meta) + if valid_images: + meta['image_list'] = valid_images + console.print("[green]PTP images added to metadata.[/green]") else: console.print(f"[yellow]Could not find IMDb ID using PTP ID: {ptp_torrent_id}[/yellow]") found_match = False From b6c317aa441069b76b421dfb5de51357298bcace Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 13 Oct 2024 12:44:10 +1000 Subject: [PATCH 284/741] More mediainfo --- src/prep.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/prep.py b/src/prep.py index af0bde8e4..3280cd7aa 100644 --- a/src/prep.py +++ b/src/prep.py @@ -827,12 +827,14 @@ def filter_mediainfo(data): "Format_Tier": track.get("Format_Tier"), "HDR_Format": track.get("HDR_Format"), "HDR_Format_Version": 
track.get("HDR_Format_Version"), + "HDR_Format_String": track.get("HDR_Format_String"), "HDR_Format_Profile": track.get("HDR_Format_Profile"), "HDR_Format_Level": track.get("HDR_Format_Level"), "HDR_Format_Settings": track.get("HDR_Format_Settings"), "HDR_Format_Compression": track.get("HDR_Format_Compression"), "HDR_Format_Compatibility": track.get("HDR_Format_Compatibility"), "CodecID": track.get("CodecID"), + "CodecID_Hint": track.get("CodecID_Hint"), "Duration": track.get("Duration"), "BitRate": track.get("BitRate"), "Width": track.get("Width"), @@ -865,6 +867,7 @@ def filter_mediainfo(data): "colour_primaries_Source": track.get("colour_primaries_Source"), "transfer_characteristics": track.get("transfer_characteristics"), "transfer_characteristics_Source": track.get("transfer_characteristics_Source"), + "transfer_characteristics_Original": track.get("transfer_characteristics_Original"), "matrix_coefficients": track.get("matrix_coefficients"), "matrix_coefficients_Source": track.get("matrix_coefficients_Source"), "MasteringDisplay_ColorPrimaries": track.get("MasteringDisplay_ColorPrimaries"), @@ -893,6 +896,7 @@ def filter_mediainfo(data): "Channels": track.get("Channels"), "ChannelPositions": track.get("ChannelPositions"), "ChannelLayout": track.get("ChannelLayout"), + "Channels_Original": track.get("Channels_Original"), "ChannelLayout_Original": track.get("ChannelLayout_Original"), "SamplesPerFrame": track.get("SamplesPerFrame"), "SamplingRate": track.get("SamplingRate"), From 7bd908058c29f48baecf9e87f37b05ecf9f13a84 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 13 Oct 2024 20:58:28 +1000 Subject: [PATCH 285/741] Catch non-critical attribute error when audio_language is empty --- src/prep.py | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/src/prep.py b/src/prep.py index 3280cd7aa..64b8c732b 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1968,21 +1968,22 @@ def get_audio_v2(self, mi, meta, bdinfo): audio_language = t.get('Language', '') - # Check for English Language Track - if audio_language.startswith("en") and "commentary" not in (t.get('Title') or '').lower(): - eng = True - - # Check for original Language Track - if not audio_language.startswith("en") and audio_language.startswith(meta['original_language']) and "commentary" not in (t.get('Title') or '').lower(): - orig = True - - # Catch Chinese / Norwegian Variants - variants = ['zh', 'cn', 'cmn', 'no', 'nb'] - if any(audio_language.startswith(var) for var in variants) and any(meta['original_language'].startswith(var) for var in variants): - orig = True - - # Check for additional, bloated Tracks - if audio_language != meta['original_language'] and not audio_language.startswith("en"): + if isinstance(audio_language, str): + # Check for English Language Track + if audio_language.startswith("en") and "commentary" not in (t.get('Title') or '').lower(): + eng = True + + # Check for original Language Track + if not audio_language.startswith("en") and audio_language.startswith(meta['original_language']) and "commentary" not in (t.get('Title') or '').lower(): + orig = True + + # Catch Chinese / Norwegian Variants + variants = ['zh', 'cn', 'cmn', 'no', 'nb'] + if any(audio_language.startswith(var) for var in variants) and any(meta['original_language'].startswith(var) for var in variants): + orig = True + + # Only proceed if `audio_language` is valid after previous checks + if isinstance(audio_language, str) and audio_language and audio_language != meta['original_language'] and not 
audio_language.startswith("en"): # If audio_language is empty, set to 'und' (undefined) audio_language = "und" if audio_language == "" else audio_language console.print(f"[bold red]This release has a(n) {audio_language} audio track, and may be considered bloated") From ce208d224eefdae2ecd2dcf423c78d28aefcf321 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 13 Oct 2024 23:57:10 +1000 Subject: [PATCH 286/741] RF --NoGroup --- src/trackers/BHD.py | 2 +- src/trackers/RF.py | 10 ++++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index 910ff6e8f..44a630c51 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -207,7 +207,7 @@ async def edit_desc(self, meta): for each in range(len(images[:int(meta['screens'])])): web_url = images[each]['web_url'] img_url = images[each]['img_url'] - desc.write(f"[url={web_url}][img width=350]{img_url}[/img][/url]") + desc.write(f"[url={web_url}][img width=350]{img_url}[/img][/url] ") desc.write("[/center]") desc.write(self.signature) desc.close() diff --git a/src/trackers/RF.py b/src/trackers/RF.py index c90f8b4db..a8de6d00d 100644 --- a/src/trackers/RF.py +++ b/src/trackers/RF.py @@ -37,7 +37,7 @@ async def upload(self, meta, disctype): cat_id = await self.get_cat_id(meta['category']) type_id = await self.get_type_id(meta['type']) resolution_id = await self.get_res_id(meta['resolution']) - stt_name = await self.edit_name(meta) + rf_name = await self.edit_name(meta) if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: @@ -52,7 +52,7 @@ async def upload(self, meta, disctype): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name': stt_name, + 'name': rf_name, 'description': desc, 'mediainfo': mi_dump, 'bdinfo': bd_dump, @@ -105,8 +105,10 @@ async def upload(self, meta, disctype): open_torrent.close() async def edit_name(self, meta): - stt_name = meta['name'] - return stt_name + rf_name = meta['name'] + if meta['tag'] == "": + rf_name = f"{rf_name}-NoGroup" + return rf_name async def get_cat_id(self, category_name): category_id = { From c27e64ede3b669b62b0eb687a3ab67bc07e7062b Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 14 Oct 2024 08:48:59 +1000 Subject: [PATCH 287/741] Early breaks in torrent creation --- src/prep.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index 64b8c732b..67f289458 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2478,13 +2478,17 @@ def calculate_piece_size(cls, total_size, min_size, max_size, files): if piece_size < our_min_size: piece_size = our_min_size break - elif torrent_file_size > 92160: # Break if .torrent size exceeds 90 KiB + elif torrent_file_size > 61440: # Break if .torrent size exceeds 60 KiB break elif num_pieces > 2000: piece_size *= 2 if piece_size > our_max_size: piece_size = our_max_size break + elif torrent_file_size > 81920: # Break if .torrent size exceeds 80 KiB + break + elif torrent_file_size < 10240: # Break if .torrent size less than 10 KiB + break elif torrent_file_size > 102400: piece_size *= 2 if piece_size > our_max_size: From cd8a5386a4b56c50c3612ee5e0fe3ede5ed163c2 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 14 Oct 2024 09:36:17 +1000 Subject: [PATCH 288/741] Account for tiny sized content --- src/prep.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/prep.py 
b/src/prep.py index 67f289458..204d975aa 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2487,7 +2487,7 @@ def calculate_piece_size(cls, total_size, min_size, max_size, files): break elif torrent_file_size > 81920: # Break if .torrent size exceeds 80 KiB break - elif torrent_file_size < 10240: # Break if .torrent size less than 10 KiB + elif torrent_file_size < 2048: # Break if .torrent size less than 2 KiB break elif torrent_file_size > 102400: piece_size *= 2 From 51e9f358157c529d2e7908b74469e687a983ddff Mon Sep 17 00:00:00 2001 From: TJZine Date: Sun, 13 Oct 2024 23:20:22 -0400 Subject: [PATCH 289/741] - Limits to half of available CPU cores when optimizing images with oxipng - adds a new option to the config called "shared_seedbox" to enable this feature, as it slows down the speed at which screenshots are taken. --- data/example-config.py | 2 ++ src/prep.py | 7 +++++++ 2 files changed, 9 insertions(+) diff --git a/data/example-config.py b/data/example-config.py index 9cf6684a8..526f95ee4 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -27,6 +27,8 @@ # Enable lossless PNG Compression (True/False) "optimize_images": True, + # Use only half available CPU cores to avoid memory allocation errors + "shared_seedbox": False, # The name of your default torrent client, set in the torrent client sections below "default_torrent_client": "Client1", diff --git a/src/prep.py b/src/prep.py index 204d975aa..033fb5c48 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1491,6 +1491,13 @@ def valid_ss_time(self, ss_times, num_screens, length): def optimize_images(self, image): if self.config['DEFAULT'].get('optimize_images', True) is True: + if self.config['DEFAULT'].get('shared_seedbox', True) is True: + # Get number of CPU cores + num_cores = multiprocessing.cpu_count() + # Limit the number of threads based half available cores + max_threads = num_cores // 2 + # Set cores for oxipng usage + os.environ['RAYON_NUM_THREADS'] = str(max_threads) if os.path.exists(image): try: pyver = platform.python_version_tuple() From 3da79569b05f23b029b72425873a2735f356ce37 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 14 Oct 2024 14:24:10 +1000 Subject: [PATCH 290/741] Lint Minor description changes --- data/example-config.py | 4 +++- src/prep.py | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/data/example-config.py b/data/example-config.py index 526f95ee4..89d9acbe5 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -22,12 +22,14 @@ "img_host_6": "ptscreens", "img_host_7": "oeimg", - + # Number of screenshots to capture "screens": "6", + # Enable lossless PNG Compression (True/False) "optimize_images": True, # Use only half available CPU cores to avoid memory allocation errors + # Only when using lossless compression "shared_seedbox": False, # The name of your default torrent client, set in the torrent client sections below diff --git a/src/prep.py b/src/prep.py index 033fb5c48..d97179bed 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1494,10 +1494,10 @@ def optimize_images(self, image): if self.config['DEFAULT'].get('shared_seedbox', True) is True: # Get number of CPU cores num_cores = multiprocessing.cpu_count() - # Limit the number of threads based half available cores + # Limit the number of threads based on half available cores max_threads = num_cores // 2 # Set cores for oxipng usage - os.environ['RAYON_NUM_THREADS'] = str(max_threads) + os.environ['RAYON_NUM_THREADS'] = str(max_threads) if os.path.exists(image): try: pyver = platform.python_version_tuple() From
46b2a48fd93e6d9e078ad054171b5bdc898865f7 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 14 Oct 2024 14:50:27 +1000 Subject: [PATCH 291/741] RF - capture mixed case nogroup and others --- src/trackers/RF.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/trackers/RF.py b/src/trackers/RF.py index a8de6d00d..87eac27cf 100644 --- a/src/trackers/RF.py +++ b/src/trackers/RF.py @@ -106,8 +106,11 @@ async def upload(self, meta, disctype): async def edit_name(self, meta): rf_name = meta['name'] - if meta['tag'] == "": + tag_lower = meta['tag'].lower() + invalid_tags = ["nogrp", "nogroup", "unknown", "-unk-"] + if meta['tag'] == "" or any(invalid_tag in tag_lower for invalid_tag in invalid_tags): rf_name = f"{rf_name}-NoGroup" + return rf_name async def get_cat_id(self, category_name): From 1131cbb7eeec42e865bfff5f124513b8e1c49baa Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 14 Oct 2024 14:53:24 +1000 Subject: [PATCH 292/741] Double appending a nogroup tag would be silly --- src/trackers/RF.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/trackers/RF.py b/src/trackers/RF.py index 87eac27cf..1418311e4 100644 --- a/src/trackers/RF.py +++ b/src/trackers/RF.py @@ -109,6 +109,8 @@ async def edit_name(self, meta): tag_lower = meta['tag'].lower() invalid_tags = ["nogrp", "nogroup", "unknown", "-unk-"] if meta['tag'] == "" or any(invalid_tag in tag_lower for invalid_tag in invalid_tags): + for invalid_tag in invalid_tags: + rf_name = rf_name.lower().replace(f"-{invalid_tag}", "") rf_name = f"{rf_name}-NoGroup" return rf_name From ca3a39cd8fbff2117d496436a3402a50339b6295 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 14 Oct 2024 14:55:50 +1000 Subject: [PATCH 293/741] Converting name to lowercase isn't any brighter --- src/trackers/RF.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/trackers/RF.py b/src/trackers/RF.py index 1418311e4..6404f9117 100644 --- a/src/trackers/RF.py +++ b/src/trackers/RF.py @@ -3,6 +3,7 @@ import asyncio import requests import platform +import re from str2bool import str2bool from src.trackers.COMMON import COMMON @@ -110,7 +111,7 @@ async def edit_name(self, meta): invalid_tags = ["nogrp", "nogroup", "unknown", "-unk-"] if meta['tag'] == "" or any(invalid_tag in tag_lower for invalid_tag in invalid_tags): for invalid_tag in invalid_tags: - rf_name = rf_name.lower().replace(f"-{invalid_tag}", "") + rf_name = rf_name.replace(f"-{invalid_tag}", "", flags=re.IGNORECASE) rf_name = f"{rf_name}-NoGroup" return rf_name From 83245b2c5ed0b71a71b315ce2a3f870a79d87016 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 14 Oct 2024 16:21:21 +1000 Subject: [PATCH 294/741] Store image sizes when using imgbox fixes further discussion in https://github.com/Audionut/Upload-Assistant/issues/82 --- src/prep.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/src/prep.py b/src/prep.py index d97179bed..df6931e5a 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2665,7 +2665,7 @@ def exponential_backoff(retry_count, initial_timeout): loop = asyncio.get_event_loop() # Run the imgbox upload in the current event loop - image_list = loop.run_until_complete(self.imgbox_upload(os.getcwd(), image_glob)) # Pass all images + image_list = loop.run_until_complete(self.imgbox_upload(os.getcwd(), image_glob, meta)) # Pass all images # Ensure the image_list contains valid URLs before continuing if image_list and all('img_url' in img and 'raw_url' in img and 'web_url' in img for img in image_list): @@ -2841,33 
+2841,32 @@ def exponential_backoff(retry_count, initial_timeout): return image_list, i - async def imgbox_upload(self, chdir, image_glob): + async def imgbox_upload(self, chdir, image_glob, meta): try: os.chdir(chdir) image_list = [] console.print(f"[debug] Starting upload of {len(image_glob)} images to imgbox...") - - # Start a gallery context async with pyimgbox.Gallery(thumb_width=350, square_thumbs=False) as gallery: for image in image_glob: console.print(f"[blue]Uploading image: {image}") try: - # Add the image to the gallery and await the response async for submission in gallery.add([image]): if not submission['success']: console.print(f"[red]There was an error uploading to imgbox: [yellow]{submission['error']}[/yellow][/red]") return [] # Return empty list in case of failure else: - # Append the successful result to the image list + image_size = os.path.getsize(image) image_dict = { 'web_url': submission['web_url'], 'img_url': submission['thumbnail_url'], 'raw_url': submission['image_url'] } image_list.append(image_dict) - # console.print(f"[green]Successfully uploaded image: {image}") + meta['image_sizes'][submission['image_url']] = image_size + + console.print(f"[green]Successfully uploaded image: {image}") except Exception as e: console.print(f"[red]Error during upload for {image}: {str(e)}") From f9ab49b9dece349deefb7890f86583da545e192b Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 14 Oct 2024 16:49:30 +1000 Subject: [PATCH 295/741] MTV tagging --- src/trackers/MTV.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 253249252..5288abcce 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -391,7 +391,9 @@ async def get_origin_id(self, meta): async def get_tags(self, meta): tags = [] # Genres - tags.extend([x.strip(', ').lower().replace(' ', '.') for x in meta['genres'].split(',')]) + # MTV takes issue with some of the pulled TMDB tags, and I'm not hand-checking and attempting + # to regex however many tags need changing, so they're just getting skipped + # tags.extend([x.strip(', ').lower().replace(' ', '.') for x in meta['genres'].split(',')]) # Resolution tags.append(meta['resolution'].lower()) if meta['sd'] == 1: @@ -401,8 +403,12 @@ async def get_tags(self, meta): else: tags.append('hd') # Streaming Service + # disney+ should be disneyplus, assume every other service is the same.
+ # If I'm wrong, then they can either allow editing tags, or the service will just get skipped as well if str(meta['service_longname']) != "": - tags.append(f"{meta['service_longname'].lower().replace(' ', '.')}.source") + service_name = meta['service_longname'].lower().replace(' ', '.') + service_name = service_name.replace('+', 'plus') # Replace '+' with 'plus' + tags.append(f"{service_name}.source") # Release Type/Source for each in ['remux', 'WEB.DL', 'WEBRip', 'HDTV', 'BluRay', 'DVD', 'HDDVD']: if (each.lower().replace('.', '') in meta['type'].lower()) or (each.lower().replace('-', '') in meta['source']): @@ -412,9 +418,9 @@ async def get_tags(self, meta): if meta.get('tv_pack', 0) == 0: # Episodes if meta['sd'] == 1: - tags.extend(['episode.release', 'sd.episode']) + tags.extend(['sd.episode']) else: - tags.extend(['episode.release', 'hd.episode']) + tags.extend(['hd.episode']) else: # Seasons if meta['sd'] == 1: From 3b4c1f3ea562d0fa78ad44ed28a71a71312bec8b Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 14 Oct 2024 17:17:51 +1000 Subject: [PATCH 296/741] Start with largest piece size --- src/prep.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index df6931e5a..0b68d33e3 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2474,7 +2474,7 @@ def piece_size(self, value): def calculate_piece_size(cls, total_size, min_size, max_size, files): our_min_size = 16384 our_max_size = max_size if max_size else 67108864 # Default to 64 MiB if max_size is None - piece_size = 8388608 # Start with 8 MiB + piece_size = 67108864 # Start with 64 MiB num_pieces = math.ceil(total_size / piece_size) torrent_file_size = 20 + (num_pieces * 20) + cls._calculate_pathname_bytes(files) # Approximate .torrent size From 2b39f6f3f090757a67227a4c76b096b7ec760bec Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 14 Oct 2024 20:45:20 +1000 Subject: [PATCH 297/741] Clear trackers meta if None before iterating over it Fixes https://github.com/Audionut/Upload-Assistant/issues/81 --- src/prep.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/prep.py b/src/prep.py index 0b68d33e3..b569c2df2 100644 --- a/src/prep.py +++ b/src/prep.py @@ -134,6 +134,10 @@ async def check_and_collect(image_dict): if isinstance(meta.get('trackers', ''), str): meta['trackers'] = [tracker.strip() for tracker in meta['trackers'].split(',')] + # if trackers is None then it needs to be blanked + if meta.get('trackers') is None: + meta['trackers'] = [] + # Issue warning if any valid image is on an unapproved host and MTV is in the trackers list if 'MTV' in trackers_list or 'MTV' in meta.get('trackers', []): if invalid_host_found: From 192403365e83093534c0b8935545abd4316c47f2 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 14 Oct 2024 23:36:03 +1000 Subject: [PATCH 298/741] LST banned groups --- src/trackers/LST.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/trackers/LST.py b/src/trackers/LST.py index 83fc5e1b3..a9e55e382 100644 --- a/src/trackers/LST.py +++ b/src/trackers/LST.py @@ -28,7 +28,10 @@ def __init__(self, config): self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = ['aXXo', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'DNL', 'FaNGDiNG0', 'HD2DVD', 'HDTime', 'ION10', 'iPlanet', 'KiNGDOM', 'mHD', 'mSD', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'OFT', 'PRODJi', 'SANTi', 'STUTTERSHIT', 'ViSION', 'VXT', 'WAF', - 'x0r', 'YIFY', 'Sicario', 'RARBG', 'MeGusta', 'TSP', 'TSPxL',
'GalaxyTV', 'TGALAXY', 'TORRENTGALAXY'] + 'x0r', 'YIFY', 'Sicario', 'RARBG', 'MeGusta', 'TSP', 'TSPxL', 'GalaxyTV', 'TGALAXY', 'TORRENTGALAXY', 'NaNi', + 'BONE', 'dAV1nci', 'iHYTECH', 'LAMA', 'Rifftrax', 'SasukeducK', 'ShAaNiG', 'WKS', 'YTS', 'HDT', 'FGT', + ['EVO', 'Raw Content Only'], + ] pass async def get_cat_id(self, category_name, keywords, service): From bace649d85170cdd1d02e4a85cedb04e3725a76f Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 15 Oct 2024 08:19:38 +1000 Subject: [PATCH 299/741] Mediainfo - return empty lists instead of strings Fixes https://github.com/Audionut/Upload-Assistant/issues/90 --- src/prep.py | 262 ++++++++++++++++++++++++++-------------------------- 1 file changed, 131 insertions(+), 131 deletions(-) diff --git a/src/prep.py b/src/prep.py index b569c2df2..d97045b02 100644 --- a/src/prep.py +++ b/src/prep.py @@ -796,153 +796,153 @@ def filter_mediainfo(data): if track["@type"] == "General": filtered["media"]["track"].append({ "@type": track["@type"], - "UniqueID": track.get("UniqueID"), - "VideoCount": track.get("VideoCount"), - "AudioCount": track.get("AudioCount"), - "TextCount": track.get("TextCount"), - "MenuCount": track.get("MenuCount"), - "FileExtension": track.get("FileExtension"), - "Format": track.get("Format"), - "Format_Version": track.get("Format_Version"), - "FileSize": track.get("FileSize"), - "Duration": track.get("Duration"), - "OverallBitRate": track.get("OverallBitRate"), - "FrameRate": track.get("FrameRate"), - "FrameCount": track.get("FrameCount"), - "StreamSize": track.get("StreamSize"), - "IsStreamable": track.get("IsStreamable"), - "File_Created_Date": track.get("File_Created_Date"), - "File_Created_Date_Local": track.get("File_Created_Date_Local"), - "File_Modified_Date": track.get("File_Modified_Date"), - "File_Modified_Date_Local": track.get("File_Modified_Date_Local"), - "Encoded_Application": track.get("Encoded_Application"), - "Encoded_Library": track.get("Encoded_Library"), + "UniqueID": track.get("UniqueID", {}), + "VideoCount": track.get("VideoCount", {}), + "AudioCount": track.get("AudioCount", {}), + "TextCount": track.get("TextCount", {}), + "MenuCount": track.get("MenuCount", {}), + "FileExtension": track.get("FileExtension", {}), + "Format": track.get("Format", {}), + "Format_Version": track.get("Format_Version", {}), + "FileSize": track.get("FileSize", {}), + "Duration": track.get("Duration", {}), + "OverallBitRate": track.get("OverallBitRate", {}), + "FrameRate": track.get("FrameRate", {}), + "FrameCount": track.get("FrameCount", {}), + "StreamSize": track.get("StreamSize", {}), + "IsStreamable": track.get("IsStreamable", {}), + "File_Created_Date": track.get("File_Created_Date", {}), + "File_Created_Date_Local": track.get("File_Created_Date_Local", {}), + "File_Modified_Date": track.get("File_Modified_Date", {}), + "File_Modified_Date_Local": track.get("File_Modified_Date_Local", {}), + "Encoded_Application": track.get("Encoded_Application", {}), + "Encoded_Library": track.get("Encoded_Library", {}), }) elif track["@type"] == "Video": filtered["media"]["track"].append({ "@type": track["@type"], - "StreamOrder": track.get("StreamOrder"), - "ID": track.get("ID"), - "UniqueID": track.get("UniqueID"), - "Format": track.get("Format"), - "Format_Profile": track.get("Format_Profile"), - "Format_Version": track.get("Format_Version"), - "Format_Level": track.get("Format_Level"), - "Format_Tier": track.get("Format_Tier"), - "HDR_Format": track.get("HDR_Format"), - "HDR_Format_Version": track.get("HDR_Format_Version"), - 
"HDR_Format_String": track.get("HDR_Format_String"), - "HDR_Format_Profile": track.get("HDR_Format_Profile"), - "HDR_Format_Level": track.get("HDR_Format_Level"), - "HDR_Format_Settings": track.get("HDR_Format_Settings"), - "HDR_Format_Compression": track.get("HDR_Format_Compression"), - "HDR_Format_Compatibility": track.get("HDR_Format_Compatibility"), - "CodecID": track.get("CodecID"), - "CodecID_Hint": track.get("CodecID_Hint"), - "Duration": track.get("Duration"), - "BitRate": track.get("BitRate"), - "Width": track.get("Width"), - "Height": track.get("Height"), - "Stored_Height": track.get("Stored_Height"), - "Sampled_Width": track.get("Sampled_Width"), - "Sampled_Height": track.get("Sampled_Height"), - "PixelAspectRatio": track.get("PixelAspectRatio"), - "DisplayAspectRatio": track.get("DisplayAspectRatio"), - "FrameRate_Mode": track.get("FrameRate_Mode"), - "FrameRate": track.get("FrameRate"), - "FrameRate_Num": track.get("FrameRate_Num"), - "FrameRate_Den": track.get("FrameRate_Den"), - "FrameCount": track.get("FrameCount"), - "ColorSpace": track.get("ColorSpace"), - "ChromaSubsampling": track.get("ChromaSubsampling"), - "ChromaSubsampling_Position": track.get("ChromaSubsampling_Position"), - "BitDepth": track.get("BitDepth"), - "Delay": track.get("Delay"), - "Delay_Source": track.get("Delay_Source"), - "StreamSize": track.get("StreamSize"), - "Language": track.get("Language"), - "Default": track.get("Default"), - "Forced": track.get("Forced"), - "colour_description_present": track.get("colour_description_present"), - "colour_description_present_Source": track.get("colour_description_present_Source"), - "colour_range": track.get("colour_range"), - "colour_range_Source": track.get("colour_range_Source"), - "colour_primaries": track.get("colour_primaries"), - "colour_primaries_Source": track.get("colour_primaries_Source"), - "transfer_characteristics": track.get("transfer_characteristics"), - "transfer_characteristics_Source": track.get("transfer_characteristics_Source"), - "transfer_characteristics_Original": track.get("transfer_characteristics_Original"), - "matrix_coefficients": track.get("matrix_coefficients"), - "matrix_coefficients_Source": track.get("matrix_coefficients_Source"), - "MasteringDisplay_ColorPrimaries": track.get("MasteringDisplay_ColorPrimaries"), - "MasteringDisplay_ColorPrimaries_Source": track.get("MasteringDisplay_ColorPrimaries_Source"), - "MasteringDisplay_Luminance": track.get("MasteringDisplay_Luminance"), - "MasteringDisplay_Luminance_Source": track.get("MasteringDisplay_Luminance_Source"), - "MaxCLL": track.get("MaxCLL"), - "MaxCLL_Source": track.get("MaxCLL_Source"), - "MaxFALL": track.get("MaxFALL"), - "MaxFALL_Source": track.get("MaxFALL_Source"), + "StreamOrder": track.get("StreamOrder", {}), + "ID": track.get("ID", {}), + "UniqueID": track.get("UniqueID", {}), + "Format": track.get("Format", {}), + "Format_Profile": track.get("Format_Profile", {}), + "Format_Version": track.get("Format_Version", {}), + "Format_Level": track.get("Format_Level", {}), + "Format_Tier": track.get("Format_Tier", {}), + "HDR_Format": track.get("HDR_Format", {}), + "HDR_Format_Version": track.get("HDR_Format_Version", {}), + "HDR_Format_String": track.get("HDR_Format_String", {}), + "HDR_Format_Profile": track.get("HDR_Format_Profile", {}), + "HDR_Format_Level": track.get("HDR_Format_Level", {}), + "HDR_Format_Settings": track.get("HDR_Format_Settings", {}), + "HDR_Format_Compression": track.get("HDR_Format_Compression", {}), + "HDR_Format_Compatibility": 
track.get("HDR_Format_Compatibility", {}), + "CodecID": track.get("CodecID", {}), + "CodecID_Hint": track.get("CodecID_Hint", {}), + "Duration": track.get("Duration", {}), + "BitRate": track.get("BitRate", {}), + "Width": track.get("Width", {}), + "Height": track.get("Height", {}), + "Stored_Height": track.get("Stored_Height", {}), + "Sampled_Width": track.get("Sampled_Width", {}), + "Sampled_Height": track.get("Sampled_Height", {}), + "PixelAspectRatio": track.get("PixelAspectRatio", {}), + "DisplayAspectRatio": track.get("DisplayAspectRatio", {}), + "FrameRate_Mode": track.get("FrameRate_Mode", {}), + "FrameRate": track.get("FrameRate", {}), + "FrameRate_Num": track.get("FrameRate_Num", {}), + "FrameRate_Den": track.get("FrameRate_Den", {}), + "FrameCount": track.get("FrameCount", {}), + "ColorSpace": track.get("ColorSpace", {}), + "ChromaSubsampling": track.get("ChromaSubsampling", {}), + "ChromaSubsampling_Position": track.get("ChromaSubsampling_Position", {}), + "BitDepth": track.get("BitDepth", {}), + "Delay": track.get("Delay", {}), + "Delay_Source": track.get("Delay_Source", {}), + "StreamSize": track.get("StreamSize", {}), + "Language": track.get("Language", {}), + "Default": track.get("Default", {}), + "Forced": track.get("Forced", {}), + "colour_description_present": track.get("colour_description_present", {}), + "colour_description_present_Source": track.get("colour_description_present_Source", {}), + "colour_range": track.get("colour_range", {}), + "colour_range_Source": track.get("colour_range_Source", {}), + "colour_primaries": track.get("colour_primaries", {}), + "colour_primaries_Source": track.get("colour_primaries_Source", {}), + "transfer_characteristics": track.get("transfer_characteristics", {}), + "transfer_characteristics_Source": track.get("transfer_characteristics_Source", {}), + "transfer_characteristics_Original": track.get("transfer_characteristics_Original", {}), + "matrix_coefficients": track.get("matrix_coefficients", {}), + "matrix_coefficients_Source": track.get("matrix_coefficients_Source", {}), + "MasteringDisplay_ColorPrimaries": track.get("MasteringDisplay_ColorPrimaries", {}), + "MasteringDisplay_ColorPrimaries_Source": track.get("MasteringDisplay_ColorPrimaries_Source", {}), + "MasteringDisplay_Luminance": track.get("MasteringDisplay_Luminance", {}), + "MasteringDisplay_Luminance_Source": track.get("MasteringDisplay_Luminance_Source", {}), + "MaxCLL": track.get("MaxCLL", {}), + "MaxCLL_Source": track.get("MaxCLL_Source", {}), + "MaxFALL": track.get("MaxFALL", {}), + "MaxFALL_Source": track.get("MaxFALL_Source", {}), }) elif track["@type"] == "Audio": filtered["media"]["track"].append({ "@type": track["@type"], - "StreamOrder": track.get("StreamOrder"), - "ID": track.get("ID"), - "UniqueID": track.get("UniqueID"), - "Format": track.get("Format"), - "Format_Commercial_IfAny": track.get("Format_Commercial_IfAny"), - "Format_Settings_Endianness": track.get("Format_Settings_Endianness"), - "Format_AdditionalFeatures": track.get("Format_AdditionalFeatures"), - "CodecID": track.get("CodecID"), - "Duration": track.get("Duration"), - "BitRate_Mode": track.get("BitRate_Mode"), - "BitRate": track.get("BitRate"), - "Channels": track.get("Channels"), - "ChannelPositions": track.get("ChannelPositions"), - "ChannelLayout": track.get("ChannelLayout"), - "Channels_Original": track.get("Channels_Original"), - "ChannelLayout_Original": track.get("ChannelLayout_Original"), - "SamplesPerFrame": track.get("SamplesPerFrame"), - "SamplingRate": track.get("SamplingRate"), - 
"SamplingCount": track.get("SamplingCount"), - "FrameRate": track.get("FrameRate"), - "FrameCount": track.get("FrameCount"), - "Compression_Mode": track.get("Compression_Mode"), - "Delay": track.get("Delay"), - "Delay_Source": track.get("Delay_Source"), - "Video_Delay": track.get("Video_Delay"), - "StreamSize": track.get("StreamSize"), - "Title": track.get("Title"), - "Language": track.get("Language"), - "ServiceKind": track.get("ServiceKind"), - "Default": track.get("Default"), - "Forced": track.get("Forced"), - "extra": track.get("extra"), + "StreamOrder": track.get("StreamOrder", {}), + "ID": track.get("ID", {}), + "UniqueID": track.get("UniqueID", {}), + "Format": track.get("Format", {}), + "Format_Commercial_IfAny": track.get("Format_Commercial_IfAny", {}), + "Format_Settings_Endianness": track.get("Format_Settings_Endianness", {}), + "Format_AdditionalFeatures": track.get("Format_AdditionalFeatures", {}), + "CodecID": track.get("CodecID", {}), + "Duration": track.get("Duration", {}), + "BitRate_Mode": track.get("BitRate_Mode", {}), + "BitRate": track.get("BitRate", {}), + "Channels": track.get("Channels", {}), + "ChannelPositions": track.get("ChannelPositions", {}), + "ChannelLayout": track.get("ChannelLayout", {}), + "Channels_Original": track.get("Channels_Original", {}), + "ChannelLayout_Original": track.get("ChannelLayout_Original", {}), + "SamplesPerFrame": track.get("SamplesPerFrame", {}), + "SamplingRate": track.get("SamplingRate", {}), + "SamplingCount": track.get("SamplingCount", {}), + "FrameRate": track.get("FrameRate", {}), + "FrameCount": track.get("FrameCount", {}), + "Compression_Mode": track.get("Compression_Mode", {}), + "Delay": track.get("Delay", {}), + "Delay_Source": track.get("Delay_Source", {}), + "Video_Delay": track.get("Video_Delay", {}), + "StreamSize": track.get("StreamSize", {}), + "Title": track.get("Title", {}), + "Language": track.get("Language", {}), + "ServiceKind": track.get("ServiceKind", {}), + "Default": track.get("Default", {}), + "Forced": track.get("Forced", {}), + "extra": track.get("extra", {}), }) elif track["@type"] == "Text": filtered["media"]["track"].append({ "@type": track["@type"], - "@typeorder": track.get("@typeorder"), - "StreamOrder": track.get("StreamOrder"), - "ID": track.get("ID"), - "UniqueID": track.get("UniqueID"), - "Format": track.get("Format"), - "CodecID": track.get("CodecID"), - "Duration": track.get("Duration"), - "BitRate": track.get("BitRate"), - "FrameRate": track.get("FrameRate"), - "FrameCount": track.get("FrameCount"), - "ElementCount": track.get("ElementCount"), - "StreamSize": track.get("StreamSize"), - "Title": track.get("Title"), - "Language": track.get("Language"), - "Default": track.get("Default"), - "Forced": track.get("Forced"), + "@typeorder": track.get("@typeorder", {}), + "StreamOrder": track.get("StreamOrder", {}), + "ID": track.get("ID", {}), + "UniqueID": track.get("UniqueID", {}), + "Format": track.get("Format", {}), + "CodecID": track.get("CodecID", {}), + "Duration": track.get("Duration", {}), + "BitRate": track.get("BitRate", {}), + "FrameRate": track.get("FrameRate", {}), + "FrameCount": track.get("FrameCount", {}), + "ElementCount": track.get("ElementCount", {}), + "StreamSize": track.get("StreamSize", {}), + "Title": track.get("Title", {}), + "Language": track.get("Language", {}), + "Default": track.get("Default", {}), + "Forced": track.get("Forced", {}), }) elif track["@type"] == "Menu": filtered["media"]["track"].append({ "@type": track["@type"], - "extra": track.get("extra"), + "extra": 
track.get("extra", {}), }) return filtered From 6f1bc03f6bdc093aa8cec6d777be1feda0c997a6 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 15 Oct 2024 08:52:31 +1000 Subject: [PATCH 300/741] Strings or dicts or dicks and stings String here, dict over there --- src/prep.py | 38 ++++++++++++-------------------------- 1 file changed, 12 insertions(+), 26 deletions(-) diff --git a/src/prep.py b/src/prep.py index d97045b02..077cdaeff 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1945,18 +1945,14 @@ def get_audio_v2(self, mi, meta, bdinfo): if format_settings in ['Explicit']: format_settings = "" # Channels - channels = mi['media']['track'][track_num].get('Channels_Original', mi['media']['track'][track_num]['Channels']) + channels = track.get('Channels_Original', track.get('Channels')) if not str(channels).isnumeric(): - channels = mi['media']['track'][track_num]['Channels'] + channels = track.get('Channels') try: - channel_layout = mi['media']['track'][track_num]['ChannelLayout'] + channel_layout = track.get('ChannelLayout', '') except Exception: - try: - channel_layout = mi['media']['track'][track_num]['ChannelLayout_Original'] - except Exception: - channel_layout = "" + channel_layout = track.get('ChannelLayout_Original', '') - # Ensure channel_layout is not None or an empty string before iterating if channel_layout and "LFE" in channel_layout: chan = f"{int(channels) - 1}.1" elif channel_layout == "": @@ -1967,35 +1963,30 @@ def get_audio_v2(self, mi, meta, bdinfo): else: chan = f"{channels}.0" - if meta.get('dual_audio', False): # If dual_audio flag is set, skip other checks + if meta.get('dual_audio', False): dual = "Dual-Audio" else: if meta.get('original_language', '') != 'en': eng, orig = False, False try: - for t in mi.get('media', {}).get('track', []): + for t in tracks: if t.get('@type') != "Audio": continue audio_language = t.get('Language', '') if isinstance(audio_language, str): - # Check for English Language Track if audio_language.startswith("en") and "commentary" not in (t.get('Title') or '').lower(): eng = True - # Check for original Language Track if not audio_language.startswith("en") and audio_language.startswith(meta['original_language']) and "commentary" not in (t.get('Title') or '').lower(): orig = True - # Catch Chinese / Norwegian Variants variants = ['zh', 'cn', 'cmn', 'no', 'nb'] if any(audio_language.startswith(var) for var in variants) and any(meta['original_language'].startswith(var) for var in variants): orig = True - # Only proceed if `audio_language` is valid after previous checks if isinstance(audio_language, str) and audio_language and audio_language != meta['original_language'] and not audio_language.startswith("en"): - # If audio_language is empty, set to 'und' (undefined) audio_language = "und" if audio_language == "" else audio_language console.print(f"[bold red]This release has a(n) {audio_language} audio track, and may be considered bloated") time.sleep(5) @@ -2008,7 +1999,7 @@ def get_audio_v2(self, mi, meta, bdinfo): console.print(traceback.format_exc()) pass - for t in mi.get('media', {}).get('track', []): + for t in tracks: if t.get('@type') != "Audio": continue @@ -2017,7 +2008,6 @@ def get_audio_v2(self, mi, meta, bdinfo): # Convert commercial name to naming conventions audio = { - # Format "DTS": "DTS", "AAC": "AAC", "AAC LC": "AAC", @@ -2028,12 +2018,9 @@ def get_audio_v2(self, mi, meta, bdinfo): "Opus": "Opus", "Vorbis": "VORBIS", "PCM": "LPCM", - - # BDINFO AUDIOS "LPCM Audio": "LPCM", "Dolby Digital Audio": "DD", "Dolby Digital Plus Audio": 
"DD+", - # "Dolby TrueHD" : "TrueHD", "Dolby TrueHD Audio": "TrueHD", "DTS Audio": "DTS", "DTS-HD Master Audio": "DTS-HD MA", @@ -2065,7 +2052,10 @@ def get_audio_v2(self, mi, meta, bdinfo): } search_format = True - # Ensure commercial and additional are not None before iterating + + if isinstance(additional, dict): + additional = "" # Set empty string if additional is a dictionary + if commercial: for key, value in commercial_names.items(): if key in commercial: @@ -2078,26 +2068,22 @@ def get_audio_v2(self, mi, meta, bdinfo): codec = audio.get(format, "") + audio_extra.get(additional, "") extra = format_extra.get(additional, "") - # Ensure format_settings is not None before looking it up format_settings = format_settings_extra.get(format_settings, "") if format_settings == "EX" and chan == "5.1": format_settings = "EX" else: format_settings = "" - # Ensure codec is not left empty if codec == "": codec = format - # Ensure additional and channels are not None before using them if format.startswith("DTS"): if additional and additional.endswith("X"): codec = "DTS:X" chan = f"{int(channels) - 1}.1" if format == "MPEG Audio": - codec = mi['media']['track'][2].get('CodecID_Hint', '') + codec = track.get('CodecID_Hint', '') - # Ensure audio is constructed properly even with potential None values audio = f"{dual} {codec or ''} {format_settings or ''} {chan or ''}{extra or ''}" audio = ' '.join(audio.split()) return audio, chan, has_commentary From 9dadade8f3d2047aa52463c01fc1ff804f9038cd Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 15 Oct 2024 10:19:45 +1000 Subject: [PATCH 301/741] RF tag fix --- src/trackers/RF.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/trackers/RF.py b/src/trackers/RF.py index 6404f9117..5b9e6b831 100644 --- a/src/trackers/RF.py +++ b/src/trackers/RF.py @@ -109,9 +109,10 @@ async def edit_name(self, meta): rf_name = meta['name'] tag_lower = meta['tag'].lower() invalid_tags = ["nogrp", "nogroup", "unknown", "-unk-"] + if meta['tag'] == "" or any(invalid_tag in tag_lower for invalid_tag in invalid_tags): for invalid_tag in invalid_tags: - rf_name = rf_name.replace(f"-{invalid_tag}", "", flags=re.IGNORECASE) + rf_name = re.sub(f"-{invalid_tag}", "", rf_name, flags=re.IGNORECASE) rf_name = f"{rf_name}-NoGroup" return rf_name From 8a53fcac948bf96303c31f9f394e71b7384c8ee7 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 15 Oct 2024 10:29:49 +1000 Subject: [PATCH 302/741] MTV explicitly set piece size Missed earlier since the default/starting piece size in the prep function was 8 MiB. 
---
 src/trackers/MTV.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py
index 5288abcce..012f4d82a 100644
--- a/src/trackers/MTV.py
+++ b/src/trackers/MTV.py
@@ -139,6 +139,7 @@ def enforce_size_limit(image_list, image_sizes):
             )
 
             # Validate and write the new torrent
+            new_torrent.piece_size = 8 * 1024 * 1024
             new_torrent.validate_piece_size()
             new_torrent.generate(callback=prep.torf_cb, interval=5)
             new_torrent.write(f"{meta['base_dir']}/tmp/{meta['uuid']}/MTV.torrent", overwrite=True)

From 97a77de02f5cd00dc8a74b07873757170825e7f4 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Tue, 15 Oct 2024 11:01:54 +1000
Subject: [PATCH 303/741] Don't validate disc torrents that have top folder modified

---
 src/clients.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/clients.py b/src/clients.py
index 1b97da004..374bab9c9 100644
--- a/src/clients.py
+++ b/src/clients.py
@@ -123,6 +123,10 @@ async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client
         # Reuse if disc and basename matches or --keep-folder was specified
         if meta.get('is_disc', None) is not None or (meta['keep_folder'] and meta['isdir']):
+            torrent_name = torrent.metainfo['info']['name']
+            if meta['uuid'] != torrent_name:
+                console.print("Modified file structure, skipping hash")
+                valid = False
             torrent_filepath = os.path.commonpath(torrent.files)
             if os.path.basename(meta['path']) in torrent_filepath:
                 valid = True

From be5010bc9fdf8faecc82f6c45cd0ce8d0397a615 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Tue, 15 Oct 2024 12:54:03 +1000
Subject: [PATCH 304/741] MTV clarify error feedback

Unlike the upload form on site, which gives feedback on error, the site
forcibly redirects to the home page on error, preventing any meaningful
opportunity to ascertain the problem.

---
 src/trackers/COMMON.py | 2 +-
 src/trackers/MTV.py    | 5 +++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py
index c0fe369e5..3e4d93501 100644
--- a/src/trackers/COMMON.py
+++ b/src/trackers/COMMON.py
@@ -73,7 +73,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des
             for each in range(len(images[:int(meta['screens'])])):
                 web_url = images[each]['web_url']
                 raw_url = images[each]['raw_url']
-                descfile.write(f"[url={web_url}][img=350]{raw_url}[/img][/url]")
+                descfile.write(f"[url={web_url}][img=350]{raw_url}[/img][/url] ")
             descfile.write("[/center]")
 
         if signature is not None:

diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py
index 012f4d82a..e9fc7c6b9 100644
--- a/src/trackers/MTV.py
+++ b/src/trackers/MTV.py
@@ -139,7 +139,7 @@ def enforce_size_limit(image_list, image_sizes):
             )
 
             # Validate and write the new torrent
-            new_torrent.piece_size = 8 * 1024 * 1024 
+            new_torrent.piece_size = 8 * 1024 * 1024
             new_torrent.validate_piece_size()
             new_torrent.generate(callback=prep.torf_cb, interval=5)
             new_torrent.write(f"{meta['base_dir']}/tmp/{meta['uuid']}/MTV.torrent", overwrite=True)
@@ -205,7 +205,8 @@ def enforce_size_limit(image_list, image_sizes):
                 console.print("[red]No DL link in response, It may have uploaded, check manually.")
             else:
                 console.print("[red]Upload Failed. Either you are not logged in......")
-                console.print("[red]or you are hitting this site bug: https://www.morethantv.me/forum/thread/3338?")
+                console.print("[red]You are hitting this site bug: https://www.morethantv.me/forum/thread/3338?")
+                console.print("[red]Or you hit some other error with the torrent upload.")
         except Exception:
             console.print("[red]It may have uploaded, check manually.")
             print(traceback.print_exc())

From d6467ce4ecc6e8f76635aa56e4f0d4bb662de65a Mon Sep 17 00:00:00 2001
From: Audionut
Date: Thu, 17 Oct 2024 07:42:00 +1000
Subject: [PATCH 305/741] Argument to ensure no edition

---
 src/args.py |  1 +
 src/prep.py | 11 +++++++----
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/src/args.py b/src/args.py
index 117a38daf..82a175f89 100644
--- a/src/args.py
+++ b/src/args.py
@@ -40,6 +40,7 @@ def parse(self, args, meta):
         parser.add_argument('--no-aka', dest='no_aka', action='store_true', required=False, help="Remove AKA from title")
         parser.add_argument('--no-dub', dest='no_dub', action='store_true', required=False, help="Remove Dubbed from title")
         parser.add_argument('--no-tag', dest='no_tag', action='store_true', required=False, help="Remove Group Tag from title")
+        parser.add_argument('--no-edition', dest='no_edition', action='store_true', required=False, help="Remove Edition from title")
         parser.add_argument('--dual-audio', dest='dual_audio', action='store_true', required=False, help="Add Dual-Audio to the title")
         parser.add_argument('-ns', '--no-seed', action='store_true', required=False, help="Do not add torrent to the client")
         parser.add_argument('-year', '--year', dest='manual_year', nargs='?', required=False, help="Year", type=int, default=0)

diff --git a/src/prep.py b/src/prep.py
index 077cdaeff..3e6da8751 100644
--- a/src/prep.py
+++ b/src/prep.py
@@ -677,10 +677,13 @@ async def gather_prep(self, meta, mode):
         else:
             meta['video_encode'], meta['video_codec'], meta['has_encode_settings'], meta['bit_depth'] = self.get_video_encode(mi, meta['type'], bdinfo)
 
-        meta['edition'], meta['repack'] = self.get_edition(meta['path'], bdinfo, meta['filelist'], meta.get('manual_edition'))
-        if "REPACK" in meta.get('edition', ""):
-            meta['repack'] = re.search(r"REPACK[\d]?", meta['edition'])[0]
-            meta['edition'] = re.sub(r"REPACK[\d]?", "", meta['edition']).strip().replace('  ', ' ')
+        if meta.get('no_edition') is False:
+            meta['edition'], meta['repack'] = self.get_edition(meta['path'], bdinfo, meta['filelist'], meta.get('manual_edition'))
+            if "REPACK" in meta.get('edition', ""):
+                meta['repack'] = re.search(r"REPACK[\d]?", meta['edition'])[0]
+                meta['edition'] = re.sub(r"REPACK[\d]?", "", meta['edition']).strip().replace('  ', ' ')
+        else:
+            meta['edition'] = ""
 
         # WORK ON THIS
         meta.get('stream', False)

From b4a04eda23c6569a0c6ab484598ebfee016624f9 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Sat, 19 Oct 2024 13:49:35 +1000
Subject: [PATCH 306/741] Specify manual frames

-mf "1234,10123,25654,30548"

Respects the number of screens specified in the config. Will take random
frames to reach the required number of frames if not enough manual frames
are specified.

Only works with single files, not discs.
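The mapping from a manual frame number to a seek point is just frame / frame_rate. A rough standalone sketch of the idea (assuming ffmpeg-python; the filename and hard-coded frame rate are illustrative only, the real code reads the rate from MediaInfo):

    import ffmpeg

    frame_rate = 23.976
    manual_frames = [1234, 10123]

    for i, frame in enumerate(manual_frames):
        seek_seconds = frame / frame_rate  # frame number -> timestamp in seconds
        (
            ffmpeg
            .input("input.mkv", ss=seek_seconds)  # seek before decoding
            .output(f"screen-{i}.png", vframes=1, pix_fmt="rgb24")
            .overwrite_output()
            .run(quiet=True)
        )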
--- src/args.py | 15 +++- src/prep.py | 231 +++++++++++++++++++++++++++++----------------------- 2 files changed, 141 insertions(+), 105 deletions(-) diff --git a/src/args.py b/src/args.py index 82a175f89..51264fdbc 100644 --- a/src/args.py +++ b/src/args.py @@ -3,6 +3,7 @@ import urllib.parse import os import datetime +import sys from src.console import console @@ -21,6 +22,7 @@ def parse(self, args, meta): parser.add_argument('path', nargs='*', help="Path to file/directory") parser.add_argument('-s', '--screens', nargs='*', required=False, help="Number of screenshots", default=int(self.config['DEFAULT']['screens'])) + parser.add_argument('-mf', '--manual_frames', required=False, help="Comma-separated frame numbers to use as screenshots", type=str, default=None) parser.add_argument('-c', '--category', nargs='*', required=False, help="Category [MOVIE, TV, FANRES]", choices=['movie', 'tv', 'fanres']) parser.add_argument('-t', '--type', nargs='*', required=False, help="Type [DISC, REMUX, ENCODE, WEBDL, WEBRIP, HDTV]", choices=['disc', 'remux', 'encode', 'webdl', 'web-dl', 'webrip', 'hdtv']) parser.add_argument('--source', nargs='*', required=False, help="Source [Blu-ray, BluRay, DVD, HDDVD, WEB, HDTV, UHDTV]", choices=['Blu-ray', 'BluRay', 'DVD', 'HDDVD', 'WEB', 'HDTV', 'UHDTV'], dest="manual_source") @@ -31,7 +33,7 @@ def parse(self, args, meta): parser.add_argument('-g', '--tag', nargs='*', required=False, help="Group Tag", type=str) parser.add_argument('-serv', '--service', nargs='*', required=False, help="Streaming Service", type=str) parser.add_argument('-dist', '--distributor', nargs='*', required=False, help="Disc Distributor e.g.(Criterion, BFI, etc.)", type=str) - parser.add_argument('-edition', '--edition', '--repack', nargs='*', required=False, help="Edition/Repack String e.g.(Director's Cut, Uncut, Hybrid, REPACK, REPACK3)", type=str, dest='manual_edition', default="") + parser.add_argument('-edition', '--edition', '--repack', nargs='*', required=False, help="Edition/Repack String e.g.(Director's Cut, Uncut, Hybrid, REPACK, REPACK3)", type=str, dest='manual_edition', default=None) parser.add_argument('-season', '--season', nargs='*', required=False, help="Season (number)", type=str) parser.add_argument('-episode', '--episode', nargs='*', required=False, help="Episode (number)", type=str) parser.add_argument('-daily', '--daily', nargs=1, required=False, help="Air date of this episode (YYYY-MM-DD)", type=datetime.date.fromisoformat, dest="manual_date") @@ -94,6 +96,17 @@ def parse(self, args, meta): args, before_args = parser.parse_known_args(input) args = vars(args) # console.print(args) + if meta.get('manual_frames') is not None: + try: + # Join the list into a single string, split by commas, and convert to integers + meta['manual_frames'] = [int(time.strip()) for time in meta['manual_frames'].split(',')] + # console.print(f"Processed manual_frames: {meta['manual_frames']}") + except ValueError: + console.print("[red]Invalid format for manual_frames. 
Please provide a comma-separated list of integers.") + console.print(f"Processed manual_frames: {meta['manual_frames']}") + sys.exit(1) + else: + meta['manual_frames'] = None # Explicitly set it to None if not provided if len(before_args) >= 1 and not os.path.exists(' '.join(args['path'])): for each in before_args: args['path'].append(each) diff --git a/src/prep.py b/src/prep.py index 3e6da8751..511fe890a 100644 --- a/src/prep.py +++ b/src/prep.py @@ -585,6 +585,7 @@ async def gather_prep(self, meta, mode): else: console.print("Skipping existing search as meta already populated") + manual_frames = meta['manual_frames'] # Take Screenshots if meta['is_disc'] == "BDMV": if meta.get('edit', False) is False: @@ -611,7 +612,11 @@ async def gather_prep(self, meta, mode): else: if meta.get('edit', False) is False: try: - s = multiprocessing.Process(target=self.screenshots, args=(videopath, filename, meta['uuid'], base_dir, meta)) + s = multiprocessing.Process( + target=self.screenshots, + args=(videopath, filename, meta['uuid'], base_dir, meta), # Positional arguments + kwargs={'manual_frames': manual_frames} # Keyword argument + ) s.start() while s.is_alive() is True: await asyncio.sleep(3) @@ -676,7 +681,6 @@ async def gather_prep(self, meta, mode): meta['video_codec'] = self.get_video_codec(bdinfo) else: meta['video_encode'], meta['video_codec'], meta['has_encode_settings'], meta['bit_depth'] = self.get_video_encode(mi, meta['type'], bdinfo) - if meta.get('no_edition') is False: meta['edition'], meta['repack'] = self.get_edition(meta['path'], bdinfo, meta['filelist'], meta.get('manual_edition')) if "REPACK" in meta.get('edition', ""): @@ -1353,20 +1357,16 @@ def _is_vob_good(n, loops, num_screens): if smallest is not None: os.remove(smallest) - def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=None, force_screenshots=False): - # Ensure the image list is initialized and preserve existing images + def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=None, force_screenshots=False, manual_frames=None): if 'image_list' not in meta: meta['image_list'] = [] - # Check if there are already at least 3 image links in the image list existing_images = [img for img in meta['image_list'] if isinstance(img, dict) and img.get('img_url', '').startswith('http')] - # Skip taking screenshots if there are already 3 images and force_screenshots is False if len(existing_images) >= 3 and not force_screenshots: console.print("[yellow]There are already at least 3 images in the image list. 
Skipping additional screenshots.") return - # Determine the number of screenshots to take if num_screens is None: num_screens = self.screens - len(existing_images) if num_screens <= 0: @@ -1380,6 +1380,7 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non height = float(video_track.get('Height')) par = float(video_track.get('PixelAspectRatio', 1)) dar = float(video_track.get('DisplayAspectRatio')) + frame_rate = float(video_track.get('FrameRate', 24.0)) if par == 1: sar = w_sar = h_sar = 1 @@ -1394,106 +1395,128 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non length = round(float(length)) os.chdir(f"{base_dir}/tmp/{folder_id}") i = 0 - if len(glob.glob(f"{filename}-*.png")) >= num_screens: - i = num_screens - console.print('[bold green]Reusing screenshots') - else: - loglevel = 'quiet' - debug = True - if bool(meta.get('ffdebug', False)) is True: - loglevel = 'verbose' - debug = False - if meta.get('vapoursynth', False) is True: - from src.vs import vs_screengn - vs_screengn(source=path, encode=None, filter_b_frames=False, num=num_screens, dir=f"{base_dir}/tmp/{folder_id}/") + + loglevel = 'quiet' + debug = True + if bool(meta.get('ffdebug', False)) is True: + loglevel = 'verbose' + debug = False + + retake = False + with Progress( + TextColumn("[bold green]Saving Screens..."), + BarColumn(), + "[cyan]{task.completed}/{task.total}", + TimeRemainingColumn() + ) as progress: + ss_times = [] + screen_task = progress.add_task("[green]Saving Screens...", total=num_screens) + + if manual_frames: + if isinstance(manual_frames, str): + manual_frames = [frame.strip() for frame in manual_frames.split(',') if frame.strip().isdigit()] + elif isinstance(manual_frames, list): + manual_frames = [frame for frame in manual_frames if isinstance(frame, int) or frame.isdigit()] + + # Convert to integers + manual_frames = [int(frame) for frame in manual_frames] + ss_times = [frame / frame_rate for frame in manual_frames] + + # If not enough manual frames, fill in with random frames + if len(ss_times) < num_screens: + console.print(f"[yellow]Not enough manual frames provided. 
Using random frames for remaining {num_screens - len(ss_times)} screenshots.") + random_times = self.valid_ss_time(ss_times, num_screens - len(ss_times), length) + ss_times.extend(random_times) + else: - retake = False - with Progress( - TextColumn("[bold green]Saving Screens..."), - BarColumn(), - "[cyan]{task.completed}/{task.total}", - TimeRemainingColumn() - ) as progress: - ss_times = [] - screen_task = progress.add_task("[green]Saving Screens...", total=num_screens + 1) - for i in range(num_screens + 1): - image_path = os.path.abspath(f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png") - if not os.path.exists(image_path) or retake is not False: - retake = False - try: - ss_times = self.valid_ss_time(ss_times, num_screens + 1, length) - ff = ffmpeg.input(path, ss=ss_times[-1]) - if w_sar != 1 or h_sar != 1: - ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) - ( - ff - .output(image_path, vframes=1, pix_fmt="rgb24") - .overwrite_output() - .global_args('-loglevel', loglevel) - .run(quiet=debug) - ) - except (KeyboardInterrupt, Exception): - sys.exit(1) - - self.optimize_images(image_path) - if os.path.getsize(Path(image_path)) <= 75000: - console.print("[yellow]Image is incredibly small, retaking") - retake = True - time.sleep(1) - if os.path.getsize(Path(image_path)) <= 31000000 and self.img_host == "imgbb" and retake is False: - i += 1 - elif os.path.getsize(Path(image_path)) <= 10000000 and self.img_host in ["imgbox", 'pixhost'] and retake is False: - i += 1 - elif self.img_host in ["ptpimg", "lensdump", "ptscreens", "oeimg"] and retake is False: - i += 1 - elif self.img_host == "freeimage.host": - console.print("[bold red]Support for freeimage.host has been removed. Please remove from your config") - exit() - elif retake is True: - pass - else: - console.print("[red]Image too large for your image host, retaking") - retake = True - time.sleep(1) - else: - i += 1 - progress.advance(screen_task) - - # Add new images to the meta['image_list'] as dictionaries - new_images = glob.glob(f"{filename}-*.png") - for image in new_images: - img_dict = { - 'img_url': image, - 'raw_url': image, - 'web_url': image # Assuming local path, but you might need to update this if uploading - } - meta['image_list'].append(img_dict) + # No manual frames provided, generate random times + # console.print("[yellow]No manual frames provided. 
Generating random frames.") + ss_times = self.valid_ss_time(ss_times, num_screens, length) - # Remove the smallest image if there are more than needed - if len(meta['image_list']) > self.screens: - local_images = [img for img in meta['image_list'] if not img['img_url'].startswith('http')] + for i in range(num_screens): + image_path = os.path.abspath(f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png") + if not os.path.exists(image_path) or retake is not False: + retake = False + try: + # console.print(f"Taking screenshot at time (s): {ss_times[i]}") + ff = ffmpeg.input(path, ss=ss_times[i]) - if local_images: - smallest = min(local_images, key=lambda x: os.path.getsize(x['img_url'])) - os.remove(smallest['img_url']) - meta['image_list'].remove(smallest) - else: - console.print("[yellow]No local images found to remove.") + if w_sar != 1 or h_sar != 1: + ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) - def valid_ss_time(self, ss_times, num_screens, length): - valid_time = False - while valid_time is not True: + # console.print(f"Saving screenshot to {image_path}") + ( + ff + .output(image_path, vframes=1, pix_fmt="rgb24") + .overwrite_output() + .global_args('-loglevel', loglevel) + .run(quiet=debug) + ) + + except Exception as e: + console.print(f"[red]Error during screenshot capture: {e}") + sys.exit(1) + + self.optimize_images(image_path) + if not manual_frames: + if os.path.getsize(Path(image_path)) <= 75000: + console.print("[yellow]Image is incredibly small, retaking") + retake = True + time.sleep(1) + if os.path.getsize(Path(image_path)) <= 31000000 and self.img_host == "imgbb" and retake is False: + i += 1 + elif os.path.getsize(Path(image_path)) <= 10000000 and self.img_host in ["imgbox", 'pixhost'] and retake is False: + i += 1 + elif self.img_host in ["ptpimg", "lensdump", "ptscreens", "oeimg"] and retake is False: + i += 1 + elif self.img_host == "freeimage.host": + console.print("[bold red]Support for freeimage.host has been removed. 
Please remove from your config") + exit() + elif retake is True: + pass + else: + console.print("[red]Image too large for your image host, retaking") + retake = True + time.sleep(1) + + progress.advance(screen_task) + + new_images = glob.glob(f"{filename}-*.png") + for image in new_images: + img_dict = { + 'img_url': image, + 'raw_url': image, + 'web_url': image + } + meta['image_list'].append(img_dict) + + if len(meta['image_list']) > self.screens: + local_images = [img for img in meta['image_list'] if not img['img_url'].startswith('http')] + if local_images: + smallest = min(local_images, key=lambda x: os.path.getsize(x['img_url'])) + os.remove(smallest['img_url']) + meta['image_list'].remove(smallest) + else: + console.print("[yellow]No local images found to remove.") + + def valid_ss_time(self, ss_times, num_screens, length, manual_frames=None): + if manual_frames: + ss_times.extend(manual_frames[:num_screens]) # Use only as many as needed + console.print(f"[green]Using provided manual frame numbers for screenshots: {ss_times}") + return ss_times + + # Generate random times if manual frames are not provided + while len(ss_times) < num_screens: valid_time = True - if ss_times != []: - sst = random.randint(round(length / 5), round(length / 2)) - for each in ss_times: - tolerance = length / 10 / num_screens - if abs(sst - each) <= tolerance: - valid_time = False - if valid_time is True: - ss_times.append(sst) - else: - ss_times.append(random.randint(round(length / 5), round(length / 2))) + sst = random.randint(round(length / 5), round(4 * length / 5)) # Adjust range for more spread out times + for each in ss_times: + tolerance = length / 10 / num_screens + if abs(sst - each) <= tolerance: + valid_time = False + break + if valid_time: + ss_times.append(sst) + return ss_times def optimize_images(self, image): @@ -2485,9 +2508,9 @@ def calculate_piece_size(cls, total_size, min_size, max_size, files): if piece_size > our_max_size: piece_size = our_max_size break - elif torrent_file_size > 81920: # Break if .torrent size exceeds 80 KiB + elif torrent_file_size < 81920: # Break if .torrent size less than 80 KiB break - elif torrent_file_size < 2048: # Break if .torrent size less than 2 KiB + elif torrent_file_size > 2048: # Break if .torrent size exceeds 2 KiB break elif torrent_file_size > 102400: piece_size *= 2 From 991e5e18090806e65f071d83610be42a227bc6d4 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 20 Oct 2024 23:22:36 +1000 Subject: [PATCH 307/741] Get permalink Adds a config option to search site API after upload to find the permalink (direct site link) and insert as a comment in the torrent. ie: You client will have the direct page link in the torrent comment field. Needs a good 5 seconds to ensure the API has updated, hence added as an option, default to False. Tracker specific, will add some other trackers later. 
--- data/example-config.py | 4 +++ src/trackers/ACM.py | 40 ++++++++++++++++++++++++++ src/trackers/AITHER.py | 40 ++++++++++++++++++++++++++ src/trackers/AL.py | 40 ++++++++++++++++++++++++++ src/trackers/BLU.py | 40 ++++++++++++++++++++++++++ src/trackers/CBR.py | 40 ++++++++++++++++++++++++++ src/trackers/FNP.py | 40 ++++++++++++++++++++++++++ src/trackers/HP.py | 40 ++++++++++++++++++++++++++ src/trackers/HUNO.py | 40 ++++++++++++++++++++++++++ src/trackers/JPTV.py | 40 ++++++++++++++++++++++++++ src/trackers/LCD.py | 40 ++++++++++++++++++++++++++ src/trackers/LST.py | 40 ++++++++++++++++++++++++++ src/trackers/LT.py | 40 ++++++++++++++++++++++++++ src/trackers/OE.py | 40 ++++++++++++++++++++++++++ src/trackers/OTW.py | 40 ++++++++++++++++++++++++++ src/trackers/PSS.py | 40 ++++++++++++++++++++++++++ src/trackers/R4E.py | 40 ++++++++++++++++++++++++++ src/trackers/RF.py | 40 ++++++++++++++++++++++++++ src/trackers/SHRI.py | 40 ++++++++++++++++++++++++++ src/trackers/STC.py | 40 ++++++++++++++++++++++++++ src/trackers/STT.py | 40 ++++++++++++++++++++++++++ src/trackers/TDC.py | 40 ++++++++++++++++++++++++++ src/trackers/TIK.py | 40 ++++++++++++++++++++++++++ src/trackers/ULCX.py | 40 ++++++++++++++++++++++++++ src/trackers/UNIT3D_TEMPLATE.py | 40 ++++++++++++++++++++++++++ src/trackers/UTP.py | 40 ++++++++++++++++++++++++++ upload.py | 50 +++++++++++++++++++++++++++++++-- 27 files changed, 1052 insertions(+), 2 deletions(-) diff --git a/data/example-config.py b/data/example-config.py index 89d9acbe5..dd9b0d786 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -38,6 +38,10 @@ # Play the bell sound effect when asking for confirmation "sfx_on_prompt": True, + # Run an API search after upload to find the permalink and insert as comment in torrent + # Needs a 5 second wait to ensure the API is updated + "get_permalink": False, + }, "TRACKERS": { diff --git a/src/trackers/ACM.py b/src/trackers/ACM.py index 1970a8e14..71e3d3c32 100644 --- a/src/trackers/ACM.py +++ b/src/trackers/ACM.py @@ -7,6 +7,7 @@ from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console +import bencodepy class ACM(): @@ -374,3 +375,42 @@ async def edit_desc(self, meta): descfile.write(self.signature) descfile.close() return + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 
d58d14286..027fb0a62 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -5,6 +5,7 @@ from str2bool import str2bool import platform import re +import bencodepy from src.trackers.COMMON import COMMON from src.console import console @@ -224,3 +225,42 @@ async def search_existing(self, meta, disctype): await asyncio.sleep(5) return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/AL.py b/src/trackers/AL.py index dcc7b6774..50c0f1d53 100644 --- a/src/trackers/AL.py +++ b/src/trackers/AL.py @@ -4,6 +4,7 @@ import requests import platform from str2bool import str2bool +import bencodepy from src.trackers.COMMON import COMMON from src.console import console @@ -179,3 +180,42 @@ async def search_existing(self, meta, disctype): async def edit_name(self, meta): name = meta['uuid'].replace('.mkv', '').replace('.mp4', '').replace(".", " ").replace("DDP2 0", "DDP2.0").replace("DDP5 1", "DDP5.1").replace("H 264", "x264").replace("H 265", "x265").replace("DD+7 1", "DDP7.1").replace("AAC2 0", "AAC2.0").replace('DD5 1', 'DD5.1').replace('DD2 0', 'DD2.0').replace('TrueHD 7 1', 'TrueHD 7.1').replace('DTS-HD MA 7 1', 'DTS-HD MA 7.1').replace('DTS-HD MA 5 1', 'DTS-HD MA 5.1').replace("TrueHD 5 1", "TrueHD 5.1").replace("DTS-X 7 1", "DTS-X 7.1").replace("DTS-X 5 1", "DTS-X 5.1").replace("FLAC 2 0", "FLAC 2.0").replace("FLAC 2 0", "FLAC 2.0").replace("FLAC 5 1", "FLAC 5.1").replace("DD1 0", "DD1.0").replace("DTS ES 5 1", "DTS ES 5.1").replace("DTS5 1", "DTS 5.1").replace("AAC1 0", "AAC1.0").replace("DD+5 1", "DDP5.1").replace("DD+2 0", "DDP2.0").replace("DD+1 0", "DDP1.0") return name + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + 
torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/BLU.py b/src/trackers/BLU.py index 6ce0dba5d..f88da21bc 100644 --- a/src/trackers/BLU.py +++ b/src/trackers/BLU.py @@ -4,6 +4,7 @@ import requests import platform from str2bool import str2bool +import bencodepy from src.trackers.COMMON import COMMON from src.console import console @@ -218,3 +219,42 @@ async def search_existing(self, meta, disctype): await asyncio.sleep(5) return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/CBR.py b/src/trackers/CBR.py index c090e80af..e7c3c2407 100644 --- a/src/trackers/CBR.py +++ b/src/trackers/CBR.py @@ -4,6 +4,7 @@ import requests from str2bool import str2bool import platform +import bencodepy from src.trackers.COMMON import COMMON from src.console import console @@ -177,3 +178,42 @@ async def edit_name(self, meta): name = meta['uuid'].replace('.mkv', '').replace('.mp4', '').replace(".", " ").replace("DDP2 0", "DDP2.0").replace("DDP5 1", "DDP5.1").replace("H 264", "H.264").replace("H 265", "H.265").replace("DD+7 1", "DDP7.1").replace("AAC2 0", "AAC2.0").replace('DD5 1', 'DD5.1').replace('DD2 0', 'DD2.0').replace('TrueHD 7 1', 'TrueHD 7.1').replace('DTS-HD MA 7 1', 'DTS-HD MA 7.1').replace('DTS-HD MA 5 1', 'DTS-HD MA 5.1').replace("TrueHD 5 1", "TrueHD 5.1").replace("DTS-X 7 1", "DTS-X 7.1").replace("DTS-X 5 1", "DTS-X 5.1").replace("FLAC 2 0", "FLAC 2.0").replace("FLAC 5 1", "FLAC 5.1").replace("DD1 0", "DD1.0").replace("DTS ES 5 1", "DTS ES 5.1").replace("DTS5 1", "DTS 5.1").replace("AAC1 0", "AAC1.0").replace("DD+5 1", "DDP5.1").replace("DD+2 0", "DDP2.0").replace("DD+1 0", "DDP1.0") return name + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, 
params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/FNP.py b/src/trackers/FNP.py index eb6ebaa42..9d9baf50e 100644 --- a/src/trackers/FNP.py +++ b/src/trackers/FNP.py @@ -4,6 +4,7 @@ import requests from str2bool import str2bool import platform +import bencodepy from src.trackers.COMMON import COMMON from src.console import console @@ -166,3 +167,42 @@ async def search_existing(self, meta, disctype): await asyncio.sleep(5) return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/HP.py b/src/trackers/HP.py index 76acbb837..609fa066b 100644 --- a/src/trackers/HP.py +++ b/src/trackers/HP.py @@ -4,6 +4,7 @@ import requests import platform from str2bool import str2bool +import bencodepy from src.trackers.COMMON import COMMON from src.console import console @@ -169,3 +170,42 @@ async def search_existing(self, meta, disctype): await asyncio.sleep(5) return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = 
bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index 558d4b23a..afd3d4633 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -6,6 +6,7 @@ import os import re import platform +import bencodepy from src.trackers.COMMON import COMMON from src.console import console @@ -285,3 +286,42 @@ async def search_existing(self, meta, disctype): await asyncio.sleep(5) return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/JPTV.py b/src/trackers/JPTV.py index 76e8e78f9..fe28e3df7 100644 --- a/src/trackers/JPTV.py +++ b/src/trackers/JPTV.py @@ -4,6 +4,7 @@ import requests import platform from str2bool import str2bool +import bencodepy from src.trackers.COMMON import COMMON from src.console import console @@ -198,3 +199,42 @@ async def edit_name(self, meta): name = name.replace("DD+ ", "DD+") return name + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An 
error occurred during the request: {e}") + return None diff --git a/src/trackers/LCD.py b/src/trackers/LCD.py index 5b7397d3d..d4c0a1d47 100644 --- a/src/trackers/LCD.py +++ b/src/trackers/LCD.py @@ -4,6 +4,7 @@ import requests import platform from str2bool import str2bool +import bencodepy from src.trackers.COMMON import COMMON from src.console import console @@ -179,3 +180,42 @@ async def edit_name(self, meta): name = meta['uuid'].replace('.mkv', '').replace('.mp4', '').replace(".", " ").replace("DDP2 0", "DDP2.0").replace("DDP5 1", "DDP5.1").replace("H 264", "H.264").replace("H 265", "H.264").replace("DD+7 1", "DD+7.1").replace("AAC2 0", "AAC2.0").replace('DD5 1', 'DD5.1').replace('DD2 0', 'DD2.0').replace('TrueHD 7 1', 'TrueHD 7.1').replace('DTS-HD MA 7 1', 'DTS-HD MA 7.1').replace('-C A A', '-C.A.A'), return name + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/LST.py b/src/trackers/LST.py index a9e55e382..bcc54123e 100644 --- a/src/trackers/LST.py +++ b/src/trackers/LST.py @@ -4,6 +4,7 @@ import requests import platform from str2bool import str2bool +import bencodepy from src.trackers.COMMON import COMMON from src.console import console @@ -195,3 +196,42 @@ async def search_existing(self, meta, disctype): await asyncio.sleep(5) return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error 
occurred during the request: {e}") + return None diff --git a/src/trackers/LT.py b/src/trackers/LT.py index c6e0e4be1..100af0a0b 100644 --- a/src/trackers/LT.py +++ b/src/trackers/LT.py @@ -4,6 +4,7 @@ import requests import platform from str2bool import str2bool +import bencodepy from src.trackers.COMMON import COMMON from src.console import console @@ -191,3 +192,42 @@ async def search_existing(self, meta, disctype): await asyncio.sleep(5) return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/OE.py b/src/trackers/OE.py index e536179fc..08ffd3e6d 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -10,6 +10,7 @@ from src.bbcode import BBCODE from src.trackers.COMMON import COMMON from src.console import console +import bencodepy class OE(): @@ -322,3 +323,42 @@ async def search_existing(self, meta, disctype): await asyncio.sleep(5) return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/OTW.py b/src/trackers/OTW.py index 766ebd767..1835c10d0 100644 --- a/src/trackers/OTW.py +++ b/src/trackers/OTW.py @@ -4,6 +4,7 @@ import requests from str2bool import str2bool import platform +import bencodepy from src.trackers.COMMON import COMMON from src.console import console @@ -166,3 +167,42 @@ async def 
search_existing(self, meta, disctype): await asyncio.sleep(5) return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/PSS.py b/src/trackers/PSS.py index 0f0fde007..e6abcb194 100644 --- a/src/trackers/PSS.py +++ b/src/trackers/PSS.py @@ -4,6 +4,7 @@ import requests import platform from str2bool import str2bool +import bencodepy from src.trackers.COMMON import COMMON from src.console import console @@ -168,3 +169,42 @@ async def search_existing(self, meta, disctype): await asyncio.sleep(5) return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/R4E.py b/src/trackers/R4E.py index 82a8c8080..c2f73f509 100644 --- a/src/trackers/R4E.py +++ b/src/trackers/R4E.py @@ -5,6 +5,7 @@ from str2bool import str2bool import tmdbsimple as tmdb import platform +import bencodepy from src.trackers.COMMON import COMMON from src.console import console @@ -164,3 +165,42 @@ async def search_existing(self, meta, disctype): await asyncio.sleep(5) return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': 
self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/RF.py b/src/trackers/RF.py index 5b9e6b831..463ea9813 100644 --- a/src/trackers/RF.py +++ b/src/trackers/RF.py @@ -5,6 +5,7 @@ import platform import re from str2bool import str2bool +import bencodepy from src.trackers.COMMON import COMMON from src.console import console @@ -180,3 +181,42 @@ async def search_existing(self, meta, disctype): await asyncio.sleep(5) return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/SHRI.py b/src/trackers/SHRI.py index 6862b431f..ff254cd25 100644 --- a/src/trackers/SHRI.py +++ b/src/trackers/SHRI.py @@ -4,6 +4,7 @@ import requests from str2bool import str2bool import platform +import bencodepy from src.trackers.COMMON import COMMON from src.console import console @@ -166,3 +167,42 @@ async def search_existing(self, meta, disctype): await asyncio.sleep(5) return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + 
if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/STC.py b/src/trackers/STC.py index fb17b2c0a..8451a5265 100644 --- a/src/trackers/STC.py +++ b/src/trackers/STC.py @@ -3,6 +3,7 @@ import requests from str2bool import str2bool import platform +import bencodepy from src.trackers.COMMON import COMMON from src.console import console @@ -182,3 +183,42 @@ async def search_existing(self, meta, disctype): await asyncio.sleep(5) return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/STT.py b/src/trackers/STT.py index 2f8ee800d..008dd8adb 100644 --- a/src/trackers/STT.py +++ b/src/trackers/STT.py @@ -4,6 +4,7 @@ import requests from str2bool import str2bool import platform +import bencodepy from src.trackers.COMMON import COMMON from src.console import console @@ -163,3 +164,42 @@ async def search_existing(self, meta, disctype): await asyncio.sleep(5) return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + 
updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/TDC.py b/src/trackers/TDC.py index b2dd45c8e..54f7a98fe 100644 --- a/src/trackers/TDC.py +++ b/src/trackers/TDC.py @@ -3,6 +3,7 @@ import asyncio import requests from str2bool import str2bool +import bencodepy from src.trackers.COMMON import COMMON from src.console import console @@ -168,3 +169,42 @@ async def search_existing(self, meta, disctype): await asyncio.sleep(5) return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/TIK.py b/src/trackers/TIK.py index 6a8b9d982..20659b141 100644 --- a/src/trackers/TIK.py +++ b/src/trackers/TIK.py @@ -10,6 +10,7 @@ import urllib.request import click from str2bool import str2bool +import bencodepy from src.trackers.COMMON import COMMON from src.console import console @@ -593,3 +594,42 @@ async def search_existing(self, meta, disctype): await asyncio.sleep(5) return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/ULCX.py b/src/trackers/ULCX.py index 2b1b816c8..ee708cb47 100644 --- a/src/trackers/ULCX.py +++ 
b/src/trackers/ULCX.py @@ -4,6 +4,7 @@ import requests import platform from str2bool import str2bool +import bencodepy from src.trackers.COMMON import COMMON from src.console import console @@ -161,3 +162,42 @@ async def search_existing(self, meta, disctype): await asyncio.sleep(5) return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/UNIT3D_TEMPLATE.py b/src/trackers/UNIT3D_TEMPLATE.py index d3bc06777..4c242f346 100644 --- a/src/trackers/UNIT3D_TEMPLATE.py +++ b/src/trackers/UNIT3D_TEMPLATE.py @@ -4,6 +4,7 @@ import requests import platform from str2bool import str2bool +import bencodepy from src.trackers.COMMON import COMMON from src.console import console @@ -176,3 +177,42 @@ async def search_existing(self, meta, disctype): await asyncio.sleep(5) return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/UTP.py b/src/trackers/UTP.py index 30d16fea3..c962efe27 100644 --- a/src/trackers/UTP.py +++ b/src/trackers/UTP.py @@ -4,6 +4,7 @@ import requests from str2bool import str2bool import platform +import bencodepy from src.trackers.COMMON import COMMON from src.console import console @@ -165,3 +166,42 @@ async def search_existing(self, meta, disctype): await asyncio.sleep(5) return dupes + + async def 
search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/upload.py b/upload.py index 023a4dddf..aee320103 100644 --- a/upload.py +++ b/upload.py @@ -235,8 +235,9 @@ async def do_the_thing(base_dir): ####### Upload to Trackers ####### # noqa #F266 #################################### common = COMMON(config=config) - api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM', 'LCD', 'HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF', - 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'SHRI', 'LST', 'BHD', 'TL', 'TIK', 'PSS', 'ULCX'] + api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM', 'LCD', 'HUNO', 'LT', 'JPTV', 'TDC', 'OE', + 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'SHRI', 'LST', 'BHD', 'TIK', 'PSS', 'ULCX'] + other_api_trackers = ['SN', 'NBL', 'ANT', 'BHDTV', 'RTF', 'TL'] http_trackers = ['HDB', 'TTG', 'FL', 'PTER', 'HDT', 'MTV'] tracker_class_map = { 'BLU': BLU, 'BHD': BHD, 'AITHER': AITHER, 'STC': STC, 'R4E': R4E, 'THR': THR, 'STT': STT, 'HP': HP, 'PTP': PTP, 'RF': RF, 'SN': SN, 'TIK': TIK, @@ -286,6 +287,51 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): if tracker in api_trackers: tracker_class = tracker_class_map[tracker](config=config) + if meta['unattended']: + upload_to_tracker = True + else: + try: + upload_to_tracker = cli_ui.ask_yes_no( + f"Upload to {tracker_class.tracker}? 
{debug}", + default=meta['unattended'] + ) + except (KeyboardInterrupt, EOFError): + sys.exit(1) # Exit immediately + + if upload_to_tracker: + # Get mod_q, draft, or draft/live depending on the tracker + modq, draft = await check_mod_q_and_draft(tracker_class, meta, debug, disctype) + + # Print mod_q and draft info if relevant + if modq is not None: + console.print(f"(modq: {modq})") + if draft is not None: + console.print(f"(draft: {draft})") + + console.print(f"Uploading to {tracker_class.tracker}") + + # Check if the group is banned for the tracker + if check_banned_group(tracker_class.tracker, tracker_class.banned_groups, meta): + continue + + dupes = await tracker_class.search_existing(meta, disctype) + dupes = await common.filter_dupes(dupes, meta) + meta = dupe_check(dupes, meta) + + # Proceed with upload if the meta is set to upload + if meta.get('upload', False): + await tracker_class.upload(meta, disctype) + perm = config['DEFAULT'].get('get_permalink', False) + if perm: + # need a wait so we don't race the api + await asyncio.sleep(5) + await tracker_class.search_torrent_page(meta, disctype) + await asyncio.sleep(0.5) + await client.add_to_client(meta, tracker_class.tracker) + + if tracker in other_api_trackers: + tracker_class = tracker_class_map[tracker](config=config) + if meta['unattended']: upload_to_tracker = True else: From 795b626f430f977619326bc029689f45b55f2bfb Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 20 Oct 2024 23:25:13 +1000 Subject: [PATCH 308/741] FIX - BHD perrmalink wasn't completed --- upload.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/upload.py b/upload.py index aee320103..136728ab6 100644 --- a/upload.py +++ b/upload.py @@ -236,8 +236,8 @@ async def do_the_thing(base_dir): #################################### common = COMMON(config=config) api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM', 'LCD', 'HUNO', 'LT', 'JPTV', 'TDC', 'OE', - 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'SHRI', 'LST', 'BHD', 'TIK', 'PSS', 'ULCX'] - other_api_trackers = ['SN', 'NBL', 'ANT', 'BHDTV', 'RTF', 'TL'] + 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'SHRI', 'LST', 'TIK', 'PSS', 'ULCX'] + other_api_trackers = ['SN', 'NBL', 'ANT', 'BHDTV', 'RTF', 'TL', 'BHD'] http_trackers = ['HDB', 'TTG', 'FL', 'PTER', 'HDT', 'MTV'] tracker_class_map = { 'BLU': BLU, 'BHD': BHD, 'AITHER': AITHER, 'STC': STC, 'R4E': R4E, 'THR': THR, 'STT': STT, 'HP': HP, 'PTP': PTP, 'RF': RF, 'SN': SN, 'TIK': TIK, From d818876b6c031ae0a4fe9556e3ab02135f8c5775 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 21 Oct 2024 14:15:50 +1000 Subject: [PATCH 309/741] Fix early exits --- src/prep.py | 17 ++++++++--------- upload.py | 24 ++++++++++-------------- 2 files changed, 18 insertions(+), 23 deletions(-) diff --git a/src/prep.py b/src/prep.py index 511fe890a..243ec9e30 100644 --- a/src/prep.py +++ b/src/prep.py @@ -131,17 +131,16 @@ async def check_and_collect(image_dict): trackers_list = [tracker.strip() for tracker in default_trackers.split(',')] # Ensure meta['trackers'] is a list - if isinstance(meta.get('trackers', ''), str): - meta['trackers'] = [tracker.strip() for tracker in meta['trackers'].split(',')] - - # if trackers is none then it need to be blanked - if meta.get('trackers') is None: - meta['trackers'] = [] - + if meta.get('trackers') is not None: + if isinstance(meta.get('trackers', ''), str): + meta['trackers'] = [tracker.strip() for tracker in meta['trackers'].split(',')] + if 'MTV' in meta.get('trackers', []): + if invalid_host_found: + 
console.print("[red]Warning: Some images are not hosted on an MTV-approved image host. MTV will fail if you keep these images.[/yellow]") # Issue warning if any valid image is on an unapproved host and MTV is in the trackers list - if 'MTV' in trackers_list or 'MTV' in meta.get('trackers', []): + elif 'MTV' in trackers_list: if invalid_host_found: - console.print("[yellow]Warning: Some images are not hosted on an MTV-approved image host. MTV will fail if you keep these images.[/yellow]") + console.print("[red]Warning: Some images are not hosted on an MTV-approved image host. MTV will fail if you keep these images.[/yellow]") return valid_images diff --git a/upload.py b/upload.py index 136728ab6..ed7b76133 100644 --- a/upload.py +++ b/upload.py @@ -543,24 +543,20 @@ def get_confirmation(meta): cli_ui.info(ring_the_bell) # Handle the 'keep_folder' logic based on 'is disc' and 'isdir' - if meta.get('is disc', False): + if meta.get('is disc', False) is False: meta['keep_folder'] = False # Ensure 'keep_folder' is False if 'is disc' is True - if meta['isdir']: - if 'keep_folder' in meta: - if meta['keep_folder']: - cli_ui.info_section(cli_ui.yellow, "Uploading with --keep-folder") - kf_confirm = cli_ui.ask_yes_no("You specified --keep-folder. Uploading in folders might not be allowed. Are you sure you want to proceed?", default=False) - if not kf_confirm: - cli_ui.info('Aborting...') - exit() + if meta.get('keep_folder') is True: + if meta['isdir']: + cli_ui.info_section(cli_ui.yellow, "Uploading with --keep-folder") + kf_confirm = cli_ui.ask_yes_no("You specified --keep-folder. Uploading in folders might not be allowed. Are you sure you want to proceed?", default=False) + if not kf_confirm: + cli_ui.info('Aborting...') + exit() cli_ui.info_section(cli_ui.yellow, "Is this correct?") cli_ui.info(f"Name: {meta['name']}") - try: - confirm = cli_ui.ask_yes_no("Correct?", default=False) - except (KeyboardInterrupt, EOFError): - sys.exit(1) # Exit immediately + confirm = cli_ui.ask_yes_no("Correct?", default=False) else: cli_ui.info(f"Name: {meta['name']}") @@ -665,5 +661,5 @@ def get_missing(meta): try: asyncio.run(do_the_thing(base_dir)) # Pass the correct base_dir value here - except (KeyboardInterrupt, SystemExit): + except (KeyboardInterrupt): console.print("[bold red]Program interrupted. Exiting.") From 0a16f5e387ba73e2f3329b5c69e3d66e8bdaddad Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 21 Oct 2024 14:20:16 +1000 Subject: [PATCH 310/741] Indentation --- src/prep.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/prep.py b/src/prep.py index 243ec9e30..c81a26952 100644 --- a/src/prep.py +++ b/src/prep.py @@ -134,13 +134,13 @@ async def check_and_collect(image_dict): if meta.get('trackers') is not None: if isinstance(meta.get('trackers', ''), str): meta['trackers'] = [tracker.strip() for tracker in meta['trackers'].split(',')] - if 'MTV' in meta.get('trackers', []): - if invalid_host_found: - console.print("[red]Warning: Some images are not hosted on an MTV-approved image host. MTV will fail if you keep these images.[/yellow]") + if 'MTV' in meta['trackers', []]: + if invalid_host_found: + console.print("[red]Warning: Some images are not hosted on an MTV-approved image host. MTV will fail if you keep these images.[/red]") # Issue warning if any valid image is on an unapproved host and MTV is in the trackers list elif 'MTV' in trackers_list: if invalid_host_found: - console.print("[red]Warning: Some images are not hosted on an MTV-approved image host. 
MTV will fail if you keep these images.[/yellow]") + console.print("[red]Warning: Some images are not hosted on an MTV-approved image host. MTV will fail if you keep these images.[/red]") return valid_images From 8998a0c9be1ae6c53647e5499bd20104e1599315 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 21 Oct 2024 17:58:14 +1000 Subject: [PATCH 311/741] BHD permalink --- src/trackers/BHD.py | 49 ++++++++++++++++++++++++++++++++++++++++++++- upload.py | 6 +++--- 2 files changed, 51 insertions(+), 4 deletions(-) diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index 44a630c51..f8812d0f2 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -6,6 +6,8 @@ from str2bool import str2bool import os import platform +import hashlib +import bencodepy from src.trackers.COMMON import COMMON from src.console import console @@ -25,7 +27,7 @@ def __init__(self, config): self.source_flag = 'BHD' self.upload_url = 'https://beyond-hd.me/api/upload/' self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" - self.banned_groups = ['Sicario', 'TOMMY', 'x0r', 'nikt0', 'FGT', 'd3g', 'MeGusta', 'YIFY', 'tigole', 'TEKNO3D', 'C4K', 'RARBG', '4K4U', 'EASports', 'ReaLHD'] + self.banned_groups = ['Sicario', 'TOMMY', 'x0r', 'nikt0', 'FGT', 'd3g', 'MeGusta', 'YIFY', 'tigole', 'TEKNO3D', 'C4K', 'RARBG', '4K4U', 'EASports', 'ReaLHD', 'Telly', 'AOC', 'WKS', 'SasukeducK'] pass async def upload(self, meta, disctype): @@ -326,3 +328,48 @@ async def edit_name(self, meta): if meta['category'] == "TV" and meta.get('tv_pack', 0) == 0 and meta.get('episode_title_storage', '').strip() != '' and meta['episode'].strip() != '': name = name.replace(meta['episode'], f"{meta['episode']} {meta['episode_title_storage']}", 1) return name + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + torrent = bencodepy.decode(torrent_data) + info_dict = torrent[b'info'] + bencoded_info = bencodepy.encode(info_dict) + info_hash = hashlib.sha1(bencoded_info).hexdigest() + console.print(f"Info Hash: {info_hash}") + + params = { + 'action': 'search', + 'info_hash': info_hash + } + url = f"https://beyond-hd.me/api/torrents/{self.config['TRACKERS']['BHD']['api_key'].strip()}" + try: + response = requests.post(url=url, json=params) + response_data = response.json() + console.print(f"[yellow]Response Data: {response_data}") + + if response_data.get('total_results') == 1: + for each in response_data['results']: + details_link = f"https://beyond-hd.me/details/{each['id']}" + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/upload.py b/upload.py index ed7b76133..bf8b98c6b 100644 --- a/upload.py +++ b/upload.py @@ -236,8 +236,8 @@ async def do_the_thing(base_dir): #################################### common = COMMON(config=config) api_trackers = ['BLU', 
'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM', 'LCD', 'HUNO', 'LT', 'JPTV', 'TDC', 'OE', - 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'SHRI', 'LST', 'TIK', 'PSS', 'ULCX'] - other_api_trackers = ['SN', 'NBL', 'ANT', 'BHDTV', 'RTF', 'TL', 'BHD'] + 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'SHRI', 'LST', 'TIK', 'PSS', 'ULCX', 'BHD'] + other_api_trackers = ['SN', 'NBL', 'ANT', 'BHDTV', 'RTF', 'TL'] http_trackers = ['HDB', 'TTG', 'FL', 'PTER', 'HDT', 'MTV'] tracker_class_map = { 'BLU': BLU, 'BHD': BHD, 'AITHER': AITHER, 'STC': STC, 'R4E': R4E, 'THR': THR, 'STT': STT, 'HP': HP, 'PTP': PTP, 'RF': RF, 'SN': SN, 'TIK': TIK, @@ -546,7 +546,7 @@ def get_confirmation(meta): if meta.get('is disc', False) is False: meta['keep_folder'] = False # Ensure 'keep_folder' is False if 'is disc' is True - if meta.get('keep_folder') is True: + if meta.get('keep_folder'): if meta['isdir']: cli_ui.info_section(cli_ui.yellow, "Uploading with --keep-folder") kf_confirm = cli_ui.ask_yes_no("You specified --keep-folder. Uploading in folders might not be allowed. Are you sure you want to proceed?", default=False) From 314916b31e2231e0694b28ab01a8835a56bf7795 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 21 Oct 2024 18:01:40 +1000 Subject: [PATCH 312/741] Hide debugging --- src/trackers/BHD.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index f8812d0f2..a3373f34b 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -337,7 +337,7 @@ async def search_torrent_page(self, meta, disctype): info_dict = torrent[b'info'] bencoded_info = bencodepy.encode(info_dict) info_hash = hashlib.sha1(bencoded_info).hexdigest() - console.print(f"Info Hash: {info_hash}") + # console.print(f"Info Hash: {info_hash}") params = { 'action': 'search', @@ -347,7 +347,7 @@ async def search_torrent_page(self, meta, disctype): try: response = requests.post(url=url, json=params) response_data = response.json() - console.print(f"[yellow]Response Data: {response_data}") + # console.print(f"[yellow]Response Data: {response_data}") if response_data.get('total_results') == 1: for each in response_data['results']: From 10d68151243902f448dd1685099f5a32103948a7 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 22 Oct 2024 14:53:14 +1000 Subject: [PATCH 313/741] Squashed commit of the following: commit 8a4121ba99963aa5ace0caae01d389d029972c15 Author: Audionut Date: Tue Oct 22 14:51:50 2024 +1000 Lint2 commit 97d3b246805d7d17329ea654fb58a012a87c9a9d Author: Audionut Date: Tue Oct 22 14:47:54 2024 +1000 Lint commit 42654688444f9cd5e52efcb05461ef662737137f Author: Zips-sipZ Date: Sat Oct 12 22:18:30 2024 +0200 Correcting missing thumbsize variable commit 24092863e547b930e13e11bf7f9203f824834fde Author: Zips-sipZ Date: Sat Oct 12 18:23:45 2024 +0200 Correction bare except commit e281e735609f0435a0cb86cfdede1563e98a0555 Author: Zips-sipZ Date: Sat Oct 12 18:01:02 2024 +0200 Adding thumbnail size and header config options Adding thumbnail size and header config options commit bce960911a87fe9f290f774c631c016c3c80889c Author: Zips-sipZ Date: Sat Oct 12 17:56:40 2024 +0200 Adding the thumbsize and screenheader options Adding the thumbsize and screenheader options commit e3f345058d213c2f8bb47c6b60ac894299c62adc Author: Zips-sipZ Date: Sat Oct 12 17:51:09 2024 +0200 Update COMMON.py Adding space to clarify the different screens --- data/example-config.py | 6 ++++++ src/trackers/COMMON.py | 14 +++++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/data/example-config.py 
b/data/example-config.py index dd9b0d786..849099081 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -25,6 +25,12 @@ # Number of screenshots to capture "screens": "6", + # Providing the option to change the size of the thumbnails where supported, default is 350 + "thumbnail_size": "350", + + # Providing the option to add a header, in bbcode, above the screenshot section where supported + # "screenshot_header": "[centers] SCREENSHOTS [/center]" + # Enable lossless PNG Compression (True/False) "optimize_images": True, diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 3e4d93501..b4da34530 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -69,11 +69,23 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des descfile.write(desc) images = meta['image_list'] if len(images) > 0: + try: + thumbsize = self.config['DEFAULT']['thumbnail_size'] + except Exception: + thumbsize = "350" + + try: + screenheader = self.config['DEFAULT']['screenshot_header'] + except Exception: + screenheader = None + if screenheader is not None: + descfile.write(screenheader + '\n') + descfile.write("[center]") for each in range(len(images[:int(meta['screens'])])): web_url = images[each]['web_url'] raw_url = images[each]['raw_url'] - descfile.write(f"[url={web_url}][img=350]{raw_url}[/img][/url] ") + descfile.write(f"[url={web_url}][img={thumbsize}]{raw_url}[/img][/url] ") descfile.write("[/center]") if signature is not None: From 9c0e612e38b356b570529d0a2709a974056e9526 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 23 Oct 2024 17:27:16 +1000 Subject: [PATCH 314/741] Also clean image size meta --- src/prep.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/prep.py b/src/prep.py index c81a26952..7ec733556 100644 --- a/src/prep.py +++ b/src/prep.py @@ -356,6 +356,7 @@ async def handle_image_list(self, meta, tracker_name): keep_images = await self.prompt_user_for_confirmation(f"Do you want to keep the images found on {tracker_name}?") if not keep_images: meta['image_list'] = [] + meta['image_sizes'] = [] console.print(f"[yellow]Images discarded from {tracker_name}.") else: console.print(f"[green]Images retained from {tracker_name}.") From dc0e5ce4da4540f0e47a5e797211cb2f0b7b41c3 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 23 Oct 2024 17:47:37 +1000 Subject: [PATCH 315/741] Revert "Also clean image size meta" This reverts commit 9c0e612e38b356b570529d0a2709a974056e9526. 
--- src/prep.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index 7ec733556..c81a26952 100644 --- a/src/prep.py +++ b/src/prep.py @@ -356,7 +356,6 @@ async def handle_image_list(self, meta, tracker_name): keep_images = await self.prompt_user_for_confirmation(f"Do you want to keep the images found on {tracker_name}?") if not keep_images: meta['image_list'] = [] - meta['image_sizes'] = [] console.print(f"[yellow]Images discarded from {tracker_name}.") else: console.print(f"[green]Images retained from {tracker_name}.") From bec415b0234cc8c3fb44093587e4620a1e269b5a Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 23 Oct 2024 18:00:40 +1000 Subject: [PATCH 316/741] imgbox modifies files --- src/prep.py | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/src/prep.py b/src/prep.py index c81a26952..454bd0b69 100644 --- a/src/prep.py +++ b/src/prep.py @@ -92,7 +92,7 @@ async def check_images_concurrently(self, imagelist, meta): # Function to check each image's URL, host, and log size async def check_and_collect(image_dict): - img_url = image_dict.get('img_url') or image_dict.get('raw_url') + img_url = image_dict.get('raw_url') if not img_url: return None @@ -134,7 +134,7 @@ async def check_and_collect(image_dict): if meta.get('trackers') is not None: if isinstance(meta.get('trackers', ''), str): meta['trackers'] = [tracker.strip() for tracker in meta['trackers'].split(',')] - if 'MTV' in meta['trackers', []]: + if 'MTV' in meta.get('trackers', []): if invalid_host_found: console.print("[red]Warning: Some images are not hosted on an MTV-approved image host. MTV will fail if you keep these images.[/red]") # Issue warning if any valid image is on an unapproved host and MTV is in the trackers list @@ -356,6 +356,7 @@ async def handle_image_list(self, meta, tracker_name): keep_images = await self.prompt_user_for_confirmation(f"Do you want to keep the images found on {tracker_name}?") if not keep_images: meta['image_list'] = [] + meta['image_sizes'] = {} console.print(f"[yellow]Images discarded from {tracker_name}.") else: console.print(f"[green]Images retained from {tracker_name}.") @@ -2872,14 +2873,13 @@ async def imgbox_upload(self, chdir, image_glob, meta): console.print(f"[red]There was an error uploading to imgbox: [yellow]{submission['error']}[/yellow][/red]") return [] # Return empty list in case of failure else: - image_size = os.path.getsize(image) + # Add the uploaded image info to image_list image_dict = { 'web_url': submission['web_url'], 'img_url': submission['thumbnail_url'], 'raw_url': submission['image_url'] } image_list.append(image_dict) - meta['image_sizes'][submission['image_url']] = image_size console.print(f"[green]Successfully uploaded image: {image}") @@ -2887,8 +2887,17 @@ async def imgbox_upload(self, chdir, image_glob, meta): console.print(f"[red]Error during upload for {image}: {str(e)}") return [] # Return empty list in case of error - console.print(f"[green]Successfully uploaded all {len(image_list)} images to imgbox.") - return image_list # Return the complete list when all images are done + # After uploading all images, validate URLs and get sizes + console.print("[blue]Validating images and retrieving their sizes...") + valid_images = await self.check_images_concurrently(image_list, meta) + + if valid_images: + console.print(f"[green]Successfully uploaded and validated {len(valid_images)} images.") + else: + console.print("[red]Failed to validate any images.") + return [] # Return empty list if no 
valid images + + return valid_images # Return the valid image list after validation except Exception as e: console.print(f"[red]An error occurred while uploading images to imgbox: {str(e)}") From c79c8a1b3e3a79f70412433497928325f59944bd Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 23 Oct 2024 18:17:45 +1000 Subject: [PATCH 317/741] Reduce imgbox console prints --- src/prep.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/prep.py b/src/prep.py index 454bd0b69..78e1dd514 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2675,7 +2675,7 @@ def exponential_backoff(retry_count, initial_timeout): # Add imgbox handling here if img_host == "imgbox": try: - console.print("[blue]Uploading images to imgbox...") + # console.print("[blue]Uploading images to imgbox...") # Use the current event loop to run imgbox_upload loop = asyncio.get_event_loop() @@ -2865,7 +2865,7 @@ async def imgbox_upload(self, chdir, image_glob, meta): console.print(f"[debug] Starting upload of {len(image_glob)} images to imgbox...") async with pyimgbox.Gallery(thumb_width=350, square_thumbs=False) as gallery: for image in image_glob: - console.print(f"[blue]Uploading image: {image}") + # console.print(f"[blue]Uploading image: {image}") try: async for submission in gallery.add([image]): @@ -2888,11 +2888,11 @@ async def imgbox_upload(self, chdir, image_glob, meta): return [] # Return empty list in case of error # After uploading all images, validate URLs and get sizes - console.print("[blue]Validating images and retrieving their sizes...") + # console.print("[blue]Validating images and retrieving their sizes...") valid_images = await self.check_images_concurrently(image_list, meta) if valid_images: - console.print(f"[green]Successfully uploaded and validated {len(valid_images)} images.") + console.print(f"[yellow]Successfully uploaded and validated {len(valid_images)} images.") else: console.print("[red]Failed to validate any images.") return [] # Return empty list if no valid images From 237b607420e673f570bb26ab0e88f92fbbb0fb80 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 23 Oct 2024 18:59:04 +1000 Subject: [PATCH 318/741] Aither include dvd remux --- src/trackers/AITHER.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 027fb0a62..761fd34c3 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -123,6 +123,8 @@ async def edit_name(self, meta): media_info_tracks = meta.get('media_info_tracks', []) # noqa #F841 resolution = meta.get('resolution') video_codec = meta.get('video_codec') + name_type = meta.get('type', "") + source = meta.get('source', "") if not meta['is_disc']: def has_english_audio(tracks=None, media_info_text=None): @@ -153,7 +155,7 @@ def get_audio_lang(tracks=None, is_bdmv=False, media_info_text=None): except (FileNotFoundError, KeyError) as e: print(f"Error processing MEDIAINFO.txt: {e}") - if meta['is_disc'] == "DVD": + if meta['is_disc'] == "DVD" or (name_type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD")): aither_name = aither_name.replace(str(meta['year']), f"{meta['year']} {resolution}", 1) aither_name = aither_name.replace((meta['audio']), f"{video_codec} {meta['audio']}", 1) From 3762d6475e221afd0f82bb885ae9872daf27b3b6 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 24 Oct 2024 10:41:07 +1000 Subject: [PATCH 319/741] Update dupe checking Some API's don't return type, which means remux will match as dupe against non-remux, web against bluray, etc, etc. 
This PR attempts to add some extra checking to filter out these clear false results from the dupe check. --- src/trackers/COMMON.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index b4da34530..2f0119d03 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -376,10 +376,11 @@ async def ptgen(self, meta, ptgen_site="", ptgen_retry=3): return ptgen async def filter_dupes(self, dupes, meta): - if meta['debug']: - console.log("[cyan]Pre-filtered dupes") - console.log(dupes) + console.log("[cyan]Pre-filtered dupes") + console.log(dupes) new_dupes = [] + types_to_check = {'REMUX', 'WEBDL', 'WEBRip', 'HDTV'} + file_type_present = {t for t in types_to_check if t in meta['type']} for each in dupes: if meta.get('sd', 0) == 1: remove_set = set() @@ -418,6 +419,17 @@ async def filter_dupes(self, dupes, meta): 'in': meta['type'] } ] + + dupe_type_matches = {t for t in types_to_check if t in each.upper()} + if file_type_present: + if not file_type_present.intersection(dupe_type_matches): + console.log(f"[yellow]Excluding result due to type mismatch: {each}") + continue + else: + if dupe_type_matches: + console.log(f"[red]Excluding extra result with new type match: {each}") + continue + for s in search_combos: if s.get('search_for') not in (None, ''): if any(re.search(x, s['search'], flags=re.IGNORECASE) for x in s['search_for']): From a3c50a21b9c75ea2dd89acf4197e1520b171e684 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 24 Oct 2024 11:00:07 +1000 Subject: [PATCH 320/741] Fix manual_frames not in old meta --- src/prep.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/prep.py b/src/prep.py index 78e1dd514..9602ad8f9 100644 --- a/src/prep.py +++ b/src/prep.py @@ -585,6 +585,8 @@ async def gather_prep(self, meta, mode): else: console.print("Skipping existing search as meta already populated") + if 'manual_frames' not in meta: + meta['manual_frames'] = {} manual_frames = meta['manual_frames'] # Take Screenshots if meta['is_disc'] == "BDMV": From ca72c4fbe71c8b3f65717ae86b235ebc754be5cd Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 24 Oct 2024 11:35:31 +1000 Subject: [PATCH 321/741] Fix webdl matches --- src/trackers/COMMON.py | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 2f0119d03..4cc372dc7 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -380,12 +380,21 @@ async def filter_dupes(self, dupes, meta): console.log(dupes) new_dupes = [] types_to_check = {'REMUX', 'WEBDL', 'WEBRip', 'HDTV'} - file_type_present = {t for t in types_to_check if t in meta['type']} + + normalized_meta_type = {t.replace('-', '').upper() for t in meta['type']} if isinstance(meta['type'], list) else {meta['type'].replace('-', '').upper()} + + file_type_present = {t for t in types_to_check if t in normalized_meta_type} + for each in dupes: if meta.get('sd', 0) == 1: remove_set = set() else: remove_set = set({meta['resolution']}) + + normalized_each_type = each.replace('-', '').upper() + + # console.log(f"normalized results: {normalized_each_type}") + search_combos = [ { 'search': meta['hdr'], @@ -419,10 +428,19 @@ async def filter_dupes(self, dupes, meta): 'in': meta['type'] } ] + # console.log(f"Meta type: {normalized_meta_type}") + # console.log(f"Each type: {normalized_each_type}") + # Check if the type of the dupe matches or is sufficiently similar + dupe_type_matches = {t for t in types_to_check 
if t in normalized_each_type} - dupe_type_matches = {t for t in types_to_check if t in each.upper()} if file_type_present: - if not file_type_present.intersection(dupe_type_matches): + # Allow WEB-DL and similar matches if types are related (e.g., WEB-DL vs AMZN WEB-DL) + if 'WEBDL' in normalized_meta_type and 'WEBDL' in normalized_each_type: + console.log(f"[green]Allowing result we will catch later: {each}") + # Allow based on matching resolution, HDR, and audio despite type mismatch + elif meta['resolution'] in each and meta['hdr'] in each and meta['audio'] in each: + console.log(f"[green]Allowing result we will catch later: {each}") + else: console.log(f"[yellow]Excluding result due to type mismatch: {each}") continue else: From 256a43315af748236771ed0b7e9b1a2a0c79defb Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 24 Oct 2024 11:36:22 +1000 Subject: [PATCH 322/741] add branch to auto docker --- .github/workflows/docker-image.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index 48a1fbf9e..8ac8ce36f 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -5,6 +5,7 @@ on: branches: - master - develop + - dupe-checking workflow_dispatch: env: From 1f72fbc55bae89592ce8e57f7f7af5d2e1a79937 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 24 Oct 2024 12:21:05 +1000 Subject: [PATCH 323/741] More mediainfo --- src/prep.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/prep.py b/src/prep.py index 9602ad8f9..9be3e76c6 100644 --- a/src/prep.py +++ b/src/prep.py @@ -862,10 +862,13 @@ def filter_mediainfo(data): "FrameRate_Num": track.get("FrameRate_Num", {}), "FrameRate_Den": track.get("FrameRate_Den", {}), "FrameCount": track.get("FrameCount", {}), + "Standard": track.get("Standard", {}), "ColorSpace": track.get("ColorSpace", {}), "ChromaSubsampling": track.get("ChromaSubsampling", {}), "ChromaSubsampling_Position": track.get("ChromaSubsampling_Position", {}), "BitDepth": track.get("BitDepth", {}), + "ScanType": track.get("ScanType", {}), + "ScanOrder": track.get("ScanOrder", {}), "Delay": track.get("Delay", {}), "Delay_Source": track.get("Delay_Source", {}), "StreamSize": track.get("StreamSize", {}), From 47a3366af47d4132117f8f0677f1583bf9fd2a30 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 24 Oct 2024 18:35:08 +1000 Subject: [PATCH 324/741] Add LaserDisc as a source option `--source LaserDisc` Closes https://github.com/Audionut/Upload-Assistant/issues/94 --- src/args.py | 2 +- src/prep.py | 8 +++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/args.py b/src/args.py index 51264fdbc..eee44f3a0 100644 --- a/src/args.py +++ b/src/args.py @@ -25,7 +25,7 @@ def parse(self, args, meta): parser.add_argument('-mf', '--manual_frames', required=False, help="Comma-separated frame numbers to use as screenshots", type=str, default=None) parser.add_argument('-c', '--category', nargs='*', required=False, help="Category [MOVIE, TV, FANRES]", choices=['movie', 'tv', 'fanres']) parser.add_argument('-t', '--type', nargs='*', required=False, help="Type [DISC, REMUX, ENCODE, WEBDL, WEBRIP, HDTV]", choices=['disc', 'remux', 'encode', 'webdl', 'web-dl', 'webrip', 'hdtv']) - parser.add_argument('--source', nargs='*', required=False, help="Source [Blu-ray, BluRay, DVD, HDDVD, WEB, HDTV, UHDTV]", choices=['Blu-ray', 'BluRay', 'DVD', 'HDDVD', 'WEB', 'HDTV', 'UHDTV'], dest="manual_source") + parser.add_argument('--source', nargs='*', required=False, help="Source 
[Blu-ray, BluRay, DVD, HDDVD, WEB, HDTV, UHDTV, LaserDisc]", choices=['Blu-ray', 'BluRay', 'DVD', 'HDDVD', 'WEB', 'HDTV', 'UHDTV', 'LaserDisc'], dest="manual_source") parser.add_argument('-res', '--resolution', nargs='*', required=False, help="Resolution [2160p, 1080p, 1080i, 720p, 576p, 576i, 480p, 480i, 8640p, 4320p, OTHER]", choices=['2160p', '1080p', '1080i', '720p', '576p', '576i', '480p', '480i', '8640p', '4320p', 'other']) parser.add_argument('-tmdb', '--tmdb', nargs='*', required=False, help="TMDb ID", type=str, dest='tmdb_manual') parser.add_argument('-imdb', '--imdb', nargs='*', required=False, help="IMDb ID", type=str) diff --git a/src/prep.py b/src/prep.py index 9be3e76c6..ecff10a00 100644 --- a/src/prep.py +++ b/src/prep.py @@ -669,7 +669,11 @@ async def gather_prep(self, meta, mode): if meta.get('no_tag', False): meta['tag'] = "" meta['3D'] = self.is_3d(mi, bdinfo) - meta['source'], meta['type'] = self.get_source(meta['type'], video, meta['path'], meta['is_disc'], meta) + if meta.get('manual_source', None): + meta['source'] = meta['manual_source'] + _, meta['type'] = self.get_source(meta['type'], video, meta['path'], meta['is_disc'], meta) + else: + meta['source'], meta['type'] = self.get_source(meta['type'], video, meta['path'], meta['is_disc'], meta) if meta.get('service', None) in (None, ''): meta['service'], meta['service_longname'] = self.get_service(video, meta.get('tag', ''), meta['audio'], meta['filename']) elif meta.get('service'): @@ -2149,8 +2153,6 @@ def get_source(self, type, video, path, is_disc, meta): source = guessit(path)['source'] except Exception: source = "BluRay" - if meta.get('manual_source', None): - source = meta['manual_source'] if source in ("Blu-ray", "Ultra HD Blu-ray", "BluRay", "BR") or is_disc == "BDMV": if type == "DISC": source = "Blu-ray" From a1afe3527db3df2df65d7249fcb7a595605342b4 Mon Sep 17 00:00:00 2001 From: swannie-eire <57441681+swannie-eire@users.noreply.github.com> Date: Thu, 24 Oct 2024 11:22:10 +0100 Subject: [PATCH 325/741] Create TVC Adding TVC --- src/trackers/TVC | 450 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 450 insertions(+) create mode 100644 src/trackers/TVC diff --git a/src/trackers/TVC b/src/trackers/TVC new file mode 100644 index 000000000..cd26cb731 --- /dev/null +++ b/src/trackers/TVC @@ -0,0 +1,450 @@ +# -*- coding: utf-8 -*- +# import discord +import asyncio +import requests +from str2bool import str2bool +import traceback +import cli_ui +import os +from src.bbcode import BBCODE +import json + +from src.trackers.COMMON import COMMON +from src.console import console + + +class TVC(): + """ + Edit for Tracker: + Edit BASE.torrent with announce and source + Check for duplicates + Set type/category IDs + Upload + """ + + ############################################################### + ######## EDIT ME ######## + ############################################################### + + # ALSO EDIT CLASS NAME ABOVE + + def __init__(self, config): + self.config = config + self.tracker = 'TVC' + self.source_flag = 'TVCHAOS' + self.upload_url = 'https://tvchaosuk.com/api/torrents/upload' + self.search_url = 'https://tvchaosuk.com/api/torrents/filter' + self.signature = "" + self.banned_groups = [''] + self.images = { + "imdb_75": 'https://i.imgur.com/Mux5ObG.png', + "tmdb_75": 'https://i.imgur.com/r3QzUbk.png', + "tvdb_75": 'https://i.imgur.com/UWtUme4.png', + "tvmaze_75": 'https://i.imgur.com/ZHEF5nE.png', + "mal_75": 'https://i.imgur.com/PBfdP3M.png' + } + + pass + + async def get_cat_id(self, genres): + 
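# Worked example (illustrative): genres == "Comedy, Drama" is split on
# ", ", lowercased, and substring-matched against self.tv_types below, so
# the first hit ("comedy") maps to id "29"; with no match at all the
# method falls through to the holding bin id at the bottom.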
# Note sections are based on Genre not type, source, resolution etc.. + self.tv_types = ["comedy", "documentary", "drama", "entertainment", "factual", "foreign", "kids", "movies", "News", "radio", "reality", "soaps", "sci-fi", "sport", "holding bin"] + self.tv_types_ids = ["29", "5", "11", "14", "19", "42", "32", "44", "45", "51", "52", "30", "33", "42", "53"] + + genres = genres.split(', ') + if len(genres) >= 1: + for i in genres: + g = i.lower().replace(',', '') + for s in self.tv_types: + if s.__contains__(g): + return self.tv_types_ids[self.tv_types.index(s)] + + + # returning 14 as that is holding bin/misc + return self.tv_types_ids[14] + + async def get_res_id(self, tv_pack, resolution): + if tv_pack: + resolution_id = { + '1080p': 'HD1080p Pack', + '1080i': 'HD1080p Pack', + '720p': 'HD720p Pack', + '576p': 'SD Pack', + '576i': 'SD Pack', + '540p': 'SD Pack', + '540i': 'SD Pack', + '480p': 'SD Pack', + '480i': 'SD Pack' + }.get(resolution, 'SD') + else: + resolution_id = { + '1080p': 'HD1080p', + '1080i':'HD1080p', + '720p': 'HD720p', + '576p': 'SD', + '576i': 'SD', + '540p': 'SD', + '540': 'SD', + '480p': 'SD', + '480i': 'SD' + }.get(resolution, 'SD') + return resolution_id + + ############################################################### + ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### + ############################################################### + + async def upload(self, meta, disctype): + common = COMMON(config=self.config) + await common.edit_torrent(meta, self.tracker, self.source_flag) + await self.get_tmdb_data(meta) + if meta['category'] == 'TV': + cat_id = await self.get_cat_id(meta['genres']) + else: + cat_id = 44 + # type_id = await self.get_type_id(meta['type']) + resolution_id = await self.get_res_id(meta['tv_pack'] if 'tv_pack' in meta else 0, meta['resolution']) + await self.unit3d_edit_desc(meta, self.tracker, self.signature) + + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + anon = 0 + else: + anon = 1 + + if meta['bdinfo'] != None: + mi_dump = None + bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + else: + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + bd_dump = None + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') + files = {'torrent': open_torrent} + + if meta['type'] == "ENCODE" and (str(meta['path']).lower().__contains__("bluray") or str(meta['path']).lower().__contains__("brrip") or str(meta['path']).lower().__contains__("bdrip")): + type = "BRRip" + else: + type = meta['type'].replace('WEBDL', 'WEB-DL') + + # Naming as per TVC rules. Site has unusual naming conventions. + if meta['category'] == "MOVIE": + tvc_name = f"{meta['title']} ({meta['year']}) [{meta['resolution']} {type} {str(meta['video'][-3:]).upper()}]" + else: + if meta['search_year'] != "": + year = meta['year'] + else: + year = "" + if meta.get('no_season', False) == True: + season = '' + if meta.get('no_year', False) == True: + year = '' + + if meta['category'] == "TV": + if meta['tv_pack']: + # seasons called series here. 
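# Illustrative result (assuming a 2020 show, season 1, 1080p WEB-DL, .mkv
# container -- the [-3:] slice picks up the file extension):
#   Some Title (2020) Series 1 [1080p WEB-DL MKV]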
+ tvc_name = f"{meta['title']} ({meta['year'] if 'season_air_first_date' and len(meta['season_air_first_date']) >= 4 else meta['season_air_first_date'][:4]}) Series {meta['season_int']} [{meta['resolution']} {type} {str(meta['video'][-3:]).upper()}]".replace(" ", " ").replace(' () ', ' ') + else: + if 'episode_airdate' in meta: + tvc_name = f"{meta['title']} ({year}) {meta['season']}{meta['episode']} ({meta['episode_airdate']}) [{meta['resolution']} {type} {str(meta['video'][-3:]).upper()}]".replace(" ", " ").replace(' () ', ' ') + else: + tvc_name = f"{meta['title']} ({year}) {meta['season']}{meta['episode']} [{meta['resolution']} {type} {str(meta['video'][-3:]).upper()}]".replace(" ", " ").replace(' () ', ' ') + + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MediaInfo.json", 'r', encoding='utf-8') as f: + mi = json.load(f) + + if not meta['is_disc']: + self.get_subs_info(meta, mi) + + if 'eng_subs' in meta and meta['eng_subs']: + tvc_name = tvc_name.replace(']', ' SUBS]') + if 'sdh_subs' in meta and meta['eng_subs']: + if 'eng_subs' in meta and meta['eng_subs']: + tvc_name = tvc_name.replace(' SUBS]', ' (ENG + SDH SUBS)]') + else: + tvc_name = tvc_name.replace(']', ' (SDH SUBS)]') + + if 'origin_country_code' in meta: + if "IE" in meta['origin_country_code']: + tvc_name += " [IRL]" + elif "AU" in meta['origin_country_code']: + tvc_name += " [AUS]" + elif "NZ" in meta['origin_country_code']: + tvc_name += " [NZ]" + elif "CA" in meta['origin_country_code']: + tvc_name += " [CA]" + + if meta.get('unattended', False) == False: + upload_to_tvc = cli_ui.ask_yes_no(f"Upload to {self.tracker} with the name {tvc_name}?", default=False) + + if not upload_to_tvc: + tvc_name = cli_ui.ask_string("Please enter New Name:") + upload_to_tvc = cli_ui.ask_yes_no(f"Upload to {self.tracker} with the name {tvc_name}?", default=False) + + data = { + 'name' : tvc_name, + # newline does not seem to work on this site for some reason. if you edit and save it again they will but not if pushed by api + 'description' : desc.replace('\n', '
<br>').replace('\r', '<br>
'), + 'mediainfo' : mi_dump, + 'bdinfo' : bd_dump, + 'category_id' : cat_id, + 'type' : resolution_id, + # 'resolution_id' : resolution_id, + 'tmdb' : meta['tmdb'], + 'imdb' : meta['imdb_id'].replace('tt', ''), + 'tvdb' : meta['tvdb_id'], + 'mal' : meta['mal_id'], + 'igdb' : 0, + 'anonymous' : anon, + 'stream' : meta['stream'], + 'sd' : meta['sd'], + 'keywords' : meta['keywords'], + 'personal_release' : int(meta.get('personalrelease', False)), + 'internal' : 0, + 'featured' : 0, + 'free' : 0, + 'doubleup' : 0, + 'sticky' : 0, + } + + if meta.get('category') == "TV": + data['season_number'] = meta.get('season_int', '0') + data['episode_number'] = meta.get('episode_int', '0') + headers = { + 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:53.0) Gecko/20100101 Firefox/53.0' + } + params = { + 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + } + if 'upload_to_tvc' in locals() and upload_to_tvc == False: + return + + if meta['debug'] == False: + response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + try: + # some reason this does not return json instead it returns something like below. + # b'application/x-bittorrent\n{"success":true,"data":"https:\\/\\/tvchaosuk.com\\/torrent\\/download\\/164633.REDACTED","message":"Torrent uploaded successfully."}' + # so you need to convert text to json. + json_data = json.loads(response.text.strip('application/x-bittorrent\n')) + console.print(json_data) + + # adding torrent link to torrent as comment + t_id = json_data['data'].split(".")[1].split("/")[3] + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, + self.config['TRACKERS'][self.tracker].get('announce_url'), + "https://tvchaosuk.com/torrents/" + t_id) + + except: + console.print(traceback.print_exc()) + console.print("It may have uploaded, go check") + console.print(response.text.strip('application/x-bittorrent\n')) + return + else: + console.print(f"[cyan]Request Data:") + console.print(data) + open_torrent.close() + + + async def get_tmdb_data(self, meta): + import tmdbsimple as tmdb + if meta['category'] == "MOVIE": + movie = tmdb.Movies(meta['tmdb']) + response = movie.info() + else: + tv = tmdb.TV(meta['tmdb']) + response = tv.info() + + #### TVC stuff + if meta['category'] == "TV": + if hasattr(tv, 'release_dates'): + meta['release_dates'] = tv.release_dates() + + if hasattr(tv, 'networks') and len(tv.networks) != 0 and 'name' in tv.networks[0]: + meta['networks'] = tv.networks[0]['name'] + + try: + if 'tv_pack' in meta and not meta['tv_pack']: + episode_info = tmdb.TV_Episodes(meta['tmdb'], meta['season_int'], meta['episode_int']).info() + + meta['episode_airdate'] = episode_info['air_date'] + meta['episode_name'] = episode_info['name'] + meta['episode_overview'] = episode_info['overview'] + if 'tv_pack' in meta and meta['tv_pack']: + season_info = tmdb.TV_Seasons(meta['tmdb'], meta['season_int']).info() + meta['season_air_first_date'] = season_info['air_date'] + + if hasattr(tv, 'first_air_date'): + meta['first_air_date'] = tv.first_air_date + except: + console.print(traceback.print_exc()) + console.print(f"Unable to get episode information, Make sure episode {meta['season']}{meta['episode']} exists in TMDB. 
\nhttps://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}/season/{meta['season_int']}") + meta['season_air_first_date'] = str({meta["year"]}) + "-N/A-N/A" + meta['first_air_date'] = str({meta["year"]}) + "-N/A-N/A" + + meta['origin_country_code'] = [] + if 'origin_country' in response: + if type(response['origin_country']) == list: + for i in response['origin_country']: + meta['origin_country_code'].append(i) + else: + meta['origin_country_code'].append(response['origin_country']) + print(type(response['origin_country'])) + + elif len(response['production_countries']): + for i in response['production_countries']: + if 'iso_3166_1' in i: + meta['origin_country_code'].append(i['iso_3166_1']) + elif len(response['production_companies']): + meta['origin_country_code'].append(response['production_companies'][0]['origin_country']) + + + async def search_existing(self, meta, disctype): + # Search on TVCUK has been DISABLED due to issues + # leaving code here for future use when it is re-enabled + console.print("[RED]Search API has been disabled. This will fail and thats normal...") + # https://tvchaosuk.com/api/torrents/filter?api_token=&tmdb=138108 + + + dupes = [] + console.print("[yellow]Searching for existing torrents on site...") + params = { + 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdb' : meta['tmdb'], + 'name' : "" + } + + try: + response = requests.get(url=self.search_url, params=params) + response = response.json() + if "message" in response and response["message"] == "No Torrents Found": + return + else: + for each in response['data']: + result = [each][0]['attributes']['name'] + dupes.append(result) + except: + console.print(response) + console.print(self.search_url, params) + console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + await asyncio.sleep(5) + + return dupes + + + async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False): + base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]DESCRIPTION.txt", 'w') as descfile: + bbcode = BBCODE() + if meta.get('discs', []) != []: + discs = meta['discs'] + if discs[0]['type'] == "DVD": + descfile.write(f"[spoiler=VOB MediaInfo][code]{discs[0]['vob_mi']}[/code][/spoiler]\n") + descfile.write("\n") + if len(discs) >= 2: + for each in discs[1:]: + if each['type'] == "BDMV": + descfile.write(f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n") + descfile.write("\n") + if each['type'] == "DVD": + descfile.write(f"{each['name']}:\n") + descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code][{each['vob_mi']}[/code][/spoiler] [spoiler={os.path.basename(each['ifo'])}][code][{each['ifo_mi']}[/code][/spoiler]\n") + descfile.write("\n") + desc = "" + + #release info + rd_info = "" + # getting movie release info + if meta['category'] != "TV" and 'release_dates' in meta: + for cc in meta['release_dates']['results']: + for rd in cc['release_dates']: + if rd['type'] == 6: + channel = str(rd['note']) if str(rd['note']) != "" else "N/A Channel" + rd_info += "[color=orange][size=15]" + cc['iso_3166_1'] + " TV Release info [/size][/color]" + "\n" + str(rd['release_date'])[:10] + " on " + channel + "\n" + # movie release info adding + if rd_info != "": + desc += "[color=green][size=25]Release Info[/size][/color]" + "\n\n" + desc += rd_info + "\n\n" + # getting season release info. 
need to fix so it gets season info instead of first episode info. + elif meta['category'] == "TV" and meta['tv_pack'] == 1 and 'first_air_date' in meta: + channel = meta['networks'] if 'networks' in meta and meta['networks'] != "" else "N/A" + desc += "[color=green][size=25]Release Info[/size][/color]" + "\n\n" + desc += f"[color=orange][size=15]First episode of this season aired {meta['season_air_first_date']} on channel {channel}[/size][/color]" + "\n\n" + elif meta['category'] == "TV" and meta['tv_pack'] != 1 and 'episode_airdate' in meta: + channel = meta['networks'] if 'networks' in meta and meta['networks'] != "" else "N/A" + desc += "[color=green][size=25]Release Info[/size][/color]" + "\n\n" + desc += f"[color=orange][size=15]Episode aired on channel {channel} on {meta['episode_airdate']}[/size][/color]" + "\n\n" + else: + desc += "[color=green][size=25]Release Info[/size][/color]" + "\n\n" + desc += "[color=orange][size=15]TMDB has No TV release info for this[/size][/color]"+ "\n\n" + + if meta['category'] == 'TV' and meta['tv_pack'] != 1 and 'episode_overview' in meta: + desc += "[color=green][size=25]PLOT[/size][/color]" + "\n\n" + "[color=green][size=25]PLOT[/size][/color]\n" + "Episode Name: " + str(meta['episode_name']) + "\n" + str(meta['episode_overview'] + "\n\n") + else: + desc += "[color=green][size=25]PLOT[/size][/color]" + "\n" + str(meta['overview'] + "\n\n") + # Max two screenshots as per rules + if len(base) > 2 and meta['description'] != "PTP": + desc += "[color=green][size=25]Notes/Extra Info[/size][/color]" + " \n \n" + str(base) + " \n \n " + desc += self.get_links(meta, "[color=green][size=25]", "[/size][/COLOR]") + desc = bbcode.convert_pre_to_code(desc) + desc = bbcode.convert_hide_to_spoiler(desc) + if comparison == False: + desc = bbcode.convert_comparison_to_collapse(desc, 1000) + descfile.write(desc) + images = meta['image_list'] + # only adding 2 screens as that is mentioned in rules. 
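# Each of the (at most two) screenshots below is written out as
#   [url=<web_url>][img=350]<img_url>[/img][/url]
# inside a single [center] block, keeping within the two-screenshot rule.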
+ if len(images) > 0 and int(meta['screens']) >= 2: + descfile.write("[color=green][size=25]Screenshots[/size][/color]\n\n[center]") + for each in range(len(images[:2])): + web_url = images[each]['web_url'] + img_url = images[each]['img_url'] + descfile.write(f"[url={web_url}][img=350]{img_url}[/img][/url]") + descfile.write("[/center]") + + if signature != None: + descfile.write(signature) + descfile.close() + return + + def get_links(self, movie, subheading, heading_end): + description = "" + description += "\n\n" + subheading + "Links" + heading_end + "\n" + if movie['imdb_id'] != "0": + description += f"[URL=https://www.imdb.com/title/tt{movie['imdb_id']}][img]{self.images['imdb_75']}[/img][/URL]" + if movie['tmdb'] != "0": + description += f" [URL=https://www.themoviedb.org/{str(movie['category'].lower())}/{str(movie['tmdb'])}][img]{self.images['tmdb_75']}[/img][/URL]" + if movie['tvdb_id'] != 0: + description += f" [URL=https://www.thetvdb.com/?id={str(movie['tvdb_id'])}&tab=series][img]{self.images['tvdb_75']}[/img][/URL]" + if movie['tvmaze_id'] != 0: + description += f" [URL=https://www.tvmaze.com/shows/{str(movie['tvmaze_id'])}][img]{self.images['tvmaze_75']}[/img][/URL]" + if movie['mal_id'] != 0: + description += f" [URL=https://myanimelist.net/anime/{str(movie['mal_id'])}][img]{self.images['mal_75']}[/img][/URL]" + return description + " \n \n " + + + ## my get subs function + ## used in naming conventions + def get_subs_info(self, meta, mi): + subs = "" + subs_num = 0 + for s in mi.get("media").get("track"): + if s["@type"] == "Text": + subs_num = subs_num + 1 + if subs_num >= 1: + meta['has_subs'] = 1 + else: + meta['has_subs'] = 0 + for s in mi.get("media").get("track"): + if s["@type"] == "Text": + if "Language_String" in s: + if not subs_num <= 0: + subs = subs + s["Language_String"] + ", " + ## checking if it has romanian subs as for data scene. 
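## For context: the flags set in this loop feed the naming logic in upload() --
## eng_subs turns the trailing "]" of tvc_name into " SUBS]", and eng_subs
## plus sdh_subs together become " (ENG + SDH SUBS)]".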
+ if s["Language_String"] == "Romanian": + #console.print("it has romanian subs", 'grey', 'on_green') + meta['ro_sub'] = 1 + if str(s["Language_String"]).lower().__contains__("english"): + meta['eng_subs'] = 1 + if str(s).lower().__contains__("sdh"): + meta['sdh_subs'] = 1 + + return + ## my get summary function^^^^ From c2fce4de4cf85f7e1cca7f6c61201531ef3bd2c1 Mon Sep 17 00:00:00 2001 From: swannie-eire <57441681+swannie-eire@users.noreply.github.com> Date: Thu, 24 Oct 2024 11:23:34 +0100 Subject: [PATCH 326/741] Update upload.py --- upload.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/upload.py b/upload.py index bf8b98c6b..3d68002c9 100644 --- a/upload.py +++ b/upload.py @@ -40,6 +40,7 @@ from src.trackers.AL import AL from src.trackers.SHRI import SHRI from src.trackers.TIK import TIK +from src.trackers.TVC import TVC from src.trackers.PSS import PSS from src.trackers.ULCX import ULCX import json @@ -236,11 +237,11 @@ async def do_the_thing(base_dir): #################################### common = COMMON(config=config) api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM', 'LCD', 'HUNO', 'LT', 'JPTV', 'TDC', 'OE', - 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'SHRI', 'LST', 'TIK', 'PSS', 'ULCX', 'BHD'] + 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'SHRI', 'LST', 'TIK', 'TVC', 'PSS', 'ULCX', 'BHD'] other_api_trackers = ['SN', 'NBL', 'ANT', 'BHDTV', 'RTF', 'TL'] http_trackers = ['HDB', 'TTG', 'FL', 'PTER', 'HDT', 'MTV'] tracker_class_map = { - 'BLU': BLU, 'BHD': BHD, 'AITHER': AITHER, 'STC': STC, 'R4E': R4E, 'THR': THR, 'STT': STT, 'HP': HP, 'PTP': PTP, 'RF': RF, 'SN': SN, 'TIK': TIK, + 'BLU': BLU, 'BHD': BHD, 'AITHER': AITHER, 'STC': STC, 'R4E': R4E, 'THR': THR, 'STT': STT, 'HP': HP, 'PTP': PTP, 'RF': RF, 'SN': SN, 'TIK': TIK, 'TVC': TVC, 'ACM': ACM, 'HDB': HDB, 'LCD': LCD, 'TTG': TTG, 'LST': LST, 'HUNO': HUNO, 'FL': FL, 'LT': LT, 'NBL': NBL, 'ANT': ANT, 'PTER': PTER, 'JPTV': JPTV, 'TL': TL, 'TDC': TDC, 'HDT': HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF': RTF, 'OTW': OTW, 'FNP': FNP, 'CBR': CBR, 'UTP': UTP, 'AL': AL, 'SHRI': SHRI, 'PSS': PSS, 'ULCX': ULCX} From 43131b59d7dd92c777e2bbf275120aac269cdb5d Mon Sep 17 00:00:00 2001 From: swannie-eire <57441681+swannie-eire@users.noreply.github.com> Date: Thu, 24 Oct 2024 11:27:02 +0100 Subject: [PATCH 327/741] Rename TVC to TVC.py --- src/trackers/{TVC => TVC.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/trackers/{TVC => TVC.py} (100%) diff --git a/src/trackers/TVC b/src/trackers/TVC.py similarity index 100% rename from src/trackers/TVC rename to src/trackers/TVC.py From e8b1ddc747eed6089ab33c7c743322e2db10b3e2 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 24 Oct 2024 20:34:04 +1000 Subject: [PATCH 328/741] imbgg correct links --- src/prep.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/prep.py b/src/prep.py index ecff10a00..1d1e88f20 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2745,9 +2745,9 @@ def exponential_backoff(retry_count, initial_timeout): } response = requests.post(url, data=data, timeout=timeout) response = response.json() - img_url = response['data']['image']['url'] - raw_url = img_url - web_url = img_url + img_url = response['data'].get('medium', response['data']['image'])['url'] + raw_url = response['data']['image']['url'] + web_url = response['data']['url_viewer'] upload_success = True elif img_host == "ptscreens": From 93af18556b2ca4ecf8a60ff9ba92a97f8cd828f0 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 24 Oct 2024 20:56:27 +1000 Subject: 
[PATCH 329/741] Lint --- src/trackers/TVC.py | 221 +++++++++++++++++++++----------------------- upload.py | 4 +- 2 files changed, 105 insertions(+), 120 deletions(-) diff --git a/src/trackers/TVC.py b/src/trackers/TVC.py index cd26cb731..486336ec2 100644 --- a/src/trackers/TVC.py +++ b/src/trackers/TVC.py @@ -22,12 +22,6 @@ class TVC(): Upload """ - ############################################################### - ######## EDIT ME ######## - ############################################################### - - # ALSO EDIT CLASS NAME ABOVE - def __init__(self, config): self.config = config self.tracker = 'TVC' @@ -36,7 +30,7 @@ def __init__(self, config): self.search_url = 'https://tvchaosuk.com/api/torrents/filter' self.signature = "" self.banned_groups = [''] - self.images = { + self.images = { "imdb_75": 'https://i.imgur.com/Mux5ObG.png', "tmdb_75": 'https://i.imgur.com/r3QzUbk.png', "tvdb_75": 'https://i.imgur.com/UWtUme4.png', @@ -45,7 +39,7 @@ def __init__(self, config): } pass - + async def get_cat_id(self, genres): # Note sections are based on Genre not type, source, resolution etc.. self.tv_types = ["comedy", "documentary", "drama", "entertainment", "factual", "foreign", "kids", "movies", "News", "radio", "reality", "soaps", "sci-fi", "sport", "holding bin"] @@ -59,7 +53,6 @@ async def get_cat_id(self, genres): if s.__contains__(g): return self.tv_types_ids[self.tv_types.index(s)] - # returning 14 as that is holding bin/misc return self.tv_types_ids[14] @@ -79,7 +72,7 @@ async def get_res_id(self, tv_pack, resolution): else: resolution_id = { '1080p': 'HD1080p', - '1080i':'HD1080p', + '1080i': 'HD1080p', '720p': 'HD720p', '576p': 'SD', '576i': 'SD', @@ -90,10 +83,6 @@ async def get_res_id(self, tv_pack, resolution): }.get(resolution, 'SD') return resolution_id - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### - async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) @@ -111,7 +100,7 @@ async def upload(self, meta, disctype): else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -134,9 +123,9 @@ async def upload(self, meta, disctype): year = meta['year'] else: year = "" - if meta.get('no_season', False) == True: + if meta.get('no_season', False) is True: season = '' - if meta.get('no_year', False) == True: + if meta.get('no_year', False) is True: year = '' if meta['category'] == "TV": @@ -168,12 +157,12 @@ async def upload(self, meta, disctype): tvc_name += " [IRL]" elif "AU" in meta['origin_country_code']: tvc_name += " [AUS]" - elif "NZ" in meta['origin_country_code']: + elif "NZ" in meta['origin_country_code']: tvc_name += " [NZ]" elif "CA" in meta['origin_country_code']: tvc_name += " [CA]" - if meta.get('unattended', False) == False: + if meta.get('unattended', False) is False: upload_to_tvc = cli_ui.ask_yes_no(f"Upload to {self.tracker} with the name {tvc_name}?", default=False) if not upload_to_tvc: @@ -181,29 +170,29 @@ async def upload(self, meta, disctype): upload_to_tvc = cli_ui.ask_yes_no(f"Upload to {self.tracker} with the name {tvc_name}?", default=False) data = { - 'name' : tvc_name, + 'name': tvc_name, # newline does not seem to work on this site for some reason. 
if you edit and save it again they will but not if pushed by api - 'description' : desc.replace('\n', '
').replace('\r', '
'), - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type' : resolution_id, - # 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'description': desc.replace('\n', '
').replace('\r', '
'), + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type': resolution_id, + # 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } if meta.get('category') == "TV": @@ -213,12 +202,12 @@ async def upload(self, meta, disctype): 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:53.0) Gecko/20100101 Firefox/53.0' } params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - if 'upload_to_tvc' in locals() and upload_to_tvc == False: + if 'upload_to_tvc' in locals() and upload_to_tvc is False: return - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: # some reason this does not return json instead it returns something like below. @@ -226,90 +215,88 @@ async def upload(self, meta, disctype): # so you need to convert text to json. json_data = json.loads(response.text.strip('application/x-bittorrent\n')) console.print(json_data) - + # adding torrent link to torrent as comment t_id = json_data['data'].split(".")[1].split("/")[3] await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://tvchaosuk.com/torrents/" + t_id) - except: + except Exception: console.print(traceback.print_exc()) console.print("It may have uploaded, go check") console.print(response.text.strip('application/x-bittorrent\n')) - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - async def get_tmdb_data(self, meta): - import tmdbsimple as tmdb - if meta['category'] == "MOVIE": - movie = tmdb.Movies(meta['tmdb']) - response = movie.info() - else: - tv = tmdb.TV(meta['tmdb']) - response = tv.info() - - #### TVC stuff - if meta['category'] == "TV": - if hasattr(tv, 'release_dates'): - meta['release_dates'] = tv.release_dates() - - if hasattr(tv, 'networks') and len(tv.networks) != 0 and 'name' in tv.networks[0]: - meta['networks'] = tv.networks[0]['name'] - - try: - if 'tv_pack' in meta and not meta['tv_pack']: - episode_info = tmdb.TV_Episodes(meta['tmdb'], meta['season_int'], meta['episode_int']).info() - - meta['episode_airdate'] = episode_info['air_date'] - meta['episode_name'] = episode_info['name'] - meta['episode_overview'] = episode_info['overview'] - if 'tv_pack' in meta and meta['tv_pack']: - season_info = tmdb.TV_Seasons(meta['tmdb'], meta['season_int']).info() - meta['season_air_first_date'] = season_info['air_date'] - - if hasattr(tv, 'first_air_date'): - meta['first_air_date'] = tv.first_air_date - except: - console.print(traceback.print_exc()) - console.print(f"Unable to get episode information, Make sure episode {meta['season']}{meta['episode']} exists in TMDB. 
\nhttps://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}/season/{meta['season_int']}") - meta['season_air_first_date'] = str({meta["year"]}) + "-N/A-N/A" - meta['first_air_date'] = str({meta["year"]}) + "-N/A-N/A" - - meta['origin_country_code'] = [] - if 'origin_country' in response: - if type(response['origin_country']) == list: - for i in response['origin_country']: - meta['origin_country_code'].append(i) - else: - meta['origin_country_code'].append(response['origin_country']) - print(type(response['origin_country'])) - - elif len(response['production_countries']): - for i in response['production_countries']: - if 'iso_3166_1' in i: - meta['origin_country_code'].append(i['iso_3166_1']) - elif len(response['production_companies']): - meta['origin_country_code'].append(response['production_companies'][0]['origin_country']) + import tmdbsimple as tmdb + if meta['category'] == "MOVIE": + movie = tmdb.Movies(meta['tmdb']) + response = movie.info() + else: + tv = tmdb.TV(meta['tmdb']) + response = tv.info() + + # TVC stuff + if meta['category'] == "TV": + if hasattr(tv, 'release_dates'): + meta['release_dates'] = tv.release_dates() + + if hasattr(tv, 'networks') and len(tv.networks) != 0 and 'name' in tv.networks[0]: + meta['networks'] = tv.networks[0]['name'] + + try: + if 'tv_pack' in meta and not meta['tv_pack']: + episode_info = tmdb.TV_Episodes(meta['tmdb'], meta['season_int'], meta['episode_int']).info() + + meta['episode_airdate'] = episode_info['air_date'] + meta['episode_name'] = episode_info['name'] + meta['episode_overview'] = episode_info['overview'] + if 'tv_pack' in meta and meta['tv_pack']: + season_info = tmdb.TV_Seasons(meta['tmdb'], meta['season_int']).info() + meta['season_air_first_date'] = season_info['air_date'] + + if hasattr(tv, 'first_air_date'): + meta['first_air_date'] = tv.first_air_date + except Exception: + console.print(traceback.print_exc()) + console.print(f"Unable to get episode information, Make sure episode {meta['season']}{meta['episode']} exists in TMDB. \nhttps://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}/season/{meta['season_int']}") + meta['season_air_first_date'] = str({meta["year"]}) + "-N/A-N/A" + meta['first_air_date'] = str({meta["year"]}) + "-N/A-N/A" + + meta['origin_country_code'] = [] + if 'origin_country' in response: + if isinstance(response['origin_country'], list): + for i in response['origin_country']: + meta['origin_country_code'].append(i) + else: + meta['origin_country_code'].append(response['origin_country']) + print(type(response['origin_country'])) + elif len(response['production_countries']): + for i in response['production_countries']: + if 'iso_3166_1' in i: + meta['origin_country_code'].append(i['iso_3166_1']) + elif len(response['production_companies']): + meta['origin_country_code'].append(response['production_companies'][0]['origin_country']) async def search_existing(self, meta, disctype): # Search on TVCUK has been DISABLED due to issues # leaving code here for future use when it is re-enabled - console.print("[RED]Search API has been disabled. 
This will fail and thats normal...") + console.print("[RED]Cannot search for dupes as search api is not working...") + console.print("[RED]Please make sure you are not uploading duplicates.") # https://tvchaosuk.com/api/torrents/filter?api_token=&tmdb=138108 - dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdb' : meta['tmdb'], - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdb': meta['tmdb'], + 'name': "" } try: @@ -321,7 +308,7 @@ async def search_existing(self, meta, disctype): for each in response['data']: result = [each][0]['attributes']['name'] dupes.append(result) - except: + except Exception: console.print(response) console.print(self.search_url, params) console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') @@ -329,7 +316,6 @@ async def search_existing(self, meta, disctype): return dupes - async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False): base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]DESCRIPTION.txt", 'w') as descfile: @@ -350,14 +336,14 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False): descfile.write("\n") desc = "" - #release info + # release info rd_info = "" # getting movie release info if meta['category'] != "TV" and 'release_dates' in meta: for cc in meta['release_dates']['results']: for rd in cc['release_dates']: if rd['type'] == 6: - channel = str(rd['note']) if str(rd['note']) != "" else "N/A Channel" + channel = str(rd['note']) if str(rd['note']) != "" else "N/A Channel" rd_info += "[color=orange][size=15]" + cc['iso_3166_1'] + " TV Release info [/size][/color]" + "\n" + str(rd['release_date'])[:10] + " on " + channel + "\n" # movie release info adding if rd_info != "": @@ -374,19 +360,19 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False): desc += f"[color=orange][size=15]Episode aired on channel {channel} on {meta['episode_airdate']}[/size][/color]" + "\n\n" else: desc += "[color=green][size=25]Release Info[/size][/color]" + "\n\n" - desc += "[color=orange][size=15]TMDB has No TV release info for this[/size][/color]"+ "\n\n" + desc += "[color=orange][size=15]TMDB has No TV release info for this[/size][/color]" + "\n\n" if meta['category'] == 'TV' and meta['tv_pack'] != 1 and 'episode_overview' in meta: desc += "[color=green][size=25]PLOT[/size][/color]" + "\n\n" + "[color=green][size=25]PLOT[/size][/color]\n" + "Episode Name: " + str(meta['episode_name']) + "\n" + str(meta['episode_overview'] + "\n\n") else: - desc += "[color=green][size=25]PLOT[/size][/color]" + "\n" + str(meta['overview'] + "\n\n") + desc += "[color=green][size=25]PLOT[/size][/color]" + "\n" + str(meta['overview'] + "\n\n") # Max two screenshots as per rules if len(base) > 2 and meta['description'] != "PTP": desc += "[color=green][size=25]Notes/Extra Info[/size][/color]" + " \n \n" + str(base) + " \n \n " desc += self.get_links(meta, "[color=green][size=25]", "[/size][/COLOR]") desc = bbcode.convert_pre_to_code(desc) desc = bbcode.convert_hide_to_spoiler(desc) - if comparison == False: + if comparison is False: desc = bbcode.convert_comparison_to_collapse(desc, 1000) descfile.write(desc) images = meta['image_list'] @@ -399,7 +385,7 @@ async def unit3d_edit_desc(self, meta, tracker, 
signature, comparison=False): descfile.write(f"[url={web_url}][img=350]{img_url}[/img][/url]") descfile.write("[/center]") - if signature != None: + if signature is not None: descfile.write(signature) descfile.close() return @@ -419,9 +405,8 @@ def get_links(self, movie, subheading, heading_end): description += f" [URL=https://myanimelist.net/anime/{str(movie['mal_id'])}][img]{self.images['mal_75']}[/img][/URL]" return description + " \n \n " - - ## my get subs function - ## used in naming conventions + # my get subs function + # used in naming conventions def get_subs_info(self, meta, mi): subs = "" subs_num = 0 @@ -437,9 +422,9 @@ def get_subs_info(self, meta, mi): if "Language_String" in s: if not subs_num <= 0: subs = subs + s["Language_String"] + ", " - ## checking if it has romanian subs as for data scene. + # checking if it has romanian subs as for data scene. if s["Language_String"] == "Romanian": - #console.print("it has romanian subs", 'grey', 'on_green') + # console.print("it has romanian subs", 'grey', 'on_green') meta['ro_sub'] = 1 if str(s["Language_String"]).lower().__contains__("english"): meta['eng_subs'] = 1 @@ -447,4 +432,4 @@ meta['sdh_subs'] = 1 return - ## my get summary function^^^^ + # my get summary function^^^^ diff --git a/upload.py b/upload.py index 3d68002c9..0c73bb377 100644 --- a/upload.py +++ b/upload.py @@ -237,8 +237,8 @@ async def do_the_thing(base_dir): #################################### common = COMMON(config=config) api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM', 'LCD', 'HUNO', 'LT', 'JPTV', 'TDC', 'OE', - 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'SHRI', 'LST', 'TIK', 'TVC', 'PSS', 'ULCX', 'BHD'] + 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'SHRI', 'LST', 'TIK', 'PSS', 'ULCX', 'BHD'] - other_api_trackers = ['SN', 'NBL', 'ANT', 'BHDTV', 'RTF', 'TL'] + other_api_trackers = ['SN', 'NBL', 'ANT', 'BHDTV', 'RTF', 'TL', 'TVC'] http_trackers = ['HDB', 'TTG', 'FL', 'PTER', 'HDT', 'MTV'] tracker_class_map = { 'BLU': BLU, 'BHD': BHD, 'AITHER': AITHER, 'STC': STC, 'R4E': R4E, 'THR': THR, 'STT': STT, 'HP': HP, 'PTP': PTP, 'RF': RF, 'SN': SN, 'TIK': TIK, 'TVC': TVC, From 024514c050ffaff777fdb032e202351334c0fc4f Mon Sep 17 00:00:00 2001 From: swannie-eire <57441681+swannie-eire@users.noreply.github.com> Date: Thu, 24 Oct 2024 12:47:20 +0100 Subject: [PATCH 330/741] Update TVC.py Colors were in uppercase so they didn't actually show in the color, as they need to be in lower case.
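For illustration, a minimal sketch of the behaviour being fixed, assuming src.console wraps a standard rich Console (the message text here is just an example):

    from rich.console import Console

    console = Console()
    # lowercase markup tags render in colour as expected;
    # rich auto-closes an unclosed tag at the end of the string
    console.print("[red]Cannot search for dupes as search api is not working...")
    # uppercase tags such as "[RED]..." were observed not to apply the
    # colour, which is what this commit corrects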
--- src/trackers/TVC.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/trackers/TVC.py b/src/trackers/TVC.py index 486336ec2..e16aa27c3 100644 --- a/src/trackers/TVC.py +++ b/src/trackers/TVC.py @@ -224,7 +224,7 @@ async def upload(self, meta, disctype): except Exception: console.print(traceback.print_exc()) - console.print("It may have uploaded, go check") + console.print("[yellow]It may have uploaded, go check") console.print(response.text.strip('application/x-bittorrent\n')) return else: @@ -287,8 +287,8 @@ async def get_tmdb_data(self, meta): async def search_existing(self, meta, disctype): # Search on TVCUK has been DISABLED due to issues # leaving code here for future use when it is re-enabled - console.print("[RED]Cannot search for dupes as search api is not working...") - console.print("[RED]Please make sure you are not uploading duplicates.") + console.print("[red]Cannot search for dupes as search api is not working...") + console.print("[red]Please make sure you are not uploading duplicates.") # https://tvchaosuk.com/api/torrents/filter?api_token=&tmdb=138108 dupes = [] From b0428c18b5aa7a96d01e8b1c6759bdbdf138ca70 Mon Sep 17 00:00:00 2001 From: swannie-eire <57441681+swannie-eire@users.noreply.github.com> Date: Thu, 24 Oct 2024 12:48:26 +0100 Subject: [PATCH 331/741] Update TVC.py --- src/trackers/TVC.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/trackers/TVC.py b/src/trackers/TVC.py index e16aa27c3..3e184ae3b 100644 --- a/src/trackers/TVC.py +++ b/src/trackers/TVC.py @@ -405,7 +405,7 @@ def get_links(self, movie, subheading, heading_end): description += f" [URL=https://myanimelist.net/anime/{str(movie['mal_id'])}][img]{self.images['mal_75']}[/img][/URL]" return description + " \n \n " - # my get subs function + # get subs function # used in naming conventions def get_subs_info(self, meta, mi): subs = "" @@ -432,4 +432,4 @@ def get_subs_info(self, meta, mi): meta['sdh_subs'] = 1 return - # my get summary function^^^^ + # get subs function^^^^ From c4e072c347bc94f4035473f9f3863664d63cb660 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 25 Oct 2024 08:06:18 +1000 Subject: [PATCH 332/741] Correct ss_times call for discs Fixes https://github.com/Audionut/Upload-Assistant/issues/100 --- src/prep.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index 1d1e88f20..0db305e87 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1171,7 +1171,7 @@ def disc_screenshots(self, filename, bdinfo, folder_id, base_dir, use_vs, image_ ss_times = self.valid_ss_time(ss_times, num_screens + 1, length) ( ffmpeg - .input(file, ss=ss_times[-1], skip_frame=keyframe) + .input(file, ss=ss_times[i], skip_frame=keyframe) .output(image, vframes=1, pix_fmt="rgb24") .overwrite_output() .global_args('-loglevel', loglevel) @@ -1311,7 +1311,7 @@ def _is_vob_good(n, loops, num_screens): voblength, n = _is_vob_good(n, 0, num_screens) # img_time = random.randint(round(voblength/5), round(voblength - voblength/5)) ss_times = self.valid_ss_time(ss_times, num_screens + 1, voblength) - ff = ffmpeg.input(f"{meta['discs'][disc_num]['path']}/VTS_{main_set[n]}", ss=ss_times[-1]) + ff = ffmpeg.input(f"{meta['discs'][disc_num]['path']}/VTS_{main_set[n]}", ss=ss_times[i]) if w_sar != 1 or h_sar != 1: ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) ( From 66859928dfcd086bd4a6a8b1bf144dd5049957a7 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 25 Oct 2024 11:32:07 +1000 Subject: [PATCH 
333/741] MTV + imgbox fixes https://github.com/Audionut/Upload-Assistant/issues/99 --- src/prep.py | 9 +++---- src/trackers/MTV.py | 66 ++++++++++++++++++++++++++++----------------- 2 files changed, 45 insertions(+), 30 deletions(-) diff --git a/src/prep.py b/src/prep.py index 0db305e87..f959a831d 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2688,7 +2688,7 @@ def exponential_backoff(retry_count, initial_timeout): loop = asyncio.get_event_loop() # Run the imgbox upload in the current event loop - image_list = loop.run_until_complete(self.imgbox_upload(os.getcwd(), image_glob, meta)) # Pass all images + image_list = loop.run_until_complete(self.imgbox_upload(os.getcwd(), image_glob, meta, return_dict)) # Pass all images # Ensure the image_list contains valid URLs before continuing if image_list and all('img_url' in img and 'raw_url' in img and 'web_url' in img for img in image_list): @@ -2839,6 +2839,7 @@ def exponential_backoff(retry_count, initial_timeout): progress.advance(upload_task) i += 1 # Increment the image counter only after success + return_dict['image_list'] = image_list break # Break retry loop after a successful upload except Exception as e: @@ -2864,7 +2865,7 @@ def exponential_backoff(retry_count, initial_timeout): return image_list, i - async def imgbox_upload(self, chdir, image_glob, meta): + async def imgbox_upload(self, chdir, image_glob, meta, return_dict): try: os.chdir(chdir) image_list = [] @@ -2872,8 +2873,6 @@ async def imgbox_upload(self, chdir, image_glob, meta): console.print(f"[debug] Starting upload of {len(image_glob)} images to imgbox...") async with pyimgbox.Gallery(thumb_width=350, square_thumbs=False) as gallery: for image in image_glob: - # console.print(f"[blue]Uploading image: {image}") - try: async for submission in gallery.add([image]): if not submission['success']: @@ -2895,11 +2894,11 @@ async def imgbox_upload(self, chdir, image_glob, meta): return [] # Return empty list in case of error # After uploading all images, validate URLs and get sizes - # console.print("[blue]Validating images and retrieving their sizes...") valid_images = await self.check_images_concurrently(image_list, meta) if valid_images: console.print(f"[yellow]Successfully uploaded and validated {len(valid_images)} images.") + return_dict['image_list'] = valid_images # Set the validated images in return_dict else: console.print("[red]Failed to validate any images.") return [] # Return empty list if no valid images diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index e9fc7c6b9..c77b49908 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -83,9 +83,10 @@ def enforce_size_limit(image_list, image_sizes): else: # Proceed with the retry logic if images are not hosted on an approved image host + images_reuploaded = False while img_host_index <= len(approved_image_hosts): # Call handle_image_upload and pass the updated meta with the current image host index - image_list, retry_mode = await self.handle_image_upload(meta, img_host_index, approved_image_hosts) + image_list, retry_mode, images_reuploaded = await self.handle_image_upload(meta, img_host_index, approved_image_hosts) # If retry_mode is True, switch to the next host if retry_mode: @@ -155,7 +156,7 @@ def enforce_size_limit(image_list, image_sizes): des_tags = await self.get_tags(meta) # Edit description and other details - await self.edit_desc(meta) + await self.edit_desc(meta, images_reuploaded) group_desc = await self.edit_group_desc(meta) mtv_name = await self.edit_name(meta) @@ -219,25 +220,30 @@ async def 
handle_image_upload(self, meta, img_host_index=1, approved_image_hosts if approved_image_hosts is None: approved_image_hosts = ['ptpimg', 'imgbox'] - current_img_host_key = f'img_host_{img_host_index}' - current_img_host = self.config.get('DEFAULT', {}).get(current_img_host_key) + retry_mode = False + images_reuploaded = False - if not current_img_host or current_img_host not in approved_image_hosts: - console.print("[red]Your preferred image host is not supported at MTV, re-uploading to an allowed image host.") - retry_mode = True # Ensure retry_mode is set to True when switching hosts - meta['imghost'] = approved_image_hosts[0] # Switch to the first approved host - else: - meta['imghost'] = current_img_host - retry_mode = False # Start with retry_mode False unless we know we need to switch + while True: + current_img_host_key = f'img_host_{img_host_index}' + current_img_host = self.config.get('DEFAULT', {}).get(current_img_host_key) + + if not current_img_host: + console.print("[red]No more image hosts left to try.") + raise Exception("No valid image host found in the config.") + + if current_img_host not in approved_image_hosts: + console.print(f"[red]Your preferred image host '{current_img_host}' is not supported at MTV, trying next host.") + retry_mode = True # Ensure retry_mode is set to True when switching hosts + images_reuploaded = True # Mark that reuploading occurred + img_host_index += 1 # Move to the next image host in the config + continue + else: + meta['imghost'] = current_img_host + break # Exit the loop when a valid host is found from src.prep import Prep prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) - - # Screenshot and upload process - prep.screenshots(Path(meta['path']), meta['name'], meta['uuid'], meta['base_dir'], meta) return_dict = {} - - # Call upload_screens with the appropriate retry_mode prep.upload_screens( meta, screens=meta['screens'], @@ -249,35 +255,45 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts retry_mode=retry_mode # Honor the retry_mode flag passed in ) - # Update meta['image_list'] with uploaded images - meta['image_list'] = return_dict.get('image_list', []) + # Overwrite meta['image_list'] with the newly uploaded images + new_image_list = return_dict.get('image_list', []) + if new_image_list: + meta['image_list'] = new_image_list # Overwrite with new images # Ensure images are from approved hosts if not all(any(x in image['raw_url'] for x in approved_image_hosts) for image in meta['image_list']): console.print("[red]Unsupported image host detected, please use one of the approved image hosts") - return meta['image_list'], True # Trigger retry_mode if switching hosts + return meta['image_list'], True, images_reuploaded # Trigger retry_mode if switching hosts - return meta['image_list'], False # No need to retry, successful upload + return meta['image_list'], False, images_reuploaded # Return retry_mode and images_reuploaded - async def edit_desc(self, meta): + async def edit_desc(self, meta, images_reuploaded): base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as desc: - # adding bd_dump to description if it exits and adding empty string to mediainfo if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: mi_dump = 
open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read()[:-65].strip() bd_dump = None + if bd_dump: desc.write("[mediainfo]" + bd_dump + "[/mediainfo]\n\n") elif mi_dump: desc.write("[mediainfo]" + mi_dump + "[/mediainfo]\n\n") + images = meta['image_list'] if len(images) > 0: - for each in range(len(images)): - raw_url = images[each]['raw_url'] - desc.write(f"[img={raw_url}][/img]\n") + for image in images: + raw_url = image['raw_url'] + img_url = image['img_url'] + + if images_reuploaded: + desc.write(f"[url={raw_url}][img=250]{img_url}[/img][/url]\n") + else: + desc.write(f"[img={raw_url}][/img]\n") + desc.write(f"\n\n{base}") desc.close() return From b32aa5550c96eea993bf4e9191fdf11eeaa8d092 Mon Sep 17 00:00:00 2001 From: swannie-eire <57441681+swannie-eire@users.noreply.github.com> Date: Fri, 25 Oct 2024 02:46:39 +0100 Subject: [PATCH 334/741] adding speedapp (#98) * test * Update upload.py * Update SPD.py * Update SPD.py * Update SPD.py * Update SPD.py * Update example-config.py updating config with tvc and speedapp * Update example-config.py * Lint --------- Co-authored-by: Audionut --- data/example-config.py | 10 ++- src/trackers/SPD.py | 153 +++++++++++++++++++++++++++++++++++++++++ upload.py | 5 +- 3 files changed, 165 insertions(+), 3 deletions(-) create mode 100644 src/trackers/SPD.py diff --git a/data/example-config.py b/data/example-config.py index 849099081..8e628821a 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -214,7 +214,6 @@ "password": "password", "api_key": 'get_it_by_running_/api/ login command from https://retroflix.club/api/doc', "announce_url": "get from upload page", - # "tag": "RetroFlix, nd", "anon": True }, "RF": { @@ -254,6 +253,10 @@ "announce_url": "https://shareisland.org/announce/customannounceurl", # "anon" : "False" }, + "SPD": { + "api_key": "SPEEDAPP API KEY", + "announce_url": "https://ramjet.speedapp.io//announce", + }, "TIK": { "useAPI": False, # Set to True if using TIK "api_key": "", @@ -261,6 +264,11 @@ "anon": False, "modq": True, }, + "TVC": { + "api_key": "TVC API Key", + "announce_url": "https://tvchaosuk.com/announce/", + "anon": "False" + }, "PSS": { "api_key": "PSS api key", "announce_url": "https://privatesilverscreen.cc/announce/customannounceurl", diff --git a/src/trackers/SPD.py b/src/trackers/SPD.py new file mode 100644 index 000000000..b5893da1f --- /dev/null +++ b/src/trackers/SPD.py @@ -0,0 +1,153 @@ +# -*- coding: utf-8 -*- +# import discord +import asyncio +from torf import Torrent +import requests +from src.console import console +from pprint import pprint +import base64 +import shutil +import os +import traceback + +from src.trackers.COMMON import COMMON + + +# from pprint import pprint + +class SPD(): + + def __init__(self, config): + self.url = "https://speedapp.io" + self.config = config + self.tracker = 'SPD' + self.source_flag = 'speedapp.io' + self.search_url = 'https://speedapp.io/api/torrent' + self.upload_url = 'https://speedapp.io/api/upload' + self.forum_link = 'https://speedapp.io/support/wiki/rules' + self.banned_groups = [''] + pass + + async def upload(self, meta, disctype): + common = COMMON(config=self.config) + await common.edit_torrent(meta, self.tracker, self.source_flag) + type_id = "" + if meta['anime']: + type_id = '3' + elif meta['category'] == 'TV': + if meta['tv_pack']: + type_id = '41' + elif meta['sd'] and not meta['tv_pack']: + type_id = '45' + # must be hd + else: + type_id = '43' + else: + if meta['type'] != "DISC" and 
meta['resolution'] == "2160p": + type_id = '61' + else: + type_id = { + 'DISC': '17', + 'REMUX': '8', + 'WEBDL': '8', + 'WEBRIP': '8', + 'HDTV': '8', + 'SD': '10', + 'ENCODE': '8' + }.get(type, '0') + + if meta['bdinfo'] is not None: + mi_dump = None + bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + else: + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read() + bd_dump = None + screenshots = [] + if len(meta['image_list']) != 0: + for image in meta['image_list']: + screenshots.append(image['raw_url']) + data = { + 'name': meta['name'].replace("'", '').replace(': ', '.').replace(':', '.').replace(' ', '.').replace(' ', '.').replace('DD+', 'DDP'), + 'screenshots': screenshots, + 'release_info': f"[center][url={self.forum_link}]Please seed[/url][/center]", + 'media_info': mi_dump, + 'bd_info': bd_dump, + 'type': type_id, + 'url': f"https://www.imdb.com/title/tt{meta['imdb_id']}", + 'shortDescription': meta['genres'], + 'keywords': meta['keywords'], + 'releaseInfo': self.forum_link + } + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') as binary_file: + binary_file_data = binary_file.read() + base64_encoded_data = base64.b64encode(binary_file_data) + base64_message = base64_encoded_data.decode('utf-8') + data['file'] = base64_message + + headers = {'Authorization': 'Bearer ' + self.config['TRACKERS'][self.tracker]['api_key'].strip()} + + if meta['debug'] is False: + response = requests.request("POST", url=self.upload_url, json=data, headers=headers) + try: + print(response.json()) + # response = {'status': True, 'error': False, 'downloadUrl': '/api/torrent/383435/download', 'torrent': {'id': 383435, 'name': 'name-with-full-stops', 'slug': 'name-with-dashs', 'category_id': 3}} + # downloading the torrent from site as it adds a tonne of different trackers and the source is different all the time. + try: + # torrent may not dl and may not provide error if machine is under load or network connection usage high. + with requests.get(url=self.url + response.json()['downloadUrl'], stream=True, headers=headers) as r: + # replacing L4g/torf created torrent so it will be added to the client. 
+ with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + 'wb') as f: + shutil.copyfileobj(r.raw, f) + # adding as comment link to torrent + if os.path.exists(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent"): + new_torrent = Torrent.read(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent") + new_torrent.metainfo['comment'] = f"{self.url}/browse/{response.json()['torrent']['id']}" + Torrent.copy(new_torrent).write(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", overwrite=True) + except Exception: + console.print(traceback.print_exc()) + console.print("[red]Unable to Download torrent, try manually") + except Exception: + console.print(traceback.print_exc()) + console.print("[yellow]Unable to Download torrent, try manually") + return + else: + console.print("[cyan]Request Data:") + pprint(data) + + async def get_cat_id(self, category_name): + category_id = { + 'MOVIE': '1', + 'TV': '2', + 'FANRES': '3' + }.get(category_name, '0') + return category_id + + async def search_existing(self, meta, disctype): + dupes = [] + console.print("[yellow]Searching for existing torrents on site...") + headers = { + 'accept': 'application/json', + 'Authorization': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + } + + params = { + 'includingDead': '1' + } + + if meta['imdb_id'] != "0": + params['imdbId'] = meta['imdb_id'] if str(meta['imdb_id']).startswith("tt") else "tt" + meta['imdb_id'] + else: + params['search'] = meta['title'].replace(':', '').replace("'", '').replace(",", '') + + try: + response = requests.get(url=self.search_url, params=params, headers=headers) + response = response.json() + for each in response: + result = [each][0]['name'] + dupes.append(result) + except Exception: + console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') + await asyncio.sleep(5) + + return dupes diff --git a/upload.py b/upload.py index 0c73bb377..300b77ea1 100644 --- a/upload.py +++ b/upload.py @@ -43,6 +43,7 @@ from src.trackers.TVC import TVC from src.trackers.PSS import PSS from src.trackers.ULCX import ULCX +from src.trackers.SPD import SPD import json from pathlib import Path import asyncio @@ -238,13 +239,13 @@ async def do_the_thing(base_dir): common = COMMON(config=config) api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM', 'LCD', 'HUNO', 'LT', 'JPTV', 'TDC', 'OE', 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'SHRI', 'LST', 'TIK', 'PSS', 'ULCX', 'BHD'] - other_api_trackers = ['SN', 'NBL', 'ANT', 'BHDTV', 'RTF', 'TL', 'TVC'] + other_api_trackers = ['SN', 'NBL', 'ANT', 'BHDTV', 'RTF', 'TL', 'TVC', 'SPD'] http_trackers = ['HDB', 'TTG', 'FL', 'PTER', 'HDT', 'MTV'] tracker_class_map = { 'BLU': BLU, 'BHD': BHD, 'AITHER': AITHER, 'STC': STC, 'R4E': R4E, 'THR': THR, 'STT': STT, 'HP': HP, 'PTP': PTP, 'RF': RF, 'SN': SN, 'TIK': TIK, 'TVC': TVC, 'ACM': ACM, 'HDB': HDB, 'LCD': LCD, 'TTG': TTG, 'LST': LST, 'HUNO': HUNO, 'FL': FL, 'LT': LT, 'NBL': NBL, 'ANT': ANT, 'PTER': PTER, 'JPTV': JPTV, 'TL': TL, 'TDC': TDC, 'HDT': HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF': RTF, 'OTW': OTW, 'FNP': FNP, 'CBR': CBR, 'UTP': UTP, 'AL': AL, - 'SHRI': SHRI, 'PSS': PSS, 'ULCX': ULCX} + 'SHRI': SHRI, 'PSS': PSS, 'ULCX': ULCX, 'SPD': SPD} tracker_capabilities = { 'LST': {'mod_q': True, 'draft': True}, From ddfaa9f219ac537cbf5cd8b319e440c197151dc3 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 25 Oct 2024 11:49:52 +1000 Subject: [PATCH 335/741] readme --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index ec02dd2b8..c9ea3eb33 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ A simple tool to take the work out of uploading. - Can re-use existing torrents instead of hashing new - Generates proper name for your upload using Mediainfo/BDInfo and TMDb/IMDb conforming to site rules - Checks for existing releases already on site - - Uploads to PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN/RTF/OTW/FNP/CBR/UTP/HDB/AL/SHRI/OE/TL/BHDTV/HDT/JPTV/LT/MTV/PTER/TDC/TTG/UTP/PSS/ULCX + - Uploads to PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN/RTF/OTW/FNP/CBR/UTP/HDB/AL/SHRI/OE/TL/BHDTV/HDT/JPTV/LT/MTV/PTER/TDC/TTG/UTP/PSS/ULCX/SPD/TVC - Adds to your client with fast resume, seeding instantly (rtorrent/qbittorrent/deluge/watch folder) - ALL WITH MINIMAL INPUT! 
- Currently works with .mkv/.mp4/Blu-ray/DVD/HD-DVDs From caacf5a56da1fa4e7d9eed91c7258e7633902e00 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 25 Oct 2024 12:38:44 +1000 Subject: [PATCH 336/741] ANT piece size gap --- src/prep.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index f959a831d..94e395d5e 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2503,11 +2503,21 @@ def calculate_piece_size(cls, total_size, min_size, max_size, files): # Adjust the piece size to fit within the constraints while not (1000 <= num_pieces <= 2000 and torrent_file_size <= 102400): # 100 KiB .torrent size limit - if num_pieces < 1000: + if num_pieces < 1000 and torrent_file_size >= 102400: + piece_size *= 2 + if piece_size > our_max_size: + piece_size = our_max_size + break + elif num_pieces < 1000: piece_size //= 2 if piece_size < our_min_size: piece_size = our_min_size break + elif piece_size > 18000000 and torrent_file_size >= 102400: + piece_size *= 2 + if piece_size > our_max_size: + piece_size = our_max_size + break elif torrent_file_size > 61440: # Break if .torrent size exceeds 60 KiB break elif num_pieces > 2000: From 0164d9c27505d406bb370fd5ede4486a03a3cfea Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 25 Oct 2024 14:47:07 +1000 Subject: [PATCH 337/741] FIX repack handling in edition Also fix multiple editions, `--edition Director's Cut REPACK` --- src/prep.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/prep.py b/src/prep.py index 94e395d5e..3ea33745d 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2429,9 +2429,12 @@ def get_edition(self, video, bdinfo, filelist, manual_edition): edition = edition + " Open Matte" if manual_edition: + if isinstance(manual_edition, list): + manual_edition = " ".join(manual_edition) edition = str(manual_edition) + edition = edition.replace(",", " ") - print(f"Edition After Manual Edition: {edition}") + # print(f"Edition After Manual Edition: {edition}") if "REPACK" in (video or edition.upper()) or "V2" in video: repack = "REPACK" @@ -2444,14 +2447,16 @@ def get_edition(self, video, bdinfo, filelist, manual_edition): if "RERIP" in (video or edition.upper()): repack = "RERIP" - print(f"Repack after Checks: {repack}") + # print(f"Repack after Checks: {repack}") # Only remove REPACK, RERIP, or PROPER from edition if they're not part of manual_edition - edition = re.sub(r"(\bREPACK\d?\b|\bRERIP\b|\bPROPER\b)", "", edition, flags=re.IGNORECASE).strip() + if not manual_edition or all(tag.lower() not in ['repack', 'repack2', 'repack3', 'proper', 'rerip'] for tag in manual_edition.strip().lower().split()): + edition = re.sub(r"(\bREPACK\d?\b|\bRERIP\b|\bPROPER\b)", "", edition, flags=re.IGNORECASE).strip() + print(f"Final Edition: {edition}") bad = ['internal', 'limited', 'retail'] if edition.lower() in bad: - edition = "" + edition = re.sub(r'\b(?:' + '|'.join(bad) + r')\b', '', edition, flags=re.IGNORECASE).strip() return edition, repack From 6c0dd51017d1254a5a4aaa14d24839b6fea1f332 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 25 Oct 2024 16:53:51 +1000 Subject: [PATCH 338/741] Fix MTV host logic when already a good host --- src/trackers/MTV.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index c77b49908..d651813e3 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -47,6 +47,7 @@ async def upload(self, meta, disctype): async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): 
approved_image_hosts = ['ptpimg', 'imgbox'] total_size_limit = 25 * 1024 * 1024 # 25 MiB in bytes + images_reuploaded = False # Helper function to calculate total size of the images def calculate_total_size(image_list, image_sizes): From 086e3405290ec98b1a49d240929dd9918e291fa8 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 25 Oct 2024 17:00:58 +1000 Subject: [PATCH 339/741] Allow piece sizes up to 256 MiB --- src/args.py | 2 +- src/prep.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/args.py b/src/args.py index eee44f3a0..3826554c9 100644 --- a/src/args.py +++ b/src/args.py @@ -78,7 +78,7 @@ def parse(self, args, meta): parser.add_argument('-debug', '--debug', action='store_true', required=False, help="Debug Mode, will run through all the motions providing extra info, but will not upload to trackers.") parser.add_argument('-ffdebug', '--ffdebug', action='store_true', required=False, help="Will show info from ffmpeg while taking screenshots.") parser.add_argument('-m', '--manual', action='store_true', required=False, help="Manual Mode. Returns link to ddl screens/base.torrent") - parser.add_argument('-mps', '--max-piece-size', nargs='*', required=False, help="Set max piece size allowed in MiB for default torrent creation (default 64 MiB)", choices=['2', '4', '8', '16', '32', '64']) + parser.add_argument('-mps', '--max-piece-size', nargs='*', required=False, help="Set max piece size allowed in MiB for default torrent creation (default 256 MiB)", choices=['2', '4', '8', '16', '32', '64', '128', '256']) parser.add_argument('-nh', '--nohash', action='store_true', required=False, help="Don't hash .torrent") parser.add_argument('-rh', '--rehash', action='store_true', required=False, help="DO hash .torrent") parser.add_argument('-dr', '--draft', action='store_true', required=False, help="Send to drafts (BHD, LST)") diff --git a/src/prep.py b/src/prep.py index 3ea33745d..d84614ea1 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2466,7 +2466,7 @@ def get_edition(self, video, bdinfo, filelist, manual_edition): class CustomTorrent(torf.Torrent): # Default piece size limits torf.Torrent.piece_size_min = 16384 # 16 KiB - torf.Torrent.piece_size_max = 67108864 # 64 MiB + torf.Torrent.piece_size_max = 268435456 # 256 MiB def __init__(self, meta, *args, **kwargs): super().__init__(*args, **kwargs) @@ -2501,8 +2501,8 @@ def piece_size(self, value): @classmethod def calculate_piece_size(cls, total_size, min_size, max_size, files): our_min_size = 16384 - our_max_size = max_size if max_size else 67108864 # Default to 64 MiB if max_size is None - piece_size = 67108864 # Start with 64 MiB + our_max_size = max_size if max_size else 268435456 # Default to 256 MiB if max_size is None + piece_size = 268435456 # Start with 256 MiB num_pieces = math.ceil(total_size / piece_size) torrent_file_size = 20 + (num_pieces * 20) + cls._calculate_pathname_bytes(files) # Approximate .torrent size From 89064808722afa3ee604a68e6fa79a74999c409b Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 26 Oct 2024 01:45:20 +1000 Subject: [PATCH 340/741] Recalibrate torrent creation Torrent creation should no longer get caught in loops; it now uses sane break points instead of insisting on strict limits. ANT: keep the existing torrent unless the .torrent exceeds 100 KiB, in which case regenerate it to fit.
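For context, a condensed sketch of the recalibrated approach (a hypothetical helper, simplified from the real loop in the diff below; pathname_bytes stands in for the patch's _calculate_pathname_bytes estimate):

    import math

    def pick_piece_size(total_size, pathname_bytes,
                        min_size=16384, max_size=268435456):
        piece_size = 4 * 1024 * 1024  # start at 4 MiB, as the patch does
        for _ in range(64):  # piece size can only double/halve so many times
            num_pieces = math.ceil(total_size / piece_size)
            # ~20 bytes per piece hash plus path metadata approximates
            # the final .torrent file size
            torrent_file_size = 20 + (num_pieces * 20) + pathname_bytes
            if 750 <= num_pieces <= 2200 and torrent_file_size <= 102400:
                break  # good enough, stop instead of chasing strict limits
            if num_pieces > 2200 or torrent_file_size > 102400:
                if piece_size >= max_size:
                    break  # accept the overshoot rather than loop forever
                piece_size *= 2
            elif num_pieces < 750:
                if piece_size <= min_size:
                    break
                piece_size //= 2
        return piece_size

For example, a 50 GiB payload with ~2 KiB of path metadata settles on 32 MiB pieces (1600 pieces, a ~34 KiB .torrent).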
--- src/prep.py | 77 +++++++++++++++++++++++++++++++++------------ src/trackers/ANT.py | 60 +++++++---------------------------- 2 files changed, 69 insertions(+), 68 deletions(-) diff --git a/src/prep.py b/src/prep.py index d84614ea1..ed276c832 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2482,9 +2482,9 @@ def __init__(self, meta, *args, **kwargs): self.piece_size_max = torf.Torrent.piece_size_max # Calculate and set the piece size - total_size = self._calculate_total_size() - piece_size = self.calculate_piece_size(total_size, self.piece_size_min, self.piece_size_max, self.files) - self.piece_size = piece_size + # total_size = self._calculate_total_size() + # piece_size = self.calculate_piece_size(total_size, self.piece_size_min, self.piece_size_max, self.files) + self.metainfo['info']['piece length'] = self._piece_size @property def piece_size(self): @@ -2500,53 +2500,90 @@ def piece_size(self, value): @classmethod def calculate_piece_size(cls, total_size, min_size, max_size, files): + file_count = len(files) + # console.print(f"[red]Calculating piece size for {file_count} files") + our_min_size = 16384 our_max_size = max_size if max_size else 268435456 # Default to 256 MiB if max_size is None - piece_size = 268435456 # Start with 256 MiB + piece_size = 4194304 # Start with 4 MiB + num_pieces = math.ceil(total_size / piece_size) - torrent_file_size = 20 + (num_pieces * 20) + cls._calculate_pathname_bytes(files) # Approximate .torrent size + + # Initial torrent_file_size calculation based on file_count + # More paths = greater error in pathname_bytes, roughly recalibrate + if file_count > 1000: + torrent_file_size = 20 + (num_pieces * 20) + int(cls._calculate_pathname_bytes(files) * 71 / 100) + elif file_count > 500: + torrent_file_size = 20 + (num_pieces * 20) + int(cls._calculate_pathname_bytes(files) * 4 / 5) + else: + torrent_file_size = 20 + (num_pieces * 20) + cls._calculate_pathname_bytes(files) + + # iteration = 0 # Track the number of iterations + # print(f"Initial piece size: {piece_size} bytes") + # print(f"Initial num_pieces: {num_pieces}, Initial torrent_file_size: {torrent_file_size} bytes") # Adjust the piece size to fit within the constraints - while not (1000 <= num_pieces <= 2000 and torrent_file_size <= 102400): # 100 KiB .torrent size limit - if num_pieces < 1000 and torrent_file_size >= 102400: + while not ((750 <= num_pieces <= 2200 or num_pieces < 750 and 40960 <= torrent_file_size <= 102400) and torrent_file_size <= 102400): + # iteration += 1 + # print(f"\nIteration {iteration}:") + # print(f"Current piece_size: {piece_size} bytes") + # print(f"Current num_pieces: {num_pieces}, Current torrent_file_size: {torrent_file_size} bytes") + if num_pieces > 1000 and num_pieces < 2000 and torrent_file_size < 100000: + break + elif num_pieces < 1500 and torrent_file_size >= 102400: piece_size *= 2 + # print(f"Doubled piece_size to {piece_size} bytes (num_pieces < 1500 and torrent_file_size >= 100 KiB)") if piece_size > our_max_size: piece_size = our_max_size + # print(f"piece_size exceeded max_size, set to our_max_size: {our_max_size} bytes") break - elif num_pieces < 1000: + elif num_pieces < 750: piece_size //= 2 + # print(f"Halved piece_size to {piece_size} bytes (num_pieces < 750)") if piece_size < our_min_size: piece_size = our_min_size + # print(f"piece_size went below min_size, set to our_min_size: {our_min_size} bytes") break - elif piece_size > 18000000 and torrent_file_size >= 102400: - piece_size *= 2 - if piece_size > our_max_size: - piece_size = our_max_size - 
break - elif torrent_file_size > 61440: # Break if .torrent size exceeds 60 KiB + elif 40960 < torrent_file_size < 102400: + # print(f"torrent_file_size is between 40 KiB and 100 KiB, exiting loop.") break - elif num_pieces > 2000: + elif num_pieces > 2200: piece_size *= 2 + # print(f"Doubled piece_size to {piece_size} bytes (num_pieces > 2500)") if piece_size > our_max_size: piece_size = our_max_size + # print(f"piece_size exceeded max_size, set to our_max_size: {our_max_size} bytes") break - elif torrent_file_size < 81920: # Break if .torrent size less than 80 KiB - break - elif torrent_file_size > 2048: # Break if .torrent size exceeds 2 KiB + elif torrent_file_size < 2048: + # print(f"torrent_file_size is less than 2 KiB, exiting loop.") break elif torrent_file_size > 102400: piece_size *= 2 + # print(f"Doubled piece_size to {piece_size} bytes (torrent_file_size > 100 KiB)") if piece_size > our_max_size: piece_size = our_max_size + # print(f"piece_size exceeded max_size, set to our_max_size: {our_max_size} bytes") cli_ui.warning('WARNING: .torrent size will exceed 100 KiB!') break + + # Update num_pieces num_pieces = math.ceil(total_size / piece_size) - torrent_file_size = 20 + (num_pieces * 20) + cls._calculate_pathname_bytes(files) + # Recalculate torrent_file_size based on file_count in each iteration + if file_count > 1000: + torrent_file_size = 20 + (num_pieces * 20) + int(cls._calculate_pathname_bytes(files) * 71 / 100) + elif file_count > 500: + torrent_file_size = 20 + (num_pieces * 20) + int(cls._calculate_pathname_bytes(files) * 4 / 5) + else: + torrent_file_size = 20 + (num_pieces * 20) + cls._calculate_pathname_bytes(files) + + # print(f"\nFinal piece_size: {piece_size} bytes after {iteration} iterations.") + print(f"Final num_pieces: {num_pieces}, Final torrent_file_size: {torrent_file_size} bytes") return piece_size def _calculate_total_size(self): - return sum(file.size for file in self.files) + total_size = sum(file.size for file in self.files) + return total_size @classmethod def _calculate_pathname_bytes(cls, files): diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index 9e06f931f..d3aa73964 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -4,11 +4,8 @@ import asyncio import requests import platform -import cli_ui from str2bool import str2bool from pymediainfo import MediaInfo -import math -from torf import Torrent from pathlib import Path from src.trackers.COMMON import COMMON from src.console import console @@ -66,51 +63,18 @@ async def get_flags(self, meta): async def upload(self, meta, disctype): common = COMMON(config=self.config) torrent_filename = "BASE" - torrent = Torrent.read(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") - - # Calculate the total size of all files in the torrent - total_size = sum(file.size for file in torrent.files) - - # Calculate the total bytes consumed by all the pathnames in the torrent - def calculate_pathname_bytes(files): - total_pathname_bytes = sum(len(str(file).encode('utf-8')) for file in files) - return total_pathname_bytes - - total_pathname_bytes = calculate_pathname_bytes(torrent.files) - - # Calculate the number of pieces and the torrent file size based on the current piece size - def calculate_pieces_and_file_size(total_size, pathname_bytes, piece_size): - num_pieces = math.ceil(total_size / piece_size) - # Approximate size: 20 bytes header + 20 bytes per piece + pathname bytes - torrent_file_size = 20 + (num_pieces * 20) + pathname_bytes - return num_pieces, torrent_file_size - - # Check if the 
existing torrent fits within the constraints - num_pieces, torrent_file_size = calculate_pieces_and_file_size(total_size, total_pathname_bytes, torrent.piece_size) - - # Convert torrent file size to KiB for display - torrent_file_size_kib = torrent_file_size / 1024 - - # If the torrent doesn't meet the constraints, ask the user if they want to regenerate it - if not (1000 <= num_pieces <= 2000) or torrent_file_size > 102400: - console.print(f"[yellow]Existing .torrent is outside of ANT preferred constraints with {num_pieces} pieces and is approximately {torrent_file_size_kib:.2f} KiB.") - regenerate = cli_ui.ask_yes_no("Do you wish to regenerate the torrent?", default=True) - - if regenerate: - console.print("[yellow]Regenerating torrent to fit within 1000-2000 pieces and 100 KiB .torrent size limit needed for ANT.") - from src.prep import Prep - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) - - # Override the max piece size before regenerating the torrent - meta['max_piece_size'] = '64' # 64 MiB, the maximum piece size allowed - - # Call create_torrent with the adjusted piece size - prep.create_torrent(meta, Path(meta['path']), "ANT") - torrent_filename = "ANT" - else: - console.print("[green]Using the existing torrent despite not meeting the preferred constraints.") - else: - console.print("[green]Existing torrent meets the constraints.") + torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent" + torrent_file_size_kib = os.path.getsize(torrent_path) / 1024 + + # Trigger regeneration automatically if size constraints aren't met + if torrent_file_size_kib > 100: # 100 KiB + console.print("[yellow]Existing .torrent exceeds 100 KiB and will be regenerated to fit constraints.") + + from src.prep import Prep + prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) + meta['max_piece_size'] = '256' # 256 MiB + prep.create_torrent(meta, Path(meta['path']), "ANT") + torrent_filename = "ANT" await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename) flags = await self.get_flags(meta) From b03485f24b7f4c357ff6720e7c74ff2481e70157 Mon Sep 17 00:00:00 2001 From: BoBeR182 <525433+BoBeR182@users.noreply.github.com> Date: Fri, 25 Oct 2024 16:19:01 +0000 Subject: [PATCH 341/741] Add DCP source support --- src/args.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/args.py b/src/args.py index 3826554c9..4eeacfcad 100644 --- a/src/args.py +++ b/src/args.py @@ -25,7 +25,7 @@ def parse(self, args, meta): parser.add_argument('-mf', '--manual_frames', required=False, help="Comma-separated frame numbers to use as screenshots", type=str, default=None) parser.add_argument('-c', '--category', nargs='*', required=False, help="Category [MOVIE, TV, FANRES]", choices=['movie', 'tv', 'fanres']) parser.add_argument('-t', '--type', nargs='*', required=False, help="Type [DISC, REMUX, ENCODE, WEBDL, WEBRIP, HDTV]", choices=['disc', 'remux', 'encode', 'webdl', 'web-dl', 'webrip', 'hdtv']) - parser.add_argument('--source', nargs='*', required=False, help="Source [Blu-ray, BluRay, DVD, HDDVD, WEB, HDTV, UHDTV, LaserDisc]", choices=['Blu-ray', 'BluRay', 'DVD', 'HDDVD', 'WEB', 'HDTV', 'UHDTV', 'LaserDisc'], dest="manual_source") + parser.add_argument('--source', nargs='*', required=False, help="Source [Blu-ray, BluRay, DVD, HDDVD, WEB, HDTV, UHDTV, LaserDisc, DCP]", choices=['Blu-ray', 'BluRay', 'DVD', 'HDDVD', 'WEB', 'HDTV', 'UHDTV', 'LaserDisc', 'DCP'], dest="manual_source") 
parser.add_argument('-res', '--resolution', nargs='*', required=False, help="Resolution [2160p, 1080p, 1080i, 720p, 576p, 576i, 480p, 480i, 8640p, 4320p, OTHER]", choices=['2160p', '1080p', '1080i', '720p', '576p', '576i', '480p', '480i', '8640p', '4320p', 'other']) parser.add_argument('-tmdb', '--tmdb', nargs='*', required=False, help="TMDb ID", type=str, dest='tmdb_manual') parser.add_argument('-imdb', '--imdb', nargs='*', required=False, help="IMDb ID", type=str) From 267fe424e7e76c45737eb700e34f34b7dafa1184 Mon Sep 17 00:00:00 2001 From: SomeGuy Date: Sat, 26 Oct 2024 14:16:15 +1100 Subject: [PATCH 342/741] Add example tracker and alphabetical order. Update with missing tracker and aligned to alphabetical order. --- data/example-config.py | 280 ++++++++++++++++++++++------------------- 1 file changed, 152 insertions(+), 128 deletions(-) diff --git a/data/example-config.py b/data/example-config.py index 8e628821a..49e98dad5 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -56,13 +56,32 @@ # Remove the trackers from the default_trackers list that are not used, to save being asked everytime "default_trackers": "BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL, HDB, TIK, PSS, ULCX", - "BLU": { - "useAPI": False, # Set to True if using BLU - "api_key": "BLU api key", - "announce_url": "https://blutopia.cc/announce/customannounceurl", + "ACM": { + "api_key": "ACM api key", + "announce_url": "https://asiancinema.me/announce/customannounceurl", + # "anon" : False, + + # FOR INTERNAL USE ONLY: + # "internal" : True, + # "internal_groups" : ["What", "Internal", "Groups", "Are", "You", "In"], + }, + "AITHER": { + "useAPI": False, # Set to True if using Aither + "api_key": "AITHER api key", + "announce_url": "https://aither.cc/announce/customannounceurl", # "anon" : False, # "modq" : False ## Not working yet }, + "AL": { + "api_key": "AL api key", + "announce_url": "https://animelovers.club/announce/customannounceurl", + # "anon" : False + }, + "ANT": { + "api_key": "ANT api key", + "announce_url": "https://anthelion.me/announce/customannounceurl", + # "anon" : False + }, "BHD": { "api_key": "BHD api key", "announce_url": "https://beyond-hd.me/announce/customannounceurl", @@ -76,85 +95,57 @@ "my_announce_url": "https://trackerr.bit-hdtv.com/passkey/announce", # "anon" : "False" }, - "PTP": { - "useAPI": False, # Set to True if using PTP - "add_web_source_to_desc": True, - "ApiUser": "ptp api user", - "ApiKey": 'ptp api key', - "username": "", - "password": "", - "announce_url": "" - }, - "AITHER": { - "useAPI": False, # Set to True if using Aither - "api_key": "AITHER api key", - "announce_url": "https://aither.cc/announce/customannounceurl", + "BLU": { + "useAPI": False, # Set to True if using BLU + "api_key": "BLU api key", + "announce_url": "https://blutopia.cc/announce/customannounceurl", # "anon" : False, # "modq" : False ## Not working yet }, - "R4E": { - "api_key": "R4E api key", - "announce_url": "https://racing4everyone.eu/announce/customannounceurl", - # "anon" : False - }, - "HUNO": { - "api_key": "HUNO api key", - "announce_url": "https://hawke.uno/announce/customannounceurl", + "CBR": { + "api_key": "CBR api key", + "announce_url": "https://capybarabr.com/announce/customannounceurl", # "anon" : False }, - "MTV": { - 'api_key': 'get from security page', - 'username': '', - 'password': '', - 'announce_url': "get from https://www.morethantv.me/upload.php", - 
'anon': False, - # 'otp_uri' : 'OTP URI, read the following for more information https://github.com/google/google-authenticator/wiki/Key-Uri-Format' + "FL": { + "useAPI": False, + "username": "FL username", + "passkey": "FL passkey", + "uploader_name": "https://hdbits.org/announce/Custom_Announce_URL", + "anon": False, }, - "STC": { - "api_key": "STC", - "announce_url": "https://skipthecommericals.xyz/announce/customannounceurl", - # "anon" : False + "FNP": { + "api_key": "FNP api key", + "announce_url": "https://fearnopeer.com/announce/customannounceurl", + # "anon" : "False" }, - "STT": { - "api_key": "STC", - "announce_url": "https://stt.xyz/announce/customannounceurl", - # "anon" : False + "HDB": { + "useAPI": False, + "username": "HDB username", + "passkey": "HDB passkey", + "announce_url": "https://hdbits.org/announce/Custom_Announce_URL", + "anon": False, }, - "SN": { - "api_key": "SN", - "announce_url": "https://tracker.swarmazon.club:8443//announce", + "HDT": { + "username": "username", + "password": "password", + "my_announce_url": "https://hdts-announce.ru/announce.php?pid=", + # "anon" : "False" + "announce_url": "https://hdts-announce.ru/announce.php", # DO NOT EDIT THIS LINE }, "HP": { "api_key": "HP", "announce_url": "https://hidden-palace.net/announce/customannounceurl", # "anon" : False }, - "ACM": { - "api_key": "ACM api key", - "announce_url": "https://asiancinema.me/announce/customannounceurl", - # "anon" : False, - - # FOR INTERNAL USE ONLY: - # "internal" : True, - # "internal_groups" : ["What", "Internal", "Groups", "Are", "You", "In"], - }, - "NBL": { - "api_key": "NBL api key", - "announce_url": "https://nebulance.io/customannounceurl", - }, - "ANT": { - "api_key": "ANT api key", - "announce_url": "https://anthelion.me/announce/customannounceurl", + "HUNO": { + "api_key": "HUNO api key", + "announce_url": "https://hawke.uno/announce/customannounceurl", # "anon" : False }, - "THR": { - "username": "username", - "password": "password", - "img_api": "get this from the forum post", - "announce_url": "http://www.torrenthr.org/announce.php?passkey=yourpasskeyhere", - "pronfo_api_key": "pronfo api key", - "pronfo_theme": "pronfo theme code", - "pronfo_rapi_id": "pronfo remote api id", + "JPTV": { + "api_key": "JPTV api key", + "announce_url": "https://jptv.club/announce/customannounceurl", # "anon" : False }, "LCD": { @@ -162,11 +153,6 @@ "announce_url": "https://locadora.cc/announce/customannounceurl", # "anon" : False }, - "CBR": { - "api_key": "CBR api key", - "announce_url": "https://capybarabr.com/announce/customannounceurl", - # "anon" : False - }, "LST": { "useAPI": False, # Set to True if using LST "api_key": "LST api key", @@ -180,6 +166,34 @@ "announce_url": "https://lat-team.com/announce/customannounceurl", # "anon" : False }, + "MTV": { + 'api_key': 'get from security page', + 'username': '', + 'password': '', + 'announce_url': "get from https://www.morethantv.me/upload.php", + 'anon': False, + # 'otp_uri' : 'OTP URI, read the following for more information https://github.com/google/google-authenticator/wiki/Key-Uri-Format' + }, + "NBL": { + "api_key": "NBL api key", + "announce_url": "https://nebulance.io/customannounceurl", + }, + "OE": { + "useAPI": False, # Set to True if using OE + "api_key": "OE api key", + "announce_url": "https://onlyencodes.cc/announce/customannounceurl", + # "anon" : False + }, + "OTW": { + "api_key": "OTW api key", + "announce_url": "https://oldtoons.world/announce/customannounceurl", + # "anon" : False + }, + "PSS": { + "api_key": 
"PSS api key", + "announce_url": "https://privatesilverscreen.cc/announce/customannounceurl", + # "anon" : False + }, "PTER": { "passkey": 'passkey', "img_rehost": False, @@ -188,25 +202,23 @@ "ptgen_api": "", "anon": True, }, - "TL": { - "announce_key": "TL announce key", - }, - "TDC": { - "api_key": "TDC api key", - "announce_url": "https://thedarkcommunity.cc/announce/customannounceurl", - # "anon" : "False" + "PTP": { + "useAPI": False, # Set to True if using PTP + "add_web_source_to_desc": True, + "ApiUser": "ptp api user", + "ApiKey": 'ptp api key', + "username": "", + "password": "", + "announce_url": "" }, - "HDT": { - "username": "username", - "password": "password", - "my_announce_url": "https://hdts-announce.ru/announce.php?pid=", - # "anon" : "False" - "announce_url": "https://hdts-announce.ru/announce.php", # DO NOT EDIT THIS LINE + "R4E": { + "api_key": "R4E api key", + "announce_url": "https://racing4everyone.eu/announce/customannounceurl", + # "anon" : False }, - "OE": { - "useAPI": False, # Set to True if using OE - "api_key": "OE api key", - "announce_url": "https://onlyencodes.cc/announce/customannounceurl", + "RF": { + "api_key": "RF api key", + "announce_url": "https://reelflix.xyz/announce/customannounceurl", # "anon" : False }, "RTF": { @@ -216,46 +228,43 @@ "announce_url": "get from upload page", "anon": True }, - "RF": { - "api_key": "RF api key", - "announce_url": "https://reelflix.xyz/announce/customannounceurl", - # "anon" : False + "SHRI": { + "api_key": "SHRI api key", + "announce_url": "https://shareisland.org/announce/customannounceurl", + # "anon" : "False" }, - "OTW": { - "api_key": "OTW api key", - "announce_url": "https://oldtoons.world/announce/customannounceurl", - # "anon" : False + "SN": { + "api_key": "SN", + "announce_url": "https://tracker.swarmazon.club:8443//announce", }, - "FNP": { - "api_key": "FNP api key", - "announce_url": "https://fearnopeer.com/announce/customannounceurl", - # "anon" : "False" + "SPD": { + "api_key": "SPEEDAPP API KEY", + "announce_url": "https://ramjet.speedapp.io//announce", }, - "UTP": { - "api_key": "UTP api key", - "announce_url": "https://UTP/announce/customannounceurl", + "STC": { + "api_key": "STC", + "announce_url": "https://skipthecommericals.xyz/announce/customannounceurl", # "anon" : False }, - "AL": { - "api_key": "AL api key", - "announce_url": "https://animelovers.club/announce/customannounceurl", + "STT": { + "api_key": "STT", + "announce_url": "https://stt.xyz/announce/customannounceurl", # "anon" : False }, - "HDB": { - "useAPI": False, - "username": "HDB username", - "passkey": "HDB passkey", - "announce_url": "https://hdbits.org/announce/Custom_Announce_URL", - "anon": False, - }, - "SHRI": { - "api_key": "SHRI api key", - "announce_url": "https://shareisland.org/announce/customannounceurl", + "TDC": { + "api_key": "TDC api key", + "announce_url": "https://thedarkcommunity.cc/announce/customannounceurl", # "anon" : "False" }, - "SPD": { - "api_key": "SPEEDAPP API KEY", - "announce_url": "https://ramjet.speedapp.io//announce", + "THR": { + "username": "username", + "password": "password", + "img_api": "get this from the forum post", + "announce_url": "http://www.torrenthr.org/announce.php?passkey=yourpasskeyhere", + "pronfo_api_key": "pronfo api key", + "pronfo_theme": "pronfo theme code", + "pronfo_rapi_id": "pronfo remote api id", + # "anon" : False }, "TIK": { "useAPI": False, # Set to True if using TIK @@ -264,24 +273,39 @@ "anon": False, "modq": True, }, + "TL": { + "announce_key": "TL announce 
key", + }, + "TTG": { + "username": "username", + "password": "password", + "login_question": "login_question", + "login_answer": "login_answer", + "user_id": "user_id", + "user_id": "pronfo theme code", + "announce_url": "https://totheglory.im/announce/", + # "anon" : False + }, "TVC": { "api_key": "TVC API Key", "announce_url": "https://tvchaosuk.com/announce/", "anon": "False" }, - "PSS": { - "api_key": "PSS api key", - "announce_url": "https://privatesilverscreen.cc/announce/customannounceurl", - # "anon" : False - }, "ULCX": { "api_key": "ULCX api key", "announce_url": "https://upload.cx/announce/customannounceurl", # "anon" : False, }, - "MANUAL": { - # Uncomment and replace link with filebrowser (https://github.com/filebrowser/filebrowser) link to the Upload-Assistant directory, this will link to your filebrowser instead of uploading to uguu.se - # "filebrowser" : "https://domain.tld/filebrowser/files/Upload-Assistant/" + #"UNIT3D_TEMPLATE": { + # "api_key": "UNIT3D_TEMPLATE api key", + # "announce_url": "https://domain.tld/announce/customannounceurl", + # # "anon" : False, + # # "modq" : False ## Not working yet + }, + "UTP": { + "api_key": "UTP api key", + "announce_url": "https://UTP/announce/customannounceurl", + # "anon" : False }, }, From 2d04a4ee87c8e7857a58400aa71498673958b148 Mon Sep 17 00:00:00 2001 From: SomeGuy Date: Sat, 26 Oct 2024 14:30:53 +1100 Subject: [PATCH 343/741] Added missing comment tag. --- data/example-config.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/data/example-config.py b/data/example-config.py index 49e98dad5..4f35cdc27 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -282,7 +282,6 @@ "login_question": "login_question", "login_answer": "login_answer", "user_id": "user_id", - "user_id": "pronfo theme code", "announce_url": "https://totheglory.im/announce/", # "anon" : False }, @@ -301,7 +300,7 @@ # "announce_url": "https://domain.tld/announce/customannounceurl", # # "anon" : False, # # "modq" : False ## Not working yet - }, + #}, "UTP": { "api_key": "UTP api key", "announce_url": "https://UTP/announce/customannounceurl", From bd71e2f6d81f1b677ddf9d03afb534185bea0a8c Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 26 Oct 2024 14:21:04 +1000 Subject: [PATCH 344/741] Lint --- data/example-config.py | 48 ++++++++++++++++++++---------------------- 1 file changed, 23 insertions(+), 25 deletions(-) diff --git a/data/example-config.py b/data/example-config.py index 4f35cdc27..dc58eebfb 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -52,9 +52,9 @@ "TRACKERS": { # Which trackers do you want to upload to? 
- # Available tracker: BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL, HDB, TIK, PSS, ULCX + # Available tracker: ACM, AITHER, AL, ANT, BHD, BHDTV, BLU, CBR, FNP, HDB, HDT, HP, HUNO, LCD, LST, LT, MTV, NBL, OE, OTW, PSS, PTER, PTP, R4E, RF, RTF, SN, STC, STT, THR, TDC, TIK, TL, ULCX, UTP # Remove the trackers from the default_trackers list that are not used, to save being asked everytime - "default_trackers": "BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, TDC, HDT, OE, RF, OTW, FNP, UTP, AL, HDB, TIK, PSS, ULCX", + "default_trackers": "ACM, AITHER, AL, ANT, BHD, BHDTV, BLU, CBR, FNP, HDB, HDT, HP, HUNO, LCD, LST, LT, MTV, NBL, OE, OTW, PSS, PTER, PTP, R4E, RF, RTF, SN, STC, STT, THR, TDC, TIK, TL, ULCX, UTP", "ACM": { "api_key": "ACM api key", @@ -66,7 +66,7 @@ # "internal_groups" : ["What", "Internal", "Groups", "Are", "You", "In"], }, "AITHER": { - "useAPI": False, # Set to True if using Aither + "useAPI": False, # Set to True if using Aither for automatic ID searching "api_key": "AITHER api key", "announce_url": "https://aither.cc/announce/customannounceurl", # "anon" : False, @@ -85,7 +85,7 @@ "BHD": { "api_key": "BHD api key", "announce_url": "https://beyond-hd.me/announce/customannounceurl", - "draft_default": "True", + "draft_default": "True", # Send to drafts # "anon" : False }, "BHDTV": { @@ -96,7 +96,7 @@ # "anon" : "False" }, "BLU": { - "useAPI": False, # Set to True if using BLU + "useAPI": False, # Set to True if using BLU for automatic ID searching "api_key": "BLU api key", "announce_url": "https://blutopia.cc/announce/customannounceurl", # "anon" : False, @@ -108,11 +108,10 @@ # "anon" : False }, "FL": { - "useAPI": False, "username": "FL username", "passkey": "FL passkey", "uploader_name": "https://hdbits.org/announce/Custom_Announce_URL", - "anon": False, + # "anon": False, }, "FNP": { "api_key": "FNP api key", @@ -120,11 +119,11 @@ # "anon" : "False" }, "HDB": { - "useAPI": False, + "useAPI": False, # Set to True if using HDB for automatic ID searching "username": "HDB username", "passkey": "HDB passkey", "announce_url": "https://hdbits.org/announce/Custom_Announce_URL", - "anon": False, + # "anon": False, }, "HDT": { "username": "username", @@ -154,12 +153,12 @@ # "anon" : False }, "LST": { - "useAPI": False, # Set to True if using LST + "useAPI": False, # Set to True if using LST for automatic ID searching "api_key": "LST api key", "announce_url": "https://lst.gg/announce/customannounceurl", # "anon" : False, - # "modq" : False, - # "draft" : False + # "modq" : False, # Send to modq for staff approval + # "draft" : False # Send to drafts }, "LT": { "api_key": "LT api key", @@ -171,7 +170,7 @@ 'username': '', 'password': '', 'announce_url': "get from https://www.morethantv.me/upload.php", - 'anon': False, + # 'anon': False, # 'otp_uri' : 'OTP URI, read the following for more information https://github.com/google/google-authenticator/wiki/Key-Uri-Format' }, "NBL": { @@ -179,7 +178,7 @@ "announce_url": "https://nebulance.io/customannounceurl", }, "OE": { - "useAPI": False, # Set to True if using OE + "useAPI": False, # Set to True if using OE for automatic ID searching "api_key": "OE api key", "announce_url": "https://onlyencodes.cc/announce/customannounceurl", # "anon" : False @@ -194,16 +193,16 @@ "announce_url": "https://privatesilverscreen.cc/announce/customannounceurl", # "anon" : 
False }, - "PTER": { + "PTER": { # Does not appear to be working at all "passkey": 'passkey', "img_rehost": False, "username": "", "password": "", "ptgen_api": "", - "anon": True, + # "anon": True, }, "PTP": { - "useAPI": False, # Set to True if using PTP + "useAPI": False, # Set to True if using PTP for automatic ID searching "add_web_source_to_desc": True, "ApiUser": "ptp api user", "ApiKey": 'ptp api key', @@ -226,7 +225,7 @@ "password": "password", "api_key": 'get_it_by_running_/api/ login command from https://retroflix.club/api/doc', "announce_url": "get from upload page", - "anon": True + # "anon": True }, "SHRI": { "api_key": "SHRI api key", @@ -267,11 +266,10 @@ # "anon" : False }, "TIK": { - "useAPI": False, # Set to True if using TIK - "api_key": "", + "api_key": "TIK api key", "announce_url": "https://cinematik.net/announce/", - "anon": False, - "modq": True, + # "anon": False, + # "modq": True, # Not working for now, ignored unless correct class }, "TL": { "announce_key": "TL announce key", @@ -288,19 +286,19 @@ "TVC": { "api_key": "TVC API Key", "announce_url": "https://tvchaosuk.com/announce/", - "anon": "False" + # "anon": "False" }, "ULCX": { "api_key": "ULCX api key", "announce_url": "https://upload.cx/announce/customannounceurl", # "anon" : False, }, - #"UNIT3D_TEMPLATE": { + # "UNIT3D_TEMPLATE": { # "api_key": "UNIT3D_TEMPLATE api key", # "announce_url": "https://domain.tld/announce/customannounceurl", # # "anon" : False, # # "modq" : False ## Not working yet - #}, + # }, "UTP": { "api_key": "UTP api key", "announce_url": "https://UTP/announce/customannounceurl", From f1b5616226028f67977e609b037a32efe0690eb0 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 26 Oct 2024 15:04:59 +1000 Subject: [PATCH 345/741] Quiet console --- src/prep.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index ed276c832..31981531c 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2578,7 +2578,7 @@ def calculate_piece_size(cls, total_size, min_size, max_size, files): torrent_file_size = 20 + (num_pieces * 20) + cls._calculate_pathname_bytes(files) # print(f"\nFinal piece_size: {piece_size} bytes after {iteration} iterations.") - print(f"Final num_pieces: {num_pieces}, Final torrent_file_size: {torrent_file_size} bytes") + # print(f"Final num_pieces: {num_pieces}, Final torrent_file_size: {torrent_file_size} bytes") return piece_size def _calculate_total_size(self): From 2541ccb970e3fb90814946c5cbc162557d77a79f Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 26 Oct 2024 15:40:02 +1000 Subject: [PATCH 346/741] Skip single file check when not using single files fixes https://github.com/Audionut/Upload-Assistant/issues/103 --- src/clients.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/clients.py b/src/clients.py index 374bab9c9..9357142dd 100644 --- a/src/clients.py +++ b/src/clients.py @@ -134,7 +134,7 @@ async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client console.log(f"Torrent is valid based on disc/basename or keep-folder: {valid}") # If one file, check for folder - if len(torrent.files) == len(meta['filelist']) == 1: + elif len(torrent.files) == len(meta['filelist']) == 1: if os.path.basename(torrent.files[0]) == os.path.basename(meta['filelist'][0]): if str(torrent.files[0]) == os.path.basename(torrent.files[0]): valid = True From 0782dc8ce1c8a193c1f091851e72e7d084c7d7a5 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 26 Oct 2024 17:11:26 +1000 Subject: [PATCH 347/741] Fix torrent 
validation error prints

fixes https://github.com/Audionut/Upload-Assistant/issues/107
---
 src/clients.py | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

diff --git a/src/clients.py b/src/clients.py
index 9357142dd..f4c2d45eb 100644
--- a/src/clients.py
+++ b/src/clients.py
@@ -100,7 +100,6 @@ async def find_existing_torrent(self, meta):
     async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client, print_err=False):
         valid = False
         wrong_file = False
-        err_print = ""
 
         # Normalize the torrent hash based on the client
         if torrent_client in ('qbit', 'deluge'):
@@ -174,16 +173,16 @@
 
                 # Piece size and count validations
                 if (reuse_torrent.pieces >= 7000 and reuse_torrent.piece_size < 8388608) or (reuse_torrent.pieces >= 4000 and reuse_torrent.piece_size < 4194304):
-                    err_print = "[bold yellow]Too many pieces exist in current hash. REHASHING"
+                    console.print("[bold yellow]Too many pieces exist in current hash. REHASHING")
                     valid = False
                 elif reuse_torrent.piece_size < 32768:
-                    err_print = "[bold yellow]Piece size too small to reuse"
+                    console.print("[bold yellow]Piece size too small to reuse")
                     valid = False
                 elif wrong_file:
-                    err_print = "[bold red] Provided .torrent has files that were not expected"
+                    console.print("[bold red] Provided .torrent has files that were not expected")
                     valid = False
                 else:
-                    err_print = f'[bold green]REUSING .torrent with infohash: [bold yellow]{torrenthash}'
+                    console.print(f"[bold green]REUSING .torrent with infohash: [bold yellow]{torrenthash}")
             except Exception as e:
                 console.print(f'[bold red]Error checking reuse torrent: {e}')
                 valid = False
@@ -191,11 +190,7 @@
             if meta['debug']:
                 console.log(f"Final validity after piece checks: valid={valid}")
         else:
-            err_print = '[bold yellow]Unwanted Files/Folders Identified'
-
-    # Print the error message if needed
-    if print_err:
-        console.print(err_print)
+            console.print("[bold yellow]Unwanted Files/Folders Identified")
 
     return valid, torrent_path

From 65f58f3c52367bd743894f558ffbd1c5629fc3d8 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Sat, 26 Oct 2024 17:30:30 +1000
Subject: [PATCH 348/741] Modify useAPI behavior with auto searching

Now, if you want a site to be included in automatic ID/description parsing, set useAPI = True in config.py.

Using a site argument (`--ptp 12345`, for instance) still works the same, regardless of useAPI status.

todo: add other UNIT3D sites that have good descriptions, or are otherwise useful for ID/screenshot matching.
---
 README.md   |  2 +-
 src/prep.py | 44 ++++++++++++++++++++++++++++++++++++--------
 2 files changed, 37 insertions(+), 9 deletions(-)

diff --git a/README.md b/README.md
index c9ea3eb33..ac7cfe01f 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,7 @@ A simple tool to take the work out of uploading.
   - Generates and Parses MediaInfo/BDInfo.
   - Generates and Uploads screenshots.
   - Uses srrdb to fix scene filenames
-  - Can grab descriptions from PTP/BLU (automatically on filename match or arg) / Aither/LST/OE (with arg)
+  - Can grab descriptions from PTP/BLU/Aither/LST/OE (with config option automatically on filename match, or using arg)
   - Can strip existing screenshots from descriptions to skip screenshot generation and uploading
   - Obtains TMDb/IMDb/MAL identifiers.
- Converts absolute to season episode numbering for Anime diff --git a/src/prep.py b/src/prep.py index 31981531c..fd964f4c2 100644 --- a/src/prep.py +++ b/src/prep.py @@ -510,49 +510,49 @@ async def gather_prep(self, meta, mode): if specific_tracker: console.print(f"[blue]Processing only the {specific_tracker} tracker based on meta.[/blue]") - if specific_tracker == 'PTP' and str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true": + if specific_tracker == 'PTP': ptp = PTP(config=self.config) meta, match = await self.update_metadata_from_tracker('PTP', ptp, meta, search_term, search_file_folder) if match: found_match = True - elif specific_tracker == 'BLU' and str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": + elif specific_tracker == 'BLU': blu = BLU(config=self.config) meta, match = await self.update_metadata_from_tracker('BLU', blu, meta, search_term, search_file_folder) if match: found_match = True - elif specific_tracker == 'AITHER' and str(self.config['TRACKERS'].get('AITHER', {}).get('useAPI')).lower() == "true": + elif specific_tracker == 'AITHER': aither = AITHER(config=self.config) meta, match = await self.update_metadata_from_tracker('AITHER', aither, meta, search_term, search_file_folder) if match: found_match = True - elif specific_tracker == 'LST' and str(self.config['TRACKERS'].get('LST', {}).get('useAPI')).lower() == "true": + elif specific_tracker == 'LST': lst = LST(config=self.config) meta, match = await self.update_metadata_from_tracker('LST', lst, meta, search_term, search_file_folder) if match: found_match = True - elif specific_tracker == 'OE' and str(self.config['TRACKERS'].get('OE', {}).get('useAPI')).lower() == "true": + elif specific_tracker == 'OE': oe = OE(config=self.config) meta, match = await self.update_metadata_from_tracker('OE', oe, meta, search_term, search_file_folder) if match: found_match = True - elif specific_tracker == 'TIK' and str(self.config['TRACKERS'].get('TIK', {}).get('useAPI')).lower() == "true": + elif specific_tracker == 'TIK': tik = TIK(config=self.config) meta, match = await self.update_metadata_from_tracker('TIK', tik, meta, search_term, search_file_folder) if match: found_match = True - elif specific_tracker == 'HDB' and str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": + elif specific_tracker == 'HDB': hdb = HDB(config=self.config) meta, match = await self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder) if match: found_match = True else: - # Process all trackers if no specific tracker is set in meta + # Process all trackers with API = true if no specific tracker is set in meta default_trackers = self.config['TRACKERS'].get('default_trackers', "").split(", ") if "PTP" in default_trackers and not found_match: @@ -569,6 +569,34 @@ async def gather_prep(self, meta, mode): if match: found_match = True + if "AITHER" in default_trackers and not found_match: + if str(self.config['TRACKERS'].get('AITHER', {}).get('useAPI')).lower() == "true": + aither = AITHER(config=self.config) + meta, match = await self.update_metadata_from_tracker('AITHER', aither, meta, search_term, search_file_folder) + if match: + found_match = True + + if "LST" in default_trackers and not found_match: + if str(self.config['TRACKERS'].get('LST', {}).get('useAPI')).lower() == "true": + lst = LST(config=self.config) + meta, match = await self.update_metadata_from_tracker('LST', lst, meta, search_term, search_file_folder) + if match: + found_match = True + + if "OE" 
in default_trackers and not found_match:
+                    if str(self.config['TRACKERS'].get('OE', {}).get('useAPI')).lower() == "true":
+                        oe = OE(config=self.config)
+                        meta, match = await self.update_metadata_from_tracker('OE', oe, meta, search_term, search_file_folder)
+                        if match:
+                            found_match = True
+
+                if "TIK" in default_trackers and not found_match:
+                    if str(self.config['TRACKERS'].get('TIK', {}).get('useAPI')).lower() == "true":
+                        tik = TIK(config=self.config)
+                        meta, match = await self.update_metadata_from_tracker('TIK', tik, meta, search_term, search_file_folder)
+                        if match:
+                            found_match = True
+
                 if "HDB" in default_trackers and not found_match:
                     if str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true":
                         hdb = HDB(config=self.config)

From 6d4477c2eb774b5f20175f464e2019f700b8fc5f Mon Sep 17 00:00:00 2001
From: Audionut
Date: Sat, 26 Oct 2024 17:32:09 +1000
Subject: [PATCH 349/741] Add TIK useAPI back
---
 data/example-config.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/data/example-config.py b/data/example-config.py
index dc58eebfb..71011d5c8 100644
--- a/data/example-config.py
+++ b/data/example-config.py
@@ -266,6 +266,7 @@
         # "anon" : False
     },
     "TIK": {
+        "useAPI": False,  # Set to True if using TIK for automatic ID searching, won't work great until folder searching is added to UNIT3D API
         "api_key": "TIK api key",
         "announce_url": "https://cinematik.net/announce/",
         # "anon": False,

From 1f0e555bc71ef464931829d906a826e8c95cd3cc Mon Sep 17 00:00:00 2001
From: SomeGuy
Date: Sat, 26 Oct 2024 21:23:51 +1100
Subject: [PATCH 350/741] Add-Shebang-Support

Adds shebang support. This removes the need to declare python3 when running the script on Linux or in Docker.
---
 upload.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/upload.py b/upload.py
index 300b77ea1..a7907c095 100644
--- a/upload.py
+++ b/upload.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
 import requests
 from src.args import Args
 from src.clients import Clients

From c27b44a0ae638aaca51ca662d58046edd45dff41 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Sun, 27 Oct 2024 15:38:39 +1000
Subject: [PATCH 351/741] MTV - size-validate images, not thumbnails, and only return validated urls

fixes https://github.com/Audionut/Upload-Assistant/issues/82
---
 src/trackers/MTV.py | 101 +++++++++++++++++++++++---------------------
 1 file changed, 52 insertions(+), 49 deletions(-)

diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py
index d651813e3..199490a70 100644
--- a/src/trackers/MTV.py
+++ b/src/trackers/MTV.py
@@ -48,12 +48,13 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1):
         approved_image_hosts = ['ptpimg', 'imgbox']
         total_size_limit = 25 * 1024 * 1024  # 25 MiB in bytes
         images_reuploaded = False
+        valid_images = []
 
         # Helper function to calculate total size of the images
         def calculate_total_size(image_list, image_sizes):
             total_size = 0
             for image in image_list:
-                img_url = image['img_url']
+                img_url = image['raw_url']
                 size = image_sizes.get(img_url, 0)  # Get size from meta['image_sizes'], default to 0 if not found
                 total_size += size
             return total_size
@@ -61,24 +62,21 @@ def calculate_total_size(image_list, image_sizes):
         # Helper function to remove images until the total size is under the limit
         def enforce_size_limit(image_list, image_sizes):
             total_size = calculate_total_size(image_list, image_sizes)
-            valid_images = []
 
             for image in image_list:
                 if total_size <= total_size_limit:
                     valid_images.append(image)
                 else:
-                    img_url = image['img_url']
+                    img_url = image['raw_url']
                     size =
image_sizes.get(img_url, 0) total_size -= size # Subtract size of the removed image console.print(f"[red]Removed {img_url} to stay within the 25 MiB limit.") return valid_images - + image_list = meta['image_list'] # Check if the images are already hosted on an approved image host if all(any(host in image['raw_url'] for host in approved_image_hosts) for image in meta['image_list']): console.print("[green]Images are already hosted on an approved image host. Skipping re-upload.") - image_list = meta['image_list'] # Use the existing images - # Enforce the total size limit on the existing image list image_list = enforce_size_limit(image_list, meta['image_sizes']) @@ -106,49 +104,51 @@ def enforce_size_limit(image_list, image_sizes): return # Proceed with the rest of the upload process - torrent_filename = "BASE" - torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent" - torrent = Torrent.read(torrent_path) + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + if not os.path.exists(torrent_file_path): + torrent_filename = "BASE" + torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent" + torrent = Torrent.read(torrent_path) - if torrent.piece_size > 8388608: # 8 MiB in bytes - console.print("[red]Piece size is OVER 8M and does not work on MTV. Generating a new .torrent") + if torrent.piece_size > 8388608: # 8 MiB in bytes + console.print("[red]Piece size is OVER 8M and does not work on MTV. Generating a new .torrent") - # Override the max_piece_size to 8 MiB - meta['max_piece_size'] = '8' # 8 MiB, to ensure the new torrent adheres to this limit + # Override the max_piece_size to 8 MiB + meta['max_piece_size'] = '8' # 8 MiB, to ensure the new torrent adheres to this limit - # Determine include and exclude patterns based on whether it's a disc or not - if meta['is_disc']: - include = [] # Adjust as needed for disc-specific inclusions, make sure it's a list - exclude = [] # Adjust as needed for disc-specific exclusions, make sure it's a list - else: - include = ["*.mkv", "*.mp4", "*.ts"] - exclude = ["*.*", "*sample.mkv", "!sample*.*"] - - # Create a new torrent with piece size explicitly set to 8 MiB - from src.prep import Prep - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) - new_torrent = prep.CustomTorrent( - meta=meta, - path=Path(meta['path']), - trackers=["https://fake.tracker"], - source="L4G", - private=True, - exclude_globs=exclude, # Ensure this is always a list - include_globs=include, # Ensure this is always a list - creation_date=datetime.now(), - comment="Created by L4G's Upload Assistant", - created_by="L4G's Upload Assistant" - ) - - # Validate and write the new torrent - new_torrent.piece_size = 8 * 1024 * 1024 - new_torrent.validate_piece_size() - new_torrent.generate(callback=prep.torf_cb, interval=5) - new_torrent.write(f"{meta['base_dir']}/tmp/{meta['uuid']}/MTV.torrent", overwrite=True) - - torrent_filename = "MTV" - - await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename) + # Determine include and exclude patterns based on whether it's a disc or not + if meta['is_disc']: + include = [] # Adjust as needed for disc-specific inclusions, make sure it's a list + exclude = [] # Adjust as needed for disc-specific exclusions, make sure it's a list + else: + include = ["*.mkv", "*.mp4", "*.ts"] + exclude = ["*.*", "*sample.mkv", "!sample*.*"] + + # Create a new torrent with piece size explicitly set to 8 MiB + from 
src.prep import Prep
+            prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config)
+            new_torrent = prep.CustomTorrent(
+                meta=meta,
+                path=Path(meta['path']),
+                trackers=["https://fake.tracker"],
+                source="L4G",
+                private=True,
+                exclude_globs=exclude,  # Ensure this is always a list
+                include_globs=include,  # Ensure this is always a list
+                creation_date=datetime.now(),
+                comment="Created by L4G's Upload Assistant",
+                created_by="L4G's Upload Assistant"
+            )
+
+            # Validate and write the new torrent
+            new_torrent.piece_size = 8 * 1024 * 1024
+            new_torrent.validate_piece_size()
+            new_torrent.generate(callback=prep.torf_cb, interval=5)
+            new_torrent.write(f"{meta['base_dir']}/tmp/{meta['uuid']}/MTV.torrent", overwrite=True)
+
+            torrent_filename = "MTV"
+
+            await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename)
 
         cat_id = await self.get_cat_id(meta)
         resolution_id = await self.get_res_id(meta['resolution'])
@@ -157,7 +157,7 @@ def enforce_size_limit(image_list, image_sizes):
         des_tags = await self.get_tags(meta)
 
         # Edit description and other details
-        await self.edit_desc(meta, images_reuploaded)
+        await self.edit_desc(meta, images_reuploaded, valid_images)
         group_desc = await self.edit_group_desc(meta)
         mtv_name = await self.edit_name(meta)
@@ -268,7 +268,7 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts
 
         return meta['image_list'], False, images_reuploaded  # Return retry_mode and images_reuploaded
 
-    async def edit_desc(self, meta, images_reuploaded):
+    async def edit_desc(self, meta, images_reuploaded, valid_images):
         base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read()
 
         with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as desc:
@@ -284,7 +284,10 @@ async def edit_desc(self, meta, images_reuploaded):
             elif mi_dump:
                 desc.write("[mediainfo]" + mi_dump + "[/mediainfo]\n\n")
 
-            images = meta['image_list']
+            if valid_images:
+                images = valid_images
+            else:
+                images = meta['image_list']
             if len(images) > 0:
                 for image in images:
                     raw_url = image['raw_url']

From e99739539c931836451b055e5f19e0e834917c46 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Sun, 27 Oct 2024 21:32:04 +1000
Subject: [PATCH 352/741] PTP - find 3 screens with series/multiple discs

Covers the case where the smallest screenshot is removed, leaving the wanted 2.

See https://github.com/Audionut/Upload-Assistant/issues/109

todo: Don't use 6 images for the 1st episode; handle at other sites.
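The idea, as a hedged sketch (the helper name and the `wanted` count are illustrative, not code from this repo): capture one screenshot more than the target so that, after the smallest file is dropped (the smallest PNG is usually the darkest, least detailed frame), the wanted two remain. Roughly:

    import os

    def trim_to_wanted(screens, wanted=2):
        # Capture wanted + 1 upstream, then keep only the `wanted` largest
        # screenshot files; the smallest is typically the least useful frame.
        largest_first = sorted(screens, key=os.path.getsize, reverse=True)
        return largest_first[:wanted]

The 2 -> 3 changes in the diff below are the concrete form of that over-capture.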
--- src/trackers/PTP.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 133178be7..88e70a9aa 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -636,7 +636,7 @@ async def edit_desc(self, meta): use_vs = True else: use_vs = False - ds = multiprocessing.Process(target=prep.disc_screenshots, args=(f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), 2)) + ds = multiprocessing.Process(target=prep.disc_screenshots, args=(f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), 3)) ds.start() while ds.is_alive() is True: await asyncio.sleep(1) @@ -654,7 +654,7 @@ async def edit_desc(self, meta): desc.write(base2ptp) desc.write("\n\n") else: - ds = multiprocessing.Process(target=prep.dvd_screenshots, args=(meta, i, 2)) + ds = multiprocessing.Process(target=prep.dvd_screenshots, args=(meta, i, 3)) ds.start() while ds.is_alive() is True: await asyncio.sleep(1) @@ -685,7 +685,7 @@ async def edit_desc(self, meta): # Generate and upload screens for other files # Add force_screenshots=True to ensure screenshots are taken even if images exist - s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, 2, True)) + s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, 3, True, None)) s.start() while s.is_alive() is True: await asyncio.sleep(3) From ff147ff3653a74e4e7e24e269da643cea9945dbe Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 27 Oct 2024 23:31:54 +1000 Subject: [PATCH 353/741] PTP - only use 2 images for the 1st disc/file also todo: make it a config option how many screens --- src/trackers/PTP.py | 78 ++++++++++++++++++++++----------------------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 88e70a9aa..e33c7234d 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -616,12 +616,12 @@ async def edit_desc(self, meta): prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding="utf-8").read() with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding="utf-8") as desc: - images = meta['image_list'] discs = meta.get('discs', []) # For Discs if len(discs) >= 1: - for i in range(len(discs)): + for i, each in enumerate(discs): each = discs[i] + new_screens = [] if each['type'] == "BDMV": desc.write(f"[mediainfo]{each['summary']}[/mediainfo]\n\n") if i == 0: @@ -629,21 +629,27 @@ async def edit_desc(self, meta): if base2ptp.strip() != "": desc.write(base2ptp) desc.write("\n\n") + for img_index in range(min(2, len(meta['image_list']))): + raw_url = meta['image_list'][img_index]['raw_url'] + desc.write(f"[img]{raw_url}[/img]\n") + desc.write("\n") mi_dump = each['summary'] else: mi_dump = each['summary'] - if meta.get('vapoursynth', False) is True: - use_vs = True - else: - use_vs = False - ds = multiprocessing.Process(target=prep.disc_screenshots, args=(f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), 3)) + use_vs = meta.get('vapoursynth', False) + ds = multiprocessing.Process(target=prep.disc_screenshots, args=(f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), 2)) ds.start() while ds.is_alive() 
is True: await asyncio.sleep(1) new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") - images, dummy = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, {}) + if new_screens: + uploaded_images, _ = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, {}) + for img in uploaded_images[:int(meta['screens'])]: + raw_url = img['raw_url'] + desc.write(f"[img]{raw_url}[/img]\n") + desc.write("\n") - if each['type'] == "DVD": + elif each['type'] == "DVD": desc.write(f"[b][size=3]{each['name']}:[/size][/b]\n") desc.write(f"[mediainfo]{each['ifo_mi_full']}[/mediainfo]\n") desc.write(f"[mediainfo]{each['vob_mi_full']}[/mediainfo]\n") @@ -653,61 +659,55 @@ async def edit_desc(self, meta): if base2ptp.strip() != "": desc.write(base2ptp) desc.write("\n\n") + for img_index in range(min(2, len(meta['image_list']))): + raw_url = meta['image_list'][img_index]['raw_url'] + desc.write(f"[img]{raw_url}[/img]\n") + desc.write("\n") else: - ds = multiprocessing.Process(target=prep.dvd_screenshots, args=(meta, i, 3)) + ds = multiprocessing.Process(target=prep.dvd_screenshots, args=(meta, i, 2)) ds.start() while ds.is_alive() is True: await asyncio.sleep(1) new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['discs'][i]['name']}-*.png") - images, dummy = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, {}) - - if len(images) > 0: - for each in range(len(images[:int(meta['screens'])])): - raw_url = images[each]['raw_url'] - desc.write(f"[img]{raw_url}[/img]\n") - desc.write("\n") + if new_screens: + uploaded_images, _ = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, {}) + for img in uploaded_images[:int(meta['screens'])]: + raw_url = img['raw_url'] + desc.write(f"[img]{raw_url}[/img]\n") + desc.write("\n") # For non-discs elif len(meta.get('filelist', [])) >= 1: for i in range(len(meta['filelist'])): + new_screens = [] file = meta['filelist'][i] - if i == 0: # Add This line for all web-dls if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '' and meta.get('description', None) is None and self.web_source is True: desc.write(f"[quote][align=center]This release is sourced from {meta['service_longname']}[/align][/quote]") mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + desc.write(f"[mediainfo]{mi_dump}[/mediainfo]\n") + for each in range(min(2, len(meta['image_list']))): + raw_url = meta['image_list'][each]['raw_url'] + desc.write(f"[img]{raw_url}[/img]\n") + desc.write("\n") else: # Export Mediainfo mi_dump = MediaInfo.parse(file, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/TEMP_PTP_MEDIAINFO.txt", "w", newline="", encoding="utf-8") as f: f.write(mi_dump) mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/TEMP_PTP_MEDIAINFO.txt", "r", encoding="utf-8").read() - - # Generate and upload screens for other files - # Add force_screenshots=True to ensure screenshots are taken even if images exist + desc.write(f"[mediainfo]{mi_dump}[/mediainfo]\n") s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, 3, True, None)) s.start() while s.is_alive() is True: await asyncio.sleep(3) - - # Upload new screenshots new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") - images, dummy = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, {}) - - # Write MediaInfo and screenshots to the description - 
desc.write(f"[mediainfo]{mi_dump}[/mediainfo]\n") - - if i == 0: - base2ptp = self.convert_bbcode(base) - if base2ptp.strip() != "": - desc.write(base2ptp) - desc.write("\n\n") - - if len(images) > 0: - for each in range(len(images[:int(meta['screens'])])): - raw_url = images[each]['raw_url'] - desc.write(f"[img]{raw_url}[/img]\n") - desc.write("\n") + if new_screens: + uploaded_images, _ = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, {}) + for img in uploaded_images[:int(meta['screens'])]: + raw_url = img['raw_url'] + desc.write(f"[img]{raw_url}[/img]\n") + desc.write("\n") async def get_AntiCsrfToken(self, meta): if not os.path.exists(f"{meta['base_dir']}/data/cookies"): From ccd695b07f9b555bbcbd12ac569336c70a8dc278 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 28 Oct 2024 00:01:41 +1000 Subject: [PATCH 354/741] Number of screenshots for each disc/episode in pack configurable --- data/example-config.py | 4 ++++ src/trackers/PTP.py | 19 ++++++++++--------- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/data/example-config.py b/data/example-config.py index 71011d5c8..fd8dcdb28 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -25,6 +25,10 @@ # Number of screenshots to capture "screens": "6", + # Number of screenshots to use for each disc/episode in packs + # Currently PTP only + "multiScreens": "2", + # Providing the option to change the size of the thumbnails where supported, default is 350 "thumbnail_size": "350", diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index e33c7234d..93b755244 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -615,6 +615,7 @@ async def edit_desc(self, meta): from src.prep import Prep prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding="utf-8").read() + multi_screens = int(self.config['DEFAULT'].get('multiScreens', 2)) with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding="utf-8") as desc: discs = meta.get('discs', []) # For Discs @@ -629,7 +630,7 @@ async def edit_desc(self, meta): if base2ptp.strip() != "": desc.write(base2ptp) desc.write("\n\n") - for img_index in range(min(2, len(meta['image_list']))): + for img_index in range(min(multi_screens, len(meta['image_list']))): raw_url = meta['image_list'][img_index]['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") desc.write("\n") @@ -637,13 +638,13 @@ async def edit_desc(self, meta): else: mi_dump = each['summary'] use_vs = meta.get('vapoursynth', False) - ds = multiprocessing.Process(target=prep.disc_screenshots, args=(f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), 2)) + ds = multiprocessing.Process(target=prep.disc_screenshots, args=(f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), multi_screens)) ds.start() while ds.is_alive() is True: await asyncio.sleep(1) new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") if new_screens: - uploaded_images, _ = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, {}) + uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {}) for img in uploaded_images[:int(meta['screens'])]: raw_url = img['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") @@ -659,18 +660,18 @@ async def edit_desc(self, meta): if base2ptp.strip() != "": desc.write(base2ptp) desc.write("\n\n") - for img_index in range(min(2, 
len(meta['image_list']))): + for img_index in range(min(multi_screens, len(meta['image_list']))): raw_url = meta['image_list'][img_index]['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") desc.write("\n") else: - ds = multiprocessing.Process(target=prep.dvd_screenshots, args=(meta, i, 2)) + ds = multiprocessing.Process(target=prep.dvd_screenshots, args=(meta, i, multi_screens)) ds.start() while ds.is_alive() is True: await asyncio.sleep(1) new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['discs'][i]['name']}-*.png") if new_screens: - uploaded_images, _ = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, {}) + uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {}) for img in uploaded_images[:int(meta['screens'])]: raw_url = img['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") @@ -686,7 +687,7 @@ async def edit_desc(self, meta): desc.write(f"[quote][align=center]This release is sourced from {meta['service_longname']}[/align][/quote]") mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() desc.write(f"[mediainfo]{mi_dump}[/mediainfo]\n") - for each in range(min(2, len(meta['image_list']))): + for each in range(min(multi_screens, len(meta['image_list']))): raw_url = meta['image_list'][each]['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") desc.write("\n") @@ -697,13 +698,13 @@ async def edit_desc(self, meta): f.write(mi_dump) mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/TEMP_PTP_MEDIAINFO.txt", "r", encoding="utf-8").read() desc.write(f"[mediainfo]{mi_dump}[/mediainfo]\n") - s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, 3, True, None)) + s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, multi_screens + 1, True, None)) s.start() while s.is_alive() is True: await asyncio.sleep(3) new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") if new_screens: - uploaded_images, _ = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, {}) + uploaded_images, _ = prep.upload_screens(meta, multi_screens + 1, 1, 0, 2, new_screens, {}) for img in uploaded_images[:int(meta['screens'])]: raw_url = img['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") From 39ad19f908b46f936f64fdfadd474465248ec6ca Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 28 Oct 2024 00:03:16 +1000 Subject: [PATCH 355/741] Upload does not need plus 1 --- src/trackers/PTP.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 93b755244..f108a9c89 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -704,7 +704,7 @@ async def edit_desc(self, meta): await asyncio.sleep(3) new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") if new_screens: - uploaded_images, _ = prep.upload_screens(meta, multi_screens + 1, 1, 0, 2, new_screens, {}) + uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {}) for img in uploaded_images[:int(meta['screens'])]: raw_url = img['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") From 0431c7b0545342a25940a577b42091842fd54a20 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 28 Oct 2024 08:26:19 +1000 Subject: [PATCH 356/741] if not elif --- src/trackers/OE.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/OE.py b/src/trackers/OE.py index 08ffd3e6d..7aec84b24 100644 --- 
a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -234,7 +234,7 @@ def process_languages(tracks): audio_languages.append(audio_lang) else: audio_languages.append("") - elif track.get('@type') == 'Text': + if track.get('@type') == 'Text': language = track.get('Language') if not language or language is None: subtitle_lang = cli_ui.ask_string('No subtitle language present, you must enter one:') From 3d321071e9243e85787fe4df249f2802854a0552 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 28 Oct 2024 09:21:20 +1000 Subject: [PATCH 357/741] Remove dead TDC fixes https://github.com/Audionut/Upload-Assistant/issues/110 --- README.md | 2 +- data/example-config.py | 9 +- src/trackers/TDC.py | 210 ----------------------------------------- upload.py | 29 +++--- 4 files changed, 21 insertions(+), 229 deletions(-) delete mode 100644 src/trackers/TDC.py diff --git a/README.md b/README.md index ac7cfe01f..396951237 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ A simple tool to take the work out of uploading. - Can re-use existing torrents instead of hashing new - Generates proper name for your upload using Mediainfo/BDInfo and TMDb/IMDb conforming to site rules - Checks for existing releases already on site - - Uploads to PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN/RTF/OTW/FNP/CBR/UTP/HDB/AL/SHRI/OE/TL/BHDTV/HDT/JPTV/LT/MTV/PTER/TDC/TTG/UTP/PSS/ULCX/SPD/TVC + - Uploads to ACM/Aither/AL/ANT/BHD/BHDTV/BLU/CBR/FNP/FL/HDB/HDT/HP/HUNO/JPTV/LCD/LST/LT/MTV/NBL/OE/OTW/PSS/PTP/PTER/RF/R4E(limited)/RTF/SHRI/SN/SPD/STC/STT/TLC/THR/TL/TVC/TTG/ULCX/UTP - Adds to your client with fast resume, seeding instantly (rtorrent/qbittorrent/deluge/watch folder) - ALL WITH MINIMAL INPUT! - Currently works with .mkv/.mp4/Blu-ray/DVD/HD-DVDs diff --git a/data/example-config.py b/data/example-config.py index fd8dcdb28..e5690839e 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -56,9 +56,9 @@ "TRACKERS": { # Which trackers do you want to upload to? 
- # Available tracker: ACM, AITHER, AL, ANT, BHD, BHDTV, BLU, CBR, FNP, HDB, HDT, HP, HUNO, LCD, LST, LT, MTV, NBL, OE, OTW, PSS, PTER, PTP, R4E, RF, RTF, SN, STC, STT, THR, TDC, TIK, TL, ULCX, UTP + # Available tracker: ACM, AITHER, AL, ANT, BHD, BHDTV, BLU, CBR, FNP, HDB, HDT, HP, HUNO, LCD, LST, LT, MTV, NBL, OE, OTW, PSS, PTER, PTP, R4E, RF, RTF, SN, STC, STT, THR, TIK, TL, ULCX, UTP # Remove the trackers from the default_trackers list that are not used, to save being asked everytime - "default_trackers": "ACM, AITHER, AL, ANT, BHD, BHDTV, BLU, CBR, FNP, HDB, HDT, HP, HUNO, LCD, LST, LT, MTV, NBL, OE, OTW, PSS, PTER, PTP, R4E, RF, RTF, SN, STC, STT, THR, TDC, TIK, TL, ULCX, UTP", + "default_trackers": "ACM, AITHER, AL, ANT, BHD, BHDTV, BLU, CBR, FNP, HDB, HDT, HP, HUNO, LCD, LST, LT, MTV, NBL, OE, OTW, PSS, PTER, PTP, R4E, RF, RTF, SN, STC, STT, THR, TIK, TL, ULCX, UTP", "ACM": { "api_key": "ACM api key", @@ -254,11 +254,6 @@ "announce_url": "https://stt.xyz/announce/customannounceurl", # "anon" : False }, - "TDC": { - "api_key": "TDC api key", - "announce_url": "https://thedarkcommunity.cc/announce/customannounceurl", - # "anon" : "False" - }, "THR": { "username": "username", "password": "password", diff --git a/src/trackers/TDC.py b/src/trackers/TDC.py deleted file mode 100644 index 54f7a98fe..000000000 --- a/src/trackers/TDC.py +++ /dev/null @@ -1,210 +0,0 @@ -# -*- coding: utf-8 -*- -# import discord -import asyncio -import requests -from str2bool import str2bool -import bencodepy - -from src.trackers.COMMON import COMMON -from src.console import console - - -class TDC(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ - - def __init__(self, config): - self.config = config - self.tracker = 'TDC' - self.source_flag = 'TDC' - self.upload_url = 'https://thedarkcommunity.cc/api/torrents/upload' - self.search_url = 'https://thedarkcommunity.cc/api/torrents/filter' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" - self.banned_groups = [""] - pass - - async def get_cat_id(self, category_name): - category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') - return category_id - - async def get_type_id(self, type): - type_id = { - 'DISC': '1', - 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - 'ENCODE': '3' - }.get(type, '0') - return type_id - - async def get_res_id(self, resolution): - resolution_id = { - '8640p': '10', - '4320p': '1', - '2160p': '2', - '1440p': '3', - '1080p': '3', - '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') - return resolution_id - - async def upload(self, meta, disctype): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: - anon = 0 - else: - anon = 1 - - if meta['bdinfo'] is not None: - mi_dump = None - bd_dump = 
open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') - files = {'torrent': open_torrent} - data = { - 'name': meta['name'], - 'description': desc, - 'mediainfo': mi_dump, - 'bdinfo': bd_dump, - 'category_id': cat_id, - 'type_id': type_id, - 'resolution_id': resolution_id, - 'tmdb': meta['tmdb'], - 'imdb': meta['imdb_id'].replace('tt', ''), - 'tvdb': meta['tvdb_id'], - 'mal': meta['mal_id'], - 'igdb': 0, - 'anonymous': anon, - 'stream': meta['stream'], - 'sd': meta['sd'], - 'keywords': meta['keywords'], - 'personal_release': int(meta.get('personalrelease', False)), - 'internal': 0, - 'featured': 0, - 'free': 0, - 'doubleup': 0, - 'sticky': 0, - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) is True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:53.0) Gecko/20100101 Firefox/53.0' - } - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() - } - - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - console.print(response.json()) - except Exception: - console.print("It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - open_torrent.close() - - async def search_existing(self, meta, disctype): - dupes = [] - console.print("[yellow]Searching for existing torrents on site...") - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId': meta['tmdb'], - 'categories[]': await self.get_cat_id(meta['category']), - 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), - 'name': "" - } - if meta['category'] == 'TV': - params['name'] = params['name'] + f"{meta.get('season', '')}{meta.get('episode', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + meta['edition'] - - try: - response = requests.get(url=self.search_url, params=params) - response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] - # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() - # if difference >= 0.05: - dupes.append(result) - except Exception: - console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') - await asyncio.sleep(5) - - return dupes - - async def search_torrent_page(self, meta, disctype): - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - Name = meta['name'] - quoted_name = f'"{Name}"' - - params = { - 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'name': quoted_name - } - - try: - response = requests.get(url=self.search_url, params=params) - response.raise_for_status() - response_data = response.json() - - if response_data['data'] and isinstance(response_data['data'], list): - details_link = response_data['data'][0]['attributes'].get('details_link') - - if details_link: - with open(torrent_file_path, 'rb') as open_torrent: - torrent_data = open_torrent.read() - - torrent = bencodepy.decode(torrent_data) - torrent[b'comment'] = details_link.encode('utf-8') - updated_torrent_data = bencodepy.encode(torrent) - - with open(torrent_file_path, 'wb') as updated_torrent_file: - updated_torrent_file.write(updated_torrent_data) - - return details_link - else: - return None - else: - return None - - except requests.exceptions.RequestException as e: - print(f"An error occurred during the request: {e}") - return None diff --git a/upload.py b/upload.py index a7907c095..88daef201 100644 --- a/upload.py +++ b/upload.py @@ -29,7 +29,6 @@ from src.trackers.MTV import MTV from src.trackers.JPTV import JPTV from src.trackers.TL import TL -from src.trackers.TDC import TDC from src.trackers.HDT import HDT from src.trackers.RF import RF from src.trackers.OE import OE @@ -239,21 +238,29 @@ async def do_the_thing(base_dir): ####### Upload to Trackers ####### # noqa #F266 #################################### common = COMMON(config=config) - api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM', 'LCD', 'HUNO', 'LT', 'JPTV', 'TDC', 'OE', - 'OTW', 'FNP', 'CBR', 'UTP', 'AL', 'SHRI', 'LST', 'TIK', 'PSS', 'ULCX', 'BHD'] - other_api_trackers = ['SN', 'NBL', 'ANT', 'BHDTV', 'RTF', 'TL', 'TVC', 'SPD'] - http_trackers = ['HDB', 'TTG', 'FL', 'PTER', 'HDT', 'MTV'] + api_trackers = [ + 'ACM', 'AITHER', 'AL', 'BHD', 'BLU', 'CBR', 'FNP', 'HUNO', 'JPTV', 'LCD', 'LST', 'LT', + 'OE', 'OTW', 'PSS', 'RF', 'R4E', 'SHRI', 'STC', 'STT', 'TIK', 'ULCX', 'UTP' + ] + other_api_trackers = [ + 'ANT', 'BHDTV', 'NBL', 'RTF', 'SN', 'SPD', 'TL', 'TVC' + ] + http_trackers = [ + 'FL', 'HDB', 'HDT', 'MTV', 'PTER', 'TTG' + ] tracker_class_map = { - 'BLU': BLU, 'BHD': BHD, 'AITHER': AITHER, 'STC': STC, 'R4E': R4E, 'THR': THR, 'STT': STT, 'HP': HP, 'PTP': PTP, 'RF': RF, 'SN': SN, 'TIK': TIK, 'TVC': TVC, - 'ACM': ACM, 'HDB': HDB, 'LCD': LCD, 'TTG': TTG, 'LST': LST, 'HUNO': HUNO, 'FL': FL, 'LT': LT, 'NBL': NBL, 'ANT': ANT, 'PTER': PTER, 'JPTV': JPTV, - 'TL': TL, 'TDC': TDC, 'HDT': HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF': RTF, 'OTW': OTW, 'FNP': FNP, 'CBR': CBR, 'UTP': UTP, 'AL': AL, - 'SHRI': SHRI, 'PSS': PSS, 'ULCX': ULCX, 'SPD': SPD} + 'ACM': ACM, 'AITHER': AITHER, 'AL': AL, 'ANT': ANT, 'BHD': BHD, 'BHDTV': BHDTV, 'BLU': BLU, 'CBR': CBR, + 'FNP': FNP, 'FL': FL, 'HDB': HDB, 'HDT': HDT, 'HP': HP, 'HUNO': HUNO, 'JPTV': JPTV, 'LCD': LCD, + 'LST': LST, 'LT': LT, 'MTV': MTV, 'NBL': NBL, 'OE': OE, 'OTW': OTW, 'PSS': PSS, 'PTP': PTP, 'PTER': PTER, + 'R4E': R4E, 'RF': RF, 'RTF': RTF, 'SHRI': SHRI, 'SN': SN, 'SPD': SPD, 'STC': STC, 'STT': STT, 'THR': THR, + 'TIK': TIK, 'TL': TL, 'TVC': TVC, 'TTG': TTG, 'ULCX': ULCX, 'UTP': UTP + } tracker_capabilities = { - 'LST': {'mod_q': True, 'draft': 
True}, - 'BLU': {'mod_q': True, 'draft': False}, 'AITHER': {'mod_q': True, 'draft': False}, 'BHD': {'draft_live': True}, + 'BLU': {'mod_q': True, 'draft': False}, + 'LST': {'mod_q': True, 'draft': True} } async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): From c338d96ecef3c39405e5039b9741d6c76b83a7dd Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 28 Oct 2024 14:27:39 +1000 Subject: [PATCH 358/741] OE has no EUR region --- src/trackers/OE.py | 2 +- upload.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/trackers/OE.py b/src/trackers/OE.py index 7aec84b24..cc485db0d 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -98,7 +98,7 @@ async def upload(self, meta, disctype): if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - if region_id != 0: + if region_id != 0 and region_id != 243: data['region_id'] = region_id if distributor_id != 0: data['distributor_id'] = distributor_id diff --git a/upload.py b/upload.py index 88daef201..1e03823dc 100644 --- a/upload.py +++ b/upload.py @@ -239,7 +239,7 @@ async def do_the_thing(base_dir): #################################### common = COMMON(config=config) api_trackers = [ - 'ACM', 'AITHER', 'AL', 'BHD', 'BLU', 'CBR', 'FNP', 'HUNO', 'JPTV', 'LCD', 'LST', 'LT', + 'ACM', 'AITHER', 'AL', 'BHD', 'BLU', 'CBR', 'FNP', 'HUNO', 'JPTV', 'LCD', 'LST', 'LT', 'OE', 'OTW', 'PSS', 'RF', 'R4E', 'SHRI', 'STC', 'STT', 'TIK', 'ULCX', 'UTP' ] other_api_trackers = [ From 1f89b05ff593ef972198e0effefff3da14811cb9 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 28 Oct 2024 14:43:24 +1000 Subject: [PATCH 359/741] Move dupe checking console to debug --- src/trackers/COMMON.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 4cc372dc7..564afc77f 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -376,8 +376,9 @@ async def ptgen(self, meta, ptgen_site="", ptgen_retry=3): return ptgen async def filter_dupes(self, dupes, meta): - console.log("[cyan]Pre-filtered dupes") - console.log(dupes) + if meta['debug']: + console.log("[cyan]Pre-filtered dupes") + console.log(dupes) new_dupes = [] types_to_check = {'REMUX', 'WEBDL', 'WEBRip', 'HDTV'} @@ -428,8 +429,6 @@ async def filter_dupes(self, dupes, meta): 'in': meta['type'] } ] - # console.log(f"Meta type: {normalized_meta_type}") - # console.log(f"Each type: {normalized_each_type}") # Check if the type of the dupe matches or is sufficiently similar dupe_type_matches = {t for t in types_to_check if t in normalized_each_type} @@ -439,13 +438,16 @@ async def filter_dupes(self, dupes, meta): console.log(f"[green]Allowing result we will catch later: {each}") # Allow based on matching resolution, HDR, and audio despite type mismatch elif meta['resolution'] in each and meta['hdr'] in each and meta['audio'] in each: - console.log(f"[green]Allowing result we will catch later: {each}") + if meta['debug']: + console.log(f"[green]Allowing result we will catch later: {each}") else: - console.log(f"[yellow]Excluding result due to type mismatch: {each}") + if meta['debug']: + console.log(f"[yellow]Excluding result due to type mismatch: {each}") continue else: if dupe_type_matches: - console.log(f"[red]Excluding extra result with new type match: {each}") + if meta['debug']: + console.log(f"[red]Excluding extra result with new type match: {each}") continue for s in search_combos: From 
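# ---------------------------------------------------------------------------
# Editor's sketch (not part of the patch): patch 359 above wraps the
# per-candidate dupe logs in `if meta['debug']:` so normal runs stay quiet.
# The pattern, reduced to a helper (`log` stands in for console.log):
def log_if_debug(meta, message, log=print):
    if meta.get('debug'):
        log(message)

meta = {'debug': False}
log_if_debug(meta, "[yellow]Excluding result due to type mismatch")  # silent
# ---------------------------------------------------------------------------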
bd77d9c31e8bfa0299863f04b989c5f830808403 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 28 Oct 2024 15:10:39 +1000 Subject: [PATCH 360/741] PTP keep original image count when singles --- src/trackers/PTP.py | 75 +++++++++++++++++++++++++++++++++------------ 1 file changed, 56 insertions(+), 19 deletions(-) diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index f108a9c89..b058ee773 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -616,12 +616,42 @@ async def edit_desc(self, meta): prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding="utf-8").read() multi_screens = int(self.config['DEFAULT'].get('multiScreens', 2)) + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding="utf-8") as desc: + images = meta['image_list'] discs = meta.get('discs', []) - # For Discs - if len(discs) >= 1: + filelist = meta.get('filelist', []) + + # Handle single disc case + if len(discs) == 1: + each = discs[0] + new_screens = [] + if each['type'] == "BDMV": + desc.write(f"[mediainfo]{each['summary']}[/mediainfo]\n\n") + base2ptp = self.convert_bbcode(base) + if base2ptp.strip() != "": + desc.write(base2ptp) + desc.write("\n\n") + for img_index in range(len(images[:int(meta['screens'])])): + raw_url = meta['image_list'][img_index]['raw_url'] + desc.write(f"[img]{raw_url}[/img]\n") + desc.write("\n") + elif each['type'] == "DVD": + desc.write(f"[b][size=3]{each['name']}:[/size][/b]\n") + desc.write(f"[mediainfo]{each['ifo_mi_full']}[/mediainfo]\n") + desc.write(f"[mediainfo]{each['vob_mi_full']}[/mediainfo]\n\n") + base2ptp = self.convert_bbcode(base) + if base2ptp.strip() != "": + desc.write(base2ptp) + desc.write("\n\n") + for img_index in range(len(images[:int(meta['screens'])])): + raw_url = meta['image_list'][img_index]['raw_url'] + desc.write(f"[img]{raw_url}[/img]\n") + desc.write("\n") + + # Handle multiple discs case + elif len(discs) > 1: for i, each in enumerate(discs): - each = discs[i] new_screens = [] if each['type'] == "BDMV": desc.write(f"[mediainfo]{each['summary']}[/mediainfo]\n\n") @@ -634,9 +664,7 @@ async def edit_desc(self, meta): raw_url = meta['image_list'][img_index]['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") desc.write("\n") - mi_dump = each['summary'] else: - mi_dump = each['summary'] use_vs = meta.get('vapoursynth', False) ds = multiprocessing.Process(target=prep.disc_screenshots, args=(f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), multi_screens)) ds.start() @@ -645,7 +673,7 @@ async def edit_desc(self, meta): new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") if new_screens: uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {}) - for img in uploaded_images[:int(meta['screens'])]: + for img in uploaded_images[:multi_screens]: raw_url = img['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") desc.write("\n") @@ -653,8 +681,7 @@ async def edit_desc(self, meta): elif each['type'] == "DVD": desc.write(f"[b][size=3]{each['name']}:[/size][/b]\n") desc.write(f"[mediainfo]{each['ifo_mi_full']}[/mediainfo]\n") - desc.write(f"[mediainfo]{each['vob_mi_full']}[/mediainfo]\n") - desc.write("\n") + desc.write(f"[mediainfo]{each['vob_mi_full']}[/mediainfo]\n\n") if i == 0: base2ptp = self.convert_bbcode(base) if base2ptp.strip() != "": @@ -672,27 +699,37 @@ async def edit_desc(self, meta): new_screens 
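# ---------------------------------------------------------------------------
# Editor's sketch (not part of the patch): the rule patch 360 applies to PTP
# descriptions - a single disc/file keeps the user's full `screens` count,
# while extra items in a pack only get `multiScreens` images each. The names
# here are illustrative:
def image_slice(is_first_item, screens, multi_screens, images):
    return images[: (screens if is_first_item else multi_screens)]

imgs = list(range(10))
print(image_slice(True, 6, 2, imgs))   # [0..5] - original count preserved
print(image_slice(False, 6, 2, imgs))  # [0, 1] - pack extras trimmed
# ---------------------------------------------------------------------------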
= glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['discs'][i]['name']}-*.png") if new_screens: uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {}) - for img in uploaded_images[:int(meta['screens'])]: + for img in uploaded_images[:multi_screens]: raw_url = img['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") desc.write("\n") - # For non-discs - elif len(meta.get('filelist', [])) >= 1: - for i in range(len(meta['filelist'])): - new_screens = [] - file = meta['filelist'][i] + + # Handle single file case + elif len(filelist) == 1: + file = filelist[0] + if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '' and meta.get('description', None) is None and self.web_source is True: + desc.write(f"[quote][align=center]This release is sourced from {meta['service_longname']}[/align][/quote]") + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + desc.write(f"[mediainfo]{mi_dump}[/mediainfo]\n") + for img_index in range(len(images[:int(meta['screens'])])): + raw_url = meta['image_list'][img_index]['raw_url'] + desc.write(f"[img]{raw_url}[/img]\n") + desc.write("\n") + + # Handle multiple files case + elif len(filelist) > 1: + for i in range(len(filelist)): + file = filelist[i] if i == 0: - # Add This line for all web-dls if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '' and meta.get('description', None) is None and self.web_source is True: desc.write(f"[quote][align=center]This release is sourced from {meta['service_longname']}[/align][/quote]") mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() desc.write(f"[mediainfo]{mi_dump}[/mediainfo]\n") - for each in range(min(multi_screens, len(meta['image_list']))): - raw_url = meta['image_list'][each]['raw_url'] + for img_index in range(min(multi_screens, len(meta['image_list']))): + raw_url = meta['image_list'][img_index]['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") desc.write("\n") else: - # Export Mediainfo mi_dump = MediaInfo.parse(file, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/TEMP_PTP_MEDIAINFO.txt", "w", newline="", encoding="utf-8") as f: f.write(mi_dump) @@ -705,7 +742,7 @@ async def edit_desc(self, meta): new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") if new_screens: uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {}) - for img in uploaded_images[:int(meta['screens'])]: + for img in uploaded_images[:multi_screens]: raw_url = img['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") desc.write("\n") From f0e30da8724cfcfb0c45b918e1c623b220df0134 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 28 Oct 2024 22:37:14 +1000 Subject: [PATCH 361/741] Don't auto_search discs at UNIT3D sites is pointless until folder searching is added to their api --- src/prep.py | 69 +++++++++++++++++++++++++++-------------------------- 1 file changed, 35 insertions(+), 34 deletions(-) diff --git a/src/prep.py b/src/prep.py index fd964f4c2..6c25635b1 100644 --- a/src/prep.py +++ b/src/prep.py @@ -562,40 +562,41 @@ async def gather_prep(self, meta, mode): if match: found_match = True - if "BLU" in default_trackers and not found_match: - if str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": - blu = BLU(config=self.config) - meta, match = await self.update_metadata_from_tracker('BLU', blu, meta, search_term, 
search_file_folder) - if match: - found_match = True - - if "AITHER" in default_trackers and not found_match: - if str(self.config['TRACKERS'].get('AITHER', {}).get('useAPI')).lower() == "true": - aither = AITHER(config=self.config) - meta, match = await self.update_metadata_from_tracker('AITHER', aither, meta, search_term, search_file_folder) - if match: - found_match = True - - if "LST" in default_trackers and not found_match: - if str(self.config['TRACKERS'].get('LST', {}).get('useAPI')).lower() == "true": - lst = LST(config=self.config) - meta, match = await self.update_metadata_from_tracker('LST', lst, meta, search_term, search_file_folder) - if match: - found_match = True - - if "OE" in default_trackers and not found_match: - if str(self.config['TRACKERS'].get('OE', {}).get('useAPI')).lower() == "true": - oe = OE(config=self.config) - meta, match = await self.update_metadata_from_tracker('OE', oe, meta, search_term, search_file_folder) - if match: - found_match = True - - if "TIK" in default_trackers and not found_match: - if str(self.config['TRACKERS'].get('TIK', {}).get('useAPI')).lower() == "true": - tik = TIK(config=self.config) - meta, match = await self.update_metadata_from_tracker('TIK', tik, meta, search_term, search_file_folder) - if match: - found_match = True + if not meta['is_disc']: + if "BLU" in default_trackers and not found_match: + if str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": + blu = BLU(config=self.config) + meta, match = await self.update_metadata_from_tracker('BLU', blu, meta, search_term, search_file_folder) + if match: + found_match = True + + if "AITHER" in default_trackers and not found_match: + if str(self.config['TRACKERS'].get('AITHER', {}).get('useAPI')).lower() == "true": + aither = AITHER(config=self.config) + meta, match = await self.update_metadata_from_tracker('AITHER', aither, meta, search_term, search_file_folder) + if match: + found_match = True + + if "LST" in default_trackers and not found_match: + if str(self.config['TRACKERS'].get('LST', {}).get('useAPI')).lower() == "true": + lst = LST(config=self.config) + meta, match = await self.update_metadata_from_tracker('LST', lst, meta, search_term, search_file_folder) + if match: + found_match = True + + if "OE" in default_trackers and not found_match: + if str(self.config['TRACKERS'].get('OE', {}).get('useAPI')).lower() == "true": + oe = OE(config=self.config) + meta, match = await self.update_metadata_from_tracker('OE', oe, meta, search_term, search_file_folder) + if match: + found_match = True + + if "TIK" in default_trackers and not found_match: + if str(self.config['TRACKERS'].get('TIK', {}).get('useAPI')).lower() == "true": + tik = TIK(config=self.config) + meta, match = await self.update_metadata_from_tracker('TIK', tik, meta, search_term, search_file_folder) + if match: + found_match = True if "HDB" in default_trackers and not found_match: if str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": From 02ce1f317da62eec5bf921e46984b71cb2552ff8 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 28 Oct 2024 22:50:03 +1000 Subject: [PATCH 362/741] Allow ID + images from search AND custom description Keep or discard searched description makes no difference. 
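# ---------------------------------------------------------------------------
# Editor's sketch (not part of the patch): the source precedence gen_desc()
# keeps after patch 362 - template first, then NFO, then linked/file/inline
# custom text, each appended and flagging the description as CUSTOM:
def build_description(template=None, nfo=None, link_text=None,
                      file_text=None, inline_text=None):
    parts, custom = [], False
    if template:
        parts.append(template)            # template alone is not CUSTOM
    if nfo:
        parts.append(f"[code]{nfo}[/code]")
        custom = True
    for text in (link_text, file_text, inline_text):
        if text:
            parts.append(text)
            custom = True
    return "\n".join(parts) + "\n", custom

print(build_description(nfo="release notes", inline_text="extra line"))
# ---------------------------------------------------------------------------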
todo: Allow keeping existing searched description and adding custom
---
 src/prep.py | 106 ++++++++++++++++++++++++++--------------------------
 1 file changed, 53 insertions(+), 53 deletions(-)

diff --git a/src/prep.py b/src/prep.py
index 6c25635b1..523d255bf 100644
--- a/src/prep.py
+++ b/src/prep.py
@@ -3426,66 +3426,66 @@ def clean_filename(self, name):
         return name

     async def gen_desc(self, meta):
-        if not meta.get('skip_gen_desc', False):
-            def clean_text(text):
-                return text.replace('\r\n', '').replace('\n', '').strip()
-
-            desclink = meta.get('desclink')
-            descfile = meta.get('descfile')
+        def clean_text(text):
+            return text.replace('\r\n', '').replace('\n', '').strip()

-            with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description:
-                description.seek(0)
+        desclink = meta.get('desclink')
+        descfile = meta.get('descfile')

-            if meta.get('desc_template'):
-                from jinja2 import Template
-                try:
-                    with open(f"{meta['base_dir']}/data/templates/{meta['desc_template']}.txt", 'r') as f:
-                        template = Template(f.read())
-                        template_desc = template.render(meta)
-                        if clean_text(template_desc):
-                            description.write(template_desc + "\n")
-                            console.print(f"[INFO] Description from template '{meta['desc_template']}' used.")
-                except FileNotFoundError:
-                    console.print(f"[ERROR] Template '{meta['desc_template']}' not found.")
-
-            if meta.get('nfo'):
-                nfo_files = glob.glob("*.nfo")
-                if nfo_files:
-                    nfo = nfo_files[0]
-                    with open(nfo, 'r', encoding="utf-8") as nfo_file:
-                        nfo_content = nfo_file.read()
-                    description.write(f"[code]{nfo_content}[/code]\n")
-                    meta['description'] = "CUSTOM"
-                    console.print(f"[INFO] NFO file '{nfo}' used.")
-
-            if desclink:
-                try:
-                    parsed = urllib.parse.urlparse(desclink.replace('/raw/', '/'))
-                    split = os.path.split(parsed.path)
-                    raw = parsed._replace(path=f"{split[0]}/raw/{split[1]}" if split[0] != '/' else f"/raw{parsed.path}")
-                    raw_url = urllib.parse.urlunparse(raw)
-                    desclink_content = requests.get(raw_url).text
-                    description.write(desclink_content + "\n")
-                    meta['description'] = "CUSTOM"
-                    console.print(f"[INFO] Description from link '{desclink}' used.")
-                except Exception as e:
-                    console.print(f"[ERROR] Failed to fetch description from link: {e}")
+        with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description:
+            description.seek(0)

-            if descfile and os.path.isfile(descfile):
-                with open(descfile, 'r') as f:
-                    file_content = f.read()
-                description.write(file_content)
+            if meta.get('desc_template'):
+                from jinja2 import Template
+                try:
+                    with open(f"{meta['base_dir']}/data/templates/{meta['desc_template']}.txt", 'r') as f:
+                        template = Template(f.read())
+                        template_desc = template.render(meta)
+                        if clean_text(template_desc):
+                            description.write(template_desc + "\n")
+                            console.print(f"[INFO] Description from template '{meta['desc_template']}' used.")
+                except FileNotFoundError:
+                    console.print(f"[ERROR] Template '{meta['desc_template']}' not found.")
+
+            if meta.get('nfo'):
+                nfo_files = glob.glob("*.nfo")
+                if nfo_files:
+                    nfo = nfo_files[0]
+                    with open(nfo, 'r', encoding="utf-8") as nfo_file:
+                        nfo_content = nfo_file.read()
+                    description.write(f"[code]{nfo_content}[/code]\n")
                     meta['description'] = "CUSTOM"
-                    console.print(f"[INFO] Description from file '{descfile}' used.")
+                    console.print(f"[INFO] NFO file '{nfo}' used.")

-            if meta.get('desc'):
-                description.write(meta['desc'] + "\n")
+            if desclink:
+                try:
+                    parsed = urllib.parse.urlparse(desclink.replace('/raw/', 
'/')) + split = os.path.split(parsed.path) + raw = parsed._replace(path=f"{split[0]}/raw/{split[1]}" if split[0] != '/' else f"/raw{parsed.path}") + raw_url = urllib.parse.urlunparse(raw) + desclink_content = requests.get(raw_url).text + description.write(desclink_content + "\n") meta['description'] = "CUSTOM" - console.print("[INFO] Custom description used.") + console.print(f"[INFO] Description from link '{desclink}' used.") + except Exception as e: + console.print(f"[ERROR] Failed to fetch description from link: {e}") + + if descfile and os.path.isfile(descfile): + with open(descfile, 'r') as f: + file_content = f.read() + description.write(file_content) + meta['description'] = "CUSTOM" + console.print(f"[INFO] Description from file '{descfile}' used.") + + if meta.get('desc'): + description.write(meta['desc'] + "\n") + meta['description'] = "CUSTOM" + console.print("[INFO] Custom description used.") + + description.write("\n") + return meta - description.write("\n") - return meta - else: + if not meta.get('skip_gen_desc', False): description_text = meta.get('description') if meta.get('description') else "" with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: description.write(description_text + "\n") From 0f598d7e26a0d776c54cf12860750e3b26729a48 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 29 Oct 2024 21:26:08 +1000 Subject: [PATCH 363/741] UNIT3D - multi disc/file descriptions todo: Double check discs Fix DVD Make it configurable Fix PTP --- src/trackers/COMMON.py | 467 +++++++++++++++++++++++++++++++++++++---- 1 file changed, 428 insertions(+), 39 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 564afc77f..0d2181205 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -5,6 +5,10 @@ import json import click import sys +import glob +from pymediainfo import MediaInfo +import multiprocessing +import asyncio from src.bbcode import BBCODE from src.console import console @@ -13,6 +17,7 @@ class COMMON(): def __init__(self, config): self.config = config + self.parser = self.MediaInfoParser() pass async def edit_torrent(self, meta, tracker, source_flag, torrent_filename="BASE"): @@ -35,60 +40,293 @@ async def add_tracker_torrent(self, meta, tracker, source_flag, new_tracker, com Torrent.copy(new_torrent).write(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]{meta['clean_name']}.torrent", overwrite=True) async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, desc_header=""): + from src.prep import Prep + prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf8').read() with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]DESCRIPTION.txt", 'w', encoding='utf8') as descfile: - if desc_header != "": + if desc_header: descfile.write(desc_header) bbcode = BBCODE() - if meta.get('discs', []) != []: - discs = meta['discs'] - if discs[0]['type'] == "DVD": - descfile.write(f"[spoiler=VOB MediaInfo][code]{discs[0]['vob_mi']}[/code][/spoiler]\n") - descfile.write("\n") - if len(discs) >= 2: - for each in discs[1:]: - if each['type'] == "BDMV": - descfile.write(f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n") - descfile.write("\n") - elif each['type'] == "DVD": - descfile.write(f"{each['name']}:\n") - descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code][{each['vob_mi']}[/code][/spoiler] 
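# ---------------------------------------------------------------------------
# Editor's sketch (not part of the patch): patch 363 stores the screenshots
# uploaded for each extra disc/file under per-item meta keys
# ('new_images_disc_1', 'new_images_file_3', ...) so a re-run reuses the
# saved URLs instead of re-shooting and re-uploading. The cache in miniature:
def cached_or_upload(meta, key, upload_fn):
    if meta.get(key):                 # URLs survived in meta.json - reuse
        return meta[key]
    meta[key] = [
        {'img_url': u['img_url'], 'raw_url': u['raw_url'],
         'web_url': u['web_url']}
        for u in upload_fn()          # only hit the image host when needed
    ]
    return meta[key]
# ---------------------------------------------------------------------------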
[spoiler={os.path.basename(each['ifo'])}][code][{each['ifo_mi']}[/code][/spoiler]\n") - descfile.write("\n") - elif each['type'] == "HDDVD": - descfile.write(f"{each['name']}:\n") - descfile.write(f"[spoiler={os.path.basename(each['largest_evo'])}][code][{each['evo_mi']}[/code][/spoiler]\n") - descfile.write("\n") + discs = meta.get('discs', []) + filelist = meta.get('filelist', []) desc = base desc = bbcode.convert_pre_to_code(desc) desc = bbcode.convert_hide_to_spoiler(desc) if comparison is False: desc = bbcode.convert_comparison_to_collapse(desc, 1000) - desc = desc.replace('[img]', '[img=300]') descfile.write(desc) - images = meta['image_list'] - if len(images) > 0: - try: - thumbsize = self.config['DEFAULT']['thumbnail_size'] - except Exception: - thumbsize = "350" - - try: - screenheader = self.config['DEFAULT']['screenshot_header'] - except Exception: - screenheader = None - if screenheader is not None: - descfile.write(screenheader + '\n') - + # Handle single disc case + if len(discs) == 1: + images = meta['image_list'] + for img_index in range(len(images[:int(meta['screens'])])): + raw_url = images[img_index]['raw_url'] + descfile.write(f"[img={self.config['DEFAULT'].get('thumbnail_size', '350')}] {raw_url}[/img]\n") + + # Handle multiple discs case + elif len(discs) > 1: + # Initialize retry_count if not already set + if 'retry_count' not in meta: + meta['retry_count'] = 0 + + for i, each in enumerate(discs): + # Set a unique key per disc for managing images + new_images_key = f'new_images_disc_{i}' + + # Writing summary for each disc type + if each['type'] == "BDMV": + console.print("[yellow]Writing each summary") + descfile.write(f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n\n") + elif each['type'] == "DVD": + descfile.write(f"{each['name']}:\n") + descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code]{each['vob_mi']}[/code][/spoiler] ") + descfile.write(f"[spoiler={os.path.basename(each['ifo'])}][code]{each['ifo_mi']}[/code][/spoiler]\n\n") + + if i == 0: + # For the first disc, use images from `meta['image_list']` + console.print("[yellow]Using original images from meta['image_list'] for disc_0") + images = meta['image_list'] + descfile.write("[center]") + for img_index in range(min(2, len(images))): + raw_url = images[img_index]['raw_url'] + descfile.write(f"[img=300]{raw_url}[/img] ") + descfile.write("[/center]\n\n") + else: + # Check if screenshots exist for the current disc key + if new_images_key in meta and meta[new_images_key]: + console.print(f"[yellow]Found needed image URLs for {new_images_key}") + # Use existing URLs from meta to write to descfile + descfile.write("[center]") + for img in meta[new_images_key]: + raw_url = img['raw_url'] + descfile.write("[img=300]{raw_url}[/img] ") + descfile.write("[/center]\n\n") + else: + # Increment retry_count for tracking but use unique disc keys for each disc + meta['retry_count'] += 1 + meta[new_images_key] = [] + + # Check if new screenshots already exist before running prep.screenshots + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + console.print(f"[yellow]Checking new screens for {new_images_key}: {new_screens}") + if not new_screens: + console.print(f"[yellow]No new screens for {new_images_key}; creating new screenshots") + # Run prep.screenshots if no screenshots are present + use_vs = meta.get('vapoursynth', False) + s = multiprocessing.Process(target=prep.disc_screenshots, args=(f"FILE_{i}", each['bdinfo'], meta['uuid'], 
meta['base_dir'], use_vs, [], meta.get('ffdebug', False), 2)) + s.start() + while s.is_alive(): + await asyncio.sleep(1) + + # Re-check for new screenshots after screenshots process + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + + if new_screens: + uploaded_images, _ = prep.upload_screens( + meta, + 2, 1, 0, 2, + new_screens, + {new_images_key: meta[new_images_key]} + ) + + # Append each uploaded image's data to `meta[new_images_key]` + for img in uploaded_images: + meta[new_images_key].append({ + 'img_url': img['img_url'], + 'raw_url': img['raw_url'], + 'web_url': img['web_url'] + }) + + # Write new URLs to descfile + descfile.write("[center]") + for img in uploaded_images: + raw_url = img['raw_url'] + descfile.write(f"[img=300]{raw_url}[/img] ") + descfile.write("[/center]\n\n") + + # Save the updated meta to `meta.json` after upload + meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json" + with open(meta_filename, 'w') as f: + json.dump(meta, f, indent=4) + + # Handle single file case + if len(filelist) == 1: + images = meta['image_list'] descfile.write("[center]") - for each in range(len(images[:int(meta['screens'])])): - web_url = images[each]['web_url'] - raw_url = images[each]['raw_url'] - descfile.write(f"[url={web_url}][img={thumbsize}]{raw_url}[/img][/url] ") + for img_index in range(len(images[:int(meta['screens'])])): + web_url = images[img_index]['web_url'] + raw_url = images[img_index]['raw_url'] + descfile.write(f"[url={web_url}][img={self.config['DEFAULT'].get('thumbnail_size', '350')}] {raw_url}[/img][/url] ") descfile.write("[/center]") - if signature is not None: + # Handle multiple files case + # Initialize character counter + char_count = 0 + max_char_limit = 100 # Character limit + other_files_spoiler_open = False # Track if "Other files" spoiler has been opened + + # Process each file + if len(filelist) > 1: + for i, file in enumerate(filelist): + # Check if character limit is reached + if char_count >= max_char_limit: + # Open the "Other files" spoiler if it's the first time we're exceeding the limit + if not other_files_spoiler_open and i >= 5: + descfile.write("[center][spoiler=Other files]\n") + char_count += len("[center][spoiler=Other files]\n") + other_files_spoiler_open = True + + # Extract filename directly from the file path + filename = os.path.splitext(os.path.basename(file.strip()))[0] + + # Write filename in BBCode format + descfile.write(f"[center]{filename}\n[/center]\n") + char_count += len(f"[center]{filename}\n[/center]\n") + + # Check and write screenshots if they exist + new_images_key = f'new_images_file_{i}' + if new_images_key in meta and meta[new_images_key]: + console.print(f"[yellow]Found needed image URLs for {new_images_key}") + descfile.write("[center]") + char_count += len("[center]") + for img in meta[new_images_key]: + web_url = img['web_url'] + raw_url = img['raw_url'] + image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url] " + descfile.write(image_str) + char_count += len(image_str) + descfile.write("[/center]\n\n") + char_count += len("[/center]\n\n") + + continue # Skip full MediaInfo and spoilers for remaining files + + # Standard processing for files until character limit is reached + new_images_key = f'new_images_file_{i}' + + if i < 5: + # Standard processing for the first five files + if i == 0: + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read() + if mi_dump: + parsed_mediainfo = 
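# ---------------------------------------------------------------------------
# Editor's sketch (not part of the patch): the screenshot calls above run in
# a separate process and are polled asynchronously so the event loop keeps
# serving other coroutines. `job` stands in for prep.disc_screenshots /
# prep.screenshots:
import asyncio
import multiprocessing

def job(count):
    print(f"pretending to grab {count} screenshots")

async def run_job(count):
    p = multiprocessing.Process(target=job, args=(count,))
    p.start()
    while p.is_alive():
        await asyncio.sleep(1)    # yield instead of blocking on p.join()

# if __name__ == '__main__':
#     asyncio.run(run_job(2))
# ---------------------------------------------------------------------------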
self.parser.parse_mediainfo(mi_dump) + formatted_bbcode = self.parser.format_bbcode(parsed_mediainfo) + + match = re.search(r"Complete name\s+:\s+(.+)", mi_dump) + filename = os.path.splitext(os.path.basename(match.group(1)).strip())[0] if match else "MediaInfo" + + descfile.write(f"[center]{filename}\n[/center]\n") + char_count += len(f"[center]{filename}\n[/center]\n") + + images = meta['image_list'] + descfile.write("[center]") + char_count += len("[center]") + for img_index in range(min(2, len(images))): + web_url = images[img_index]['web_url'] + raw_url = images[img_index]['raw_url'] + image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url] " + descfile.write(image_str) + char_count += len(image_str) + descfile.write("[/center]\n\n") + char_count += len("[/center]\n\n") + else: + mi_dump = MediaInfo.parse(file, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) + parsed_mediainfo = self.parser.parse_mediainfo(mi_dump) + formatted_bbcode = self.parser.format_bbcode(parsed_mediainfo) + + match = re.search(r"Complete name\s+:\s+(.+)", mi_dump) + filename = os.path.splitext(os.path.basename(match.group(1)).strip())[0] if match else "MediaInfo" + + descfile.write(f"[center][spoiler={filename}]{formatted_bbcode}[/spoiler]\n") + char_count += len(f"[center][spoiler={filename}]{formatted_bbcode}[/spoiler]\n") + + if new_images_key in meta and meta[new_images_key]: + console.print(f"[yellow]Found needed image URLs for {new_images_key}") + descfile.write("[center]") + char_count += len("[center]") + for img in meta[new_images_key]: + web_url = img['web_url'] + raw_url = img['raw_url'] + image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url] " + descfile.write(image_str) + char_count += len(image_str) + descfile.write("[/center]\n\n") + char_count += len("[/center]\n\n") + else: + meta['retry_count'] = meta.get('retry_count', 0) + 1 + meta[new_images_key] = [] + + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + if not new_screens: + s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, 3, True, None)) + s.start() + while s.is_alive(): + await asyncio.sleep(1) + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + + if new_screens: + uploaded_images, _ = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]}) + for img in uploaded_images: + meta[new_images_key].append({ + 'img_url': img['img_url'], + 'raw_url': img['raw_url'], + 'web_url': img['web_url'] + }) + descfile.write("[center]") + char_count += len("[center]") + for img in uploaded_images: + web_url = img['web_url'] + raw_url = img['raw_url'] + image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url] " + descfile.write(image_str) + char_count += len(image_str) + descfile.write("[/center]\n\n") + char_count += len("[/center]\n\n") + + meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json" + with open(meta_filename, 'w') as f: + json.dump(meta, f, indent=4) + + elif i == 5 and not other_files_spoiler_open: + # Open "Other files" spoiler for the fifth file + descfile.write("[spoiler=Other files]\n") + char_count += len("[spoiler=Other files]\n") + other_files_spoiler_open = True + + if i >= 5 and char_count < max_char_limit: + mi_dump = MediaInfo.parse(file, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) + parsed_mediainfo = self.parser.parse_mediainfo(mi_dump) + formatted_bbcode = 
self.parser.format_bbcode(parsed_mediainfo) + + match = re.search(r"Complete name\s+:\s+(.+)", mi_dump) + filename = os.path.splitext(os.path.basename(match.group(1)).strip())[0] if match else "MediaInfo" + + descfile.write(f"[spoiler={filename}]{formatted_bbcode}[/spoiler]\n\n") + char_count += len(f"[spoiler={filename}]{formatted_bbcode}[/spoiler]\n\n") + + if new_images_key in meta and meta[new_images_key]: + console.print(f"[yellow]Found needed image URLs for {new_images_key}") + descfile.write("[center]") + char_count += len("[center]") + for img in meta[new_images_key]: + web_url = img['web_url'] + raw_url = img['raw_url'] + image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url] " + descfile.write(image_str) + char_count += len(image_str) + descfile.write("[/center]\n\n") + char_count += len("[/center]\n\n") + else: + continue # Skip if character limit has been reached + + if other_files_spoiler_open: + descfile.write("[/spoiler][/center]\n") + char_count += len("[/spoiler][/center]\n") + + console.print(f"[green]Total characters written to description: {char_count}") + + # Append signature if provided + if signature: descfile.write(signature) descfile.close() return @@ -483,3 +721,154 @@ async def filter_dupes(self, dupes, meta): if allow and each not in new_dupes: new_dupes.append(each) return new_dupes + + class MediaInfoParser: + # Language to ISO country code mapping + LANGUAGE_CODE_MAP = { + "english": "us", + "german": "de", + "french": "fr", + "spanish": "es", + "italian": "it", + "portuguese": "pt", + "dutch": "nl", + "japanese": "jp", + "arabic": "ae", + "czech": "cz", + "danish": "dk", + "greek": "gr", + "finnish": "fi", + "hebrew": "il", + "hungarian": "hu", + "indonesian": "id", + "korean": "kr", + "norwegian bokmal": "no", + "polish": "pl", + "romanian": "ro", + "russian": "ru", + "swedish": "se", + "thai": "th", + "turkish": "tr", + "vietnamese": "vn", + "chinese": "cn", + # Add more mappings as needed + } + + def parse_mediainfo(self, mediainfo_text): + # Patterns for matching sections and fields + section_pattern = re.compile(r"^(General|Video|Audio|Text|Menu)(?:\s#\d+)?", re.IGNORECASE) + parsed_data = {"general": {}, "video": [], "audio": [], "text": []} + current_section = None + current_track = {} + + # Field lists based on PHP definitions + general_fields = {'file_name', 'format', 'duration', 'file_size', 'bit_rate'} + video_fields = { + 'format', 'format_version', 'codec', 'width', 'height', 'stream_size', + 'framerate_mode', 'frame_rate', 'aspect_ratio', 'bit_rate', 'bit_rate_mode', 'bit_rate_nominal', + 'bit_pixel_frame', 'bit_depth', 'language', 'format_profile', + 'color_primaries', 'title', 'scan_type', 'transfer_characteristics', 'hdr_format' + } + audio_fields = { + 'codec', 'format', 'bit_rate', 'channels', 'title', 'language', 'format_profile', 'stream_size' + } + text_fields = {'language'} + + # Split MediaInfo by lines and process each line + for line in mediainfo_text.splitlines(): + line = line.strip() + + # Detect a new section + section_match = section_pattern.match(line) + if section_match: + # Save the last track data if moving to a new section + if current_section and current_track: + if current_section in ["video", "audio", "text"]: + parsed_data[current_section].append(current_track) + else: + parsed_data[current_section] = current_track + current_track = {} + + # Update the current section + current_section = section_match.group(1).lower() + continue + + # Split each line on the first colon to separate property and value + if ":" in 
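# ---------------------------------------------------------------------------
# Editor's sketch (not part of the patch): how the parser turns a MediaInfo
# language name into flag BBCode via LANGUAGE_CODE_MAP, falling back to the
# raw text for unmapped languages (map excerpt only):
CODES = {"english": "us", "german": "de", "japanese": "jp"}

def language_display(value):
    code = CODES.get(value.lower())
    if code:
        return f"[img=20]https://blutopia.cc/img/flags/{code}.png[/img]"
    return value                       # no flag known - keep the plain text

print(language_display("German"))      # flag image BBCode
print(language_display("Klingon"))     # unchanged fallback
# ---------------------------------------------------------------------------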
line: + property_name, property_value = map(str.strip, line.split(":", 1)) + property_name = property_name.lower().replace(" ", "_") + + # Add property if it's a recognized field for the current section + if current_section == "general" and property_name in general_fields: + current_track[property_name] = property_value + elif current_section == "video" and property_name in video_fields: + current_track[property_name] = property_value + elif current_section == "audio" and property_name in audio_fields: + current_track[property_name] = property_value + elif current_section == "text" and property_name in text_fields: + # Convert language to country code or fallback to the text if not in map + country_code = self.LANGUAGE_CODE_MAP.get(property_value.lower()) + if country_code: + current_track[property_name] = f"[img=20]https://blutopia.cc/img/flags/{country_code}.png[/img]" + else: + current_track[property_name] = property_value # Fallback to text if no match + + # Append the last track to the parsed data + if current_section and current_track: + if current_section in ["video", "audio", "text"]: + parsed_data[current_section].append(current_track) + else: + parsed_data[current_section] = current_track + + return parsed_data + + def format_bbcode(self, parsed_mediainfo): + bbcode_output = "\n" + + # Format General Section + if "general" in parsed_mediainfo: + bbcode_output += "[b]General[/b]\n" + for prop, value in parsed_mediainfo["general"].items(): + bbcode_output += f"[b]{prop.replace('_', ' ').capitalize()}:[/b] {value}\n" + + # Format Video Section + if "video" in parsed_mediainfo: + bbcode_output += "\n[b]Video[/b]\n" + for track in parsed_mediainfo["video"]: + for prop, value in track.items(): + bbcode_output += f"[b]{prop.replace('_', ' ').capitalize()}:[/b] {value}\n" + + # Format Audio Section + if "audio" in parsed_mediainfo: + bbcode_output += "\n[b]Audio[/b]\n" + for index, track in enumerate(parsed_mediainfo["audio"], start=1): # Start enumeration at 1 + parts = [f"{index}."] # Start with track number without a trailing slash + + # Language flag image + language = track.get("language", "").lower() + country_code = self.LANGUAGE_CODE_MAP.get(language) + if country_code: + parts.append(f"[img=20]https://blutopia.cc/img/flags/{country_code}.png[/img]") + else: + parts.append(language.capitalize() if language else "") + + # Other properties to concatenate + properties = ["language", "codec", "format", "channels", "bit_rate", "format_profile", "stream_size"] + for prop in properties: + if prop in track and track[prop]: # Only add non-empty properties + parts.append(track[prop]) + + # Join parts (starting from index 1, after the track number) with slashes and add to bbcode_output + bbcode_output += f"{parts[0]} " + " / ".join(parts[1:]) + "\n" + + # Format Text Section - Centered with flags or text, spaced apart + if "text" in parsed_mediainfo: + bbcode_output += "\n[b]Subtitles[/b]\n" + subtitle_entries = [] + for track in parsed_mediainfo["text"]: + language_display = track.get("language", "") + subtitle_entries.append(language_display) + bbcode_output += " ".join(subtitle_entries) + + bbcode_output += "\n" + return bbcode_output From 5de9b1fd3dc85ac5072feb25faea9c66aac608d2 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 29 Oct 2024 21:49:56 +1000 Subject: [PATCH 364/741] Make multi description configurable --- data/example-config.py | 13 ++++++++++++- src/trackers/COMMON.py | 25 +++++++++++++------------ 2 files changed, 25 insertions(+), 13 deletions(-) diff --git 
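# ---------------------------------------------------------------------------
# Editor's sketch (not part of the patch): patch 364 reads its new knobs with
# defaults so older configs keep working; values are strings in
# example-config.py, hence the int() coercion:
config = {'DEFAULT': {'multiScreens': '2', 'charLimit': '14000'}}  # stand-in

multi_screens = int(config['DEFAULT'].get('multiScreens', 2))
char_limit = int(config['DEFAULT'].get('charLimit', 16000))
file_limit = int(config['DEFAULT'].get('fileLimit', 5))   # absent -> 5
print(multi_screens, char_limit, file_limit)
# ---------------------------------------------------------------------------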
a/data/example-config.py b/data/example-config.py index e5690839e..ab5816de3 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -26,8 +26,19 @@ "screens": "6", # Number of screenshots to use for each disc/episode in packs - # Currently PTP only "multiScreens": "2", + + # Description character count cutoff for UNIT3D sites when season packs only + # After hitting this limit, only filenames and screenshots will be used for any ADDITIONAL files + # still to be added to the description. You can set this small like 50, to only ever + # print filenames and screenshots for each file, no mediainfo will be printed. + # UNIT3D sites have a hard character limit for descriptions. A little over 17000 + # worked fine in a forum post at BLU. If you are at 1 < charLimit, the next full description will be added. + "charLimit": "16000", + + # How many files in a season pack will be added to the description before using an additional spoiler tag + # Any other files past this limit will be hidden/added all within a spoiler tag. + "fileLimit": "5", # Providing the option to change the size of the thumbnails where supported, default is 350 "thumbnail_size": "350", diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 0d2181205..5bbccda4b 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -43,6 +43,9 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des from src.prep import Prep prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf8').read() + multi_screens = int(self.config['DEFAULT'].get('multiScreens', 2)) + char_limit = int(self.config['DEFAULT'].get('charLimit', 16000)) + file_limit = int(self.config['DEFAULT'].get('fileLimit', 5)) with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]DESCRIPTION.txt", 'w', encoding='utf8') as descfile: if desc_header: descfile.write(desc_header) @@ -88,7 +91,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des console.print("[yellow]Using original images from meta['image_list'] for disc_0") images = meta['image_list'] descfile.write("[center]") - for img_index in range(min(2, len(images))): + for img_index in range(min(multi_screens, len(images))): raw_url = images[img_index]['raw_url'] descfile.write(f"[img=300]{raw_url}[/img] ") descfile.write("[/center]\n\n") @@ -114,7 +117,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des console.print(f"[yellow]No new screens for {new_images_key}; creating new screenshots") # Run prep.screenshots if no screenshots are present use_vs = meta.get('vapoursynth', False) - s = multiprocessing.Process(target=prep.disc_screenshots, args=(f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), 2)) + s = multiprocessing.Process(target=prep.disc_screenshots, args=(f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), multi_screens)) s.start() while s.is_alive(): await asyncio.sleep(1) @@ -125,7 +128,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des if new_screens: uploaded_images, _ = prep.upload_screens( meta, - 2, 1, 0, 2, + multi_screens, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]} ) @@ -163,7 +166,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des # Handle multiple files case # Initialize character 
counter char_count = 0 - max_char_limit = 100 # Character limit + max_char_limit = char_limit # Character limit other_files_spoiler_open = False # Track if "Other files" spoiler has been opened # Process each file @@ -201,11 +204,9 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des continue # Skip full MediaInfo and spoilers for remaining files - # Standard processing for files until character limit is reached new_images_key = f'new_images_file_{i}' - if i < 5: - # Standard processing for the first five files + if i < file_limit: if i == 0: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read() if mi_dump: @@ -221,7 +222,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des images = meta['image_list'] descfile.write("[center]") char_count += len("[center]") - for img_index in range(min(2, len(images))): + for img_index in range(min(multi_screens, len(images))): web_url = images[img_index]['web_url'] raw_url = images[img_index]['raw_url'] image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url] " @@ -258,14 +259,14 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") if not new_screens: - s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, 3, True, None)) + s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, multi_screens + 1, True, None)) s.start() while s.is_alive(): await asyncio.sleep(1) new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") if new_screens: - uploaded_images, _ = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]}) + uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]}) for img in uploaded_images: meta[new_images_key].append({ 'img_url': img['img_url'], @@ -287,13 +288,13 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des with open(meta_filename, 'w') as f: json.dump(meta, f, indent=4) - elif i == 5 and not other_files_spoiler_open: + elif i == file_limit and not other_files_spoiler_open: # Open "Other files" spoiler for the fifth file descfile.write("[spoiler=Other files]\n") char_count += len("[spoiler=Other files]\n") other_files_spoiler_open = True - if i >= 5 and char_count < max_char_limit: + if i >= file_limit and char_count < max_char_limit: mi_dump = MediaInfo.parse(file, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) parsed_mediainfo = self.parser.parse_mediainfo(mi_dump) formatted_bbcode = self.parser.format_bbcode(parsed_mediainfo) From 137e04e92675d5bd3dbc5c60996d4696be0c6620 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 29 Oct 2024 21:50:34 +1000 Subject: [PATCH 365/741] Auto docker for the brave --- .github/workflows/docker-image.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index 8ac8ce36f..4fe76190a 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -5,7 +5,7 @@ on: branches: - master - develop - - dupe-checking + - descriptions workflow_dispatch: env: From e15052a34e6667690b78589ad2497d660c6dad5e Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 29 Oct 2024 
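# ---------------------------------------------------------------------------
# Editor's sketch (not part of the patch): the description writer counts
# every BBCode fragment against charLimit; once the budget is spent, later
# files only get filenames and screenshots. The bookkeeping in miniature:
import io

def write_counted(out, fragment, count):
    out.write(fragment)
    return count + len(fragment)       # mirrors char_count += len(...)

buf = io.StringIO()
char_count = write_counted(buf, "[center]file01\n[/center]\n", 0)
over_budget = char_count >= 14000      # then: filenames/screens only
print(char_count, over_budget)
# ---------------------------------------------------------------------------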
21:52:33 +1000 Subject: [PATCH 366/741] clarify bbcode --- data/example-config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/example-config.py b/data/example-config.py index ab5816de3..b9171bc36 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -28,7 +28,7 @@ # Number of screenshots to use for each disc/episode in packs "multiScreens": "2", - # Description character count cutoff for UNIT3D sites when season packs only + # Description character count (including bbcode) cutoff for UNIT3D sites when season packs only # After hitting this limit, only filenames and screenshots will be used for any ADDITIONAL files # still to be added to the description. You can set this small like 50, to only ever # print filenames and screenshots for each file, no mediainfo will be printed. From 2a220665a7cc4d152bf76b8c7e623e0e05e6fbff Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 29 Oct 2024 21:56:28 +1000 Subject: [PATCH 367/741] Dial default description count --- data/example-config.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/data/example-config.py b/data/example-config.py index b9171bc36..f90270009 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -27,15 +27,15 @@ # Number of screenshots to use for each disc/episode in packs "multiScreens": "2", - + # Description character count (including bbcode) cutoff for UNIT3D sites when season packs only # After hitting this limit, only filenames and screenshots will be used for any ADDITIONAL files # still to be added to the description. You can set this small like 50, to only ever # print filenames and screenshots for each file, no mediainfo will be printed. # UNIT3D sites have a hard character limit for descriptions. A little over 17000 # worked fine in a forum post at BLU. If you are at 1 < charLimit, the next full description will be added. - "charLimit": "16000", - + "charLimit": "14000", + # How many files in a season pack will be added to the description before using an additional spoiler tag # Any other files past this limit will be hidden/added all within a spoiler tag. 
"fileLimit": "5", From 7f1872137e24c1d58ce9899564f1c42deb8cbdda Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 29 Oct 2024 22:08:05 +1000 Subject: [PATCH 368/741] Cleanup on aisle 4 --- src/trackers/COMMON.py | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 5bbccda4b..4d199318f 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -88,7 +88,8 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des if i == 0: # For the first disc, use images from `meta['image_list']` - console.print("[yellow]Using original images from meta['image_list'] for disc_0") + if meta['debug']: + console.print("[yellow]Using original uploaded images for first disc") images = meta['image_list'] descfile.write("[center]") for img_index in range(min(multi_screens, len(images))): @@ -98,7 +99,8 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des else: # Check if screenshots exist for the current disc key if new_images_key in meta and meta[new_images_key]: - console.print(f"[yellow]Found needed image URLs for {new_images_key}") + if meta['debug']: + console.print(f"[yellow]Found needed image URLs for {new_images_key}") # Use existing URLs from meta to write to descfile descfile.write("[center]") for img in meta[new_images_key]: @@ -112,9 +114,9 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des # Check if new screenshots already exist before running prep.screenshots new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") - console.print(f"[yellow]Checking new screens for {new_images_key}: {new_screens}") if not new_screens: - console.print(f"[yellow]No new screens for {new_images_key}; creating new screenshots") + if meta['debug']: + console.print(f"[yellow]No new screens for {new_images_key}; creating new screenshots") # Run prep.screenshots if no screenshots are present use_vs = meta.get('vapoursynth', False) s = multiprocessing.Process(target=prep.disc_screenshots, args=(f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), multi_screens)) @@ -190,7 +192,8 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des # Check and write screenshots if they exist new_images_key = f'new_images_file_{i}' if new_images_key in meta and meta[new_images_key]: - console.print(f"[yellow]Found needed image URLs for {new_images_key}") + if meta['debug']: + console.print(f"[yellow]Found needed image URLs for {new_images_key}") descfile.write("[center]") char_count += len("[center]") for img in meta[new_images_key]: @@ -212,9 +215,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des if mi_dump: parsed_mediainfo = self.parser.parse_mediainfo(mi_dump) formatted_bbcode = self.parser.format_bbcode(parsed_mediainfo) - - match = re.search(r"Complete name\s+:\s+(.+)", mi_dump) - filename = os.path.splitext(os.path.basename(match.group(1)).strip())[0] if match else "MediaInfo" + filename = os.path.splitext(os.path.basename(file.strip()))[0] descfile.write(f"[center]{filename}\n[/center]\n") char_count += len(f"[center]{filename}\n[/center]\n") @@ -222,6 +223,8 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des images = meta['image_list'] descfile.write("[center]") char_count += len("[center]") + if meta['debug']: + console.print("[yellow]Using original uploaded 
images for first file") for img_index in range(min(multi_screens, len(images))): web_url = images[img_index]['web_url'] raw_url = images[img_index]['raw_url'] @@ -235,14 +238,14 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des parsed_mediainfo = self.parser.parse_mediainfo(mi_dump) formatted_bbcode = self.parser.format_bbcode(parsed_mediainfo) - match = re.search(r"Complete name\s+:\s+(.+)", mi_dump) - filename = os.path.splitext(os.path.basename(match.group(1)).strip())[0] if match else "MediaInfo" + filename = os.path.splitext(os.path.basename(file.strip()))[0] descfile.write(f"[center][spoiler={filename}]{formatted_bbcode}[/spoiler]\n") char_count += len(f"[center][spoiler={filename}]{formatted_bbcode}[/spoiler]\n") if new_images_key in meta and meta[new_images_key]: - console.print(f"[yellow]Found needed image URLs for {new_images_key}") + if meta['debug']: + console.print(f"[yellow]Found needed image URLs for {new_images_key}") descfile.write("[center]") char_count += len("[center]") for img in meta[new_images_key]: @@ -259,6 +262,8 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") if not new_screens: + if meta['debug']: + console.print(f"[yellow]No new screens for {new_images_key}; creating new screenshots") s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, multi_screens + 1, True, None)) s.start() while s.is_alive(): @@ -299,8 +304,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des parsed_mediainfo = self.parser.parse_mediainfo(mi_dump) formatted_bbcode = self.parser.format_bbcode(parsed_mediainfo) - match = re.search(r"Complete name\s+:\s+(.+)", mi_dump) - filename = os.path.splitext(os.path.basename(match.group(1)).strip())[0] if match else "MediaInfo" + filename = os.path.splitext(os.path.basename(file.strip()))[0] descfile.write(f"[spoiler={filename}]{formatted_bbcode}[/spoiler]\n\n") char_count += len(f"[spoiler={filename}]{formatted_bbcode}[/spoiler]\n\n") @@ -324,7 +328,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des descfile.write("[/spoiler][/center]\n") char_count += len("[/spoiler][/center]\n") - console.print(f"[green]Total characters written to description: {char_count}") + console.print(f"[yellow]Total characters written to description: {char_count}") # Append signature if provided if signature: From e2717dcd4133fcf7c73b63a71b33799310226d1e Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 29 Oct 2024 22:48:35 +1000 Subject: [PATCH 369/741] Fix multi disc --- src/trackers/COMMON.py | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 4d199318f..5e2a629e4 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -77,21 +77,16 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des # Set a unique key per disc for managing images new_images_key = f'new_images_disc_{i}' - # Writing summary for each disc type - if each['type'] == "BDMV": - console.print("[yellow]Writing each summary") - descfile.write(f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n\n") - elif each['type'] == "DVD": - descfile.write(f"{each['name']}:\n") - 
descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code]{each['vob_mi']}[/code][/spoiler] ") - descfile.write(f"[spoiler={os.path.basename(each['ifo'])}][code]{each['ifo_mi']}[/code][/spoiler]\n\n") - if i == 0: + descfile.write("[center]") + if each['type'] == "BDMV": + descfile.write(f"{each.get('name', 'BDINFO')}\n\n") + elif each['type'] == "DVD": + descfile.write(f"{each['name']}:\n") # For the first disc, use images from `meta['image_list']` if meta['debug']: console.print("[yellow]Using original uploaded images for first disc") images = meta['image_list'] - descfile.write("[center]") for img_index in range(min(multi_screens, len(images))): raw_url = images[img_index]['raw_url'] descfile.write(f"[img=300]{raw_url}[/img] ") @@ -101,17 +96,32 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des if new_images_key in meta and meta[new_images_key]: if meta['debug']: console.print(f"[yellow]Found needed image URLs for {new_images_key}") + descfile.write("[center]") + if each['type'] == "BDMV": + descfile.write(f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n\n") + elif each['type'] == "DVD": + descfile.write(f"{each['name']}:\n") + descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code]{each['vob_mi']}[/code][/spoiler] ") + descfile.write(f"[spoiler={os.path.basename(each['ifo'])}][code]{each['ifo_mi']}[/code][/spoiler]\n\n") + descfile.write("[/center]\n\n") # Use existing URLs from meta to write to descfile descfile.write("[center]") for img in meta[new_images_key]: raw_url = img['raw_url'] - descfile.write("[img=300]{raw_url}[/img] ") + descfile.write(f"[img=300]{raw_url}[/img] ") descfile.write("[/center]\n\n") else: # Increment retry_count for tracking but use unique disc keys for each disc meta['retry_count'] += 1 meta[new_images_key] = [] - + descfile.write("[center]") + if each['type'] == "BDMV": + descfile.write(f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n\n") + elif each['type'] == "DVD": + descfile.write(f"{each['name']}:\n") + descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code]{each['vob_mi']}[/code][/spoiler] ") + descfile.write(f"[spoiler={os.path.basename(each['ifo'])}][code]{each['ifo_mi']}[/code][/spoiler]\n\n") + descfile.write("[/center]\n\n") # Check if new screenshots already exist before running prep.screenshots new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") if not new_screens: From 7513eb8454d2e74bc255a24b734585dc94b380a2 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 29 Oct 2024 23:13:57 +1000 Subject: [PATCH 370/741] Don't store multi image sizes --- src/prep.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index 523d255bf..3dd4c2269 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2704,6 +2704,7 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") initial_img_host = self.config['DEFAULT'][f'img_host_{img_host_num}'] img_host = meta['imghost'] # Use the correctly updated image host from meta + using_custom_img_list = bool(custom_img_list) image_list = [] successfully_uploaded = set() # Track successfully uploaded images @@ -2916,7 +2917,9 @@ def exponential_backoff(retry_count, initial_timeout): successfully_uploaded.add(image) # Track the uploaded image # Store size in meta, indexed by the img_url - meta['image_sizes'][img_url] = image_size # Keep sizes separate 
in meta['image_sizes'] + # Storing image_sizes for any multi disc/files will probably break something, so lets not do that. + if not using_custom_img_list: + meta['image_sizes'][img_url] = image_size # Keep sizes separate in meta['image_sizes'] progress.advance(upload_task) i += 1 # Increment the image counter only after success From ff0badb5d11b8df4624a11e685675dd8b2227a12 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 30 Oct 2024 00:33:13 +1000 Subject: [PATCH 371/741] DVD and other minor fixes --- src/trackers/COMMON.py | 67 +++++++++++++++++++++++++++--------------- 1 file changed, 43 insertions(+), 24 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 5e2a629e4..3a8008d1d 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -62,10 +62,18 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des descfile.write(desc) # Handle single disc case if len(discs) == 1: + each = discs[0] + if each['type'] == "DVD": + descfile.write("[center]") + descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code]{each['vob_mi']}[/code][/spoiler]\n\n") + descfile.write("[/center]") images = meta['image_list'] + descfile.write("[center]") for img_index in range(len(images[:int(meta['screens'])])): + web_url = images[img_index]['web_url'] raw_url = images[img_index]['raw_url'] - descfile.write(f"[img={self.config['DEFAULT'].get('thumbnail_size', '350')}] {raw_url}[/img]\n") + descfile.write(f"[url={web_url}][img={self.config['DEFAULT'].get('thumbnail_size', '350')}]{raw_url}[/img][/url] ") + descfile.write("[/center]") # Handle multiple discs case elif len(discs) > 1: @@ -83,13 +91,17 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des descfile.write(f"{each.get('name', 'BDINFO')}\n\n") elif each['type'] == "DVD": descfile.write(f"{each['name']}:\n") + descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code]{each['vob_mi']}[/code][/spoiler]") + descfile.write(f"[spoiler={os.path.basename(each['ifo'])}][code]{each['ifo_mi']}[/code][/spoiler]\n\n") # For the first disc, use images from `meta['image_list']` if meta['debug']: console.print("[yellow]Using original uploaded images for first disc") images = meta['image_list'] for img_index in range(min(multi_screens, len(images))): + web_url = images[img_index]['web_url'] raw_url = images[img_index]['raw_url'] - descfile.write(f"[img=300]{raw_url}[/img] ") + image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url] " + descfile.write(image_str) descfile.write("[/center]\n\n") else: # Check if screenshots exist for the current disc key @@ -107,8 +119,10 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des # Use existing URLs from meta to write to descfile descfile.write("[center]") for img in meta[new_images_key]: + web_url = img['web_url'] raw_url = img['raw_url'] - descfile.write(f"[img=300]{raw_url}[/img] ") + image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url] " + descfile.write(image_str) descfile.write("[/center]\n\n") else: # Increment retry_count for tracking but use unique disc keys for each disc @@ -123,27 +137,30 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des descfile.write(f"[spoiler={os.path.basename(each['ifo'])}][code]{each['ifo_mi']}[/code][/spoiler]\n\n") descfile.write("[/center]\n\n") # Check if new screenshots already exist before running prep.screenshots - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + if 
each['type'] == "BDMV": + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + elif each['type'] == "DVD": + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['discs'][i]['name']}-*.png") if not new_screens: if meta['debug']: console.print(f"[yellow]No new screens for {new_images_key}; creating new screenshots") # Run prep.screenshots if no screenshots are present - use_vs = meta.get('vapoursynth', False) - s = multiprocessing.Process(target=prep.disc_screenshots, args=(f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), multi_screens)) - s.start() - while s.is_alive(): - await asyncio.sleep(1) - - # Re-check for new screenshots after screenshots process - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + if each['type'] == "BDMV": + use_vs = meta.get('vapoursynth', False) + s = multiprocessing.Process(target=prep.disc_screenshots, args=(f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), multi_screens)) + s.start() + while s.is_alive(): + await asyncio.sleep(1) + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + elif each['type'] == "DVD": + s = multiprocessing.Process(target=prep.dvd_screenshots, args=(meta, i, multi_screens)) + s.start() + while s.is_alive() is True: + await asyncio.sleep(1) + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['discs'][i]['name']}-*.png") if new_screens: - uploaded_images, _ = prep.upload_screens( - meta, - multi_screens, 1, 0, 2, - new_screens, - {new_images_key: meta[new_images_key]} - ) + uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]}) # Append each uploaded image's data to `meta[new_images_key]` for img in uploaded_images: @@ -156,14 +173,16 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des # Write new URLs to descfile descfile.write("[center]") for img in uploaded_images: + web_url = img['web_url'] raw_url = img['raw_url'] - descfile.write(f"[img=300]{raw_url}[/img] ") + image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url] " + descfile.write(image_str) descfile.write("[/center]\n\n") - # Save the updated meta to `meta.json` after upload - meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json" - with open(meta_filename, 'w') as f: - json.dump(meta, f, indent=4) + # Save the updated meta to `meta.json` after upload + meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json" + with open(meta_filename, 'w') as f: + json.dump(meta, f, indent=4) # Handle single file case if len(filelist) == 1: @@ -172,7 +191,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des for img_index in range(len(images[:int(meta['screens'])])): web_url = images[img_index]['web_url'] raw_url = images[img_index]['raw_url'] - descfile.write(f"[url={web_url}][img={self.config['DEFAULT'].get('thumbnail_size', '350')}] {raw_url}[/img][/url] ") + descfile.write(f"[url={web_url}][img={self.config['DEFAULT'].get('thumbnail_size', '350')}]{raw_url}[/img][/url] ") descfile.write("[/center]") # Handle multiple files case From 6a27d0ec323153b1d43f4899f5b361d872facbec Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 30 Oct 2024 00:55:46 +1000 Subject: [PATCH 372/741] More languages --- src/trackers/COMMON.py | 32 +++++++++++++++++++++----------- 1 file changed, 21 
insertions(+), 11 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 3a8008d1d..46c7c5aa3 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -759,32 +759,42 @@ async def filter_dupes(self, dupes, meta): class MediaInfoParser: # Language to ISO country code mapping LANGUAGE_CODE_MAP = { - "english": "us", - "german": "de", - "french": "fr", - "spanish": "es", - "italian": "it", - "portuguese": "pt", - "dutch": "nl", - "japanese": "jp", "arabic": "ae", + "bulgarian": "bg", + "chinese": "cn", + "croatian": "hr", "czech": "cz", "danish": "dk", - "greek": "gr", + "dutch": "nl", + "english": "us", + "estonian": "ee", "finnish": "fi", + "french": "fr", + "german": "de", + "greek": "gr", "hebrew": "il", "hungarian": "hu", + "icelandic": "is", "indonesian": "id", + "italian": "it", + "japanese": "jp", "korean": "kr", + "latvian": "lv", + "lithuanian": "lt", "norwegian bokmal": "no", "polish": "pl", + "portuguese": "pt", "romanian": "ro", "russian": "ru", + "serbian": "rs", + "slovak": "sk", + "slovenian": "si", + "spanish": "es", "swedish": "se", "thai": "th", "turkish": "tr", - "vietnamese": "vn", - "chinese": "cn", + "ukrainian": "ua", + "vietnamese": "vn" # Add more mappings as needed } From 8f922026e2c247a1bf0814d269087f174e81de42 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 30 Oct 2024 01:29:45 +1000 Subject: [PATCH 373/741] Site specific image urls --- src/trackers/COMMON.py | 35 ++++++++++++++++++++++++++++++----- src/trackers/OE.py | 2 +- 2 files changed, 31 insertions(+), 6 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 46c7c5aa3..0215f9e66 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -242,7 +242,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des if i == 0: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read() if mi_dump: - parsed_mediainfo = self.parser.parse_mediainfo(mi_dump) + parsed_mediainfo = self.parser.parse_mediainfo(mi_dump, tracker) formatted_bbcode = self.parser.format_bbcode(parsed_mediainfo) filename = os.path.splitext(os.path.basename(file.strip()))[0] @@ -264,7 +264,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des char_count += len("[/center]\n\n") else: mi_dump = MediaInfo.parse(file, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) - parsed_mediainfo = self.parser.parse_mediainfo(mi_dump) + parsed_mediainfo = self.parser.parse_mediainfo(mi_dump, tracker) formatted_bbcode = self.parser.format_bbcode(parsed_mediainfo) filename = os.path.splitext(os.path.basename(file.strip()))[0] @@ -330,7 +330,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des if i >= file_limit and char_count < max_char_limit: mi_dump = MediaInfo.parse(file, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) - parsed_mediainfo = self.parser.parse_mediainfo(mi_dump) + parsed_mediainfo = self.parser.parse_mediainfo(mi_dump, tracker) formatted_bbcode = self.parser.format_bbcode(parsed_mediainfo) filename = os.path.splitext(os.path.basename(file.strip()))[0] @@ -798,12 +798,37 @@ class MediaInfoParser: # Add more mappings as needed } - def parse_mediainfo(self, mediainfo_text): + TRACKER_MAP = { + "BLU": "blutopia.cc", + "AITHER": "aither.cc", + "AL": "animelovers.club", + "CBR": "capybarabr.com", + "FNP": "fearnopeer.com", + "HP": "hidden-palace.net", + "HUNO": "hawke.uno", + "JPTV": "jptv.club", 
+ "LCD": "locadora.cc", + "LST": "lst.gg", + "LT": "lat-team.com", + "OTW": "oldtoons.world", + "PSS": "privatesilverscreen.cc", + "R4E": "racing4everyone.eu", + "RF": "reelflix.xyz", + "SHRI": "shareisland.org", + "STC": "skipthecommericals", + "STT": "skipthetrailers.xyz", + "ULCX": "upload.cx", + "UTP": "utp.to" + # Add more mappings as needed + } + + def parse_mediainfo(self, mediainfo_text, tracker): # Patterns for matching sections and fields section_pattern = re.compile(r"^(General|Video|Audio|Text|Menu)(?:\s#\d+)?", re.IGNORECASE) parsed_data = {"general": {}, "video": [], "audio": [], "text": []} current_section = None current_track = {} + mapped_tracker = self.TRACKER_MAP.get(tracker, tracker) # Field lists based on PHP definitions general_fields = {'file_name', 'format', 'duration', 'file_size', 'bit_rate'} @@ -853,7 +878,7 @@ def parse_mediainfo(self, mediainfo_text): # Convert language to country code or fallback to the text if not in map country_code = self.LANGUAGE_CODE_MAP.get(property_value.lower()) if country_code: - current_track[property_name] = f"[img=20]https://blutopia.cc/img/flags/{country_code}.png[/img]" + current_track[property_name] = f"[img=20]https://{mapped_tracker}/img/flags/{country_code}.png[/img]" else: current_track[property_name] = property_value # Fallback to text if no match diff --git a/src/trackers/OE.py b/src/trackers/OE.py index cc485db0d..7aec84b24 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -98,7 +98,7 @@ async def upload(self, meta, disctype): if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - if region_id != 0 and region_id != 243: + if region_id != 0: data['region_id'] = region_id if distributor_id != 0: data['distributor_id'] = distributor_id From a3e128bce694f49b91af9b3ad083eb093feb2be2 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 30 Oct 2024 01:56:54 +1000 Subject: [PATCH 374/741] Revert "Site specific image urls" This reverts commit 8f922026e2c247a1bf0814d269087f174e81de42. 
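
The reverted approach resolved each tracker code to its home domain and hot-linked that site's /img/flags/ icons; the "Universal image urls" commit below swaps in host-agnostic ptpimg URLs instead, so one rendered description works on every tracker. A minimal sketch of the reverted lookup, assuming only that a UNIT3D site serves flag PNGs under /img/flags/ (the two-entry map here is illustrative, not the full table from the patch):

    # Dict lookup with the tracker code itself as the fallback domain,
    # mirroring TRACKER_MAP.get(tracker, tracker) from the reverted diff.
    TRACKER_MAP = {"BLU": "blutopia.cc", "RF": "reelflix.xyz"}

    def flag_bbcode(tracker: str, country_code: str) -> str:
        domain = TRACKER_MAP.get(tracker, tracker)
        return f"[img=20]https://{domain}/img/flags/{country_code}.png[/img]"

The weakness is visible in the fallback: an unmapped tracker code yields a URL on a domain that may not exist, which is presumably part of why the universal image host won out.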
--- src/trackers/COMMON.py | 35 +++++------------------------------ src/trackers/OE.py | 2 +- 2 files changed, 6 insertions(+), 31 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 0215f9e66..46c7c5aa3 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -242,7 +242,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des if i == 0: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read() if mi_dump: - parsed_mediainfo = self.parser.parse_mediainfo(mi_dump, tracker) + parsed_mediainfo = self.parser.parse_mediainfo(mi_dump) formatted_bbcode = self.parser.format_bbcode(parsed_mediainfo) filename = os.path.splitext(os.path.basename(file.strip()))[0] @@ -264,7 +264,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des char_count += len("[/center]\n\n") else: mi_dump = MediaInfo.parse(file, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) - parsed_mediainfo = self.parser.parse_mediainfo(mi_dump, tracker) + parsed_mediainfo = self.parser.parse_mediainfo(mi_dump) formatted_bbcode = self.parser.format_bbcode(parsed_mediainfo) filename = os.path.splitext(os.path.basename(file.strip()))[0] @@ -330,7 +330,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des if i >= file_limit and char_count < max_char_limit: mi_dump = MediaInfo.parse(file, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) - parsed_mediainfo = self.parser.parse_mediainfo(mi_dump, tracker) + parsed_mediainfo = self.parser.parse_mediainfo(mi_dump) formatted_bbcode = self.parser.format_bbcode(parsed_mediainfo) filename = os.path.splitext(os.path.basename(file.strip()))[0] @@ -798,37 +798,12 @@ class MediaInfoParser: # Add more mappings as needed } - TRACKER_MAP = { - "BLU": "blutopia.cc", - "AITHER": "aither.cc", - "AL": "animelovers.club", - "CBR": "capybarabr.com", - "FNP": "fearnopeer.com", - "HP": "hidden-palace.net", - "HUNO": "hawke.uno", - "JPTV": "jptv.club", - "LCD": "locadora.cc", - "LST": "lst.gg", - "LT": "lat-team.com", - "OTW": "oldtoons.world", - "PSS": "privatesilverscreen.cc", - "R4E": "racing4everyone.eu", - "RF": "reelflix.xyz", - "SHRI": "shareisland.org", - "STC": "skipthecommericals", - "STT": "skipthetrailers.xyz", - "ULCX": "upload.cx", - "UTP": "utp.to" - # Add more mappings as needed - } - - def parse_mediainfo(self, mediainfo_text, tracker): + def parse_mediainfo(self, mediainfo_text): # Patterns for matching sections and fields section_pattern = re.compile(r"^(General|Video|Audio|Text|Menu)(?:\s#\d+)?", re.IGNORECASE) parsed_data = {"general": {}, "video": [], "audio": [], "text": []} current_section = None current_track = {} - mapped_tracker = self.TRACKER_MAP.get(tracker, tracker) # Field lists based on PHP definitions general_fields = {'file_name', 'format', 'duration', 'file_size', 'bit_rate'} @@ -878,7 +853,7 @@ def parse_mediainfo(self, mediainfo_text, tracker): # Convert language to country code or fallback to the text if not in map country_code = self.LANGUAGE_CODE_MAP.get(property_value.lower()) if country_code: - current_track[property_name] = f"[img=20]https://{mapped_tracker}/img/flags/{country_code}.png[/img]" + current_track[property_name] = f"[img=20]https://blutopia.cc/img/flags/{country_code}.png[/img]" else: current_track[property_name] = property_value # Fallback to text if no match diff --git a/src/trackers/OE.py b/src/trackers/OE.py index 
7aec84b24..cc485db0d 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -98,7 +98,7 @@ async def upload(self, meta, disctype): if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - if region_id != 0: + if region_id != 0 and region_id != 243: data['region_id'] = region_id if distributor_id != 0: data['distributor_id'] = distributor_id From 6ea67f35dfc9512950759d40eaefd300602c537e Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 30 Oct 2024 02:27:38 +1000 Subject: [PATCH 375/741] Universal image urls --- src/trackers/COMMON.py | 76 +++++++++++++++++++++--------------------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 46c7c5aa3..4346e0558 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -759,42 +759,42 @@ async def filter_dupes(self, dupes, meta): class MediaInfoParser: # Language to ISO country code mapping LANGUAGE_CODE_MAP = { - "arabic": "ae", - "bulgarian": "bg", - "chinese": "cn", - "croatian": "hr", - "czech": "cz", - "danish": "dk", - "dutch": "nl", - "english": "us", - "estonian": "ee", - "finnish": "fi", - "french": "fr", - "german": "de", - "greek": "gr", - "hebrew": "il", - "hungarian": "hu", - "icelandic": "is", - "indonesian": "id", - "italian": "it", - "japanese": "jp", - "korean": "kr", - "latvian": "lv", - "lithuanian": "lt", - "norwegian bokmal": "no", - "polish": "pl", - "portuguese": "pt", - "romanian": "ro", - "russian": "ru", - "serbian": "rs", - "slovak": "sk", - "slovenian": "si", - "spanish": "es", - "swedish": "se", - "thai": "th", - "turkish": "tr", - "ukrainian": "ua", - "vietnamese": "vn" + "arabic": "https://ptpimg.me/5g8i9u.png", + "bulgarian": "https://ptpimg.me/un9dc6.png", + "chinese": "https://ptpimg.me/ea3yv3.png", + "croatian": "https://ptpimg.me/rxi533.png", + "czech": "https://ptpimg.me/5m75n3.png", + "danish": "https://ptpimg.me/m35c41.png", + "dutch": "https://ptpimg.me/6nmwpx.png", + "english": "https://ptpimg.me/ine2fd.png", + "estonian": "https://ptpimg.me/z25pmk.png", + "finnish": "https://ptpimg.me/p4354c.png", + "french": "https://ptpimg.me/m7mfoi.png", + "german": "https://ptpimg.me/dw8d04.png", + "greek": "https://ptpimg.me/px1u3e.png", + "hebrew": "https://ptpimg.me/5jw1jp.png", + "hungarian": "https://ptpimg.me/fr4aj7.png", + "icelandic": "https://ptpimg.me/40o553.png", + "indonesian": "https://ptpimg.me/f00c8u.png", + "italian": "https://ptpimg.me/ao762a.png", + "japanese": "https://ptpimg.me/o1amm3.png", + "korean": "https://ptpimg.me/2tvwgn.png", + "latvian": "https://ptpimg.me/3x2y1b.png", + "lithuanian": "https://ptpimg.me/b444z8.png", + "norwegian bokmal": "https://ptpimg.me/1t11u4.png", + "polish": "https://ptpimg.me/m73uwa.png", + "portuguese": "https://ptpimg.me/5j1a7q.png", + "romanian": "https://ptpimg.me/ux94x0.png", + "russian": "https://ptpimg.me/v33j64.png", + "serbian": "https://ptpimg.me/2139p2.png", + "slovak": "https://ptpimg.me/70994n.png", + "slovenian": "https://ptpimg.me/61yp81.png", + "spanish": "https://ptpimg.me/xj51b9.png", + "swedish": "https://ptpimg.me/082090.png", + "thai": "https://ptpimg.me/38ru43.png", + "turkish": "https://ptpimg.me/g4jg39.png", + "ukrainian": "https://ptpimg.me/d8fp6k.png", + "vietnamese": "https://ptpimg.me/qnuya2.png" # Add more mappings as needed } @@ -853,7 +853,7 @@ def parse_mediainfo(self, mediainfo_text): # Convert language to country code or fallback to the text if not in map country_code = 
self.LANGUAGE_CODE_MAP.get(property_value.lower()) if country_code: - current_track[property_name] = f"[img=20]https://blutopia.cc/img/flags/{country_code}.png[/img]" + current_track[property_name] = f"[img=20]{country_code}[/img]" else: current_track[property_name] = property_value # Fallback to text if no match @@ -892,7 +892,7 @@ def format_bbcode(self, parsed_mediainfo): language = track.get("language", "").lower() country_code = self.LANGUAGE_CODE_MAP.get(language) if country_code: - parts.append(f"[img=20]https://blutopia.cc/img/flags/{country_code}.png[/img]") + parts.append(f"[img=20]{country_code}[/img]") else: parts.append(language.capitalize() if language else "") From aa02fcf3fdc1653e6d23f593a71c391494ba078e Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 30 Oct 2024 02:50:49 +1000 Subject: [PATCH 376/741] Minor site fixes --- src/trackers/OE.py | 2 +- src/trackers/RF.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/trackers/OE.py b/src/trackers/OE.py index cc485db0d..7aec84b24 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -98,7 +98,7 @@ async def upload(self, meta, disctype): if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - if region_id != 0 and region_id != 243: + if region_id != 0: data['region_id'] = region_id if distributor_id != 0: data['distributor_id'] = distributor_id diff --git a/src/trackers/RF.py b/src/trackers/RF.py index 463ea9813..a502c1580 100644 --- a/src/trackers/RF.py +++ b/src/trackers/RF.py @@ -31,6 +31,9 @@ def __init__(self, config): pass async def upload(self, meta, disctype): + if meta.get('category') == "TV": + console.print('[bold red]This site only ALLOWS Movies.') + return common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.forum_link) @@ -92,8 +95,6 @@ async def upload(self, meta, disctype): params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - if meta.get('category') == "TV": - console.print('[bold red]This site only ALLOWS Movies.') if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: From ef210b1605f11d67c6048987160adcc3176b9904 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 30 Oct 2024 03:11:23 +1000 Subject: [PATCH 377/741] Prepare ANT 250 KiB torrent file size --- src/prep.py | 18 +++++++++--------- src/trackers/ANT.py | 4 ++-- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/prep.py b/src/prep.py index 523d255bf..d8cc59eeb 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2552,16 +2552,16 @@ def calculate_piece_size(cls, total_size, min_size, max_size, files): # print(f"Initial num_pieces: {num_pieces}, Initial torrent_file_size: {torrent_file_size} bytes") # Adjust the piece size to fit within the constraints - while not ((750 <= num_pieces <= 2200 or num_pieces < 750 and 40960 <= torrent_file_size <= 102400) and torrent_file_size <= 102400): + while not ((750 <= num_pieces <= 2200 or num_pieces < 750 and 40960 <= torrent_file_size <= 250000) and torrent_file_size <= 250000): # iteration += 1 # print(f"\nIteration {iteration}:") # print(f"Current piece_size: {piece_size} bytes") # print(f"Current num_pieces: {num_pieces}, Current torrent_file_size: {torrent_file_size} bytes") - if num_pieces > 1000 and num_pieces < 2000 and torrent_file_size < 100000: + if num_pieces > 1000 and 
num_pieces < 2000 and torrent_file_size < 250000:
                 break
-            elif num_pieces < 1500 and torrent_file_size >= 102400:
+            elif num_pieces < 1500 and torrent_file_size >= 250000:
                 piece_size *= 2
-                # print(f"Doubled piece_size to {piece_size} bytes (num_pieces < 1500 and torrent_file_size >= 100 KiB)")
+                # print(f"Doubled piece_size to {piece_size} bytes (num_pieces < 1500 and torrent_file_size >= 250 KiB)")
                 if piece_size > our_max_size:
                     piece_size = our_max_size
                     # print(f"piece_size exceeded max_size, set to our_max_size: {our_max_size} bytes")
@@ -2573,8 +2573,8 @@ def calculate_piece_size(cls, total_size, min_size, max_size, files):
                     piece_size = our_min_size
                     # print(f"piece_size went below min_size, set to our_min_size: {our_min_size} bytes")
                     break
-            elif 40960 < torrent_file_size < 102400:
-                # print(f"torrent_file_size is between 40 KiB and 100 KiB, exiting loop.")
+            elif 40960 < torrent_file_size < 250000:
+                # print(f"torrent_file_size is between 40 KiB and 250 KiB, exiting loop.")
                 break
             elif num_pieces > 2200:
                 piece_size *= 2
@@ -2586,13 +2586,13 @@ def calculate_piece_size(cls, total_size, min_size, max_size, files):
             elif torrent_file_size < 2048:
                 # print(f"torrent_file_size is less than 2 KiB, exiting loop.")
                 break
-            elif torrent_file_size > 102400:
+            elif torrent_file_size > 250000:
                 piece_size *= 2
-                # print(f"Doubled piece_size to {piece_size} bytes (torrent_file_size > 100 KiB)")
+                # print(f"Doubled piece_size to {piece_size} bytes (torrent_file_size > 250 KiB)")
                 if piece_size > our_max_size:
                     piece_size = our_max_size
                     # print(f"piece_size exceeded max_size, set to our_max_size: {our_max_size} bytes")
-                    cli_ui.warning('WARNING: .torrent size will exceed 100 KiB!')
+                    cli_ui.warning('WARNING: .torrent size will exceed 250 KiB!')
                     break

         # Update num_pieces
diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py
index d3aa73964..eb0addc76 100644
--- a/src/trackers/ANT.py
+++ b/src/trackers/ANT.py
@@ -67,8 +67,8 @@ async def upload(self, meta, disctype):
         torrent_file_size_kib = os.path.getsize(torrent_path) / 1024

         # Trigger regeneration automatically if size constraints aren't met
-        if torrent_file_size_kib > 100:  # 100 KiB
-            console.print("[yellow]Existing .torrent exceeds 100 KiB and will be regenerated to fit constraints.")
+        if torrent_file_size_kib > 250:  # 250 KiB
+            console.print("[yellow]Existing .torrent exceeds 250 KiB and will be regenerated to fit constraints.")
             from src.prep import Prep
             prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config)

From ca183ef705d9af1ea2c6802d4775c53060ea8352 Mon Sep 17 00:00:00 2001
From: Khakis
Date: Tue, 29 Oct 2024 12:35:40 -0500
Subject: [PATCH 378/741] Update COMMON.py For Screenshot Formatting

The space at the end of each screenshot tag would cause formatting issues
when wrapping to new lines, as there would be a trailing space in the
bbcode. Please reach out if screens are needed.
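
To make the concern concrete, a minimal sketch (not from the patch itself) of how the same row of thumbnails can be built without ever emitting a trailing space, by joining the finished tags instead of appending a separator to each one; `images` stands in for meta['image_list']:

    # Build every [url][img][/img][/url] tag first, then join them, so no
    # stray whitespace can wrap onto its own line in the rendered bbcode.
    tags = [f"[url={img['web_url']}][img=350]{img['raw_url']}[/img][/url]"
            for img in images]
    descfile.write("[center]" + "".join(tags) + "[/center]")

The patch itself takes the simpler route of dropping the space from the per-image write call, as the diff below shows.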
--- src/trackers/COMMON.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 564afc77f..e5af7cf4c 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -85,7 +85,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des for each in range(len(images[:int(meta['screens'])])): web_url = images[each]['web_url'] raw_url = images[each]['raw_url'] - descfile.write(f"[url={web_url}][img={thumbsize}]{raw_url}[/img][/url] ") + descfile.write(f"[url={web_url}][img={thumbsize}]{raw_url}[/img][/url]") descfile.write("[/center]") if signature is not None: From 54cb771b8b454bcff59f27e128aaa991d7c30b69 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 30 Oct 2024 09:13:14 +1000 Subject: [PATCH 379/741] discord --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 396951237..420004264 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,7 @@ [![Create and publish a Docker image](https://github.com/Audionut/Upload-Assistant/actions/workflows/docker-image.yml/badge.svg?branch=master)](https://github.com/Audionut/Upload-Assistant/actions/workflows/docker-image.yml) +Discord support https://discord.gg/QHHAZu7e2A + # L4G's Upload Assistant A simple tool to take the work out of uploading. From aea424cce73fcd934c4b9a94f80a2daf36b98114 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 30 Oct 2024 09:40:55 +1000 Subject: [PATCH 380/741] Remove space from BHD images also Can cause alignment issues in some circumstances --- src/trackers/BHD.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index a3373f34b..0c34476b2 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -209,7 +209,7 @@ async def edit_desc(self, meta): for each in range(len(images[:int(meta['screens'])])): web_url = images[each]['web_url'] img_url = images[each]['img_url'] - desc.write(f"[url={web_url}][img width=350]{img_url}[/img][/url] ") + desc.write(f"[url={web_url}][img width=350]{img_url}[/img][/url]") desc.write("[/center]") desc.write(self.signature) desc.close() From 901a0aa798fb61a58352dd21de2065cdcbfcb5a3 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 30 Oct 2024 10:32:46 +1000 Subject: [PATCH 381/741] Remove spaces from images --- src/trackers/COMMON.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 4346e0558..d107539dd 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -100,7 +100,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des for img_index in range(min(multi_screens, len(images))): web_url = images[img_index]['web_url'] raw_url = images[img_index]['raw_url'] - image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url] " + image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url]" descfile.write(image_str) descfile.write("[/center]\n\n") else: @@ -121,7 +121,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des for img in meta[new_images_key]: web_url = img['web_url'] raw_url = img['raw_url'] - image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url] " + image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url]" descfile.write(image_str) descfile.write("[/center]\n\n") else: @@ -175,7 +175,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des for img in uploaded_images: web_url = img['web_url'] raw_url 
= img['raw_url'] - image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url] " + image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url]" descfile.write(image_str) descfile.write("[/center]\n\n") @@ -191,7 +191,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des for img_index in range(len(images[:int(meta['screens'])])): web_url = images[img_index]['web_url'] raw_url = images[img_index]['raw_url'] - descfile.write(f"[url={web_url}][img={self.config['DEFAULT'].get('thumbnail_size', '350')}]{raw_url}[/img][/url] ") + descfile.write(f"[url={web_url}][img={self.config['DEFAULT'].get('thumbnail_size', '350')}]{raw_url}[/img][/url]") descfile.write("[/center]") # Handle multiple files case @@ -228,7 +228,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des for img in meta[new_images_key]: web_url = img['web_url'] raw_url = img['raw_url'] - image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url] " + image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url]" descfile.write(image_str) char_count += len(image_str) descfile.write("[/center]\n\n") @@ -257,7 +257,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des for img_index in range(min(multi_screens, len(images))): web_url = images[img_index]['web_url'] raw_url = images[img_index]['raw_url'] - image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url] " + image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url]" descfile.write(image_str) char_count += len(image_str) descfile.write("[/center]\n\n") @@ -280,7 +280,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des for img in meta[new_images_key]: web_url = img['web_url'] raw_url = img['raw_url'] - image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url] " + image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url]" descfile.write(image_str) char_count += len(image_str) descfile.write("[/center]\n\n") @@ -312,7 +312,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des for img in uploaded_images: web_url = img['web_url'] raw_url = img['raw_url'] - image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url] " + image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url]" descfile.write(image_str) char_count += len(image_str) descfile.write("[/center]\n\n") @@ -345,7 +345,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des for img in meta[new_images_key]: web_url = img['web_url'] raw_url = img['raw_url'] - image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url] " + image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url]" descfile.write(image_str) char_count += len(image_str) descfile.write("[/center]\n\n") From ad9c7f6ad1c360811b3c086e8dd7c8acdc99c360 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 30 Oct 2024 12:40:37 +1000 Subject: [PATCH 382/741] PTP - discs double check dvd --- src/trackers/PTP.py | 100 +++++++++++++++++++++++++++++++++----------- 1 file changed, 76 insertions(+), 24 deletions(-) diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index b058ee773..f3f7774fd 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -651,11 +651,13 @@ async def edit_desc(self, meta): # Handle multiple discs case elif len(discs) > 1: + if 'retry_count' not in meta: + meta['retry_count'] = 0 for i, each in enumerate(discs): - new_screens = [] + new_images_key = f'new_images_disc_{i}' if each['type'] == "BDMV": - 
desc.write(f"[mediainfo]{each['summary']}[/mediainfo]\n\n") if i == 0: + desc.write(f"[mediainfo]{each['summary']}[/mediainfo]\n\n") base2ptp = self.convert_bbcode(base) if base2ptp.strip() != "": desc.write(base2ptp) @@ -665,24 +667,48 @@ async def edit_desc(self, meta): desc.write(f"[img]{raw_url}[/img]\n") desc.write("\n") else: - use_vs = meta.get('vapoursynth', False) - ds = multiprocessing.Process(target=prep.disc_screenshots, args=(f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), multi_screens)) - ds.start() - while ds.is_alive() is True: - await asyncio.sleep(1) - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") - if new_screens: - uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {}) - for img in uploaded_images[:multi_screens]: + desc.write(f"[mediainfo]{each['summary']}[/mediainfo]\n\n") + base2ptp = self.convert_bbcode(base) + if base2ptp.strip() != "": + desc.write(base2ptp) + desc.write("\n\n") + if new_images_key in meta and meta[new_images_key]: + for img in meta[new_images_key]: raw_url = img['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") - desc.write("\n") + desc.write("\n") + else: + meta['retry_count'] += 1 + meta[new_images_key] = [] + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + if not new_screens: + use_vs = meta.get('vapoursynth', False) + ds = multiprocessing.Process(target=prep.disc_screenshots, args=(f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), multi_screens)) + ds.start() + while ds.is_alive() is True: + await asyncio.sleep(1) + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + if new_screens: + uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]}) + for img in uploaded_images: + meta[new_images_key].append({ + 'img_url': img['img_url'], + 'raw_url': img['raw_url'], + 'web_url': img['web_url'] + }) + raw_url = img['raw_url'] + desc.write(f"[img]{raw_url}[/img]\n") + desc.write("\n") + + meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json" + with open(meta_filename, 'w') as f: + json.dump(meta, f, indent=4) elif each['type'] == "DVD": - desc.write(f"[b][size=3]{each['name']}:[/size][/b]\n") - desc.write(f"[mediainfo]{each['ifo_mi_full']}[/mediainfo]\n") - desc.write(f"[mediainfo]{each['vob_mi_full']}[/mediainfo]\n\n") if i == 0: + desc.write(f"[b][size=3]{each['name']}:[/size][/b]\n") + desc.write(f"[mediainfo]{each['ifo_mi_full']}[/mediainfo]\n") + desc.write(f"[mediainfo]{each['vob_mi_full']}[/mediainfo]\n\n") base2ptp = self.convert_bbcode(base) if base2ptp.strip() != "": desc.write(base2ptp) @@ -692,17 +718,43 @@ async def edit_desc(self, meta): desc.write(f"[img]{raw_url}[/img]\n") desc.write("\n") else: - ds = multiprocessing.Process(target=prep.dvd_screenshots, args=(meta, i, multi_screens)) - ds.start() - while ds.is_alive() is True: - await asyncio.sleep(1) - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['discs'][i]['name']}-*.png") - if new_screens: - uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {}) - for img in uploaded_images[:multi_screens]: + desc.write(f"[b][size=3]{each['name']}:[/size][/b]\n") + desc.write(f"[mediainfo]{each['ifo_mi_full']}[/mediainfo]\n") + desc.write(f"[mediainfo]{each['vob_mi_full']}[/mediainfo]\n\n") + base2ptp = 
self.convert_bbcode(base) + if base2ptp.strip() != "": + desc.write(base2ptp) + desc.write("\n\n") + if new_images_key in meta and meta[new_images_key]: + for img in meta[new_images_key]: raw_url = img['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") - desc.write("\n") + desc.write("\n") + else: + meta['retry_count'] += 1 + meta[new_images_key] = [] + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['discs'][i]['name']}-*.png") + if not new_screens: + ds = multiprocessing.Process(target=prep.dvd_screenshots, args=(meta, i, multi_screens)) + ds.start() + while ds.is_alive() is True: + await asyncio.sleep(1) + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['discs'][i]['name']}-*.png") + if new_screens: + uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]}) + for img in uploaded_images: + meta[new_images_key].append({ + 'img_url': img['img_url'], + 'raw_url': img['raw_url'], + 'web_url': img['web_url'] + }) + raw_url = img['raw_url'] + desc.write(f"[img]{raw_url}[/img]\n") + desc.write("\n") + + meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json" + with open(meta_filename, 'w') as f: + json.dump(meta, f, indent=4) # Handle single file case elif len(filelist) == 1: From 027fd0a402f531cb24e97cf32229d60192bc9928 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 30 Oct 2024 12:51:19 +1000 Subject: [PATCH 383/741] Don't duplicate information with first disc --- src/trackers/PTP.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index f3f7774fd..1ecafb3b5 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -657,11 +657,6 @@ async def edit_desc(self, meta): new_images_key = f'new_images_disc_{i}' if each['type'] == "BDMV": if i == 0: - desc.write(f"[mediainfo]{each['summary']}[/mediainfo]\n\n") - base2ptp = self.convert_bbcode(base) - if base2ptp.strip() != "": - desc.write(base2ptp) - desc.write("\n\n") for img_index in range(min(multi_screens, len(meta['image_list']))): raw_url = meta['image_list'][img_index]['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") @@ -706,13 +701,6 @@ async def edit_desc(self, meta): elif each['type'] == "DVD": if i == 0: - desc.write(f"[b][size=3]{each['name']}:[/size][/b]\n") - desc.write(f"[mediainfo]{each['ifo_mi_full']}[/mediainfo]\n") - desc.write(f"[mediainfo]{each['vob_mi_full']}[/mediainfo]\n\n") - base2ptp = self.convert_bbcode(base) - if base2ptp.strip() != "": - desc.write(base2ptp) - desc.write("\n\n") for img_index in range(min(multi_screens, len(meta['image_list']))): raw_url = meta['image_list'][img_index]['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") From 3d3c3ae68086e425a1ba330fee1d487468722a00 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 30 Oct 2024 13:04:12 +1000 Subject: [PATCH 384/741] Revert "Don't duplicate information with first disc" This reverts commit 027fd0a402f531cb24e97cf32229d60192bc9928. 
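
Across this back-and-forth, the piece that stays constant is the caching pattern: screenshots uploaded for disc i are remembered in meta under a per-disc key and persisted to meta.json, so a retried upload reuses the URLs instead of capturing and uploading again. A condensed sketch of that pattern, with upload_new_screens as a hypothetical stand-in for the prep.screenshots/upload_screens sequence:

    import json

    def write_disc_images(desc, meta, i, upload_new_screens, base_dir):
        # Reuse cached URLs for disc i when a previous run already
        # uploaded them; otherwise upload once, cache under the unique
        # per-disc key, and persist meta so the next retry skips this.
        key = f'new_images_disc_{i}'
        if not meta.get(key):
            meta[key] = upload_new_screens(i)  # hypothetical helper
            with open(f"{base_dir}/tmp/{meta['uuid']}/meta.json", 'w') as f:
                json.dump(meta, f, indent=4)
        for img in meta[key]:
            desc.write(f"[img]{img['raw_url']}[/img]\n")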
--- src/trackers/PTP.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 1ecafb3b5..f3f7774fd 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -657,6 +657,11 @@ async def edit_desc(self, meta): new_images_key = f'new_images_disc_{i}' if each['type'] == "BDMV": if i == 0: + desc.write(f"[mediainfo]{each['summary']}[/mediainfo]\n\n") + base2ptp = self.convert_bbcode(base) + if base2ptp.strip() != "": + desc.write(base2ptp) + desc.write("\n\n") for img_index in range(min(multi_screens, len(meta['image_list']))): raw_url = meta['image_list'][img_index]['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") @@ -701,6 +706,13 @@ async def edit_desc(self, meta): elif each['type'] == "DVD": if i == 0: + desc.write(f"[b][size=3]{each['name']}:[/size][/b]\n") + desc.write(f"[mediainfo]{each['ifo_mi_full']}[/mediainfo]\n") + desc.write(f"[mediainfo]{each['vob_mi_full']}[/mediainfo]\n\n") + base2ptp = self.convert_bbcode(base) + if base2ptp.strip() != "": + desc.write(base2ptp) + desc.write("\n\n") for img_index in range(min(multi_screens, len(meta['image_list']))): raw_url = meta['image_list'][img_index]['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") From 1f9bffe5e178d49939f51e92979c5b484d959516 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 30 Oct 2024 13:38:33 +1000 Subject: [PATCH 385/741] PTP multi files --- src/trackers/PTP.py | 38 +++++++++++++++++++++++++++++--------- 1 file changed, 29 insertions(+), 9 deletions(-) diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index f3f7774fd..34c14bc03 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -787,17 +787,37 @@ async def edit_desc(self, meta): f.write(mi_dump) mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/TEMP_PTP_MEDIAINFO.txt", "r", encoding="utf-8").read() desc.write(f"[mediainfo]{mi_dump}[/mediainfo]\n") - s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, multi_screens + 1, True, None)) - s.start() - while s.is_alive() is True: - await asyncio.sleep(3) - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") - if new_screens: - uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {}) - for img in uploaded_images[:multi_screens]: + new_images_key = f'new_images_file_{i}' + if new_images_key in meta and meta[new_images_key]: + for img in meta[new_images_key]: raw_url = img['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") - desc.write("\n") + desc.write("\n") + else: + meta['retry_count'] = meta.get('retry_count', 0) + 1 + meta[new_images_key] = [] + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + if not new_screens: + s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, multi_screens + 1, True, None)) + s.start() + while s.is_alive() is True: + await asyncio.sleep(3) + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + if new_screens: + uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]}) + for img in uploaded_images: + meta[new_images_key].append({ + 'img_url': img['img_url'], + 'raw_url': img['raw_url'], + 'web_url': img['web_url'] + }) + raw_url = img['raw_url'] + desc.write(f"[img]{raw_url}[/img]\n") + desc.write("\n") + + meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json" + with 
open(meta_filename, 'w') as f: + json.dump(meta, f, indent=4) async def get_AntiCsrfToken(self, meta): if not os.path.exists(f"{meta['base_dir']}/data/cookies"): From 8a479036fbf5df800b278794ba3abc1a6fd031b0 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 30 Oct 2024 18:32:53 +1000 Subject: [PATCH 386/741] qbittorrent hash searching feedback --- src/clients.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/clients.py b/src/clients.py index f4c2d45eb..f4646ae5c 100644 --- a/src/clients.py +++ b/src/clients.py @@ -77,10 +77,12 @@ async def find_existing_torrent(self, meta): torrenthash = None if torrent_storage_dir is not None and os.path.exists(torrent_storage_dir): if meta.get('torrenthash', None) is not None: + console.print("torrenthash:", torrenthash) valid, torrent_path = await self.is_valid_torrent(meta, f"{torrent_storage_dir}/{meta['torrenthash']}.torrent", meta['torrenthash'], torrent_client, print_err=True) if valid: torrenthash = meta['torrenthash'] elif meta.get('ext_torrenthash', None) is not None: + console.print("ext_torrenthash:", meta.get('ext_torrenthash')) valid, torrent_path = await self.is_valid_torrent(meta, f"{torrent_storage_dir}/{meta['ext_torrenthash']}.torrent", meta['ext_torrenthash'], torrent_client, print_err=True) if valid: torrenthash = meta['ext_torrenthash'] @@ -89,6 +91,7 @@ async def find_existing_torrent(self, meta): if not torrenthash: console.print("[bold yellow]No Valid .torrent found") if not torrenthash: + console.print("No torrenthash in find_existing") return None torrent_path = f"{torrent_storage_dir}/{torrenthash}.torrent" valid2, torrent_path = await self.is_valid_torrent(meta, torrent_path, torrenthash, torrent_client, print_err=False) @@ -98,6 +101,7 @@ async def find_existing_torrent(self, meta): return None async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client, print_err=False): + console.print("We've moved into torrent validation") valid = False wrong_file = False @@ -209,6 +213,7 @@ async def search_qbit_for_torrent(self, meta, client): try: qbt_client = qbittorrentapi.Client(host=client['qbit_url'], port=client['qbit_port'], username=client['qbit_user'], password=client['qbit_pass'], VERIFY_WEBUI_CERTIFICATE=client.get('VERIFY_WEBUI_CERTIFICATE', True)) qbt_client.auth_log_in() + console.print("We logged into qbittorrent") except qbittorrentapi.LoginFailed: console.print("[bold red]INCORRECT QBIT LOGIN CREDENTIALS") return None @@ -230,6 +235,7 @@ async def search_qbit_for_torrent(self, meta, client): for torrent in torrents: try: torrent_path = torrent.get('content_path', f"{torrent.save_path}{torrent.name}") + console.print("trying torrent_path", torrent_path) except AttributeError: if meta['debug']: console.print(torrent) @@ -238,14 +244,17 @@ async def search_qbit_for_torrent(self, meta, client): if remote_path_map: torrent_path = torrent_path.replace(remote_path, local_path) torrent_path = torrent_path.replace(os.sep, '/').replace('/', os.sep) + console.print("torrent path after remote mapping", torrent_path) if meta['is_disc'] in ("", None) and len(meta['filelist']) == 1: if torrent_path == meta['filelist'][0] and len(torrent.files) == len(meta['filelist']): + console.print("we've found an is_disc torrent path, now validating") valid, torrent_path = await self.is_valid_torrent(meta, f"{torrent_storage_dir}/{torrent.hash}.torrent", torrent.hash, 'qbit', print_err=False) if valid: console.print(f"[green]Found a matching .torrent with hash: [bold yellow]{torrent.hash}") return torrent.hash 
elif meta['path'] == torrent_path:
+                console.print("Now validating a path torrent path")
                 valid, torrent_path = await self.is_valid_torrent(meta, f"{torrent_storage_dir}/{torrent.hash}.torrent", torrent.hash, 'qbit', print_err=False)
                 if valid:
                     console.print(f"[green]Found a matching .torrent with hash: [bold yellow]{torrent.hash}")

From 6a55bfc5e16730219333a70cf3bcfddf6115f3c9 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Wed, 30 Oct 2024 19:01:22 +1000
Subject: [PATCH 387/741] Check for and remove duplicate paths in remote
 mapping

---
 src/clients.py | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/src/clients.py b/src/clients.py
index f4646ae5c..1d87e35be 100644
--- a/src/clients.py
+++ b/src/clients.py
@@ -242,9 +242,18 @@ async def search_qbit_for_torrent(self, meta, client):
                 console.print_exception()
                 continue
             if remote_path_map:
+                # Replace remote path with local path
                 torrent_path = torrent_path.replace(remote_path, local_path)
+                console.print("replaced paths:", torrent_path)
+
+                # Check if the local path was accidentally duplicated and correct it
+                if torrent_path.startswith(f"{local_path}/{local_path.split('/')[-1]}"):
+                    torrent_path = torrent_path.replace(f"{local_path}/{local_path.split('/')[-1]}", local_path)
+                    console.print("replaced torrent path after duplicate removal:", torrent_path)
+
+                # Standardize path separators for the local OS
                 torrent_path = torrent_path.replace(os.sep, '/').replace('/', os.sep)
-                console.print("torrent path after remote mapping", torrent_path)
+                console.print("Final torrent path after remote mapping:", torrent_path)

             if meta['is_disc'] in ("", None) and len(meta['filelist']) == 1:
                 if torrent_path == meta['filelist'][0] and len(torrent.files) == len(meta['filelist']):

From 198060b21a442b159764dd9288a4849fa117d14b Mon Sep 17 00:00:00 2001
From: Audionut
Date: Wed, 30 Oct 2024 19:43:59 +1000
Subject: [PATCH 388/741] Only replace remote paths if not already mapped

---
 src/clients.py | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/src/clients.py b/src/clients.py
index 1d87e35be..b8e1ec356 100644
--- a/src/clients.py
+++ b/src/clients.py
@@ -242,18 +242,19 @@ async def search_qbit_for_torrent(self, meta, client):
                 console.print_exception()
                 continue
             if remote_path_map:
-                # Replace remote path with local path
-                torrent_path = torrent_path.replace(remote_path, local_path)
-                console.print("replaced paths:", torrent_path)
+                # Replace remote path with local path only if not already mapped
+                if not torrent_path.startswith(local_path):
+                    torrent_path = torrent_path.replace(remote_path, local_path)
+                    console.print("Replaced paths round 2:", torrent_path)

                 # Check if the local path was accidentally duplicated and correct it
                 if torrent_path.startswith(f"{local_path}/{local_path.split('/')[-1]}"):
                     torrent_path = torrent_path.replace(f"{local_path}/{local_path.split('/')[-1]}", local_path)
-                    console.print("replaced torrent path after duplicate removal:", torrent_path)
+                    console.print("Corrected duplicate in torrent path round 2:", torrent_path)

                 # Standardize path separators for the local OS
                 torrent_path = torrent_path.replace(os.sep, '/').replace('/', os.sep)
-                console.print("Final torrent path after remote mapping:", torrent_path)
+                console.print("Final torrent path after remote mapping round 2:", torrent_path)

From 5607ac33d0d559ea0cd96f9ad6921afeb58c9a74 Mon Sep
17 00:00:00 2001 From: Audionut Date: Wed, 30 Oct 2024 19:46:11 +1000 Subject: [PATCH 389/741] More (all?) the required language codes --- src/trackers/COMMON.py | 61 ++++++++++++++++++++++++++++++++++++++---- 1 file changed, 56 insertions(+), 5 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index d107539dd..d4b30c43d 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -759,43 +759,91 @@ async def filter_dupes(self, dupes, meta): class MediaInfoParser: # Language to ISO country code mapping LANGUAGE_CODE_MAP = { + "afrikaans": "https://ptpimg.me/i9pt6k.png", + "albanian": "https://ptpimg.me/sfhik8.png", + "amharic": "https://ptpimg.me/zm816y.png", "arabic": "https://ptpimg.me/5g8i9u.png", + "armenian": "https://ptpimg.me/zm816y.png", + "azerbaijani": "https://ptpimg.me/h3rbe0.png", + "basque": "https://ptpimg.me/xj51b9.png", + "belarusian": "https://ptpimg.me/iushg1.png", + "bengali": "https://ptpimg.me/jq996n.png", + "bosnian": "https://ptpimg.me/19t9rv.png", "bulgarian": "https://ptpimg.me/un9dc6.png", + "catalan": "https://ptpimg.me/v4h5bf.png", "chinese": "https://ptpimg.me/ea3yv3.png", "croatian": "https://ptpimg.me/rxi533.png", "czech": "https://ptpimg.me/5m75n3.png", "danish": "https://ptpimg.me/m35c41.png", "dutch": "https://ptpimg.me/6nmwpx.png", + "dzongkha": "https://ptpimg.me/56e7y5.png", "english": "https://ptpimg.me/ine2fd.png", "estonian": "https://ptpimg.me/z25pmk.png", + "filipino": "https://ptpimg.me/9d3z9w.png", "finnish": "https://ptpimg.me/p4354c.png", "french": "https://ptpimg.me/m7mfoi.png", + "galician": "https://ptpimg.me/xj51b9.png", + "georgian": "https://ptpimg.me/pp412q.png", "german": "https://ptpimg.me/dw8d04.png", "greek": "https://ptpimg.me/px1u3e.png", + "gujarati": "https://ptpimg.me/d0l479.png", + "haitian creole": "https://ptpimg.me/f64wlp.png", "hebrew": "https://ptpimg.me/5jw1jp.png", + "hindi": "https://ptpimg.me/d0l479.png", "hungarian": "https://ptpimg.me/fr4aj7.png", "icelandic": "https://ptpimg.me/40o553.png", "indonesian": "https://ptpimg.me/f00c8u.png", + "irish": "https://ptpimg.me/71x9mk.png", "italian": "https://ptpimg.me/ao762a.png", "japanese": "https://ptpimg.me/o1amm3.png", + "kannada": "https://ptpimg.me/d0l479.png", + "kazakh": "https://ptpimg.me/tq1h8b.png", + "khmer": "https://ptpimg.me/0p1tli.png", "korean": "https://ptpimg.me/2tvwgn.png", + "kurdish": "https://ptpimg.me/g290wo.png", + "kyrgyz": "https://ptpimg.me/336unh.png", + "lao": "https://ptpimg.me/n3nan1.png", "latvian": "https://ptpimg.me/3x2y1b.png", "lithuanian": "https://ptpimg.me/b444z8.png", + "luxembourgish": "https://ptpimg.me/52x189.png", + "macedonian": "https://ptpimg.me/2g5lva.png", + "malagasy": "https://ptpimg.me/n5120r.png", + "malay": "https://ptpimg.me/06suor.png", + "malayalam": "https://ptpimg.me/d0l479.png", + "maltese": "https://ptpimg.me/ua46c2.png", + "maori": "https://ptpimg.me/2fw03g.png", + "marathi": "https://ptpimg.me/d0l479.png", + "mongolian": "https://ptpimg.me/z2h682.png", + "nepali": "https://ptpimg.me/5yd3sp.png", "norwegian bokmal": "https://ptpimg.me/1t11u4.png", + "pashto": "https://ptpimg.me/i9pt6k.png", + "persian": "https://ptpimg.me/i0y103.png", "polish": "https://ptpimg.me/m73uwa.png", "portuguese": "https://ptpimg.me/5j1a7q.png", + "punjabi": "https://ptpimg.me/d0l479.png", "romanian": "https://ptpimg.me/ux94x0.png", "russian": "https://ptpimg.me/v33j64.png", + "samoan": "https://ptpimg.me/8nt3zq.png", "serbian": "https://ptpimg.me/2139p2.png", "slovak": "https://ptpimg.me/70994n.png", 
"slovenian": "https://ptpimg.me/61yp81.png", + "somali": "https://ptpimg.me/320pa6.png", "spanish": "https://ptpimg.me/xj51b9.png", + "swahili": "https://ptpimg.me/d0l479.png", "swedish": "https://ptpimg.me/082090.png", + "tamil": "https://ptpimg.me/d0l479.png", + "telugu": "https://ptpimg.me/d0l479.png", "thai": "https://ptpimg.me/38ru43.png", "turkish": "https://ptpimg.me/g4jg39.png", "ukrainian": "https://ptpimg.me/d8fp6k.png", - "vietnamese": "https://ptpimg.me/qnuya2.png" - # Add more mappings as needed + "urdu": "https://ptpimg.me/z23gg5.png", + "uzbek": "https://ptpimg.me/89854s.png", + "vietnamese": "https://ptpimg.me/qnuya2.png", + "welsh": "https://ptpimg.me/a9w539.png", + "xhosa": "https://ptpimg.me/7teg09.png", + "yiddish": "https://ptpimg.me/5jw1jp.png", + "yoruba": "https://ptpimg.me/9l34il.png", + "zulu": "https://ptpimg.me/7teg09.png" } def parse_mediainfo(self, mediainfo_text): @@ -816,7 +864,7 @@ def parse_mediainfo(self, mediainfo_text): audio_fields = { 'codec', 'format', 'bit_rate', 'channels', 'title', 'language', 'format_profile', 'stream_size' } - text_fields = {'language'} + text_fields = {'title', 'language'} # Split MediaInfo by lines and process each line for line in mediainfo_text.splitlines(): @@ -853,9 +901,12 @@ def parse_mediainfo(self, mediainfo_text): # Convert language to country code or fallback to the text if not in map country_code = self.LANGUAGE_CODE_MAP.get(property_value.lower()) if country_code: + # If there is a country code, use it and append title if available current_track[property_name] = f"[img=20]{country_code}[/img]" - else: - current_track[property_name] = property_value # Fallback to text if no match + # if "title" in current_track and current_track["title"]: + # current_track[property_name] += f" {current_track['title']}" + elif property_value: # Only fallback if `property_value` exists but is not in LANGUAGE_CODE_MAP + current_track[property_name] = property_value # Append the last track to the parsed data if current_section and current_track: From 01f44ee0acffcc1e4e0d1b83d7beb2e56d39e951 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 30 Oct 2024 21:17:01 +1000 Subject: [PATCH 390/741] 5 is enough for purpose --- src/clients.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/clients.py b/src/clients.py index b8e1ec356..4dfa5a650 100644 --- a/src/clients.py +++ b/src/clients.py @@ -232,10 +232,12 @@ async def search_qbit_for_torrent(self, meta, client): console.print(f"Remote path: {remote_path}") torrents = qbt_client.torrents.info() - for torrent in torrents: + for i, torrent in enumerate(torrents): + if i >= 5: + break # Limit to only the first 5 torrent paths try: torrent_path = torrent.get('content_path', f"{torrent.save_path}{torrent.name}") - console.print("trying torrent_path", torrent_path) + console.print("Trying torrent_path:", torrent_path) except AttributeError: if meta['debug']: console.print(torrent) From 3587abd121c5aef6c1c4a116802f789df1eb1df1 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 30 Oct 2024 21:56:17 +1000 Subject: [PATCH 391/741] case-insensitive comparison Otherwise `d:\movies` never matches with `D:\Movies` --- src/clients.py | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/src/clients.py b/src/clients.py index 4dfa5a650..d0228fee7 100644 --- a/src/clients.py +++ b/src/clients.py @@ -213,7 +213,8 @@ async def search_qbit_for_torrent(self, meta, client): try: qbt_client = qbittorrentapi.Client(host=client['qbit_url'], 
port=client['qbit_port'], username=client['qbit_user'], password=client['qbit_pass'], VERIFY_WEBUI_CERTIFICATE=client.get('VERIFY_WEBUI_CERTIFICATE', True)) qbt_client.auth_log_in() - console.print("We logged into qbittorrent") + if meta['debug']: + console.print("We logged into qbittorrent") except qbittorrentapi.LoginFailed: console.print("[bold red]INCORRECT QBIT LOGIN CREDENTIALS") return None @@ -232,12 +233,10 @@ async def search_qbit_for_torrent(self, meta, client): console.print(f"Remote path: {remote_path}") torrents = qbt_client.torrents.info() - for i, torrent in enumerate(torrents): - if i >= 5: - break # Limit to only the first 5 torrent paths + for torrent in torrents: try: torrent_path = torrent.get('content_path', f"{torrent.save_path}{torrent.name}") - console.print("Trying torrent_path:", torrent_path) + # console.print("Trying torrent_paths") except AttributeError: if meta['debug']: console.print(torrent) @@ -247,26 +246,28 @@ async def search_qbit_for_torrent(self, meta, client): # Replace remote path with local path only if not already mapped if not torrent_path.startswith(local_path): torrent_path = torrent_path.replace(remote_path, local_path) - console.print("Replaced paths round 2:", torrent_path) + if meta['debug']: + console.print("Replaced paths round 2:", torrent_path) # Check if the local path was accidentally duplicated and correct it if torrent_path.startswith(f"{local_path}/{local_path.split('/')[-1]}"): torrent_path = torrent_path.replace(f"{local_path}/{local_path.split('/')[-1]}", local_path) - console.print("Corrected duplicate in torrent path round 2:", torrent_path) + if meta['debug']: + console.print("Corrected duplicate in torrent path round 2:", torrent_path) # Standardize path separators for the local OS torrent_path = torrent_path.replace(os.sep, '/').replace('/', os.sep) - console.print("Final torrent path after remote mapping round 2:", torrent_path) + if meta['debug']: + console.print("Final torrent path after remote mapping round 2:", torrent_path) if meta['is_disc'] in ("", None) and len(meta['filelist']) == 1: - if torrent_path == meta['filelist'][0] and len(torrent.files) == len(meta['filelist']): - console.print("we've found an is_disc torrent path, now validating") + if torrent_path.lower() == meta['filelist'][0].lower() and len(torrent.files) == len(meta['filelist']): valid, torrent_path = await self.is_valid_torrent(meta, f"{torrent_storage_dir}/{torrent.hash}.torrent", torrent.hash, 'qbit', print_err=False) if valid: console.print(f"[green]Found a matching .torrent with hash: [bold yellow]{torrent.hash}") return torrent.hash - elif meta['path'] == torrent_path: - console.print("Now validating a path torrent path") + + elif os.path.normpath(meta['path']).lower() == os.path.normpath(torrent_path).lower(): valid, torrent_path = await self.is_valid_torrent(meta, f"{torrent_storage_dir}/{torrent.hash}.torrent", torrent.hash, 'qbit', print_err=False) if valid: console.print(f"[green]Found a matching .torrent with hash: [bold yellow]{torrent.hash}") From e82094295723b87a4c3662bd6dc343134e6e683f Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 31 Oct 2024 09:24:49 +1000 Subject: [PATCH 392/741] Option to set the pack thumbnail size --- data/example-config.py | 17 +++++++++++------ src/trackers/COMMON.py | 19 ++++++++++--------- 2 files changed, 21 insertions(+), 15 deletions(-) diff --git a/data/example-config.py b/data/example-config.py index f90270009..e8c29de7b 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -25,24 
+25,29 @@ # Number of screenshots to capture "screens": "6", - # Number of screenshots to use for each disc/episode in packs + # Providing the option to change the size of the screenshot thumbnails where supported. + # Default is 350, i.e. [img=350] + "thumbnail_size": "350", + + # Number of screenshots to use for each (ALL) disc/episode when uploading packs to supported sites "multiScreens": "2", - # Description character count (including bbcode) cutoff for UNIT3D sites when season packs only + # When uploading packs, you can specify a different screenshot thumbnail size, default 300. + "pack_thumb_size": "300", + + # Description character count (including bbcode) cutoff for UNIT3D sites when **season packs only** # After hitting this limit, only filenames and screenshots will be used for any ADDITIONAL files # still to be added to the description. You can set this small like 50, to only ever # print filenames and screenshots for each file, no mediainfo will be printed. # UNIT3D sites have a hard character limit for descriptions. A little over 17000 - # worked fine in a forum post at BLU. If you are at 1 < charLimit, the next full description will be added. + # worked fine in a forum post at BLU. If the description is still below charLimit, the next full + # description will be added before respecting this cutoff. "charLimit": "14000", # How many files in a season pack will be added to the description before using an additional spoiler tag # Any other files past this limit will be hidden/added all within a spoiler tag. "fileLimit": "5", - # Providing the option to change the size of the thumbnails where supported, default is 350 - "thumbnail_size": "350", - # Providing the option to add a header, in bbcode, above the screenshot section where supported # "screenshot_header": "[center] SCREENSHOTS [/center]" diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index d4b30c43d..949268a19 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -46,6 +46,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des multi_screens = int(self.config['DEFAULT'].get('multiScreens', 2)) char_limit = int(self.config['DEFAULT'].get('charLimit', 16000)) file_limit = int(self.config['DEFAULT'].get('fileLimit', 5)) + thumb_size = int(self.config['DEFAULT'].get('pack_thumb_size', '300')) with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]DESCRIPTION.txt", 'w', encoding='utf8') as descfile: if desc_header: descfile.write(desc_header) @@ -72,7 +73,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des for img_index in range(len(images[:int(meta['screens'])])): web_url = images[img_index]['web_url'] raw_url = images[img_index]['raw_url'] - descfile.write(f"[url={web_url}][img={self.config['DEFAULT'].get('thumbnail_size', '350')}]{raw_url}[/img][/url] ") + descfile.write(f"[url={web_url}][img={self.config['DEFAULT'].get('thumbnail_size', '350')}]{raw_url}[/img][/url]") descfile.write("[/center]") # Handle multiple discs case @@ -100,7 +101,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des for img_index in range(min(multi_screens, len(images))): web_url = images[img_index]['web_url'] raw_url = images[img_index]['raw_url'] - image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url]" + image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" descfile.write(image_str) descfile.write("[/center]\n\n") else: @@ -121,7 +122,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature,
comparison=False, des for img in meta[new_images_key]: web_url = img['web_url'] raw_url = img['raw_url'] - image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url]" + image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" descfile.write(image_str) descfile.write("[/center]\n\n") else: @@ -175,7 +176,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des for img in uploaded_images: web_url = img['web_url'] raw_url = img['raw_url'] - image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url]" + image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" descfile.write(image_str) descfile.write("[/center]\n\n") @@ -228,7 +229,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des for img in meta[new_images_key]: web_url = img['web_url'] raw_url = img['raw_url'] - image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url]" + image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" descfile.write(image_str) char_count += len(image_str) descfile.write("[/center]\n\n") @@ -257,7 +258,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des for img_index in range(min(multi_screens, len(images))): web_url = images[img_index]['web_url'] raw_url = images[img_index]['raw_url'] - image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url]" + image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" descfile.write(image_str) char_count += len(image_str) descfile.write("[/center]\n\n") @@ -280,7 +281,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des for img in meta[new_images_key]: web_url = img['web_url'] raw_url = img['raw_url'] - image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url]" + image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" descfile.write(image_str) char_count += len(image_str) descfile.write("[/center]\n\n") @@ -312,7 +313,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des for img in uploaded_images: web_url = img['web_url'] raw_url = img['raw_url'] - image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url]" + image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" descfile.write(image_str) char_count += len(image_str) descfile.write("[/center]\n\n") @@ -345,7 +346,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des for img in meta[new_images_key]: web_url = img['web_url'] raw_url = img['raw_url'] - image_str = f"[url={web_url}][img=300]{raw_url}[/img][/url]" + image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" descfile.write(image_str) char_count += len(image_str) descfile.write("[/center]\n\n") From d72fb74ab2d2113061e830c3f11c56b1c6a37002 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 31 Oct 2024 09:49:27 +1000 Subject: [PATCH 393/741] Load plugins for VS --- src/vs.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/vs.py b/src/vs.py index 7fb918cfe..1585a1c13 100644 --- a/src/vs.py +++ b/src/vs.py @@ -6,6 +6,10 @@ core = vs.core +core.std.LoadPlugin(path="/usr/local/lib/vapoursynth/libffms2.so") +core.std.LoadPlugin(path="/usr/local/lib/vapoursynth/libsub.so") +core.std.LoadPlugin(path="/usr/local/lib/vapoursynth/libimwri.so") + def CustomFrameInfo(clip, text): def FrameProps(n, f, clip): From e6b3223583761f79f47c0e8d46ea843643aae9da Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 31 Oct 2024 14:27:42 +1000 Subject: [PATCH 394/741] VS plugins should have been 
commented --- src/vs.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/vs.py b/src/vs.py index 1585a1c13..4209464f6 100644 --- a/src/vs.py +++ b/src/vs.py @@ -6,9 +6,9 @@ core = vs.core -core.std.LoadPlugin(path="/usr/local/lib/vapoursynth/libffms2.so") -core.std.LoadPlugin(path="/usr/local/lib/vapoursynth/libsub.so") -core.std.LoadPlugin(path="/usr/local/lib/vapoursynth/libimwri.so") +# core.std.LoadPlugin(path="/usr/local/lib/vapoursynth/libffms2.so") +# core.std.LoadPlugin(path="/usr/local/lib/vapoursynth/libsub.so") +# core.std.LoadPlugin(path="/usr/local/lib/vapoursynth/libimwri.so") def CustomFrameInfo(clip, text): From 81642dad4f2910c09913dd2d3da0672bc2aab7e7 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 31 Oct 2024 14:32:34 +1000 Subject: [PATCH 395/741] Remove auto docker --- .github/workflows/docker-image.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index 4fe76190a..f6facbeb7 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -4,8 +4,6 @@ on: push: branches: - master - - develop - - descriptions workflow_dispatch: env: From 03d753307d0d68f0a29788985c18416e52b0a755 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 31 Oct 2024 16:06:24 +1000 Subject: [PATCH 396/741] Pulled description fixes Save to description file and don't overwrite --- src/prep.py | 20 ++++++++++++++------ src/trackers/COMMON.py | 6 +++--- src/trackers/PTP.py | 4 ++-- 3 files changed, 19 insertions(+), 11 deletions(-) diff --git a/src/prep.py b/src/prep.py index 5822c7def..509a5dd98 100644 --- a/src/prep.py +++ b/src/prep.py @@ -184,7 +184,9 @@ async def update_meta_with_unit3d_data(self, meta, tracker_data, tracker_name): if mal not in [None, '0']: meta['mal'] = mal if desc not in [None, '0', '']: - meta[f'{tracker_name.lower()}_desc'] = desc + meta['description'] = desc + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: + description.write(desc + "\n") if category.upper() in ['MOVIE', 'TV SHOW', 'FANRES']: meta['category'] = 'TV' if category.upper() == 'TV SHOW' else category.upper() @@ -253,7 +255,8 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met # Retrieve PTP description and image list ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(ptp_torrent_id, meta, meta.get('is_disc', False)) meta['description'] = ptp_desc - meta['skip_gen_desc'] = True + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: + description.write(ptp_desc + "\n") if not meta['is_disc']: if not meta.get('image_list'): # Only handle images if image_list is not already populated @@ -269,7 +272,9 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met found_match = True ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(ptp_torrent_id, meta, meta.get('is_disc', False)) meta['description'] = ptp_desc - meta['skip_gen_desc'] = True + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: + description.write(ptp_desc + "\n") + meta['saved_description'] = True if not meta['is_disc']: if not meta.get('image_list'): # Only handle images if image_list is not already populated @@ -291,7 +296,9 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met meta['skipit'] = True 
ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(meta['ptp'], meta, meta.get('is_disc', False)) meta['description'] = ptp_desc - meta['skip_gen_desc'] = True + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: + description.write(ptp_desc + "\n") + meta['saved_description'] = True if not meta['is_disc']: if not meta.get('image_list'): # Only handle images if image_list is not already populated valid_images = await self.check_images_concurrently(ptp_imagelist, meta) @@ -366,6 +373,7 @@ async def gather_prep(self, meta, mode): base_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) meta['isdir'] = os.path.isdir(meta['path']) base_dir = meta['base_dir'] + meta['saved_description'] = False if meta.get('uuid', None) is None: folder_id = os.path.basename(meta['path']) @@ -561,7 +569,6 @@ async def gather_prep(self, meta, mode): meta, match = await self.update_metadata_from_tracker('PTP', ptp, meta, search_term, search_file_folder) if match: found_match = True - if not meta['is_disc']: if "BLU" in default_trackers and not found_match: if str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": @@ -729,7 +736,8 @@ async def gather_prep(self, meta, mode): meta['stream'] = self.stream_optimized(meta['stream']) meta.get('anon', False) meta['anon'] = self.is_anon(meta['anon']) - meta = await self.gen_desc(meta) + if meta['saved_description'] is False: + meta = await self.gen_desc(meta) return meta """ diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 949268a19..96337f764 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -565,7 +565,7 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, meta, id=N if edited_description: description = edited_description.strip() meta['description'] = description - meta['skip_gen_desc'] = True + meta['saved_description'] = True console.print(f"Final description after editing: {description}", markup=False) elif edit_choice.lower() == 'd': description = None @@ -573,10 +573,10 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, meta, id=N else: console.print("[green]Keeping the original description.[/green]") meta['description'] = description - meta['skip_gen_desc'] = True + meta['saved_description'] = True else: meta['description'] = description - meta['skip_gen_desc'] = True + meta['saved_description'] = True return tmdb, imdb, tvdb, mal, description, category, infohash, imagelist, file_name diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 34c14bc03..f34d6d16e 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -219,7 +219,7 @@ async def get_ptp_description(self, ptp_torrent_id, meta, is_disc): if edited_description: desc = edited_description.strip() meta['description'] = desc - meta['skip_gen_desc'] = True + meta['saved_description'] = True console.print(f"[green]Final description after editing:[/green] {desc}") elif edit_choice.lower() == 'd': desc = None @@ -227,7 +227,7 @@ async def get_ptp_description(self, ptp_torrent_id, meta, is_disc): else: console.print("[green]Keeping the original description.[/green]") meta['description'] = ptp_desc - meta['skip_gen_desc'] = True + meta['saved_description'] = True return desc, imagelist From 9caf8b46b6badd31d12fd4f755da9757c7c97d43 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 31 Oct 2024 16:16:14 +1000 Subject: [PATCH 397/741] Fix description write error when discarding description --- src/prep.py | 6 
+++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/prep.py b/src/prep.py index 509a5dd98..b38ebb487 100644 --- a/src/prep.py +++ b/src/prep.py @@ -186,7 +186,7 @@ async def update_meta_with_unit3d_data(self, meta, tracker_data, tracker_name): if desc not in [None, '0', '']: meta['description'] = desc with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: - description.write(desc + "\n") + description.write((desc or "") + "\n") if category.upper() in ['MOVIE', 'TV SHOW', 'FANRES']: meta['category'] = 'TV' if category.upper() == 'TV SHOW' else category.upper() @@ -256,7 +256,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(ptp_torrent_id, meta, meta.get('is_disc', False)) meta['description'] = ptp_desc with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: - description.write(ptp_desc + "\n") + description.write((ptp_desc or "") + "\n") if not meta['is_disc']: if not meta.get('image_list'): # Only handle images if image_list is not already populated @@ -273,7 +273,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(ptp_torrent_id, meta, meta.get('is_disc', False)) meta['description'] = ptp_desc with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: - description.write(ptp_desc + "\n") + description.write((ptp_desc or "") + "\n") meta['saved_description'] = True if not meta['is_disc']: From c7b3d9ad9e2f859ed53ab7fb3a511796fb4fe1af Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 31 Oct 2024 16:30:58 +1000 Subject: [PATCH 398/741] Fix description centering when fileLimit = 1 https://github.com/Audionut/Upload-Assistant/issues/111#issuecomment-2449069891 --- src/trackers/COMMON.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 96337f764..9037a39fe 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -325,8 +325,8 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des elif i == file_limit and not other_files_spoiler_open: # Open "Other files" spoiler for the fifth file - descfile.write("[spoiler=Other files]\n") - char_count += len("[spoiler=Other files]\n") + descfile.write("[center][spoiler=Other files]\n") + char_count += len("[center][spoiler=Other files]\n") other_files_spoiler_open = True if i >= file_limit and char_count < max_char_limit: @@ -336,8 +336,8 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des filename = os.path.splitext(os.path.basename(file.strip()))[0] - descfile.write(f"[spoiler={filename}]{formatted_bbcode}[/spoiler]\n\n") - char_count += len(f"[spoiler={filename}]{formatted_bbcode}[/spoiler]\n\n") + descfile.write(f"[center][spoiler={filename}]{formatted_bbcode}[/spoiler][/center]\n\n") + char_count += len(f"[center][spoiler={filename}]{formatted_bbcode}[/spoiler][/center]\n\n") if new_images_key in meta and meta[new_images_key]: console.print(f"[yellow]Found needed image URLs for {new_images_key}") From 8874a56068228d4cfbc604ea512ae0468b3cbc26 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 31 Oct 2024 16:41:02 +1000 Subject: [PATCH 399/741] Add single file mediainfo in debug mode --- 
src/trackers/COMMON.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 9037a39fe..5d59c238c 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -187,6 +187,15 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des # Handle single file case if len(filelist) == 1: + if meta['debug']: + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read() + if mi_dump: + parsed_mediainfo = self.parser.parse_mediainfo(mi_dump) + formatted_bbcode = self.parser.format_bbcode(parsed_mediainfo) + for i, file in enumerate(filelist): + if i == 0: + filename = os.path.splitext(os.path.basename(file.strip()))[0] + descfile.write(f"[center][spoiler={filename}]{formatted_bbcode}[/spoiler]\n") images = meta['image_list'] descfile.write("[center]") for img_index in range(len(images[:int(meta['screens'])])): From 0a990c778ca8bb7bc3b1bb75e5548caa5344c8de Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 31 Oct 2024 16:50:56 +1000 Subject: [PATCH 400/741] Image fixes --- src/trackers/COMMON.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 5d59c238c..9c840af8a 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -818,14 +818,14 @@ class MediaInfoParser: "luxembourgish": "https://ptpimg.me/52x189.png", "macedonian": "https://ptpimg.me/2g5lva.png", "malagasy": "https://ptpimg.me/n5120r.png", - "malay": "https://ptpimg.me/06suor.png", + "malay": "https://ptpimg.me/02e17w.png", "malayalam": "https://ptpimg.me/d0l479.png", "maltese": "https://ptpimg.me/ua46c2.png", "maori": "https://ptpimg.me/2fw03g.png", "marathi": "https://ptpimg.me/d0l479.png", "mongolian": "https://ptpimg.me/z2h682.png", "nepali": "https://ptpimg.me/5yd3sp.png", - "norwegian bokmal": "https://ptpimg.me/1t11u4.png", + "norwegian": "https://ptpimg.me/1t11u4.png", "pashto": "https://ptpimg.me/i9pt6k.png", "persian": "https://ptpimg.me/i0y103.png", "polish": "https://ptpimg.me/m73uwa.png", From 3be28187364fedddb10769e95df7b8125f01be1f Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 31 Oct 2024 20:41:57 +1000 Subject: [PATCH 401/741] Use exact match from text title field if present Will allow differentiating between different dialects.
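The change below gives an exact match on the subtitle track's descriptive title field priority over the plain language field. A minimal sketch of that lookup order, assuming the trimmed two-entry map shown here (resolve_flag is an illustrative helper, not the actual MediaInfoParser API):

# Minimal sketch of the title-first flag lookup; resolve_flag and this
# trimmed-down map are illustrative, not the real MediaInfoParser API.
LANGUAGE_CODE_MAP = {
    "french": "https://ptpimg.me/m7mfoi.png",
    "french canadian": "https://ptpimg.me/ei4s6u.png",
}

def resolve_flag(title, language):
    # The descriptive title is checked first, so a track titled
    # "French Canadian" gets the Canadian flag even though its
    # language field only says "French".
    for value in (title, language):
        if value and value.lower() in LANGUAGE_CODE_MAP:
            return f"[img=20]{LANGUAGE_CODE_MAP[value.lower()]}[/img]"
    return language  # fall back to the raw language text

print(resolve_flag("French Canadian", "French"))  # Canadian flag bbcode
print(resolve_flag(None, "Klingon"))              # "Klingon", no match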
--- src/trackers/COMMON.py | 58 ++++++++++++++++++++++++++++++++---------- 1 file changed, 44 insertions(+), 14 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 9c840af8a..62274ef9a 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -791,6 +791,7 @@ class MediaInfoParser: "estonian": "https://ptpimg.me/z25pmk.png", "filipino": "https://ptpimg.me/9d3z9w.png", "finnish": "https://ptpimg.me/p4354c.png", + "french canadian": "https://ptpimg.me/ei4s6u.png", "french": "https://ptpimg.me/m7mfoi.png", "galician": "https://ptpimg.me/xj51b9.png", "georgian": "https://ptpimg.me/pp412q.png", @@ -874,7 +875,7 @@ def parse_mediainfo(self, mediainfo_text): audio_fields = { 'codec', 'format', 'bit_rate', 'channels', 'title', 'language', 'format_profile', 'stream_size' } - text_fields = {'title', 'language'} + # text_fields = {'title', 'language'} # Split MediaInfo by lines and process each line for line in mediainfo_text.splitlines(): @@ -889,7 +890,9 @@ def parse_mediainfo(self, mediainfo_text): parsed_data[current_section].append(current_track) else: parsed_data[current_section] = current_track - current_track = {} + # Debug output for finalizing the current track data + # print(f"Final processed track data for section '{current_section}': {current_track}") + current_track = {} # Reset current track # Update the current section current_section = section_match.group(1).lower() @@ -907,23 +910,50 @@ def parse_mediainfo(self, mediainfo_text): current_track[property_name] = property_value elif current_section == "audio" and property_name in audio_fields: current_track[property_name] = property_value - elif current_section == "text" and property_name in text_fields: - # Convert language to country code or fallback to the text if not in map - country_code = self.LANGUAGE_CODE_MAP.get(property_value.lower()) - if country_code: - # If there is a country code, use it and append title if available - current_track[property_name] = f"[img=20]{country_code}[/img]" - # if "title" in current_track and current_track["title"]: - # current_track[property_name] += f" {current_track['title']}" - elif property_value: # Only fallback if `property_value` exists but is not in LANGUAGE_CODE_MAP - current_track[property_name] = property_value - - # Append the last track to the parsed data + elif current_section == "text": + # Processing specific properties for text + # Process title field + if property_name == "title" and "title" not in current_track: + title_lower = property_value.lower() + # print(f"\nProcessing Title: '{property_value}'") # Debugging output + + # Store the title as-is since it should remain descriptive + current_track["title"] = property_value + # print(f"Stored title: '{property_value}'") + + # If there's an exact match in LANGUAGE_CODE_MAP, add country code to language field + if title_lower in self.LANGUAGE_CODE_MAP: + country_code = self.LANGUAGE_CODE_MAP[title_lower] + current_track["language"] = f"[img=20]{country_code}[/img]" + # print(f"Exact match found for title '{title_lower}' with country code: {country_code}") + + # Process language field only if it hasn't already been set + elif property_name == "language" and "language" not in current_track: + language_lower = property_value.lower() + # print(f"\nProcessing Language: '{property_value}'") # Debugging output + + if language_lower in self.LANGUAGE_CODE_MAP: + country_code = self.LANGUAGE_CODE_MAP[language_lower] + current_track["language"] = f"[img=20]{country_code}[/img]" + # print(f"Matched 
language '{language_lower}' to country code: {country_code}") + else: + # If no match in LANGUAGE_CODE_MAP, store language as-is + current_track["language"] = property_value + # print(f"No match found for language '{property_value}', stored as-is.") + + # Append the last track to the parsed data if it exists if current_section and current_track: if current_section in ["video", "audio", "text"]: parsed_data[current_section].append(current_track) else: parsed_data[current_section] = current_track + # Final debug output for the last track data + # print(f"Final processed track data for last section '{current_section}': {current_track}") + + # Debug output for the complete parsed_data + # print("\nComplete Parsed Data:") + for section, data in parsed_data.items(): + print(f"{section}: {data}") return parsed_data From 21b3e8b776e9948867e550b30a8c1b94c2457bfc Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 31 Oct 2024 22:22:34 +1000 Subject: [PATCH 402/741] Clean bot image --- src/bbcode.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/bbcode.py b/src/bbcode.py index 8702befc0..485f678f6 100644 --- a/src/bbcode.py +++ b/src/bbcode.py @@ -208,6 +208,7 @@ def clean_unit3d_description(self, desc, site): bot_image_urls = [ "https://blutopia.xyz/favicon.ico", # Example bot image URL "https://i.ibb.co/2NVWb0c/uploadrr.webp", + "https://blutopia/favicon.ico", # Add any other known bot image URLs here ] imagelist = [ From 512601d8bfd451b466282d2ae59b007e1390a5ee Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 31 Oct 2024 23:09:05 +1000 Subject: [PATCH 403/741] Ensure description.txt is present --- src/prep.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/prep.py b/src/prep.py index b38ebb487..8d7e5f773 100644 --- a/src/prep.py +++ b/src/prep.py @@ -492,6 +492,10 @@ async def gather_prep(self, meta, mode): # Debugging information after population # console.print(f"Debug: meta['filelist'] after population: {meta.get('filelist', 'Not Set')}") + description_text = meta.get('description') if meta.get('description') else "" + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: + description.write(description_text) + if not meta.get('image_list'): # Reuse information from trackers with fallback found_match = False From 0d853490470efaba0b505a734a2858f742ccc161 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 1 Nov 2024 00:33:38 +1000 Subject: [PATCH 404/741] Refactor filelist description handling This should be far more robust in handling varying config settings, but it's not well tested. 
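Before the diff, a rough shape of the two-pass structure this refactor moves to, with hypothetical stand-ins (ensure_screens, write_entry) for the real screenshot-upload and BBCode-writing logic in unit3d_edit_desc:

import io

def ensure_screens(file, index, meta):
    # Hypothetical stand-in: pretend screenshots were generated and uploaded.
    return [{'web_url': f'https://img.example/{index}', 'raw_url': f'https://img.example/{index}.png'}]

def write_entry(descfile, file, meta, index):
    # Hypothetical stand-in for the per-file filename/MediaInfo/image writes.
    descfile.write(f"[center]{file}[/center]\n")

def build_pack_description(filelist, meta, descfile, file_limit=5):
    # Pass 1: create/upload screenshots for every file past the first,
    # so the description pass never stalls on image generation.
    for i, file in enumerate(filelist):
        if i > 0 and not meta.get(f'new_images_file_{i}'):
            meta[f'new_images_file_{i}'] = ensure_screens(file, i, meta)
    # Pass 2: write the description, spoilering files past file_limit.
    spoiler_open = False
    for i, file in enumerate(filelist):
        if i >= file_limit and not spoiler_open:
            descfile.write("[center][spoiler=Other files]\n")
            spoiler_open = True
        write_entry(descfile, file, meta, i)
    if spoiler_open:
        descfile.write("[/spoiler][/center]\n")

buf = io.StringIO()
build_pack_description([f"E{n:02d}.mkv" for n in range(1, 8)], {}, buf)
print(buf.getvalue())

Separating the passes also means all uploaded image URLs are saved into meta (and meta.json) before any BBCode is written.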
--- src/trackers/COMMON.py | 206 ++++++++++++++--------------------------- 1 file changed, 72 insertions(+), 134 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 62274ef9a..fec21f493 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -210,158 +210,94 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des max_char_limit = char_limit # Character limit other_files_spoiler_open = False # Track if "Other files" spoiler has been opened - # Process each file - if len(filelist) > 1: - for i, file in enumerate(filelist): - # Check if character limit is reached - if char_count >= max_char_limit: - # Open the "Other files" spoiler if it's the first time we're exceeding the limit - if not other_files_spoiler_open and i >= 5: - descfile.write("[center][spoiler=Other files]\n") - char_count += len("[center][spoiler=Other files]\n") - other_files_spoiler_open = True - - # Extract filename directly from the file path - filename = os.path.splitext(os.path.basename(file.strip()))[0] - - # Write filename in BBCode format - descfile.write(f"[center]{filename}\n[/center]\n") - char_count += len(f"[center]{filename}\n[/center]\n") - - # Check and write screenshots if they exist - new_images_key = f'new_images_file_{i}' - if new_images_key in meta and meta[new_images_key]: - if meta['debug']: - console.print(f"[yellow]Found needed image URLs for {new_images_key}") - descfile.write("[center]") - char_count += len("[center]") - for img in meta[new_images_key]: - web_url = img['web_url'] - raw_url = img['raw_url'] - image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" - descfile.write(image_str) - char_count += len(image_str) - descfile.write("[/center]\n\n") - char_count += len("[/center]\n\n") - - continue # Skip full MediaInfo and spoilers for remaining files - + # First Pass: Create and Upload Images for Each File + for i, file in enumerate(filelist): + if i > 0: new_images_key = f'new_images_file_{i}' + if new_images_key not in meta or not meta[new_images_key]: + # Proceed with image generation if not already present + meta[new_images_key] = [] + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") - if i < file_limit: - if i == 0: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read() - if mi_dump: - parsed_mediainfo = self.parser.parse_mediainfo(mi_dump) - formatted_bbcode = self.parser.format_bbcode(parsed_mediainfo) - filename = os.path.splitext(os.path.basename(file.strip()))[0] - - descfile.write(f"[center]{filename}\n[/center]\n") - char_count += len(f"[center]{filename}\n[/center]\n") - - images = meta['image_list'] - descfile.write("[center]") - char_count += len("[center]") - if meta['debug']: - console.print("[yellow]Using original uploaded images for first file") - for img_index in range(min(multi_screens, len(images))): - web_url = images[img_index]['web_url'] - raw_url = images[img_index]['raw_url'] - image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" - descfile.write(image_str) - char_count += len(image_str) - descfile.write("[/center]\n\n") - char_count += len("[/center]\n\n") - else: - mi_dump = MediaInfo.parse(file, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) - parsed_mediainfo = self.parser.parse_mediainfo(mi_dump) - formatted_bbcode = self.parser.format_bbcode(parsed_mediainfo) - - filename = os.path.splitext(os.path.basename(file.strip()))[0] - - 
descfile.write(f"[center][spoiler={filename}]{formatted_bbcode}[/spoiler]\n") - char_count += len(f"[center][spoiler={filename}]{formatted_bbcode}[/spoiler]\n") - - if new_images_key in meta and meta[new_images_key]: - if meta['debug']: - console.print(f"[yellow]Found needed image URLs for {new_images_key}") - descfile.write("[center]") - char_count += len("[center]") - for img in meta[new_images_key]: - web_url = img['web_url'] - raw_url = img['raw_url'] - image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" - descfile.write(image_str) - char_count += len(image_str) - descfile.write("[/center]\n\n") - char_count += len("[/center]\n\n") - else: - meta['retry_count'] = meta.get('retry_count', 0) + 1 - meta[new_images_key] = [] + # If no screenshots exist, create them + if not new_screens: + if meta['debug']: + console.print(f"[yellow]No existing screenshots for {new_images_key}; generating new ones.") + s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, multi_screens + 1, True, None)) + s.start() + while s.is_alive(): + await asyncio.sleep(1) - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") - if not new_screens: - if meta['debug']: - console.print(f"[yellow]No new screens for {new_images_key}; creating new screenshots") - s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, multi_screens + 1, True, None)) - s.start() - while s.is_alive(): - await asyncio.sleep(1) - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") - if new_screens: - uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]}) - for img in uploaded_images: - meta[new_images_key].append({ - 'img_url': img['img_url'], - 'raw_url': img['raw_url'], - 'web_url': img['web_url'] - }) - descfile.write("[center]") - char_count += len("[center]") - for img in uploaded_images: - web_url = img['web_url'] - raw_url = img['raw_url'] - image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" - descfile.write(image_str) - char_count += len(image_str) - descfile.write("[/center]\n\n") - char_count += len("[/center]\n\n") + # Upload generated screenshots + if new_screens: + uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]}) + meta[new_images_key] = [] + for img in uploaded_images: + meta[new_images_key].append({ + 'img_url': img['img_url'], + 'raw_url': img['raw_url'], + 'web_url': img['web_url'] + }) + + # Save updated meta + meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json" + with open(meta_filename, 'w') as f: + json.dump(meta, f, indent=4) - meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json" - with open(meta_filename, 'w') as f: - json.dump(meta, f, indent=4) + # Second Pass: Process MediaInfo and Write Descriptions + if len(filelist) > 1: + for i, file in enumerate(filelist): + # Extract filename directly from the file path + filename = os.path.splitext(os.path.basename(file.strip()))[0] - elif i == file_limit and not other_files_spoiler_open: - # Open "Other files" spoiler for the fifth file - descfile.write("[center][spoiler=Other files]\n") - char_count += len("[center][spoiler=Other files]\n") - other_files_spoiler_open = True + # If we are beyond the 
file limit, add all further files in a spoiler + if i >= file_limit: + if not other_files_spoiler_open: + descfile.write("[center][spoiler=Other files]\n") + char_count += len("[center][spoiler=Other files]\n") + other_files_spoiler_open = True - if i >= file_limit and char_count < max_char_limit: + # Write filename in BBCode format with MediaInfo in spoiler if not the first file + if i > 0 and char_count < max_char_limit: mi_dump = MediaInfo.parse(file, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) parsed_mediainfo = self.parser.parse_mediainfo(mi_dump) formatted_bbcode = self.parser.format_bbcode(parsed_mediainfo) + descfile.write(f"[center][spoiler={filename}]{formatted_bbcode}[/spoiler][/center]\n") + char_count += len(f"[center][spoiler={filename}]{formatted_bbcode}[/spoiler][/center]\n") + else: + descfile.write(f"[center]{filename}\n[/center]\n") + char_count += len(f"[center]{filename}\n[/center]\n") - filename = os.path.splitext(os.path.basename(file.strip()))[0] - - descfile.write(f"[center][spoiler={filename}]{formatted_bbcode}[/spoiler][/center]\n\n") - char_count += len(f"[center][spoiler={filename}]{formatted_bbcode}[/spoiler][/center]\n\n") - - if new_images_key in meta and meta[new_images_key]: - console.print(f"[yellow]Found needed image URLs for {new_images_key}") + # Write images if they exist + new_images_key = f'new_images_file_{i}' + if i == 0: # For the first file, use 'image_list' key + images = meta['image_list'] + if images: descfile.write("[center]") char_count += len("[center]") - for img in meta[new_images_key]: - web_url = img['web_url'] - raw_url = img['raw_url'] + if file_limit == 1: + multi_screens = len(images) # Use all images if only one file + for img_index in range(min(multi_screens, len(images))): + web_url = images[img_index]['web_url'] + raw_url = images[img_index]['raw_url'] image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" descfile.write(image_str) char_count += len(image_str) descfile.write("[/center]\n\n") char_count += len("[/center]\n\n") - else: - continue # Skip if character limit has been reached + elif new_images_key in meta and meta[new_images_key]: + descfile.write("[center]") + char_count += len("[center]") + for img in meta[new_images_key]: + web_url = img['web_url'] + raw_url = img['raw_url'] + image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" + descfile.write(image_str) + char_count += len(image_str) + descfile.write("[/center]\n\n") + char_count += len("[/center]\n\n") if other_files_spoiler_open: descfile.write("[/spoiler][/center]\n") @@ -779,6 +715,7 @@ class MediaInfoParser: "belarusian": "https://ptpimg.me/iushg1.png", "bengali": "https://ptpimg.me/jq996n.png", "bosnian": "https://ptpimg.me/19t9rv.png", + "brazilian": "https://ptpimg.me/p8sgla.png", "bulgarian": "https://ptpimg.me/un9dc6.png", "catalan": "https://ptpimg.me/v4h5bf.png", "chinese": "https://ptpimg.me/ea3yv3.png", @@ -814,6 +751,7 @@ class MediaInfoParser: "kurdish": "https://ptpimg.me/g290wo.png", "kyrgyz": "https://ptpimg.me/336unh.png", "lao": "https://ptpimg.me/n3nan1.png", + "latin american": "https://ptpimg.me/11350x.png", "latvian": "https://ptpimg.me/3x2y1b.png", "lithuanian": "https://ptpimg.me/b444z8.png", "luxembourgish": "https://ptpimg.me/52x189.png", From 308a1979b3cf18fe429ff226c1aead41ca4ae8c3 Mon Sep 17 00:00:00 2001 From: xzin Date: Thu, 31 Oct 2024 12:38:58 -0500 Subject: [PATCH 405/741] fix episode detection when --daily flag is used --- src/prep.py | 60 
++++++++++++++++++++++++++--------------------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/src/prep.py b/src/prep.py index d8cc59eeb..16a6d327e 100644 --- a/src/prep.py +++ b/src/prep.py @@ -3121,35 +3121,34 @@ async def get_season_episode(self, video, meta): if meta['anime'] is False: try: if meta.get('manual_date'): - raise ManualDateException # noqa: F405 - try: - guess_year = guessit(video)['year'] - except Exception: - guess_year = "" - if guessit(video)["season"] == guess_year: - if f"s{guessit(video)['season']}" in video.lower(): + guess_date = meta.get('manual_date', guessit(video)['date']) if meta.get('manual_date') else guessit(video)['date'] + season_int, episode_int = self.daily_to_tmdb_season_episode(meta.get('tmdb'), guess_date) + season = f"S{str(season_int).zfill(2)}" + episode = f"E{str(episode_int).zfill(2)}" + # season = str(guess_date) + # episode = "" + is_daily = True + else: + try: + guess_year = guessit(video)['year'] + except Exception: + guess_year = "" + if guessit(video)["season"] == guess_year: + if f"s{guessit(video)['season']}" in video.lower(): + season_int = str(guessit(video)["season"]) + season = "S" + season_int.zfill(2) + else: + season_int = "1" + season = "S01" + else: season_int = str(guessit(video)["season"]) season = "S" + season_int.zfill(2) - else: - season_int = "1" - season = "S01" - else: - season_int = str(guessit(video)["season"]) - season = "S" + season_int.zfill(2) except Exception: - try: - guess_date = meta.get('manual_date', guessit(video)['date']) if meta.get('manual_date') else guessit(video)['date'] - season_int, episode_int = self.daily_to_tmdb_season_episode(meta.get('tmdb'), guess_date) - # season = f"S{season_int.zfill(2)}" - # episode = f"E{episode_int.zfill(2)}" - season = str(guess_date) - episode = "" - is_daily = True - except Exception: - console.print_exception() - season_int = "1" - season = "S01" + console.print_exception() + season_int = "1" + season = "S01" + try: if is_daily is not True: episodes = "" @@ -3172,6 +3171,7 @@ async def get_season_episode(self, video, meta): episode = "" episode_int = "0" meta['tv_pack'] = 1 + else: # If Anime parsed = anitopy.parse(Path(video).name) @@ -3648,17 +3648,17 @@ def get_tmdb_imdb_from_mediainfo(self, mediainfo, category, is_disc, tmdbid, imd def daily_to_tmdb_season_episode(self, tmdbid, date): show = tmdb.TV(tmdbid) seasons = show.info().get('seasons') - season = '1' - episode = '1' + season = 1 + episode = 1 date = datetime.fromisoformat(str(date)) for each in seasons: air_date = datetime.fromisoformat(each['air_date']) if air_date <= date: - season = str(each['season_number']) + season = int(each['season_number']) season_info = tmdb.TV_Seasons(tmdbid, season).info().get('episodes') for each in season_info: - if str(each['air_date']) == str(date): - episode = str(each['episode_number']) + if str(each['air_date']) == str(date.date()): + episode = int(each['episode_number']) break else: console.print(f"[yellow]Unable to map the date ([bold yellow]{str(date)}[/bold yellow]) to a Season/Episode number") From 1968eaf513fe6cf47390193c2c1652975a89d61c Mon Sep 17 00:00:00 2001 From: xzin Date: Thu, 31 Oct 2024 15:11:46 -0500 Subject: [PATCH 406/741] For --daily flagged shows, always use the supplied date as the episode title --- .gitignore | 3 ++- src/prep.py | 12 +++++++----- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/.gitignore b/.gitignore index 38ae89b4c..4943e5364 100644 --- a/.gitignore +++ b/.gitignore @@ -8,4 +8,5 @@ 
data/cookies/*.pickle .vscode/ __pycache__/ tmp/* -.wdm/ \ No newline at end of file +.wdm/ +.DS_Store \ No newline at end of file diff --git a/src/prep.py b/src/prep.py index 16a6d327e..a8dcdf318 100644 --- a/src/prep.py +++ b/src/prep.py @@ -3121,13 +3121,15 @@ async def get_season_episode(self, video, meta): if meta['anime'] is False: try: if meta.get('manual_date'): + is_daily = True guess_date = meta.get('manual_date', guessit(video)['date']) if meta.get('manual_date') else guessit(video)['date'] season_int, episode_int = self.daily_to_tmdb_season_episode(meta.get('tmdb'), guess_date) - season = f"S{str(season_int).zfill(2)}" - episode = f"E{str(episode_int).zfill(2)}" - # season = str(guess_date) - # episode = "" - is_daily = True + + # For --daily flagged shows, always use the supplied date as the episode title + season = "" + episode = "" + meta['episode_title'] = meta.get('manual_date') + else: try: guess_year = guessit(video)['year'] From 5f2cc5d6811816145cf6fd2d5eabebb9fc42a83d Mon Sep 17 00:00:00 2001 From: xzin Date: Thu, 31 Oct 2024 15:58:25 -0500 Subject: [PATCH 407/741] tweak naming to allow both manual seas/eps and --daily --- src/prep.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/prep.py b/src/prep.py index a8dcdf318..a7c772821 100644 --- a/src/prep.py +++ b/src/prep.py @@ -3026,6 +3026,10 @@ async def get_name(self, meta): year = meta['year'] else: year = "" + if meta.get('manual_date'): + # Ignore season and year for --daily flagged shows, just use manual date stored in episode_name + season = '' + episode = '' if meta.get('no_season', False) is True: season = '' if meta.get('no_year', False) is True: @@ -3125,9 +3129,7 @@ async def get_season_episode(self, video, meta): guess_date = meta.get('manual_date', guessit(video)['date']) if meta.get('manual_date') else guessit(video)['date'] season_int, episode_int = self.daily_to_tmdb_season_episode(meta.get('tmdb'), guess_date) - # For --daily flagged shows, always use the supplied date as the episode title - season = "" - episode = "" + # For --daily flagged shows, pass the supplied date as the episode title meta['episode_title'] = meta.get('manual_date') else: From c0ee1bd9f3c25e00fc82f9bd4d60ce09539a4524 Mon Sep 17 00:00:00 2001 From: xzin Date: Thu, 31 Oct 2024 16:57:29 -0500 Subject: [PATCH 408/741] still need to assign season & episode variables --- src/prep.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/prep.py b/src/prep.py index a7c772821..a86df7c96 100644 --- a/src/prep.py +++ b/src/prep.py @@ -3130,6 +3130,8 @@ async def get_season_episode(self, video, meta): season_int, episode_int = self.daily_to_tmdb_season_episode(meta.get('tmdb'), guess_date) # For --daily flagged shows, pass the supplied date as the episode title + season = f"S{str(season_int).zfill(2)}" + episode = f"E{str(episode_int).zfill(2)}" meta['episode_title'] = meta.get('manual_date') else: From c3eea23d9487c99345d77d367d0e2d1ad4f3da0b Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 1 Nov 2024 18:43:15 +1000 Subject: [PATCH 409/741] Gracefully handle tracker related errors in auto search --- src/prep.py | 113 ++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 92 insertions(+), 21 deletions(-) diff --git a/src/prep.py b/src/prep.py index d8cc59eeb..01bad30dd 100644 --- a/src/prep.py +++ b/src/prep.py @@ -552,58 +552,129 @@ async def gather_prep(self, meta, mode): if match: found_match = True else: + timeout_duration = 2 # seconds # Process all trackers with API = true if no specific 
tracker is set in meta default_trackers = self.config['TRACKERS'].get('default_trackers', "").split(", ") if "PTP" in default_trackers and not found_match: if str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true": ptp = PTP(config=self.config) - meta, match = await self.update_metadata_from_tracker('PTP', ptp, meta, search_term, search_file_folder) - if match: - found_match = True + try: + meta, match = await asyncio.wait_for( + self.update_metadata_from_tracker('PTP', ptp, meta, search_term, search_file_folder), + timeout=timeout_duration + ) + if match: + found_match = True + except asyncio.TimeoutError: + print("PTP tracker request timed out.") + except aiohttp.ClientSSLError: + print("PTP tracker request failed due to SSL error.") + except requests.exceptions.ConnectionError as conn_err: + print(f"PTP tracker request failed due to connection error: {conn_err}") if not meta['is_disc']: if "BLU" in default_trackers and not found_match: if str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": blu = BLU(config=self.config) - meta, match = await self.update_metadata_from_tracker('BLU', blu, meta, search_term, search_file_folder) - if match: - found_match = True + try: + meta, match = await asyncio.wait_for( + self.update_metadata_from_tracker('BLU', blu, meta, search_term, search_file_folder), + timeout=timeout_duration + ) + if match: + found_match = True + except asyncio.TimeoutError: + print("BLU tracker request timed out.") + except aiohttp.ClientSSLError: + print("BLU tracker request failed due to SSL error.") + except requests.exceptions.ConnectionError as conn_err: + print(f"BLU tracker request failed due to connection error: {conn_err}") if "AITHER" in default_trackers and not found_match: if str(self.config['TRACKERS'].get('AITHER', {}).get('useAPI')).lower() == "true": aither = AITHER(config=self.config) - meta, match = await self.update_metadata_from_tracker('AITHER', aither, meta, search_term, search_file_folder) - if match: - found_match = True + try: + meta, match = await asyncio.wait_for( + self.update_metadata_from_tracker('AITHER', aither, meta, search_term, search_file_folder), + timeout=timeout_duration + ) + if match: + found_match = True + except asyncio.TimeoutError: + print("AITHER tracker request timed out.") + except aiohttp.ClientSSLError: + print("AITHER tracker request failed due to SSL error.") + except requests.exceptions.ConnectionError as conn_err: + print(f"AITHER tracker request failed due to connection error: {conn_err}") if "LST" in default_trackers and not found_match: if str(self.config['TRACKERS'].get('LST', {}).get('useAPI')).lower() == "true": lst = LST(config=self.config) - meta, match = await self.update_metadata_from_tracker('LST', lst, meta, search_term, search_file_folder) - if match: - found_match = True + try: + meta, match = await asyncio.wait_for( + self.update_metadata_from_tracker('LST', lst, meta, search_term, search_file_folder), + timeout=timeout_duration + ) + if match: + found_match = True + except asyncio.TimeoutError: + print("LST tracker request timed out.") + except aiohttp.ClientSSLError: + print("LST tracker request failed due to SSL error.") + except requests.exceptions.ConnectionError as conn_err: + print(f"LST tracker request failed due to connection error: {conn_err}") if "OE" in default_trackers and not found_match: if str(self.config['TRACKERS'].get('OE', {}).get('useAPI')).lower() == "true": oe = OE(config=self.config) - meta, match = await 
self.update_metadata_from_tracker('OE', oe, meta, search_term, search_file_folder) - if match: - found_match = True + try: + meta, match = await asyncio.wait_for( + self.update_metadata_from_tracker('OE', oe, meta, search_term, search_file_folder), + timeout=timeout_duration + ) + if match: + found_match = True + except asyncio.TimeoutError: + print("OE tracker request timed out.") + except aiohttp.ClientSSLError: + print("OE tracker request failed due to SSL error.") + except requests.exceptions.ConnectionError as conn_err: + print(f"OE tracker request failed due to connection error: {conn_err}") if "TIK" in default_trackers and not found_match: if str(self.config['TRACKERS'].get('TIK', {}).get('useAPI')).lower() == "true": tik = TIK(config=self.config) - meta, match = await self.update_metadata_from_tracker('TIK', tik, meta, search_term, search_file_folder) - if match: - found_match = True + try: + meta, match = await asyncio.wait_for( + self.update_metadata_from_tracker('TIK', tik, meta, search_term, search_file_folder), + timeout=timeout_duration + ) + if match: + found_match = True + except asyncio.TimeoutError: + print("TIK tracker request timed out.") + except aiohttp.ClientSSLError: + print("TIK tracker request failed due to SSL error.") + except requests.exceptions.ConnectionError as conn_err: + print(f"TIK tracker request failed due to connection error: {conn_err}") if "HDB" in default_trackers and not found_match: if str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": hdb = HDB(config=self.config) - meta, match = await self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder) - if match: - found_match = True + try: + meta, match = await asyncio.wait_for( + self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder), + timeout=timeout_duration + ) + if match: + found_match = True + except asyncio.TimeoutError: + print("HDB tracker request timed out.") + except aiohttp.ClientSSLError: + print("HDB tracker request failed due to SSL error.") + except requests.exceptions.ConnectionError as conn_err: + print(f"HDB tracker request failed due to connection error: {conn_err}") if not found_match: console.print("[yellow]No matches found on any trackers.[/yellow]") From 41c58156b6d029f5177041d63cccd3ebe4718232 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 1 Nov 2024 18:45:02 +1000 Subject: [PATCH 410/741] language codes --- src/trackers/COMMON.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index fec21f493..c4d81bf50 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -728,6 +728,7 @@ class MediaInfoParser: "estonian": "https://ptpimg.me/z25pmk.png", "filipino": "https://ptpimg.me/9d3z9w.png", "finnish": "https://ptpimg.me/p4354c.png", + "french (canada)": "https://ptpimg.me/ei4s6u.png", "french canadian": "https://ptpimg.me/ei4s6u.png", "french": "https://ptpimg.me/m7mfoi.png", "galician": "https://ptpimg.me/xj51b9.png", @@ -769,6 +770,7 @@ class MediaInfoParser: "persian": "https://ptpimg.me/i0y103.png", "polish": "https://ptpimg.me/m73uwa.png", "portuguese": "https://ptpimg.me/5j1a7q.png", + "portuguese (brazil)": "https://ptpimg.me/p8sgla.png", "punjabi": "https://ptpimg.me/d0l479.png", "romanian": "https://ptpimg.me/ux94x0.png", "russian": "https://ptpimg.me/v33j64.png", @@ -778,6 +780,7 @@ class MediaInfoParser: "slovenian": "https://ptpimg.me/61yp81.png", "somali": "https://ptpimg.me/320pa6.png", "spanish": 
"https://ptpimg.me/xj51b9.png", + "spanish (latin america)": "https://ptpimg.me/11350x.png", "swahili": "https://ptpimg.me/d0l479.png", "swedish": "https://ptpimg.me/082090.png", "tamil": "https://ptpimg.me/d0l479.png", @@ -890,8 +893,8 @@ def parse_mediainfo(self, mediainfo_text): # Debug output for the complete parsed_data # print("\nComplete Parsed Data:") - for section, data in parsed_data.items(): - print(f"{section}: {data}") + # for section, data in parsed_data.items(): + # print(f"{section}: {data}") return parsed_data From a9031e8e7b6bfcf1b207c56a83cf376e627e9e4b Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 1 Nov 2024 20:01:31 +1000 Subject: [PATCH 411/741] Revert only tacker timeout handling Was catching wait time editing descriptions and the like --- src/prep.py | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/src/prep.py b/src/prep.py index 01bad30dd..73910b163 100644 --- a/src/prep.py +++ b/src/prep.py @@ -552,7 +552,6 @@ async def gather_prep(self, meta, mode): if match: found_match = True else: - timeout_duration = 2 # seconds # Process all trackers with API = true if no specific tracker is set in meta default_trackers = self.config['TRACKERS'].get('default_trackers', "").split(", ") @@ -566,8 +565,6 @@ async def gather_prep(self, meta, mode): ) if match: found_match = True - except asyncio.TimeoutError: - print("PTP tracker request timed out.") except aiohttp.ClientSSLError: print("PTP tracker request failed due to SSL error.") except requests.exceptions.ConnectionError as conn_err: @@ -584,8 +581,6 @@ async def gather_prep(self, meta, mode): ) if match: found_match = True - except asyncio.TimeoutError: - print("BLU tracker request timed out.") except aiohttp.ClientSSLError: print("BLU tracker request failed due to SSL error.") except requests.exceptions.ConnectionError as conn_err: @@ -601,8 +596,6 @@ async def gather_prep(self, meta, mode): ) if match: found_match = True - except asyncio.TimeoutError: - print("AITHER tracker request timed out.") except aiohttp.ClientSSLError: print("AITHER tracker request failed due to SSL error.") except requests.exceptions.ConnectionError as conn_err: @@ -618,8 +611,6 @@ async def gather_prep(self, meta, mode): ) if match: found_match = True - except asyncio.TimeoutError: - print("LST tracker request timed out.") except aiohttp.ClientSSLError: print("LST tracker request failed due to SSL error.") except requests.exceptions.ConnectionError as conn_err: @@ -635,8 +626,6 @@ async def gather_prep(self, meta, mode): ) if match: found_match = True - except asyncio.TimeoutError: - print("OE tracker request timed out.") except aiohttp.ClientSSLError: print("OE tracker request failed due to SSL error.") except requests.exceptions.ConnectionError as conn_err: @@ -652,8 +641,6 @@ async def gather_prep(self, meta, mode): ) if match: found_match = True - except asyncio.TimeoutError: - print("TIK tracker request timed out.") except aiohttp.ClientSSLError: print("TIK tracker request failed due to SSL error.") except requests.exceptions.ConnectionError as conn_err: @@ -669,8 +656,6 @@ async def gather_prep(self, meta, mode): ) if match: found_match = True - except asyncio.TimeoutError: - print("HDB tracker request timed out.") except aiohttp.ClientSSLError: print("HDB tracker request failed due to SSL error.") except requests.exceptions.ConnectionError as conn_err: From 44c6492f3090e00688eeec75f17ba241023f9fbe Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 1 Nov 2024 20:02:16 +1000 Subject: [PATCH 412/741] Remove definitions 
also --- src/prep.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/src/prep.py b/src/prep.py index 73910b163..f496b547e 100644 --- a/src/prep.py +++ b/src/prep.py @@ -561,7 +561,6 @@ async def gather_prep(self, meta, mode): try: meta, match = await asyncio.wait_for( self.update_metadata_from_tracker('PTP', ptp, meta, search_term, search_file_folder), - timeout=timeout_duration ) if match: found_match = True @@ -577,7 +576,6 @@ async def gather_prep(self, meta, mode): try: meta, match = await asyncio.wait_for( self.update_metadata_from_tracker('BLU', blu, meta, search_term, search_file_folder), - timeout=timeout_duration ) if match: found_match = True @@ -592,7 +590,6 @@ async def gather_prep(self, meta, mode): try: meta, match = await asyncio.wait_for( self.update_metadata_from_tracker('AITHER', aither, meta, search_term, search_file_folder), - timeout=timeout_duration ) if match: found_match = True @@ -607,7 +604,6 @@ async def gather_prep(self, meta, mode): try: meta, match = await asyncio.wait_for( self.update_metadata_from_tracker('LST', lst, meta, search_term, search_file_folder), - timeout=timeout_duration ) if match: found_match = True @@ -622,7 +618,6 @@ async def gather_prep(self, meta, mode): try: meta, match = await asyncio.wait_for( self.update_metadata_from_tracker('OE', oe, meta, search_term, search_file_folder), - timeout=timeout_duration ) if match: found_match = True @@ -637,7 +632,6 @@ async def gather_prep(self, meta, mode): try: meta, match = await asyncio.wait_for( self.update_metadata_from_tracker('TIK', tik, meta, search_term, search_file_folder), - timeout=timeout_duration ) if match: found_match = True @@ -652,7 +646,6 @@ async def gather_prep(self, meta, mode): try: meta, match = await asyncio.wait_for( self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder), - timeout=timeout_duration ) if match: found_match = True From 5d2860408bd1234e903a4cd99551f395796cb1b5 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 1 Nov 2024 20:50:14 +1000 Subject: [PATCH 413/741] Keep description meta in description text --- src/prep.py | 39 ++++++++++++++++++++++++--------------- 1 file changed, 24 insertions(+), 15 deletions(-) diff --git a/src/prep.py b/src/prep.py index fb7459935..f4464e91f 100644 --- a/src/prep.py +++ b/src/prep.py @@ -492,7 +492,7 @@ async def gather_prep(self, meta, mode): # Debugging information after population # console.print(f"Debug: meta['filelist'] after population: {meta.get('filelist', 'Not Set')}") - description_text = meta.get('description') if meta.get('description') else "" + description_text = meta['description'] if meta['description'] else "" with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: description.write(description_text) @@ -563,6 +563,7 @@ async def gather_prep(self, meta, mode): meta, match = await self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder) if match: found_match = True + else: # Process all trackers with API = true if no specific tracker is set in meta default_trackers = self.config['TRACKERS'].get('default_trackers', "").split(", ") @@ -3500,6 +3501,8 @@ def clean_text(text): with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: description.seek(0) + content_written = False + if meta.get('desc_template'): from jinja2 import Template try: @@ -3508,11 +3511,11 @@ def clean_text(text): template_desc = template.render(meta) if 
clean_text(template_desc):
 description.write(template_desc + "\n")
- console.print(f"[INFO] Description from template '{meta['desc_template']}' used.")
+ content_written = True
 except FileNotFoundError:
 console.print(f"[ERROR] Template '{meta['desc_template']}' not found.")

- if meta.get('nfo'):
+ if meta.get('nfo') and not content_written:
 nfo_files = glob.glob("*.nfo")
 if nfo_files:
 nfo = nfo_files[0]
@@ -3520,38 +3523,44 @@ def clean_text(text):
 nfo_content = nfo_file.read()
 description.write(f"[code]{nfo_content}[/code]\n")
 meta['description'] = "CUSTOM"
- console.print(f"[INFO] NFO file '{nfo}' used.")
+ content_written = True

- if desclink:
+ if desclink and not content_written:
 try:
 parsed = urllib.parse.urlparse(desclink.replace('/raw/', '/'))
 split = os.path.split(parsed.path)
 raw = parsed._replace(path=f"{split[0]}/raw/{split[1]}" if split[0] != '/' else f"/raw{parsed.path}")
 raw_url = urllib.parse.urlunparse(raw)
 desclink_content = requests.get(raw_url).text
- description.write(desclink_content + "\n")
- meta['description'] = "CUSTOM"
- console.print(f"[INFO] Description from link '{desclink}' used.")
+ if clean_text(desclink_content):
+ description.write(desclink_content + "\n")
+ meta['description'] = "CUSTOM"
+ content_written = True
 except Exception as e:
 console.print(f"[ERROR] Failed to fetch description from link: {e}")

- if descfile and os.path.isfile(descfile):
+ if descfile and os.path.isfile(descfile) and not content_written:
 with open(descfile, 'r') as f:
 file_content = f.read()
- description.write(file_content)
- meta['description'] = "CUSTOM"
- console.print(f"[INFO] Description from file '{descfile}' used.")
+ if clean_text(file_content):
+ description.write(file_content)
+ meta['description'] = "CUSTOM"
+ content_written = True

- if meta.get('desc'):
+ if meta.get('desc') and not content_written:
 description.write(meta['desc'] + "\n")
 meta['description'] = "CUSTOM"
- console.print("[INFO] Custom description used.")
+ content_written = True
+
+ if not content_written:
+ description.write(meta['description'] + "\n")

 description.write("\n")
 return meta

+ # Fallback if no description is provided
 if not meta.get('skip_gen_desc', False):
- description_text = meta.get('description') if meta.get('description') else ""
+ description_text = meta['description'] if meta['description'] else ""
 with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description:
 description.write(description_text + "\n")

From e4234deed9958d256108b79277b1956e66bff439 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Fri, 1 Nov 2024 21:01:21 +1000
Subject: [PATCH 414/741] HDR logos

todo: universal logos that work well with light or dark styles.
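For reference, a minimal standalone sketch of the substring matching added below (icon URLs and sizes here are placeholders, not the real HDR_CODE_MAP entries): the map is ordered so that "hdr10+" is tested before its substrings "hdr10" and "hdr", and matching stops early once HDR10+ has been seen.

HDR_CODE_MAP = {
    "dvhe": ("dv_logo.png", "102x92"),          # placeholder URL/size
    "hdr10+": ("hdr10plus_logo.png", "85x85"),  # placeholder
    "hdr10": ("hdr10_logo.png", "50"),          # placeholder
    "hdr": ("hdr_logo.png", "50"),              # placeholder
}

def hdr_icons(hdr_format: str) -> str:
    text = hdr_format.lower()
    icons, seen = [], set()
    for key, (url, size) in HDR_CODE_MAP.items():  # insertion order matters
        if key in text and key not in seen:
            icons.append(f"[img={size}]{url}[/img]")
            seen.add(key)
            if key == "hdr10+":  # HDR10+ implies the plain variants, stop here
                break
    return " ".join(icons) if icons else hdr_format  # fall back to raw text

Note that a bare "HDR10" string matches both the "hdr10" and "hdr" keys, which is why both entries point at the same logo.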
--- src/trackers/COMMON.py | 29 +++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index c4d81bf50..34936314a 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -798,6 +798,13 @@ class MediaInfoParser: "zulu": "https://ptpimg.me/7teg09.png" } + HDR_CODE_MAP = { + "dvhe": ("https://ptpimg.me/y1a27n.png", "102x92"), + "hdr10+": ("https://ptpimg.me/c3tw3w.png", "85x85"), + "hdr10": ("https://ptpimg.me/h34our.png", "50"), + "hdr": ("https://ptpimg.me/h34our.png", "50") + } + def parse_mediainfo(self, mediainfo_text): # Patterns for matching sections and fields section_pattern = re.compile(r"^(General|Video|Audio|Text|Menu)(?:\s#\d+)?", re.IGNORECASE) @@ -848,7 +855,25 @@ def parse_mediainfo(self, mediainfo_text): if current_section == "general" and property_name in general_fields: current_track[property_name] = property_value elif current_section == "video" and property_name in video_fields: - current_track[property_name] = property_value + if property_name == "hdr_format": + hdr_format_lower = property_value.lower() + print(f"\nProcessing hdr_format: '{property_value}'") + matched_icons = [] + matched_keys = set() + + # Check for each key in HDR_CODE_MAP to see if it exists anywhere in the hdr_format field + for hdr_key, (icon_url, img_size) in self.HDR_CODE_MAP.items(): + if hdr_key in hdr_format_lower and hdr_key not in matched_keys: + matched_icons.append(f"[img={img_size}]{icon_url}[/img]") + matched_keys.add(hdr_key) + # Stop matching further HDR types if HDR10+ is matched + if hdr_key == "hdr10+": + break + + # If matches were found, display only the icons; otherwise, keep the original text + current_track[property_name] = " ".join(matched_icons) if matched_icons else property_value + else: + current_track[property_name] = property_value elif current_section == "audio" and property_name in audio_fields: current_track[property_name] = property_value elif current_section == "text": @@ -894,7 +919,7 @@ def parse_mediainfo(self, mediainfo_text): # Debug output for the complete parsed_data # print("\nComplete Parsed Data:") # for section, data in parsed_data.items(): - # print(f"{section}: {data}") + # print(f"{section}: {data}") return parsed_data From 378db3aa08dc29268deff63b1a966c488fc5233c Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 1 Nov 2024 22:44:15 +1000 Subject: [PATCH 415/741] Revert "HDR logos" This reverts commit e4234deed9958d256108b79277b1956e66bff439. 
--- src/trackers/COMMON.py | 29 ++--------------------------- 1 file changed, 2 insertions(+), 27 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 34936314a..c4d81bf50 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -798,13 +798,6 @@ class MediaInfoParser: "zulu": "https://ptpimg.me/7teg09.png" } - HDR_CODE_MAP = { - "dvhe": ("https://ptpimg.me/y1a27n.png", "102x92"), - "hdr10+": ("https://ptpimg.me/c3tw3w.png", "85x85"), - "hdr10": ("https://ptpimg.me/h34our.png", "50"), - "hdr": ("https://ptpimg.me/h34our.png", "50") - } - def parse_mediainfo(self, mediainfo_text): # Patterns for matching sections and fields section_pattern = re.compile(r"^(General|Video|Audio|Text|Menu)(?:\s#\d+)?", re.IGNORECASE) @@ -855,25 +848,7 @@ def parse_mediainfo(self, mediainfo_text): if current_section == "general" and property_name in general_fields: current_track[property_name] = property_value elif current_section == "video" and property_name in video_fields: - if property_name == "hdr_format": - hdr_format_lower = property_value.lower() - print(f"\nProcessing hdr_format: '{property_value}'") - matched_icons = [] - matched_keys = set() - - # Check for each key in HDR_CODE_MAP to see if it exists anywhere in the hdr_format field - for hdr_key, (icon_url, img_size) in self.HDR_CODE_MAP.items(): - if hdr_key in hdr_format_lower and hdr_key not in matched_keys: - matched_icons.append(f"[img={img_size}]{icon_url}[/img]") - matched_keys.add(hdr_key) - # Stop matching further HDR types if HDR10+ is matched - if hdr_key == "hdr10+": - break - - # If matches were found, display only the icons; otherwise, keep the original text - current_track[property_name] = " ".join(matched_icons) if matched_icons else property_value - else: - current_track[property_name] = property_value + current_track[property_name] = property_value elif current_section == "audio" and property_name in audio_fields: current_track[property_name] = property_value elif current_section == "text": @@ -919,7 +894,7 @@ def parse_mediainfo(self, mediainfo_text): # Debug output for the complete parsed_data # print("\nComplete Parsed Data:") # for section, data in parsed_data.items(): - # print(f"{section}: {data}") + # print(f"{section}: {data}") return parsed_data From 0fc832b5def8853cfb94f0e718a6f70c28ea12e8 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 1 Nov 2024 23:45:15 +1000 Subject: [PATCH 416/741] Proper fix timeout removal --- src/prep.py | 28 +++++++--------------------- 1 file changed, 7 insertions(+), 21 deletions(-) diff --git a/src/prep.py b/src/prep.py index f496b547e..6c3956187 100644 --- a/src/prep.py +++ b/src/prep.py @@ -559,9 +559,7 @@ async def gather_prep(self, meta, mode): if str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true": ptp = PTP(config=self.config) try: - meta, match = await asyncio.wait_for( - self.update_metadata_from_tracker('PTP', ptp, meta, search_term, search_file_folder), - ) + meta, match = await self.update_metadata_from_tracker('PTP', ptp, meta, search_term, search_file_folder) if match: found_match = True except aiohttp.ClientSSLError: @@ -574,9 +572,7 @@ async def gather_prep(self, meta, mode): if str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": blu = BLU(config=self.config) try: - meta, match = await asyncio.wait_for( - self.update_metadata_from_tracker('BLU', blu, meta, search_term, search_file_folder), - ) + meta, match = await self.update_metadata_from_tracker('BLU', blu, meta, search_term, 
search_file_folder) if match: found_match = True except aiohttp.ClientSSLError: @@ -588,9 +584,7 @@ async def gather_prep(self, meta, mode): if str(self.config['TRACKERS'].get('AITHER', {}).get('useAPI')).lower() == "true": aither = AITHER(config=self.config) try: - meta, match = await asyncio.wait_for( - self.update_metadata_from_tracker('AITHER', aither, meta, search_term, search_file_folder), - ) + meta, match = await self.update_metadata_from_tracker('AITHER', aither, meta, search_term, search_file_folder) if match: found_match = True except aiohttp.ClientSSLError: @@ -602,9 +596,7 @@ async def gather_prep(self, meta, mode): if str(self.config['TRACKERS'].get('LST', {}).get('useAPI')).lower() == "true": lst = LST(config=self.config) try: - meta, match = await asyncio.wait_for( - self.update_metadata_from_tracker('LST', lst, meta, search_term, search_file_folder), - ) + meta, match = await self.update_metadata_from_tracker('LST', lst, meta, search_term, search_file_folder) if match: found_match = True except aiohttp.ClientSSLError: @@ -616,9 +608,7 @@ async def gather_prep(self, meta, mode): if str(self.config['TRACKERS'].get('OE', {}).get('useAPI')).lower() == "true": oe = OE(config=self.config) try: - meta, match = await asyncio.wait_for( - self.update_metadata_from_tracker('OE', oe, meta, search_term, search_file_folder), - ) + meta, match = await self.update_metadata_from_tracker('OE', oe, meta, search_term, search_file_folder) if match: found_match = True except aiohttp.ClientSSLError: @@ -630,9 +620,7 @@ async def gather_prep(self, meta, mode): if str(self.config['TRACKERS'].get('TIK', {}).get('useAPI')).lower() == "true": tik = TIK(config=self.config) try: - meta, match = await asyncio.wait_for( - self.update_metadata_from_tracker('TIK', tik, meta, search_term, search_file_folder), - ) + meta, match = await self.update_metadata_from_tracker('TIK', tik, meta, search_term, search_file_folder) if match: found_match = True except aiohttp.ClientSSLError: @@ -644,9 +632,7 @@ async def gather_prep(self, meta, mode): if str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": hdb = HDB(config=self.config) try: - meta, match = await asyncio.wait_for( - self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder), - ) + meta, match = await self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder) if match: found_match = True except aiohttp.ClientSSLError: From 5d5b2d909cdd39328ccc66a11b52e6cbfd02a32c Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 1 Nov 2024 23:55:14 +1000 Subject: [PATCH 417/741] Create description state --- src/prep.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index 91bee8aaa..d930bfb61 100644 --- a/src/prep.py +++ b/src/prep.py @@ -492,7 +492,10 @@ async def gather_prep(self, meta, mode): # Debugging information after population # console.print(f"Debug: meta['filelist'] after population: {meta.get('filelist', 'Not Set')}") - description_text = meta['description'] if meta['description'] else "" + if 'description' not in meta: + meta['description'] = "" + + description_text = meta.get('description', '') with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: description.write(description_text) From 65af3295056a895191c9ef42366a83ac76c376a2 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 2 Nov 2024 00:44:08 +1000 Subject: [PATCH 418/741] Add option for original behavior --- 
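Note: with "multiScreens" set to "0" the pack description keeps only the original description and image list, and per-file screenshot generation is skipped entirely. A condensed sketch of the gate this patch adds, where generate_extra_screens() is a hypothetical stand-in for the real screenshot/upload pipeline in COMMON.py:

multi_screens = int(config['DEFAULT'].get('multiScreens', 2))
for i, file in enumerate(filelist):
    if multi_screens != 0 and i > 0:
        generate_extra_screens(file, i)  # never reached when "0" is configured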
data/example-config.py | 11 ++-- src/trackers/COMMON.py | 115 ++++++++++++++++++++++------------------- 2 files changed, 67 insertions(+), 59 deletions(-) diff --git a/data/example-config.py b/data/example-config.py index e8c29de7b..5092faa60 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -29,13 +29,14 @@ # Default is 350, ie [img=350] "thumbnail_size": "350", - # Number of screenshots to use for each (ALL) disc/episode when uploading packs to supported sites - "multiScreens": "2", + # Number of screenshots to use for each (ALL) disc/episode when uploading packs to supported sites. + # 0 equals old behavior where only the original description and images are added. + "multiScreens": "0", # When uploading packs, you can specifiy a different screenshot thumbnail size, default 300. "pack_thumb_size": "300", - # Description character count (including bbcode) cutoff for UNIT3D sites when **season packs only** + # Description character count (including bbcode) cutoff for UNIT3D sites when **season packs only**. # After hitting this limit, only filenames and screenshots will be used for any ADDITIONAL files # still to be added to the description. You can set this small like 50, to only ever # print filenames and screenshots for each file, no mediainfo will be printed. @@ -44,9 +45,9 @@ # description will be added before respecting this cutoff. "charLimit": "14000", - # How many files in a season pack will be added to the description before using an additional spoiler tag + # How many files in a season pack will be added to the description before using an additional spoiler tag. # Any other files past this limit will be hidden/added all within a spoiler tag. - "fileLimit": "5", + "fileLimit": "2", # Providing the option to add a header, in bbcode, above the screenshot section where supported # "screenshot_header": "[centers] SCREENSHOTS [/center]" diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index c4d81bf50..6f8fadba9 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -212,34 +212,35 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des # First Pass: Create and Upload Images for Each File for i, file in enumerate(filelist): - if i > 0: - new_images_key = f'new_images_file_{i}' - if new_images_key not in meta or not meta[new_images_key]: - # Proceed with image generation if not already present - meta[new_images_key] = [] - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + if multi_screens != 0: + if i > 0: + new_images_key = f'new_images_file_{i}' + if new_images_key not in meta or not meta[new_images_key]: + # Proceed with image generation if not already present + meta[new_images_key] = [] + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") - # If no screenshots exist, create them - if not new_screens: - if meta['debug']: - console.print(f"[yellow]No existing screenshots for {new_images_key}; generating new ones.") - s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, multi_screens + 1, True, None)) - s.start() - while s.is_alive(): - await asyncio.sleep(1) + # If no screenshots exist, create them + if not new_screens: + if meta['debug']: + console.print(f"[yellow]No existing screenshots for {new_images_key}; generating new ones.") + s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, multi_screens + 1, True, None)) + 
s.start() + while s.is_alive(): + await asyncio.sleep(1) - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") - # Upload generated screenshots - if new_screens: - uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]}) - meta[new_images_key] = [] - for img in uploaded_images: - meta[new_images_key].append({ - 'img_url': img['img_url'], - 'raw_url': img['raw_url'], - 'web_url': img['web_url'] - }) + # Upload generated screenshots + if new_screens: + uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]}) + meta[new_images_key] = [] + for img in uploaded_images: + meta[new_images_key].append({ + 'img_url': img['img_url'], + 'raw_url': img['raw_url'], + 'web_url': img['web_url'] + }) # Save updated meta meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json" @@ -253,33 +254,38 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des filename = os.path.splitext(os.path.basename(file.strip()))[0] # If we are beyond the file limit, add all further files in a spoiler - if i >= file_limit: - if not other_files_spoiler_open: - descfile.write("[center][spoiler=Other files]\n") - char_count += len("[center][spoiler=Other files]\n") - other_files_spoiler_open = True + if multi_screens != 0: + if i >= file_limit: + if not other_files_spoiler_open: + descfile.write("[center][spoiler=Other files]\n") + char_count += len("[center][spoiler=Other files]\n") + other_files_spoiler_open = True # Write filename in BBCode format with MediaInfo in spoiler if not the first file - if i > 0 and char_count < max_char_limit: - mi_dump = MediaInfo.parse(file, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) - parsed_mediainfo = self.parser.parse_mediainfo(mi_dump) - formatted_bbcode = self.parser.format_bbcode(parsed_mediainfo) - descfile.write(f"[center][spoiler={filename}]{formatted_bbcode}[/spoiler][/center]\n") - char_count += len(f"[center][spoiler={filename}]{formatted_bbcode}[/spoiler][/center]\n") - else: - descfile.write(f"[center]{filename}\n[/center]\n") - char_count += len(f"[center]{filename}\n[/center]\n") + if multi_screens != 0: + if i > 0 and char_count < max_char_limit: + mi_dump = MediaInfo.parse(file, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) + parsed_mediainfo = self.parser.parse_mediainfo(mi_dump) + formatted_bbcode = self.parser.format_bbcode(parsed_mediainfo) + descfile.write(f"[center][spoiler={filename}]{formatted_bbcode}[/spoiler][/center]\n") + char_count += len(f"[center][spoiler={filename}]{formatted_bbcode}[/spoiler][/center]\n") + else: + descfile.write(f"[center]{filename}\n[/center]\n") + char_count += len(f"[center]{filename}\n[/center]\n") # Write images if they exist new_images_key = f'new_images_file_{i}' if i == 0: # For the first file, use 'image_list' key images = meta['image_list'] if images: + # Process all images if multi_screens is 0 or set multi_screens as usual + if file_limit == 1 or multi_screens == 0: + single_screens = len(images) # Use all images if only one file or if multi_screens is 0 + else: + single_screens = multi_screens descfile.write("[center]") char_count += len("[center]") - if file_limit == 1: - multi_screens = len(images) # Use all images if only one file - for img_index in range(min(multi_screens, 
len(images))): + for img_index in range(min(single_screens, len(images))): web_url = images[img_index]['web_url'] raw_url = images[img_index]['raw_url'] image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" @@ -287,17 +293,18 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des char_count += len(image_str) descfile.write("[/center]\n\n") char_count += len("[/center]\n\n") - elif new_images_key in meta and meta[new_images_key]: - descfile.write("[center]") - char_count += len("[center]") - for img in meta[new_images_key]: - web_url = img['web_url'] - raw_url = img['raw_url'] - image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" - descfile.write(image_str) - char_count += len(image_str) - descfile.write("[/center]\n\n") - char_count += len("[/center]\n\n") + elif multi_screens != 0: + if new_images_key in meta and meta[new_images_key]: + descfile.write("[center]") + char_count += len("[center]") + for img in meta[new_images_key]: + web_url = img['web_url'] + raw_url = img['raw_url'] + image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" + descfile.write(image_str) + char_count += len(image_str) + descfile.write("[/center]\n\n") + char_count += len("[/center]\n\n") if other_files_spoiler_open: descfile.write("[/spoiler][/center]\n") @@ -894,7 +901,7 @@ def parse_mediainfo(self, mediainfo_text): # Debug output for the complete parsed_data # print("\nComplete Parsed Data:") # for section, data in parsed_data.items(): - # print(f"{section}: {data}") + # print(f"{section}: {data}") return parsed_data From 10abd2c553dc0a8a78b3af94fd0a2f0f902c2d33 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 2 Nov 2024 16:35:34 +1000 Subject: [PATCH 419/741] Some image size consistency --- src/trackers/COMMON.py | 196 ++++++++++++++++++++--------------------- 1 file changed, 98 insertions(+), 98 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 6f8fadba9..fbb072f68 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -44,7 +44,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf8').read() multi_screens = int(self.config['DEFAULT'].get('multiScreens', 2)) - char_limit = int(self.config['DEFAULT'].get('charLimit', 16000)) + char_limit = int(self.config['DEFAULT'].get('charLimit', 14000)) file_limit = int(self.config['DEFAULT'].get('fileLimit', 5)) thumb_size = int(self.config['DEFAULT'].get('pack_thumb_size', '300')) with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]DESCRIPTION.txt", 'w', encoding='utf8') as descfile: @@ -712,97 +712,97 @@ async def filter_dupes(self, dupes, meta): class MediaInfoParser: # Language to ISO country code mapping LANGUAGE_CODE_MAP = { - "afrikaans": "https://ptpimg.me/i9pt6k.png", - "albanian": "https://ptpimg.me/sfhik8.png", - "amharic": "https://ptpimg.me/zm816y.png", - "arabic": "https://ptpimg.me/5g8i9u.png", - "armenian": "https://ptpimg.me/zm816y.png", - "azerbaijani": "https://ptpimg.me/h3rbe0.png", - "basque": "https://ptpimg.me/xj51b9.png", - "belarusian": "https://ptpimg.me/iushg1.png", - "bengali": "https://ptpimg.me/jq996n.png", - "bosnian": "https://ptpimg.me/19t9rv.png", - "brazilian": "https://ptpimg.me/p8sgla.png", - "bulgarian": "https://ptpimg.me/un9dc6.png", - "catalan": "https://ptpimg.me/v4h5bf.png", - "chinese": 
"https://ptpimg.me/ea3yv3.png", - "croatian": "https://ptpimg.me/rxi533.png", - "czech": "https://ptpimg.me/5m75n3.png", - "danish": "https://ptpimg.me/m35c41.png", - "dutch": "https://ptpimg.me/6nmwpx.png", - "dzongkha": "https://ptpimg.me/56e7y5.png", - "english": "https://ptpimg.me/ine2fd.png", - "estonian": "https://ptpimg.me/z25pmk.png", - "filipino": "https://ptpimg.me/9d3z9w.png", - "finnish": "https://ptpimg.me/p4354c.png", - "french (canada)": "https://ptpimg.me/ei4s6u.png", - "french canadian": "https://ptpimg.me/ei4s6u.png", - "french": "https://ptpimg.me/m7mfoi.png", - "galician": "https://ptpimg.me/xj51b9.png", - "georgian": "https://ptpimg.me/pp412q.png", - "german": "https://ptpimg.me/dw8d04.png", - "greek": "https://ptpimg.me/px1u3e.png", - "gujarati": "https://ptpimg.me/d0l479.png", - "haitian creole": "https://ptpimg.me/f64wlp.png", - "hebrew": "https://ptpimg.me/5jw1jp.png", - "hindi": "https://ptpimg.me/d0l479.png", - "hungarian": "https://ptpimg.me/fr4aj7.png", - "icelandic": "https://ptpimg.me/40o553.png", - "indonesian": "https://ptpimg.me/f00c8u.png", - "irish": "https://ptpimg.me/71x9mk.png", - "italian": "https://ptpimg.me/ao762a.png", - "japanese": "https://ptpimg.me/o1amm3.png", - "kannada": "https://ptpimg.me/d0l479.png", - "kazakh": "https://ptpimg.me/tq1h8b.png", - "khmer": "https://ptpimg.me/0p1tli.png", - "korean": "https://ptpimg.me/2tvwgn.png", - "kurdish": "https://ptpimg.me/g290wo.png", - "kyrgyz": "https://ptpimg.me/336unh.png", - "lao": "https://ptpimg.me/n3nan1.png", - "latin american": "https://ptpimg.me/11350x.png", - "latvian": "https://ptpimg.me/3x2y1b.png", - "lithuanian": "https://ptpimg.me/b444z8.png", - "luxembourgish": "https://ptpimg.me/52x189.png", - "macedonian": "https://ptpimg.me/2g5lva.png", - "malagasy": "https://ptpimg.me/n5120r.png", - "malay": "https://ptpimg.me/02e17w.png", - "malayalam": "https://ptpimg.me/d0l479.png", - "maltese": "https://ptpimg.me/ua46c2.png", - "maori": "https://ptpimg.me/2fw03g.png", - "marathi": "https://ptpimg.me/d0l479.png", - "mongolian": "https://ptpimg.me/z2h682.png", - "nepali": "https://ptpimg.me/5yd3sp.png", - "norwegian": "https://ptpimg.me/1t11u4.png", - "pashto": "https://ptpimg.me/i9pt6k.png", - "persian": "https://ptpimg.me/i0y103.png", - "polish": "https://ptpimg.me/m73uwa.png", - "portuguese": "https://ptpimg.me/5j1a7q.png", - "portuguese (brazil)": "https://ptpimg.me/p8sgla.png", - "punjabi": "https://ptpimg.me/d0l479.png", - "romanian": "https://ptpimg.me/ux94x0.png", - "russian": "https://ptpimg.me/v33j64.png", - "samoan": "https://ptpimg.me/8nt3zq.png", - "serbian": "https://ptpimg.me/2139p2.png", - "slovak": "https://ptpimg.me/70994n.png", - "slovenian": "https://ptpimg.me/61yp81.png", - "somali": "https://ptpimg.me/320pa6.png", - "spanish": "https://ptpimg.me/xj51b9.png", - "spanish (latin america)": "https://ptpimg.me/11350x.png", - "swahili": "https://ptpimg.me/d0l479.png", - "swedish": "https://ptpimg.me/082090.png", - "tamil": "https://ptpimg.me/d0l479.png", - "telugu": "https://ptpimg.me/d0l479.png", - "thai": "https://ptpimg.me/38ru43.png", - "turkish": "https://ptpimg.me/g4jg39.png", - "ukrainian": "https://ptpimg.me/d8fp6k.png", - "urdu": "https://ptpimg.me/z23gg5.png", - "uzbek": "https://ptpimg.me/89854s.png", - "vietnamese": "https://ptpimg.me/qnuya2.png", - "welsh": "https://ptpimg.me/a9w539.png", - "xhosa": "https://ptpimg.me/7teg09.png", - "yiddish": "https://ptpimg.me/5jw1jp.png", - "yoruba": "https://ptpimg.me/9l34il.png", - "zulu": "https://ptpimg.me/7teg09.png" + 
"afrikaans": ("https://ptpimg.me/i9pt6k.png", "20"), + "albanian": ("https://ptpimg.me/sfhik8.png", "20"), + "amharic": ("https://ptpimg.me/zm816y.png", "20"), + "arabic": ("https://ptpimg.me/5g8i9u.png", "26x10"), + "armenian": ("https://ptpimg.me/zm816y.png", "20"), + "azerbaijani": ("https://ptpimg.me/h3rbe0.png", "20"), + "basque": ("https://ptpimg.me/xj51b9.png", "20"), + "belarusian": ("https://ptpimg.me/iushg1.png", "20"), + "bengali": ("https://ptpimg.me/jq996n.png", "20"), + "bosnian": ("https://ptpimg.me/19t9rv.png", "20"), + "brazilian": ("https://ptpimg.me/p8sgla.png", "20"), + "bulgarian": ("https://ptpimg.me/un9dc6.png", "20"), + "catalan": ("https://ptpimg.me/v4h5bf.png", "20"), + "chinese": ("https://ptpimg.me/ea3yv3.png", "20"), + "croatian": ("https://ptpimg.me/rxi533.png", "20"), + "czech": ("https://ptpimg.me/5m75n3.png", "20"), + "danish": ("https://ptpimg.me/m35c41.png", "20"), + "dutch": ("https://ptpimg.me/6nmwpx.png", "20"), + "dzongkha": ("https://ptpimg.me/56e7y5.png", "20"), + "english": ("https://ptpimg.me/ine2fd.png", "25x10"), + "estonian": ("https://ptpimg.me/z25pmk.png", "20"), + "filipino": ("https://ptpimg.me/9d3z9w.png", "20"), + "finnish": ("https://ptpimg.me/p4354c.png", "20"), + "french (canada)": ("https://ptpimg.me/ei4s6u.png", "20"), + "french canadian": ("https://ptpimg.me/ei4s6u.png", "20"), + "french": ("https://ptpimg.me/m7mfoi.png", "20"), + "galician": ("https://ptpimg.me/xj51b9.png", "20"), + "georgian": ("https://ptpimg.me/pp412q.png", "20"), + "german": ("https://ptpimg.me/dw8d04.png", "30x10"), + "greek": ("https://ptpimg.me/px1u3e.png", "20"), + "gujarati": ("https://ptpimg.me/d0l479.png", "20"), + "haitian creole": ("https://ptpimg.me/f64wlp.png", "20"), + "hebrew": ("https://ptpimg.me/5jw1jp.png", "20"), + "hindi": ("https://ptpimg.me/d0l479.png", "20"), + "hungarian": ("https://ptpimg.me/fr4aj7.png", "30x10"), + "icelandic": ("https://ptpimg.me/40o553.png", "20"), + "indonesian": ("https://ptpimg.me/f00c8u.png", "20"), + "irish": ("https://ptpimg.me/71x9mk.png", "20"), + "italian": ("https://ptpimg.me/ao762a.png", "20"), + "japanese": ("https://ptpimg.me/o1amm3.png", "20"), + "kannada": ("https://ptpimg.me/d0l479.png", "20"), + "kazakh": ("https://ptpimg.me/tq1h8b.png", "20"), + "khmer": ("https://ptpimg.me/0p1tli.png", "20"), + "korean": ("https://ptpimg.me/2tvwgn.png", "20"), + "kurdish": ("https://ptpimg.me/g290wo.png", "20"), + "kyrgyz": ("https://ptpimg.me/336unh.png", "20"), + "lao": ("https://ptpimg.me/n3nan1.png", "20"), + "latin american": ("https://ptpimg.me/11350x.png", "20"), + "latvian": ("https://ptpimg.me/3x2y1b.png", "25x10"), + "lithuanian": ("https://ptpimg.me/b444z8.png", "20"), + "luxembourgish": ("https://ptpimg.me/52x189.png", "20"), + "macedonian": ("https://ptpimg.me/2g5lva.png", "20"), + "malagasy": ("https://ptpimg.me/n5120r.png", "20"), + "malay": ("https://ptpimg.me/02e17w.png", "30x10"), + "malayalam": ("https://ptpimg.me/d0l479.png", "20"), + "maltese": ("https://ptpimg.me/ua46c2.png", "20"), + "maori": ("https://ptpimg.me/2fw03g.png", "20"), + "marathi": ("https://ptpimg.me/d0l479.png", "20"), + "mongolian": ("https://ptpimg.me/z2h682.png", "20"), + "nepali": ("https://ptpimg.me/5yd3sp.png", "20"), + "norwegian": ("https://ptpimg.me/1t11u4.png", "20"), + "pashto": ("https://ptpimg.me/i9pt6k.png", "20"), + "persian": ("https://ptpimg.me/i0y103.png", "20"), + "polish": ("https://ptpimg.me/m73uwa.png", "20"), + "portuguese": ("https://ptpimg.me/5j1a7q.png", "20"), + "portuguese (brazil)": 
("https://ptpimg.me/p8sgla.png", "20"), + "punjabi": ("https://ptpimg.me/d0l479.png", "20"), + "romanian": ("https://ptpimg.me/ux94x0.png", "20"), + "russian": ("https://ptpimg.me/v33j64.png", "20"), + "samoan": ("https://ptpimg.me/8nt3zq.png", "20"), + "serbian": ("https://ptpimg.me/2139p2.png", "20"), + "slovak": ("https://ptpimg.me/70994n.png", "20"), + "slovenian": ("https://ptpimg.me/61yp81.png", "25x10"), + "somali": ("https://ptpimg.me/320pa6.png", "20"), + "spanish": ("https://ptpimg.me/xj51b9.png", "20"), + "spanish (latin america)": ("https://ptpimg.me/11350x.png", "20"), + "swahili": ("https://ptpimg.me/d0l479.png", "20"), + "swedish": ("https://ptpimg.me/082090.png", "20"), + "tamil": ("https://ptpimg.me/d0l479.png", "20"), + "telugu": ("https://ptpimg.me/d0l479.png", "20"), + "thai": ("https://ptpimg.me/38ru43.png", "20"), + "turkish": ("https://ptpimg.me/g4jg39.png", "20"), + "ukrainian": ("https://ptpimg.me/d8fp6k.png", "20"), + "urdu": ("https://ptpimg.me/z23gg5.png", "20"), + "uzbek": ("https://ptpimg.me/89854s.png", "20"), + "vietnamese": ("https://ptpimg.me/qnuya2.png", "20"), + "welsh": ("https://ptpimg.me/a9w539.png", "20"), + "xhosa": ("https://ptpimg.me/7teg09.png", "20"), + "yiddish": ("https://ptpimg.me/5jw1jp.png", "20"), + "yoruba": ("https://ptpimg.me/9l34il.png", "20"), + "zulu": ("https://ptpimg.me/7teg09.png", "20") } def parse_mediainfo(self, mediainfo_text): @@ -871,8 +871,8 @@ def parse_mediainfo(self, mediainfo_text): # If there's an exact match in LANGUAGE_CODE_MAP, add country code to language field if title_lower in self.LANGUAGE_CODE_MAP: - country_code = self.LANGUAGE_CODE_MAP[title_lower] - current_track["language"] = f"[img=20]{country_code}[/img]" + country_code, size = self.LANGUAGE_CODE_MAP[title_lower] + current_track["language"] = f"[img={size}]{country_code}[/img]" # print(f"Exact match found for title '{title_lower}' with country code: {country_code}") # Process language field only if it hasn't already been set @@ -881,8 +881,8 @@ def parse_mediainfo(self, mediainfo_text): # print(f"\nProcessing Language: '{property_value}'") # Debugging output if language_lower in self.LANGUAGE_CODE_MAP: - country_code = self.LANGUAGE_CODE_MAP[language_lower] - current_track["language"] = f"[img=20]{country_code}[/img]" + country_code, size = self.LANGUAGE_CODE_MAP[language_lower] + current_track["language"] = f"[img={size}]{country_code}[/img]" # print(f"Matched language '{language_lower}' to country code: {country_code}") else: # If no match in LANGUAGE_CODE_MAP, store language as-is @@ -929,9 +929,9 @@ def format_bbcode(self, parsed_mediainfo): # Language flag image language = track.get("language", "").lower() - country_code = self.LANGUAGE_CODE_MAP.get(language) + country_code, size = self.LANGUAGE_CODE_MAP.get(language) if country_code: - parts.append(f"[img=20]{country_code}[/img]") + parts.append(f"[img={size}]{country_code}[/img]") else: parts.append(language.capitalize() if language else "") From fcae0713394d62e86c2d048008e3c274c3c1f6fb Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 2 Nov 2024 17:08:42 +1000 Subject: [PATCH 420/741] PTP override 0 multiscreens --- src/trackers/PTP.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index f34d6d16e..44de8875a 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -616,6 +616,9 @@ async def edit_desc(self, meta): prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) base = 
open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding="utf-8").read() multi_screens = int(self.config['DEFAULT'].get('multiScreens', 2)) + if multi_screens == 0: + multi_screens = 2 + console.print(f"[yellow]PTP requires screenshots for multi disc/file content, overriding config") with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding="utf-8") as desc: images = meta['image_list'] From 15463fb22ad35d5d8dc35adccec76ee29c1443b5 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 2 Nov 2024 17:12:37 +1000 Subject: [PATCH 421/741] lint --- src/trackers/PTP.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 44de8875a..344d59dac 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -618,7 +618,7 @@ async def edit_desc(self, meta): multi_screens = int(self.config['DEFAULT'].get('multiScreens', 2)) if multi_screens == 0: multi_screens = 2 - console.print(f"[yellow]PTP requires screenshots for multi disc/file content, overriding config") + console.print("[yellow]PTP requires screenshots for multi disc/file content, overriding config") with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding="utf-8") as desc: images = meta['image_list'] From 14ca6af1594d3bfeee9820f1cd60832fe484dfba Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 2 Nov 2024 17:22:15 +1000 Subject: [PATCH 422/741] UNIT3D override 0 multiscreens for discs --- src/trackers/COMMON.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index fbb072f68..8b791a777 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -78,6 +78,8 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des # Handle multiple discs case elif len(discs) > 1: + if multi_screens == 0: + multi_screens = 2 # Initialize retry_count if not already set if 'retry_count' not in meta: meta['retry_count'] = 0 From 7e50153669460d600e3c0c5b0d255c7d2d98d75b Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 2 Nov 2024 17:28:23 +1000 Subject: [PATCH 423/741] Default should match code --- data/example-config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/example-config.py b/data/example-config.py index 5092faa60..41cc70147 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -31,7 +31,7 @@ # Number of screenshots to use for each (ALL) disc/episode when uploading packs to supported sites. # 0 equals old behavior where only the original description and images are added. - "multiScreens": "0", + "multiScreens": "2", # When uploading packs, you can specifiy a different screenshot thumbnail size, default 300. 
"pack_thumb_size": "300", From 152c368bbd1f034d86c970589c0961c7b2c55a80 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 2 Nov 2024 18:45:24 +1000 Subject: [PATCH 424/741] Save meta with uploaded images after uploading --- upload.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/upload.py b/upload.py index 1e03823dc..137edcc6d 100644 --- a/upload.py +++ b/upload.py @@ -184,6 +184,9 @@ async def do_the_thing(base_dir): if meta.get('image_list', False) in (False, []) and meta.get('skip_imghost_upload', False) is False: return_dict = {} meta['image_list'], dummy_var = prep.upload_screens(meta, meta['screens'], 1, 0, meta['screens'], [], return_dict) + meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json" + with open(meta_filename, 'w') as f: + json.dump(meta, f, indent=4) if meta['debug']: console.print(meta['image_list']) # meta['uploaded_screens'] = True From bb7d394122dcece25c711747ea4d8594ab2b6d29 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 2 Nov 2024 19:38:55 +1000 Subject: [PATCH 425/741] Download scene nfo and post as description --- src/prep.py | 104 +++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 82 insertions(+), 22 deletions(-) diff --git a/src/prep.py b/src/prep.py index d930bfb61..54facd759 100644 --- a/src/prep.py +++ b/src/prep.py @@ -390,7 +390,7 @@ async def gather_prep(self, meta, mode): # console.print(f"Debug: meta['filelist'] before population: {meta.get('filelist', 'Not Set')}") if meta['is_disc'] == "BDMV": - video, meta['scene'], meta['imdb'] = self.is_scene(meta['path'], meta.get('imdb', None)) + video, meta['scene'], meta['imdb'] = self.is_scene(meta['path'], meta, meta.get('imdb', None)) meta['filelist'] = [] # No filelist for discs, use path search_term = os.path.basename(meta['path']) search_file_folder = 'folder' @@ -418,7 +418,7 @@ async def gather_prep(self, meta, mode): mi = None elif meta['is_disc'] == "DVD": - video, meta['scene'], meta['imdb'] = self.is_scene(meta['path'], meta.get('imdb', None)) + video, meta['scene'], meta['imdb'] = self.is_scene(meta['path'], meta, meta.get('imdb', None)) meta['filelist'] = [] search_term = os.path.basename(meta['path']) search_file_folder = 'folder' @@ -440,7 +440,7 @@ async def gather_prep(self, meta, mode): meta['sd'] = self.is_sd(meta['resolution']) elif meta['is_disc'] == "HDDVD": - video, meta['scene'], meta['imdb'] = self.is_scene(meta['path'], meta.get('imdb', None)) + video, meta['scene'], meta['imdb'] = self.is_scene(meta['path'], meta, meta.get('imdb', None)) meta['filelist'] = [] search_term = os.path.basename(meta['path']) search_file_folder = 'folder' @@ -464,7 +464,7 @@ async def gather_prep(self, meta, mode): videopath, meta['filelist'] = self.get_video(videoloc, meta.get('mode', 'discord')) search_term = os.path.basename(meta['filelist'][0]) if meta['filelist'] else None search_file_folder = 'file' - video, meta['scene'], meta['imdb'] = self.is_scene(videopath, meta.get('imdb', None)) + video, meta['scene'], meta['imdb'] = self.is_scene(videopath, meta, meta.get('imdb', None)) guess_name = ntpath.basename(video).replace('-', ' ') filename = guessit(re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), {"excludes": ["country", "language"]}).get("title", guessit(re.sub("[^0-9a-zA-Z]+", " ", guess_name), {"excludes": ["country", "language"]})["title"]) untouched_filename = os.path.basename(video) @@ -1170,27 +1170,66 @@ def is_sd(self, resolution): """ Is a scene release? 
""" - def is_scene(self, video, imdb=None): + def is_scene(self, video, meta, imdb=None): scene = False base = os.path.basename(video) base = os.path.splitext(base)[0] base = urllib.parse.quote(base) url = f"https://api.srrdb.com/v1/search/r:{base}" + console.print("srrdb url:", url) + try: response = requests.get(url, timeout=30) - response = response.json() - if int(response.get('resultsCount', 0)) != 0: - video = f"{response['results'][0]['release']}.mkv" + response_json = response.json() + console.print(response_json) + + if int(response_json.get('resultsCount', 0)) > 0: + first_result = response_json['results'][0] + video = f"{first_result['release']}.mkv" scene = True - r = requests.get(f"https://api.srrdb.com/v1/imdb/{base}") - r = r.json() - if r['releases'] != [] and imdb is None: - imdb = r['releases'][0].get('imdb', imdb) if r['releases'][0].get('imdb') is not None else imdb - console.print(f"[green]SRRDB: Matched to {response['results'][0]['release']}") - except Exception: - video = video - scene = False - console.print("[yellow]SRRDB: No match found, or request has timed out") + + # NFO Download Handling + if first_result.get("hasNFO") == "yes": + try: + release = first_result['release'] + release_lower = release.lower() + nfo_url = f"https://www.srrdb.com/download/file/{release}/{release_lower}.nfo" + console.print("nfo url:", nfo_url) + + # Define path and create directory + save_path = os.path.join(meta['base_dir'], 'tmp', meta['uuid']) + os.makedirs(save_path, exist_ok=True) + nfo_file_path = os.path.join(save_path, f"{release_lower}.nfo") + + # Download the NFO file + nfo_response = requests.get(nfo_url, timeout=30) + if nfo_response.status_code == 200: + with open(nfo_file_path, 'wb') as f: + f.write(nfo_response.content) + meta['nfo'] = True + console.print(f"[green]NFO downloaded to {nfo_file_path}") + else: + console.print("[yellow]NFO file not available for download.") + except Exception as e: + console.print("[yellow]Failed to download NFO file:", e) + + # IMDb Handling + try: + r = requests.get(f"https://api.srrdb.com/v1/imdb/{base}") + r = r.json() + + if r['releases'] != [] and imdb is None: + imdb = r['releases'][0].get('imdb', imdb) if r['releases'][0].get('imdb') is not None else imdb + console.print(f"[green]SRRDB: Matched to {first_result['release']}") + except Exception as e: + console.print("[yellow]Failed to fetch IMDb information:", e) + + else: + console.print("[yellow]SRRDB: No match found") + + except Exception as e: + console.print("[yellow]SRRDB: No match found, or request has timed out", e) + return video, scene, imdb """ @@ -3486,10 +3525,10 @@ def clean_text(text): desclink = meta.get('desclink') descfile = meta.get('descfile') + scene_nfo = False with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: description.seek(0) - content_written = False if meta.get('desc_template'): @@ -3504,13 +3543,34 @@ def clean_text(text): except FileNotFoundError: console.print(f"[ERROR] Template '{meta['desc_template']}' not found.") + base_dir = meta['base_dir'] + uuid = meta['uuid'] + current_dir_path = "*.nfo" + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + if meta.get('nfo') and not content_written: - nfo_files = glob.glob("*.nfo") + nfo_files = glob.glob(current_dir_path) + if not nfo_files: + nfo_files = glob.glob(specified_dir_path) + scene_nfo = True + if nfo_files: + console.print("We found nfo") nfo = nfo_files[0] - with open(nfo, 'r', encoding="utf-8") as nfo_file: - 
nfo_content = nfo_file.read() - description.write(f"[code]{nfo_content}[/code]\n") + try: + with open(nfo, 'r', encoding="utf-8") as nfo_file: + nfo_content = nfo_file.read() + console.print("NFO content read with utf-8 encoding.") + except UnicodeDecodeError: + console.print("utf-8 decoding failed, trying latin1.") + with open(nfo, 'r', encoding="latin1") as nfo_file: + nfo_content = nfo_file.read() + console.print("NFO content read with latin1 encoding.") + + if scene_nfo is True: + description.write(f"[center][spoiler=Scene NFO:][code]{nfo_content}[/code][/spoiler][/center]\n") + else: + description.write(f"[code]{nfo_content}[/code]\n") meta['description'] = "CUSTOM" content_written = True From 2f78faa195f223f79aec5a6615d82be436f299b5 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 2 Nov 2024 20:24:56 +1000 Subject: [PATCH 426/741] UNIT3D (blu only currently) strip nfo from description and use upload endpoint --- src/prep.py | 8 ++++---- src/trackers/BLU.py | 15 +++++++++++++++ src/trackers/COMMON.py | 2 ++ 3 files changed, 21 insertions(+), 4 deletions(-) diff --git a/src/prep.py b/src/prep.py index 54facd759..ec5a2c336 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1195,12 +1195,12 @@ def is_scene(self, video, meta, imdb=None): release_lower = release.lower() nfo_url = f"https://www.srrdb.com/download/file/{release}/{release_lower}.nfo" console.print("nfo url:", nfo_url) - + # Define path and create directory save_path = os.path.join(meta['base_dir'], 'tmp', meta['uuid']) os.makedirs(save_path, exist_ok=True) nfo_file_path = os.path.join(save_path, f"{release_lower}.nfo") - + # Download the NFO file nfo_response = requests.get(nfo_url, timeout=30) if nfo_response.status_code == 200: @@ -1217,7 +1217,7 @@ def is_scene(self, video, meta, imdb=None): try: r = requests.get(f"https://api.srrdb.com/v1/imdb/{base}") r = r.json() - + if r['releases'] != [] and imdb is None: imdb = r['releases'][0].get('imdb', imdb) if r['releases'][0].get('imdb') is not None else imdb console.print(f"[green]SRRDB: Matched to {first_result['release']}") @@ -1229,7 +1229,7 @@ def is_scene(self, video, meta, imdb=None): except Exception as e: console.print("[yellow]SRRDB: No match found, or request has timed out", e) - + return video, scene, imdb """ diff --git a/src/trackers/BLU.py b/src/trackers/BLU.py index f88da21bc..b0bddc66f 100644 --- a/src/trackers/BLU.py +++ b/src/trackers/BLU.py @@ -5,6 +5,8 @@ import platform from str2bool import str2bool import bencodepy +import os +import glob from src.trackers.COMMON import COMMON from src.console import console @@ -62,8 +64,21 @@ async def upload(self, meta, disctype): mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[BLU]DESCRIPTION.txt", 'r', encoding='utf-8').read() + + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[BLU]{meta['clean_name']}.torrent", 'rb') files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")} + + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") + data = { 'name': blu_name, 'description': desc, diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 8b791a777..17b8edeb9 100644 --- a/src/trackers/COMMON.py +++ 
b/src/trackers/COMMON.py @@ -55,6 +55,8 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des discs = meta.get('discs', []) filelist = meta.get('filelist', []) desc = base + desc = re.sub(r'\[center\]\[spoiler=Scene NFO:\].*?\[/center\]', '', desc, flags=re.DOTALL) + descfile.write(desc) desc = bbcode.convert_pre_to_code(desc) desc = bbcode.convert_hide_to_spoiler(desc) if comparison is False: From 5536b7d74834052fa6f56126557fc9bddae2cf62 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 2 Nov 2024 20:39:10 +1000 Subject: [PATCH 427/741] Add other support sites for nfo --- src/trackers/AITHER.py | 11 +++++++++++ src/trackers/AL.py | 11 +++++++++++ src/trackers/CBR.py | 11 +++++++++++ src/trackers/FNP.py | 11 +++++++++++ src/trackers/LCD.py | 11 +++++++++++ src/trackers/LST.py | 11 +++++++++++ src/trackers/LT.py | 11 +++++++++++ src/trackers/OTW.py | 11 +++++++++++ src/trackers/PSS.py | 11 +++++++++++ src/trackers/R4E.py | 11 +++++++++++ src/trackers/RF.py | 11 +++++++++++ src/trackers/SHRI.py | 11 +++++++++++ src/trackers/STC.py | 11 +++++++++++ src/trackers/STT.py | 11 +++++++++++ src/trackers/TL.py | 2 +- src/trackers/ULCX.py | 11 +++++++++++ src/trackers/UTP.py | 11 +++++++++++ 17 files changed, 177 insertions(+), 1 deletion(-) diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 761fd34c3..88a4d90c7 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -6,6 +6,8 @@ import platform import re import bencodepy +import os +import glob from src.trackers.COMMON import COMMON from src.console import console @@ -56,6 +58,15 @@ async def upload(self, meta, disctype): desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") data = { 'name': name, 'description': desc, diff --git a/src/trackers/AL.py b/src/trackers/AL.py index 50c0f1d53..a0b45caad 100644 --- a/src/trackers/AL.py +++ b/src/trackers/AL.py @@ -5,6 +5,8 @@ import platform from str2bool import str2bool import bencodepy +import os +import glob from src.trackers.COMMON import COMMON from src.console import console @@ -94,6 +96,15 @@ async def upload(self, meta, disctype): desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") data = { 'name': name, 'description': desc, diff --git a/src/trackers/CBR.py b/src/trackers/CBR.py index e7c3c2407..79d98d003 100644 --- a/src/trackers/CBR.py +++ b/src/trackers/CBR.py @@ -5,6 +5,8 @@ from str2bool import str2bool import platform import bencodepy +import os +import glob from src.trackers.COMMON import COMMON from src.console 
import console @@ -53,6 +55,15 @@ async def upload(self, meta, disctype): desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[CBR]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[CBR]{meta['clean_name']}.torrent", 'rb') files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") data = { 'name': name, 'description': desc, diff --git a/src/trackers/FNP.py b/src/trackers/FNP.py index 9d9baf50e..9802d9508 100644 --- a/src/trackers/FNP.py +++ b/src/trackers/FNP.py @@ -5,6 +5,8 @@ from str2bool import str2bool import platform import bencodepy +import os +import glob from src.trackers.COMMON import COMMON from src.console import console @@ -86,6 +88,15 @@ async def upload(self, meta, disctype): desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") data = { 'name': meta['name'], 'description': desc, diff --git a/src/trackers/LCD.py b/src/trackers/LCD.py index d4c0a1d47..450b05140 100644 --- a/src/trackers/LCD.py +++ b/src/trackers/LCD.py @@ -5,6 +5,8 @@ import platform from str2bool import str2bool import bencodepy +import os +import glob from src.trackers.COMMON import COMMON from src.console import console @@ -53,6 +55,15 @@ async def upload(self, meta, disctype): desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[LCD]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[LCD]{meta['clean_name']}.torrent", 'rb') files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") data = { 'name': name, 'description': desc, diff --git a/src/trackers/LST.py b/src/trackers/LST.py index bcc54123e..09da2581a 100644 --- a/src/trackers/LST.py +++ b/src/trackers/LST.py @@ -5,6 +5,8 @@ import platform from str2bool import str2bool import bencodepy +import os +import glob from src.trackers.COMMON import COMMON from src.console import console @@ -103,6 +105,15 @@ async def upload(self, meta, disctype): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", 
nfo_file, "text/plain") data = { 'name': meta['name'], 'description': desc, diff --git a/src/trackers/LT.py b/src/trackers/LT.py index 100af0a0b..f748af0ee 100644 --- a/src/trackers/LT.py +++ b/src/trackers/LT.py @@ -5,6 +5,8 @@ import platform from str2bool import str2bool import bencodepy +import os +import glob from src.trackers.COMMON import COMMON from src.console import console @@ -111,6 +113,15 @@ async def upload(self, meta, disctype): desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") data = { 'name': lt_name, 'description': desc, diff --git a/src/trackers/OTW.py b/src/trackers/OTW.py index 1835c10d0..16e00cfcd 100644 --- a/src/trackers/OTW.py +++ b/src/trackers/OTW.py @@ -5,6 +5,8 @@ from str2bool import str2bool import platform import bencodepy +import os +import glob from src.trackers.COMMON import COMMON from src.console import console @@ -86,6 +88,15 @@ async def upload(self, meta, disctype): desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") data = { 'name': meta['name'], 'description': desc, diff --git a/src/trackers/PSS.py b/src/trackers/PSS.py index e6abcb194..a25c89d01 100644 --- a/src/trackers/PSS.py +++ b/src/trackers/PSS.py @@ -5,6 +5,8 @@ import platform from str2bool import str2bool import bencodepy +import os +import glob from src.trackers.COMMON import COMMON from src.console import console @@ -88,6 +90,15 @@ async def upload(self, meta, disctype): desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") data = { 'name': meta['name'], 'description': desc, diff --git a/src/trackers/R4E.py b/src/trackers/R4E.py index c2f73f509..6d6717c64 100644 --- a/src/trackers/R4E.py +++ b/src/trackers/R4E.py @@ -6,6 +6,8 @@ import tmdbsimple as tmdb import platform import bencodepy +import os +import glob from src.trackers.COMMON import COMMON from src.console import console @@ -48,6 +50,15 @@ async def upload(self, meta, disctype): desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[R4E]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = 
open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[R4E]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") data = { 'name': name, 'description': desc, diff --git a/src/trackers/RF.py b/src/trackers/RF.py index a502c1580..64f73a4a3 100644 --- a/src/trackers/RF.py +++ b/src/trackers/RF.py @@ -6,6 +6,8 @@ import re from str2bool import str2bool import bencodepy +import os +import glob from src.trackers.COMMON import COMMON from src.console import console @@ -56,6 +58,15 @@ async def upload(self, meta, disctype): desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") data = { 'name': rf_name, 'description': desc, diff --git a/src/trackers/SHRI.py b/src/trackers/SHRI.py index ff254cd25..e93837956 100644 --- a/src/trackers/SHRI.py +++ b/src/trackers/SHRI.py @@ -5,6 +5,8 @@ from str2bool import str2bool import platform import bencodepy +import os +import glob from src.trackers.COMMON import COMMON from src.console import console @@ -86,6 +88,15 @@ async def upload(self, meta, disctype): desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") data = { 'name': meta['name'], 'description': desc, diff --git a/src/trackers/STC.py b/src/trackers/STC.py index 8451a5265..6acdf103d 100644 --- a/src/trackers/STC.py +++ b/src/trackers/STC.py @@ -4,6 +4,8 @@ from str2bool import str2bool import platform import bencodepy +import os +import glob from src.trackers.COMMON import COMMON from src.console import console @@ -48,6 +50,15 @@ async def upload(self, meta, disctype): desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") data = { 'name': stc_name, 'description': desc, diff --git a/src/trackers/STT.py b/src/trackers/STT.py index 008dd8adb..4715864e8 100644 --- 
a/src/trackers/STT.py +++ b/src/trackers/STT.py @@ -5,6 +5,8 @@ from str2bool import str2bool import platform import bencodepy +import os +import glob from src.trackers.COMMON import COMMON from src.console import console @@ -49,6 +51,15 @@ async def upload(self, meta, disctype): desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") data = { 'name': stt_name, 'description': desc, diff --git a/src/trackers/TL.py b/src/trackers/TL.py index f563a6839..234dcf2b3 100644 --- a/src/trackers/TL.py +++ b/src/trackers/TL.py @@ -79,7 +79,7 @@ async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(common, meta) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) + # await common.unit3d_edit_desc(meta, self.tracker, self.signature) open_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'a+', encoding='utf-8') diff --git a/src/trackers/ULCX.py b/src/trackers/ULCX.py index ee708cb47..cb5a3eed1 100644 --- a/src/trackers/ULCX.py +++ b/src/trackers/ULCX.py @@ -5,6 +5,8 @@ import platform from str2bool import str2bool import bencodepy +import os +import glob from src.trackers.COMMON import COMMON from src.console import console @@ -81,6 +83,15 @@ async def upload(self, meta, disctype): desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") data = { 'name': meta['name'], 'description': desc, diff --git a/src/trackers/UTP.py b/src/trackers/UTP.py index c962efe27..b59993b5d 100644 --- a/src/trackers/UTP.py +++ b/src/trackers/UTP.py @@ -5,6 +5,8 @@ from str2bool import str2bool import platform import bencodepy +import os +import glob from src.trackers.COMMON import COMMON from src.console import console @@ -52,6 +54,15 @@ async def upload(self, meta, disctype): desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[UTOPIA]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[UTOPIA]{meta['clean_name']}.torrent", 'rb') files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") data = { 'name': meta['name'], 'description': desc, From 
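The identical NFO-attachment block above is pasted into each tracker's upload(). A shared helper along these lines could hold the logic in one place (a sketch only; the helper name and location are assumptions, not part of this patch series):

import glob
import os

def attach_nfo(files, meta):
    # Attach the first .nfo found in this upload's tmp directory, if any.
    pattern = os.path.join(meta['base_dir'], "tmp", meta['uuid'], "*.nfo")
    nfo_files = glob.glob(pattern)
    if nfo_files:
        files['nfo'] = ("nfo_file.nfo", open(nfo_files[0], 'rb'), "text/plain")
    return files

Each tracker's upload() would then call attach_nfo(files, meta) before posting its data payload.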
debe4ea270eb538e8fe56c318a04c5b3a3f7fd1d Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 2 Nov 2024 22:12:58 +1000 Subject: [PATCH 428/741] Remove console logs --- src/prep.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/prep.py b/src/prep.py index ec5a2c336..140fab1a8 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1176,12 +1176,10 @@ def is_scene(self, video, meta, imdb=None): base = os.path.splitext(base)[0] base = urllib.parse.quote(base) url = f"https://api.srrdb.com/v1/search/r:{base}" - console.print("srrdb url:", url) try: response = requests.get(url, timeout=30) response_json = response.json() - console.print(response_json) if int(response_json.get('resultsCount', 0)) > 0: first_result = response_json['results'][0] @@ -1194,7 +1192,6 @@ def is_scene(self, video, meta, imdb=None): release = first_result['release'] release_lower = release.lower() nfo_url = f"https://www.srrdb.com/download/file/{release}/{release_lower}.nfo" - console.print("nfo url:", nfo_url) # Define path and create directory save_path = os.path.join(meta['base_dir'], 'tmp', meta['uuid']) From 52bbbb785fe91b9f3a4152f88db73e08847c2edf Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 2 Nov 2024 22:16:55 +1000 Subject: [PATCH 429/741] Add option to limit total files processed in packs for descriptions --- data/example-config.py | 3 +++ src/trackers/COMMON.py | 16 ++++++++++------ 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/data/example-config.py b/data/example-config.py index 41cc70147..8413d8c81 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -49,6 +49,9 @@ # Any other files past this limit will be hidden/added all within a spoiler tag. "fileLimit": "2", + # Absolute limit on processed files in packs. You might not want to upload images for a large number of episodes + "processLimit": "10", + # Providing the option to add a header, in bbcode, above the screenshot section where supported # "screenshot_header": "[centers] SCREENSHOTS [/center]" diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 17b8edeb9..09fbbee09 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -47,6 +47,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des char_limit = int(self.config['DEFAULT'].get('charLimit', 14000)) file_limit = int(self.config['DEFAULT'].get('fileLimit', 5)) thumb_size = int(self.config['DEFAULT'].get('pack_thumb_size', '300')) + process_limit = int(self.config['DEFAULT'].get('processLimit', 10)) with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]DESCRIPTION.txt", 'w', encoding='utf8') as descfile: if desc_header: descfile.write(desc_header) @@ -55,7 +56,10 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des discs = meta.get('discs', []) filelist = meta.get('filelist', []) desc = base + # Regular expression to find and remove the specific spoiler section wrapped with [center] tags desc = re.sub(r'\[center\]\[spoiler=Scene NFO:\].*?\[/center\]', '', desc, flags=re.DOTALL) + + # Writing the cleaned description to the file descfile.write(desc) desc = bbcode.convert_pre_to_code(desc) desc = bbcode.convert_hide_to_spoiler(desc) @@ -216,6 +220,9 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des # First Pass: Create and Upload Images for Each File for i, file in enumerate(filelist): + if i >= process_limit: + # console.print("[yellow]Skipping processing more files as they exceed the process limit.") + continue if multi_screens != 0: if i > 
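# The processLimit gate above, in isolation (illustrative values, not part of
# the diff): example-config.py stores the limit as a string, so it is coerced
# with int() before being compared against the enumerate() index.
process_limit = int("10")   # mirrors int(config['DEFAULT'].get('processLimit', 10))
for i, _ in enumerate(range(12)):
    if i >= process_limit:
        continue            # files past the limit get no screenshots at all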
0: new_images_key = f'new_images_file_{i}' @@ -254,6 +261,8 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des # Second Pass: Process MediaInfo and Write Descriptions if len(filelist) > 1: for i, file in enumerate(filelist): + if i >= process_limit: + continue # Extract filename directly from the file path filename = os.path.splitext(os.path.basename(file.strip()))[0] @@ -282,14 +291,9 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des if i == 0: # For the first file, use 'image_list' key images = meta['image_list'] if images: - # Process all images if multi_screens is 0 or set multi_screens as usual - if file_limit == 1 or multi_screens == 0: - single_screens = len(images) # Use all images if only one file or if multi_screens is 0 - else: - single_screens = multi_screens descfile.write("[center]") char_count += len("[center]") - for img_index in range(min(single_screens, len(images))): + for img_index in range(len(images)): web_url = images[img_index]['web_url'] raw_url = images[img_index]['raw_url'] image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" From 3a444a34ac4fcdb00fc8500233980d434a972a70 Mon Sep 17 00:00:00 2001 From: xzin Date: Sat, 2 Nov 2024 10:15:28 -0500 Subject: [PATCH 430/741] regex matching for when --daily flag isn't supplied --- src/prep.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/prep.py b/src/prep.py index a86df7c96..86a758eef 100644 --- a/src/prep.py +++ b/src/prep.py @@ -3124,14 +3124,21 @@ async def get_season_episode(self, video, meta): is_daily = False if meta['anime'] is False: try: - if meta.get('manual_date'): + daily_match = re.search(r"\d{4}[-\.]\d{2}[-\.]\d{2}", video) + if meta.get('manual_date') or daily_match: + # Handle daily episodes + # The user either provided the --daily argument or a date was found in the filename + + if meta.get('manual_date') is None and daily_match is not None: + meta['manual_date'] = daily_match.group().replace('.', '-') is_daily = True guess_date = meta.get('manual_date', guessit(video)['date']) if meta.get('manual_date') else guessit(video)['date'] season_int, episode_int = self.daily_to_tmdb_season_episode(meta.get('tmdb'), guess_date) - # For --daily flagged shows, pass the supplied date as the episode title season = f"S{str(season_int).zfill(2)}" - episode = f"E{str(episode_int).zfill(2)}" + episode = f"E{str(episode_int).zfill(2)}" + # For daily shows, pass the supplied date as the episode title + # Season and episode will be stripped later to conform with standard daily episode naming format meta['episode_title'] = meta.get('manual_date') else: From d2f9f52b5d69ad502579bde45b7359398c94c680 Mon Sep 17 00:00:00 2001 From: xzin Date: Sat, 2 Nov 2024 10:22:24 -0500 Subject: [PATCH 431/741] adding tvmaze to confirmation output --- src/prep.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/prep.py b/src/prep.py index 86a758eef..78066a91e 100644 --- a/src/prep.py +++ b/src/prep.py @@ -3549,6 +3549,8 @@ async def package(self, meta): generic.write(f"IMDb: https://www.imdb.com/title/tt{meta['imdb_id']}\n") if meta['tvdb_id'] != "0": generic.write(f"TVDB: https://www.thetvdb.com/?id={meta['tvdb_id']}&tab=series\n") + if meta['tvmaze_id'] != "0": + generic.write(f"TVMaze: https://www.tvmaze.com/shows/{meta['tvmaze_id']}\n") poster_img = f"{meta['base_dir']}/tmp/{meta['uuid']}/POSTER.png" if meta.get('poster', None) not in ['', None] and not os.path.exists(poster_img): if 
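# Quick check of the PATCH 430 date matching (illustrative filename, not part
# of the diff): a dotted date in the name is normalized to the dashed form
# that the --daily flag would have supplied.
import re
daily_match = re.search(r"\d{4}[-\.]\d{2}[-\.]\d{2}", "Show.2024.01.01.720p.WEB.mkv")
assert daily_match.group().replace('.', '-') == "2024-01-01"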
meta.get('rehosted_poster', None) is None: From d023693e8eeb4c11e2e9994e9e8fa9ac154333f5 Mon Sep 17 00:00:00 2001 From: xzin Date: Sat, 2 Nov 2024 10:25:33 -0500 Subject: [PATCH 432/741] add tvmaze to cli output --- upload.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/upload.py b/upload.py index 1e03823dc..4892cf895 100644 --- a/upload.py +++ b/upload.py @@ -530,6 +530,8 @@ def get_confirmation(meta): cli_ui.info(f"IMDB: https://www.imdb.com/title/tt{meta['imdb_id']}") if int(meta.get('tvdb_id', '0')) != 0: cli_ui.info(f"TVDB: https://www.thetvdb.com/?id={meta['tvdb_id']}&tab=series") + if int(meta.get('tvmaze_id', '0')) != 0: + cli_ui.info(f"TVMaze: https://www.tvmaze.com/shows/{meta['tvmaze_id']}") if int(meta.get('mal_id', 0)) != 0: cli_ui.info(f"MAL : https://myanimelist.net/anime/{meta['mal_id']}") console.print() From 442ad2346edd4487e0aff31f6c6772481ba2b419 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 3 Nov 2024 10:44:31 +1000 Subject: [PATCH 433/741] Assign language lookup result first, then check it fixes https://github.com/Audionut/Upload-Assistant/issues/117 --- src/trackers/COMMON.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 09fbbee09..c4b575d62 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -937,11 +937,15 @@ def format_bbcode(self, parsed_mediainfo): # Language flag image language = track.get("language", "").lower() - country_code, size = self.LANGUAGE_CODE_MAP.get(language) - if country_code: + result = self.LANGUAGE_CODE_MAP.get(language) + + # Check if the language was found in LANGUAGE_CODE_MAP + if result is not None: + country_code, size = result parts.append(f"[img={size}]{country_code}[/img]") else: - parts.append(language.capitalize() if language else "") + # If language is not found, use a fallback or display the language as plain text + parts.append(language.capitalize() if language else "Unknown") # Other properties to concatenate properties = ["language", "codec", "format", "channels", "bit_rate", "format_profile", "stream_size"] From 77b62fda87dfa9f23911782a564d0bf1c31d0283 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 3 Nov 2024 10:57:57 +1000 Subject: [PATCH 434/741] Blank language if not present --- src/trackers/COMMON.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index c4b575d62..66d212f34 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -945,7 +945,7 @@ def format_bbcode(self, parsed_mediainfo): parts.append(f"[img={size}]{country_code}[/img]") else: # If language is not found, use a fallback or display the language as plain text - parts.append(language.capitalize() if language else "Unknown") + parts.append(language.capitalize() if language else "") # Other properties to concatenate properties = ["language", "codec", "format", "channels", "bit_rate", "format_profile", "stream_size"] From 196d0f917cacffe5f9daa2d864d21609d6638fc1 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 3 Nov 2024 16:43:41 +1000 Subject: [PATCH 435/741] lint --- src/trackers/COMMON.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 66d212f34..f33b700eb 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -938,7 +938,7 @@ def format_bbcode(self, parsed_mediainfo): # Language flag image language = track.get("language", "").lower() result = self.LANGUAGE_CODE_MAP.get(language) - +
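# Why the pre-PATCH-433 lookup crashed on unmapped languages (the map entry is
# copied from this file; the unknown key is hypothetical): dict.get() returns
# None on a miss, and unpacking None raises TypeError.
LANGUAGE_CODE_MAP = {"english": ("https://ptpimg.me/ine2fd.png", "25x10")}
result = LANGUAGE_CODE_MAP.get("klingon")   # -> None
# country_code, size = result              # TypeError: cannot unpack non-iterable NoneType
if result is not None:                      # the fix: check before unpacking
    country_code, size = result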
# Check if the language was found in LANGUAGE_CODE_MAP if result is not None: country_code, size = result From 9f5b6990a19f8c4ee04f745c19b998fa6c8cdf8d Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 4 Nov 2024 08:50:48 +1000 Subject: [PATCH 436/741] Handle exception when date is missing from guessit BLU > /torrents/241849 with --daily 2024-01-01 --- src/prep.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index 78066a91e..895b75036 100644 --- a/src/prep.py +++ b/src/prep.py @@ -3132,7 +3132,7 @@ async def get_season_episode(self, video, meta): if meta.get('manual_date') is None and daily_match is not None: meta['manual_date'] = daily_match.group().replace('.', '-') is_daily = True - guess_date = meta.get('manual_date', guessit(video)['date']) if meta.get('manual_date') else guessit(video)['date'] + guess_date = meta.get('manual_date', guessit(video).get('date')) if meta.get('manual_date') else guessit(video).get('date') season_int, episode_int = self.daily_to_tmdb_season_episode(meta.get('tmdb'), guess_date) season = f"S{str(season_int).zfill(2)}" @@ -3140,7 +3140,7 @@ async def get_season_episode(self, video, meta): # For daily shows, pass the supplied date as the episode title # Season and episode will be stripped later to conform with standard daily episode naming format meta['episode_title'] = meta.get('manual_date') - + else: try: guess_year = guessit(video)['year'] From 63264d4c550ab9488d53853cb79b5115819d8c79 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 4 Nov 2024 14:46:06 +1000 Subject: [PATCH 437/741] ANT - fix no audio with blu-ray --- src/trackers/ANT.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index eb0addc76..6e3c24277 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -87,7 +87,11 @@ async def upload(self, meta, disctype): bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() bd_dump = f'[spoiler=BDInfo][pre]{bd_dump}[/pre][/spoiler]' path = os.path.join(meta['bdinfo']['path'], 'STREAM') - file_name = meta['bdinfo']['files'][0]['file'].lower() + longest_file = max( + meta['bdinfo']['files'], + key=lambda x: x.get('length', 0) + ) + file_name = longest_file['file'].lower() m2ts = os.path.join(path, file_name) media_info_output = str(MediaInfo.parse(m2ts, output="text", full=False)) mi_dump = media_info_output.replace('\r\n', '\n') From 971ec29266ce8dd852bf1e09a19c961e90d810e2 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 5 Nov 2024 07:37:04 +1000 Subject: [PATCH 438/741] Don't blindly use video meta if season pack --- src/trackers/ANT.py | 2 +- src/trackers/MTV.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index 6e3c24277..83169b6ca 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -88,7 +88,7 @@ async def upload(self, meta, disctype): bd_dump = f'[spoiler=BDInfo][pre]{bd_dump}[/pre][/spoiler]' path = os.path.join(meta['bdinfo']['path'], 'STREAM') longest_file = max( - meta['bdinfo']['files'], + meta['bdinfo']['files'], key=lambda x: x.get('length', 0) ) file_name = longest_file['file'].lower() diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 199490a70..b3d873be1 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -319,7 +319,8 @@ async def edit_group_desc(self, meta): async def edit_name(self, meta): if meta['scene'] is True: - mtv_name = meta['video'] + if meta['tv_pack'] != 0: +
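# Illustrative contrast for the PATCH 436 fix (hypothetical filename; guessit
# results behave like a dict): .get() degrades to None instead of raising.
from guessit import guessit
info = guessit("Show.S01E01.720p.WEB.mkv")   # no date in this name
guess_date = info.get('date')                # -> None, handled by the caller
# info['date'] would raise KeyError here, aborting the daily-episode path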
mtv_name = meta['video'] else: mtv_name = meta['name'] if meta.get('type') in ('WEBDL', 'WEBRIP', 'ENCODE') and "DD" in meta['audio']: From 7e977f967e028e690aa39f0a1aa4f67b7684299d Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 5 Nov 2024 15:56:55 +1000 Subject: [PATCH 439/741] Last ditch effort to find progressive marker based on HUNO naming fun and games --- src/prep.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index c03e4cabc..c342132d1 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1089,7 +1089,12 @@ def get_resolution(self, guess, folder_id, base_dir): elif framerate == "25.000": scan = "p" else: - scan = "i" + # Fallback using regex on meta['uuid'] - mainly for HUNO fun and games. + match = re.search(r'\b(1080p|720p|2160p)\b', folder_id, re.IGNORECASE) + if match: + scan = "p" # Assume progressive based on common resolution markers + else: + scan = "i" # Default to interlaced if no indicators are found width_list = [3840, 2560, 1920, 1280, 1024, 854, 720, 15360, 7680, 0] height_list = [2160, 1440, 1080, 720, 576, 540, 480, 8640, 4320, 0] width = self.closest(width_list, int(width)) From 9eb5812d4c26208006eaea1a5bfc77670a7a999b Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 5 Nov 2024 17:07:21 +1000 Subject: [PATCH 440/741] Fix uncommenting screenshot header --- data/example-config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/example-config.py b/data/example-config.py index 8413d8c81..7e9d9855d 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -53,7 +53,7 @@ "processLimit": "10", # Providing the option to add a header, in bbcode, above the screenshot section where supported - # "screenshot_header": "[centers] SCREENSHOTS [/center]" + # "screenshot_header": "[centers] SCREENSHOTS [/center]", # Enable lossless PNG Compression (True/False) "optimize_images": True, From 41068c7fbf9a86d381fdbbb1e5a25162a37f0921 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 5 Nov 2024 21:51:28 +1000 Subject: [PATCH 441/741] Filter out non-repack from dupe checking when uploading repack --- src/trackers/COMMON.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index f33b700eb..89314e0e0 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -616,8 +616,8 @@ async def filter_dupes(self, dupes, meta): types_to_check = {'REMUX', 'WEBDL', 'WEBRip', 'HDTV'} normalized_meta_type = {t.replace('-', '').upper() for t in meta['type']} if isinstance(meta['type'], list) else {meta['type'].replace('-', '').upper()} - file_type_present = {t for t in types_to_check if t in normalized_meta_type} + has_repack_in_uuid = "repack" in meta['uuid'].lower() if meta.get('uuid') else False for each in dupes: if meta.get('sd', 0) == 1: @@ -666,10 +666,9 @@ async def filter_dupes(self, dupes, meta): dupe_type_matches = {t for t in types_to_check if t in normalized_each_type} if file_type_present: - # Allow WEB-DL and similar matches if types are related (e.g., WEB-DL vs AMZN WEB-DL) if 'WEBDL' in normalized_meta_type and 'WEBDL' in normalized_each_type: - console.log(f"[green]Allowing result we will catch later: {each}") - # Allow based on matching resolution, HDR, and audio despite type mismatch + if meta['debug']: + console.log(f"[green]Allowing result we will catch later: {each}") elif meta['resolution'] in each and meta['hdr'] in each and meta['audio'] in each: if meta['debug']: console.log(f"[green]Allowing result we will 
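# The PATCH 439 fallback in isolation (hypothetical folder name, not part of
# the diff): when ScanType is absent and the framerate is inconclusive, a
# resolution marker in the name is taken as evidence of progressive video.
import re
folder_id = "Movie.2020.1080p.BluRay.x264-GRP"
scan = "p" if re.search(r'\b(1080p|720p|2160p)\b', folder_id, re.IGNORECASE) else "i"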
catch later: {each}") @@ -683,6 +682,12 @@ async def filter_dupes(self, dupes, meta): console.log(f"[red]Excluding extra result with new type match: {each}") continue + # If "repack" is in the UUID, only keep results that also contain "repack" + if has_repack_in_uuid and "repack" not in each.lower(): + if meta['debug']: + console.log(f"[yellow]Excluding result because it lacks 'repack': {each}") + continue + for s in search_combos: if s.get('search_for') not in (None, ''): if any(re.search(x, s['search'], flags=re.IGNORECASE) for x in s['search_for']): @@ -715,6 +720,7 @@ async def filter_dupes(self, dupes, meta): allow = False if allow and each not in new_dupes: new_dupes.append(each) + return new_dupes class MediaInfoParser: From 3e8240e46cf78f1abdd30a8bc4dbae189a61cf38 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 5 Nov 2024 22:15:24 +1000 Subject: [PATCH 442/741] Add arg 'uac', must be used in conjuction with -ua Should work like unattended, but ask if a dupe was found instead of just aborting. --- src/args.py | 1 + upload.py | 12 ++++++------ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/src/args.py b/src/args.py index 4eeacfcad..2c36bed47 100644 --- a/src/args.py +++ b/src/args.py @@ -90,6 +90,7 @@ def parse(self, args, meta): parser.add_argument('-tk', '--trackers', nargs='*', required=False, help="Upload to these trackers, space seperated (--trackers blu bhd)") parser.add_argument('-rt', '--randomized', nargs='*', required=False, help="Number of extra, torrents with random infohash", default=0) parser.add_argument('-ua', '--unattended', action='store_true', required=False, help=argparse.SUPPRESS) + parser.add_argument('-uac', '--unattended-confirm', action='store_true', required=False, help=argparse.SUPPRESS) parser.add_argument('-vs', '--vapoursynth', action='store_true', required=False, help="Use vapoursynth for screens (requires vs install)") parser.add_argument('-cleanup', '--cleanup', action='store_true', required=False, help="Clean up tmp directory") parser.add_argument('-fl', '--freeleech', nargs='*', required=False, help="Freeleech Percentage", default=0, dest="freeleech") diff --git a/upload.py b/upload.py index 64ceefbf6..151b896a9 100644 --- a/upload.py +++ b/upload.py @@ -592,7 +592,12 @@ def dupe_check(dupes, meta): console.print() cli_ui.info_section(cli_ui.bold, "Check if these are actually dupes!") cli_ui.info(dupe_text) - if meta['unattended']: + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): + if meta.get('dupe', False) is False: + upload = cli_ui.ask_yes_no("Upload Anyways?", default=False) + else: + upload = True + else: if meta.get('dupe', False) is False: console.print("[red]Found potential dupes. Aborting. If this is not a dupe, or you would like to upload anyways, pass --skip-dupe-check") upload = False @@ -600,11 +605,6 @@ def dupe_check(dupes, meta): console.print("[yellow]Found potential dupes. --skip-dupe-check was passed. 
Uploading anyways") upload = True console.print() - if not meta['unattended']: - if meta.get('dupe', False) is False: - upload = cli_ui.ask_yes_no("Upload Anyways?", default=False) - else: - upload = True if upload is False: meta['upload'] = False else: From 426aadf0c9b2e7c464a998da1b1010d6a04dbab8 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 5 Nov 2024 22:31:33 +1000 Subject: [PATCH 443/741] Add GB flag --- src/trackers/COMMON.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index f33b700eb..a607702d4 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -740,6 +740,7 @@ class MediaInfoParser: "dutch": ("https://ptpimg.me/6nmwpx.png", "20"), "dzongkha": ("https://ptpimg.me/56e7y5.png", "20"), "english": ("https://ptpimg.me/ine2fd.png", "25x10"), + "english (gb)": ("https://ptpimg.me/a9w539.png", "20"), "estonian": ("https://ptpimg.me/z25pmk.png", "20"), "filipino": ("https://ptpimg.me/9d3z9w.png", "20"), "finnish": ("https://ptpimg.me/p4354c.png", "20"), From 611629a7f19f859a47cdb3e6e01b988e049df809 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 5 Nov 2024 22:45:31 +1000 Subject: [PATCH 444/741] Don't double write descriptions --- src/trackers/COMMON.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index a607702d4..f5c0c2daf 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -58,9 +58,6 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des desc = base # Regular expression to find and remove the specific spoiler section wrapped with [center] tags desc = re.sub(r'\[center\]\[spoiler=Scene NFO:\].*?\[/center\]', '', desc, flags=re.DOTALL) - - # Writing the cleaned description to the file - descfile.write(desc) desc = bbcode.convert_pre_to_code(desc) desc = bbcode.convert_hide_to_spoiler(desc) if comparison is False: From a61f693db8afb680d06b0e9a9ab1e826197073e3 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 5 Nov 2024 22:53:08 +1000 Subject: [PATCH 445/741] Filter group image being caught as screenshot --- src/bbcode.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/bbcode.py b/src/bbcode.py index 485f678f6..07cb7ff1c 100644 --- a/src/bbcode.py +++ b/src/bbcode.py @@ -209,6 +209,7 @@ def clean_unit3d_description(self, desc, site): "https://blutopia.xyz/favicon.ico", # Example bot image URL "https://i.ibb.co/2NVWb0c/uploadrr.webp", "https://blutopia/favicon.ico", + "https://ptpimg.me/606tk4.png", # Add any other known bot image URLs here ] imagelist = [ From 0f2c66fc7a64c246002a3bf550fb1f523c01cf14 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 7 Nov 2024 08:16:10 +1000 Subject: [PATCH 446/741] Save name from srrdb and use at MTV --- src/prep.py | 1 + src/trackers/MTV.py | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index c342132d1..c06b9212a 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1188,6 +1188,7 @@ def is_scene(self, video, meta, imdb=None): if int(response_json.get('resultsCount', 0)) > 0: first_result = response_json['results'][0] + meta['scene_name'] = first_result['release'] video = f"{first_result['release']}.mkv" scene = True diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index b3d873be1..af9b93682 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -319,8 +319,10 @@ async def edit_group_desc(self, meta): async def edit_name(self, meta): if meta['scene'] is True: - if meta['tv_pack'] != 0: - mtv_name = meta['video'] + if 
meta.get('scene_name') != "": + mtv_name = meta.get('scene_name') + else: + mtv_name = meta['uuid'] else: mtv_name = meta['name'] if meta.get('type') in ('WEBDL', 'WEBRIP', 'ENCODE') and "DD" in meta['audio']: From 07fcdecb8801523970f5c03811f85690483956f2 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 7 Nov 2024 08:55:18 +1000 Subject: [PATCH 447/741] Aither salt 'n' pepper fixes https://github.com/Audionut/Upload-Assistant/issues/119 --- src/trackers/AITHER.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 88a4d90c7..a3005520f 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -138,6 +138,7 @@ async def edit_name(self, meta): source = meta.get('source', "") if not meta['is_disc']: + def has_english_audio(tracks=None, media_info_text=None): if media_info_text: audio_section = re.findall(r'Audio[\s\S]+?Language\s+:\s+(\w+)', media_info_text) @@ -162,12 +163,15 @@ def get_audio_lang(tracks=None, is_bdmv=False, media_info_text=None): if not has_english_audio(media_info_text=media_info_text): audio_lang = get_audio_lang(media_info_text=media_info_text) if audio_lang: - aither_name = aither_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) + if (name_type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD")): + aither_name = aither_name.replace(str(meta['year']), f"{meta['year']} {audio_lang}", 1) + else: + aither_name = aither_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) except (FileNotFoundError, KeyError) as e: print(f"Error processing MEDIAINFO.txt: {e}") if meta['is_disc'] == "DVD" or (name_type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD")): - aither_name = aither_name.replace(str(meta['year']), f"{meta['year']} {resolution}", 1) + aither_name = aither_name.replace((meta['source']), f"{resolution} {meta['source']}", 1) aither_name = aither_name.replace((meta['audio']), f"{video_codec} {meta['audio']}", 1) if meta['category'] == "TV" and meta.get('tv_pack', 0) == 0 and meta.get('episode_title_storage', '').strip() != '' and meta['episode'].strip() != '': From d4d260b7d94b0f41b9122ada05dd5ba07595d35f Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 7 Nov 2024 23:11:02 +1000 Subject: [PATCH 448/741] MTV - don't fail if keeping links from non-supported hosts --- src/prep.py | 4 +- src/trackers/MTV.py | 129 ++++++++++++++++++++++++++++++-------------- 2 files changed, 92 insertions(+), 41 deletions(-) diff --git a/src/prep.py b/src/prep.py index c06b9212a..c28e5acfe 100644 --- a/src/prep.py +++ b/src/prep.py @@ -136,11 +136,11 @@ async def check_and_collect(image_dict): meta['trackers'] = [tracker.strip() for tracker in meta['trackers'].split(',')] if 'MTV' in meta.get('trackers', []): if invalid_host_found: - console.print("[red]Warning: Some images are not hosted on an MTV-approved image host. MTV will fail if you keep these images.[/red]") + console.print("[red]Warning: Some images are not hosted on an MTV-approved image host. MTV will need new images later.[/red]") # Issue warning if any valid image is on an unapproved host and MTV is in the trackers list elif 'MTV' in trackers_list: if invalid_host_found: - console.print("[red]Warning: Some images are not hosted on an MTV-approved image host. MTV will fail if you keep these images.[/red]") + console.print("[red]Warning: Some images are not hosted on an MTV-approved image host. 
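# Worked example of the PATCH 447 DVD-remux renaming (hypothetical values):
year, source, audio, resolution, video_codec = "1999", "DVD", "DD 2.0", "576p", "MPEG-2"
aither_name = f"Movie {year} {source} REMUX {audio}"
aither_name = aither_name.replace(source, f"{resolution} {source}", 1)
# -> "Movie 1999 576p DVD REMUX DD 2.0"
aither_name = aither_name.replace(audio, f"{video_codec} {audio}", 1)
# -> "Movie 1999 576p DVD REMUX MPEG-2 DD 2.0"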
MTV will need new images later.[/red]") return valid_images diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index af9b93682..667edca47 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -12,6 +12,8 @@ from str2bool import str2bool from src.trackers.COMMON import COMMON from datetime import datetime +import glob +import multiprocessing class MTV(): @@ -217,56 +219,105 @@ def enforce_size_limit(image_list, image_sizes): console.print(data) return - async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts=None): + async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts=None, file=None): if approved_image_hosts is None: approved_image_hosts = ['ptpimg', 'imgbox'] retry_mode = False images_reuploaded = False + new_images_key = f'mtv_images_key_{img_host_index}' + discs = meta.get('discs', []) # noqa F841 + filelist = meta.get('video', []) + filename = meta['filename'] - while True: - current_img_host_key = f'img_host_{img_host_index}' - current_img_host = self.config.get('DEFAULT', {}).get(current_img_host_key) - - if not current_img_host: - console.print("[red]No more image hosts left to try.") - raise Exception("No valid image host found in the config.") - - if current_img_host not in approved_image_hosts: - console.print(f"[red]Your preferred image host '{current_img_host}' is not supported at MTV, trying next host.") - retry_mode = True # Ensure retry_mode is set to True when switching hosts - images_reuploaded = True # Mark that reuploading occurred - img_host_index += 1 # Move to the next image host in the config - continue - else: - meta['imghost'] = current_img_host - break # Exit the loop when a valid host is found + if isinstance(filelist, str): + filelist = [filelist] + multi_screens = int(self.config['DEFAULT'].get('screens', 6)) + base_dir = meta['base_dir'] + folder_id = meta['uuid'] from src.prep import Prep prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) - return_dict = {} - prep.upload_screens( - meta, - screens=meta['screens'], - img_host_num=img_host_index, - i=0, - total_screens=meta['screens'], - custom_img_list=[], # This remains to handle any custom logic in the original function - return_dict=return_dict, - retry_mode=retry_mode # Honor the retry_mode flag passed in - ) - - # Overwrite meta['image_list'] with the newly uploaded images - new_image_list = return_dict.get('image_list', []) - if new_image_list: - meta['image_list'] = new_image_list # Overwrite with new images - - # Ensure images are from approved hosts - if not all(any(x in image['raw_url'] for x in approved_image_hosts) for image in meta['image_list']): + if new_images_key not in meta: + meta[new_images_key] = [] + + screenshots_dir = os.path.join(base_dir, 'tmp', folder_id) + all_screenshots = [] + + for i, file in enumerate(filelist): + filename_pattern = f"{filename}*.png" + existing_screens = glob.glob(os.path.join(screenshots_dir, filename_pattern)) + if len(existing_screens) < multi_screens: + if meta.get('debug'): + console.print("[yellow]The image host of exsting images is not supported.") + console.print(f"[yellow]Insufficient screenshots found: generating {multi_screens} screenshots.") + + if meta['type'] == "BDMV": + s = multiprocessing.Process( + target=prep.disc_screenshots, + args=(f"FILE_{img_host_index}", meta['bdinfo'], folder_id, base_dir, + meta.get('vapoursynth', False), [], meta.get('ffdebug', False), img_host_index) + ) + elif meta['type'] == "DVD": + s = multiprocessing.Process( + 
target=prep.dvd_screenshots, + args=(meta, img_host_index, img_host_index) + ) + else: + s = multiprocessing.Process( + target=prep.screenshots, + args=(file, f"{filename}", meta['uuid'], base_dir, + meta, multi_screens + 1, True, None) + ) + + s.start() + while s.is_alive(): + await asyncio.sleep(1) + + existing_screens = glob.glob(os.path.join(screenshots_dir, filename_pattern)) + + all_screenshots.extend(existing_screens) + + if all_screenshots: + return_dict = {} + while True: + current_img_host_key = f'img_host_{img_host_index}' + current_img_host = self.config.get('DEFAULT', {}).get(current_img_host_key) + + if not current_img_host: + console.print("[red]No more image hosts left to try.") + raise Exception("No valid image host found in the config.") + + if current_img_host not in approved_image_hosts: + console.print(f"[red]Your preferred image host '{current_img_host}' is not supported at MTV, trying next host.") + retry_mode = True + images_reuploaded = True + img_host_index += 1 + continue + else: + meta['imghost'] = current_img_host + console.print(f"[green]Uploading to approved host '{current_img_host}'.") + break # Exit loop when a valid host is found + + uploaded_images, _ = prep.upload_screens( + meta, + screens=multi_screens, + img_host_num=img_host_index, + i=0, + total_screens=multi_screens, + custom_img_list=all_screenshots, + return_dict=return_dict, + retry_mode=retry_mode + ) + + if uploaded_images: + meta[new_images_key] = uploaded_images + + if not all(any(x in image['raw_url'] for x in approved_image_hosts) for image in meta.get(new_images_key, [])): console.print("[red]Unsupported image host detected, please use one of the approved image hosts") - return meta['image_list'], True, images_reuploaded # Trigger retry_mode if switching hosts + return meta[new_images_key], True, images_reuploaded # Trigger retry_mode if switching hosts - return meta['image_list'], False, images_reuploaded # Return retry_mode and images_reuploaded + return meta[new_images_key], False, images_reuploaded # Return retry_mode and images_reuploaded async def edit_desc(self, meta, images_reuploaded, valid_images): base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() From 910e0bdca8107ff115b546fb7c3e5d5fd5a63062 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 9 Nov 2024 15:55:09 +1000 Subject: [PATCH 449/741] Fix argument overrides --- upload.py | 31 ++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/upload.py b/upload.py index 64ceefbf6..ae19e3e76 100644 --- a/upload.py +++ b/upload.py @@ -157,16 +157,29 @@ async def do_the_thing(base_dir): try: with open(f"{base_dir}/tmp/{os.path.basename(path)}/meta.json") as f: saved_meta = json.load(f) + + # Define the list of keys that can be overwritten + overwrite_list = [ + 'trackers', 'dupe', 'debug', 'anon', 'category', 'type', 'screens', 'nohash', 'manual_edition', 'imdb', 'tmdb_manual', 'mal', 'manual', + 'hdb', 'ptp', 'blu', 'no_season', 'no_aka', 'no_year', 'no_dub', 'no_tag', 'no_seed', 'client', 'desclink', 'descfile', 'desc', 'draft', + 'modq', 'region', 'freeleech', 'personalrelease', 'unattended', 'manual_season', 'manual_episode', 'torrent_creation', 'qbit_tag', 'qbit_cat', + 'skip_imghost_upload', 'imghost', 'manual_source', 'webdv', 'hardcoded-subs', 'dual_audio' + ] + + sanitized_saved_meta = {} for key, value in saved_meta.items(): - overwrite_list = [ - 'trackers', 'dupe', 'debug', 'anon', 'category', 'type', 'screens', 'nohash', 
'manual_edition', 'imdb', 'tmdb_manual', 'mal', 'manual', - 'hdb', 'ptp', 'blu', 'no_season', 'no_aka', 'no_year', 'no_dub', 'no_tag', 'no_seed', 'client', 'desclink', 'descfile', 'desc', 'draft', 'modq', 'region', 'freeleech', - 'personalrelease', 'unattended', 'season', 'episode', 'torrent_creation', 'qbit_tag', 'qbit_cat', 'skip_imghost_upload', 'imghost', 'manual_source', 'webdv', 'hardcoded-subs', 'dual_audio' - ] - if meta.get(key, None) != value and key in overwrite_list: - saved_meta[key] = meta[key] - meta = saved_meta - f.close() + clean_key = key.strip().strip("'").strip('"') + + if clean_key in overwrite_list: + if clean_key in meta and meta.get(clean_key) is not None: + sanitized_saved_meta[clean_key] = meta[clean_key] + if meta['debug']: + console.print(f"Overriding {clean_key} with meta value:", meta[clean_key]) + else: + sanitized_saved_meta[clean_key] = value + else: + sanitized_saved_meta[clean_key] = value + meta.update(sanitized_saved_meta) except FileNotFoundError: pass console.print(f"[green]Gathering info for {os.path.basename(path)}") From 7800befa2106db5eaa62bf33ca64930588bbcfe3 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 9 Nov 2024 15:57:11 +1000 Subject: [PATCH 450/741] Close meta.json --- upload.py | 1 + 1 file changed, 1 insertion(+) diff --git a/upload.py b/upload.py index ae19e3e76..21bc66175 100644 --- a/upload.py +++ b/upload.py @@ -180,6 +180,7 @@ async def do_the_thing(base_dir): else: sanitized_saved_meta[clean_key] = value meta.update(sanitized_saved_meta) + f.close() except FileNotFoundError: pass console.print(f"[green]Gathering info for {os.path.basename(path)}") From 74cbbec3343fe749b9d7277ba5611fa0221315dd Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 9 Nov 2024 17:28:31 +1000 Subject: [PATCH 451/741] Fix unattended skipping through unit3d selection --- src/trackers/COMMON.py | 37 ++++++++++++++++++++----------------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index f5c0c2daf..3b6dc1332 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -385,7 +385,7 @@ async def unit3d_distributor_ids(self, distributor): }.get(distributor, 0) return distributor_id - async def prompt_user_for_id_selection(self, tmdb=None, imdb=None, tvdb=None, mal=None, filename=None, tracker_name=None): + async def prompt_user_for_id_selection(self, meta, tmdb=None, imdb=None, tvdb=None, mal=None, filename=None, tracker_name=None): if not tracker_name: tracker_name = "Tracker" # Fallback if tracker_name is not provided @@ -407,14 +407,17 @@ async def prompt_user_for_id_selection(self, tmdb=None, imdb=None, tvdb=None, ma if filename: console.print(f"Filename: {filename}") # Ensure filename is printed if available - selection = input(f"Do you want to use these IDs from {tracker_name}? (Y/n): ").strip().lower() - try: - if selection == '' or selection == 'y' or selection == 'yes': - return True - else: - return False - except (KeyboardInterrupt, EOFError): - sys.exit(1) + if not meta['unattended']: + selection = input(f"Do you want to use these IDs from {tracker_name}? 
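# Note on PATCH 450 (illustrative handle below, not the real meta.json): the
# file is opened with a context manager, so it is already closed when the
# added f.close() runs; calling close() on a closed file object is a no-op,
# which makes the extra call harmless but redundant.
import io
f = io.StringIO("{}")   # stands in for the meta.json handle
with f:
    saved_meta = f.read()
f.close()               # no-op: the with-block closed the file already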
(Y/n): ").strip().lower() + try: + if selection == '' or selection == 'y' or selection == 'yes': + return True + else: + return False + except (KeyboardInterrupt, EOFError): + sys.exit(1) + else: + return True async def prompt_user_for_confirmation(self, message): response = input(f"{message} (Y/n): ").strip().lower() @@ -512,8 +515,10 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, meta, id=N console.print(f"[green]Successfully grabbed description from {tracker}") console.print(f"Extracted description: {description}", markup=False) - # Allow user to edit or discard the description - if not (meta.get('blu') or meta.get('aither') or meta.get('lst') or meta.get('oe') or meta.get('tik')) or meta.get('unattended'): + if meta.get('unattended') or (meta.get('blu') or meta.get('aither') or meta.get('lst') or meta.get('oe') or meta.get('tik')): + meta['description'] = description + meta['saved_description'] = True + else: console.print("[cyan]Do you want to edit, discard or keep the description?[/cyan]") edit_choice = input("Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is:") @@ -521,9 +526,8 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, meta, id=N edited_description = click.edit(description) if edited_description: description = edited_description.strip() - meta['description'] = description - meta['saved_description'] = True - console.print(f"Final description after editing: {description}", markup=False) + meta['description'] = description + meta['saved_description'] = True elif edit_choice.lower() == 'd': description = None console.print("[yellow]Description discarded.[/yellow]") @@ -531,9 +535,8 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, meta, id=N console.print("[green]Keeping the original description.[/green]") meta['description'] = description meta['saved_description'] = True - else: - meta['description'] = description - meta['saved_description'] = True + + return tmdb, imdb, tvdb, mal, description, category, infohash, imagelist, file_name return tmdb, imdb, tvdb, mal, description, category, infohash, imagelist, file_name From 77be042ca5d3be2a227ae531f1d928c7116c7431 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 9 Nov 2024 18:36:58 +1000 Subject: [PATCH 452/741] Add preliminary dvdrip support --- src/prep.py | 10 ++++++++-- src/trackers/AITHER.py | 3 ++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/src/prep.py b/src/prep.py index c28e5acfe..c66b6c5b1 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1692,8 +1692,8 @@ def get_type(self, video, scene, is_disc): elif is_disc is not None: type = "DISC" elif "dvdrip" in filename: - console.print("[bold red]DVDRip Detected, exiting") - exit() + type = "DVDRIP" + # exit() else: type = "ENCODE" return type @@ -3170,6 +3170,9 @@ async def get_name(self, meta): elif type == "HDTV": # HDTV name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {source} {audio} {video_encode}" potential_missing = [] + elif type == "DVDRIP": + name = f"{title} {alt_title} {year} {resolution} DVDRip {audio} {video_encode}" + potential_missing = [] elif meta['category'] == "TV": # TV SPECIFIC if type == "DISC": # Disk if meta['is_disc'] == 'BDMV': @@ -3199,6 +3202,9 @@ async def get_name(self, meta): elif type == "HDTV": # HDTV name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {resolution} {source} {audio} {video_encode}" potential_missing = [] + elif type == "DVDRIP": + name = f"{title} 
{alt_title} {season} {resolution} DVDRip {audio} {video_encode}" + potential_missing = [] try: name = ' '.join(name.split()) diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index a3005520f..51a3bf320 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -193,7 +193,8 @@ async def get_type_id(self, type): 'WEBDL': '4', 'WEBRIP': '5', 'HDTV': '6', - 'ENCODE': '3' + 'ENCODE': '3', + 'DVDRIP': '3' }.get(type, '0') return type_id From d0e97379c7fdde1dad86be89b9e00ddf97a84214 Mon Sep 17 00:00:00 2001 From: Zips-sipZ Date: Sat, 9 Nov 2024 10:19:41 +0100 Subject: [PATCH 453/741] Update NBL.py to use CLEANPATH Changing the use from the full MEDIAINFO.txt to the MEDIAINFO_CLEANPATH.txt to clean up uploads --- src/trackers/NBL.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/NBL.py b/src/trackers/NBL.py index 3711c54c2..7587f7706 100644 --- a/src/trackers/NBL.py +++ b/src/trackers/NBL.py @@ -53,7 +53,7 @@ async def upload(self, meta, disctype): if meta['bdinfo'] is not None: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read()[:-65].strip() + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read()[:-65].strip() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'file_input': open_torrent} data = { From 295b93ea1e383109a5b00d7b411a455df8c50898 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 9 Nov 2024 21:55:12 +1000 Subject: [PATCH 454/741] Fix screenshot header --- src/trackers/COMMON.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 3b6dc1332..4f3217639 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -48,6 +48,10 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des file_limit = int(self.config['DEFAULT'].get('fileLimit', 5)) thumb_size = int(self.config['DEFAULT'].get('pack_thumb_size', '300')) process_limit = int(self.config['DEFAULT'].get('processLimit', 10)) + try: + screenheader = self.config['DEFAULT']['screenshot_header'] + except Exception: + screenheader = None with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]DESCRIPTION.txt", 'w', encoding='utf8') as descfile: if desc_header: descfile.write(desc_header) @@ -56,7 +60,6 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des discs = meta.get('discs', []) filelist = meta.get('filelist', []) desc = base - # Regular expression to find and remove the specific spoiler section wrapped with [center] tags desc = re.sub(r'\[center\]\[spoiler=Scene NFO:\].*?\[/center\]', '', desc, flags=re.DOTALL) desc = bbcode.convert_pre_to_code(desc) desc = bbcode.convert_hide_to_spoiler(desc) @@ -72,6 +75,8 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code]{each['vob_mi']}[/code][/spoiler]\n\n") descfile.write("[/center]") images = meta['image_list'] + if screenheader is not None: + descfile.write(screenheader + '\n') descfile.write("[center]") for img_index in range(len(images[:int(meta['screens'])])): web_url = images[img_index]['web_url'] @@ -202,6 +207,8 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des 
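# An equivalent, simpler form of the screenheader lookup added above (a
# sketch, not part of the diff): dict.get() already returns None for a
# missing key, which avoids the broad try/except.
config = {'DEFAULT': {}}                                    # illustrative
screenheader = config['DEFAULT'].get('screenshot_header')   # None when unset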
filename = os.path.splitext(os.path.basename(file.strip()))[0] descfile.write(f"[center][spoiler={filename}]{formatted_bbcode}[/spoiler]\n") images = meta['image_list'] + if screenheader is not None: + descfile.write(screenheader + '\n') descfile.write("[center]") for img_index in range(len(images[:int(meta['screens'])])): web_url = images[img_index]['web_url'] @@ -503,7 +510,7 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, meta, id=N if not id: # Only prompt the user for ID selection if not searching by ID try: - if not await self.prompt_user_for_id_selection(tmdb, imdb, tvdb, mal, file_name): + if not await self.prompt_user_for_id_selection(meta, tmdb, imdb, tvdb, mal, file_name): console.print("[yellow]User chose to skip based on IDs.[/yellow]") return None, None, None, None, None, None, None, None, None except (KeyboardInterrupt, EOFError): @@ -538,8 +545,6 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, meta, id=N return tmdb, imdb, tvdb, mal, description, category, infohash, imagelist, file_name - return tmdb, imdb, tvdb, mal, description, category, infohash, imagelist, file_name - except Exception as e: console.print_exception() console.print(f"[yellow]Invalid Response from {tracker} API. Error: {str(e)}[/yellow]") @@ -616,7 +621,6 @@ async def filter_dupes(self, dupes, meta): types_to_check = {'REMUX', 'WEBDL', 'WEBRip', 'HDTV'} normalized_meta_type = {t.replace('-', '').upper() for t in meta['type']} if isinstance(meta['type'], list) else {meta['type'].replace('-', '').upper()} - file_type_present = {t for t in types_to_check if t in normalized_meta_type} for each in dupes: @@ -666,10 +670,9 @@ async def filter_dupes(self, dupes, meta): dupe_type_matches = {t for t in types_to_check if t in normalized_each_type} if file_type_present: - # Allow WEB-DL and similar matches if types are related (e.g., WEB-DL vs AMZN WEB-DL) if 'WEBDL' in normalized_meta_type and 'WEBDL' in normalized_each_type: - console.log(f"[green]Allowing result we will catch later: {each}") - # Allow based on matching resolution, HDR, and audio despite type mismatch + if meta['debug']: + console.log(f"[green]Allowing result we will catch later: {each}") elif meta['resolution'] in each and meta['hdr'] in each and meta['audio'] in each: if meta['debug']: console.log(f"[green]Allowing result we will catch later: {each}") From cb74e3b859512c44f11dcf7dd09d279faa6a2b8a Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 10 Nov 2024 12:13:48 +1000 Subject: [PATCH 455/741] More DVDRip stuff --- src/prep.py | 19 +++++++++++++++---- src/trackers/LST.py | 3 ++- src/trackers/OE.py | 2 ++ 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/src/prep.py b/src/prep.py index c66b6c5b1..be8d4ffef 100644 --- a/src/prep.py +++ b/src/prep.py @@ -751,9 +751,9 @@ async def gather_prep(self, meta, mode): meta['3D'] = self.is_3d(mi, bdinfo) if meta.get('manual_source', None): meta['source'] = meta['manual_source'] - _, meta['type'] = self.get_source(meta['type'], video, meta['path'], meta['is_disc'], meta) + _, meta['type'] = self.get_source(meta['type'], video, meta['path'], meta['is_disc'], meta, base_dir) else: - meta['source'], meta['type'] = self.get_source(meta['type'], video, meta['path'], meta['is_disc'], meta) + meta['source'], meta['type'] = self.get_source(meta['type'], video, meta['path'], meta['is_disc'], meta, base_dir) if meta.get('service', None) in (None, ''): meta['service'], meta['service_longname'] = self.get_service(video, meta.get('tag', ''), 
meta['audio'], meta['filename']) elif meta.get('service'): @@ -979,6 +979,7 @@ def filter_mediainfo(data): "MaxCLL_Source": track.get("MaxCLL_Source", {}), "MaxFALL": track.get("MaxFALL", {}), "MaxFALL_Source": track.get("MaxFALL_Source", {}), + "Encoded_Library_Settings": track.get("Encoded_Library_Settings", {}), }) elif track["@type"] == "Audio": filtered["media"]["track"].append({ @@ -2267,7 +2268,10 @@ def get_tag(self, video, meta): tag = "" return tag - def get_source(self, type, video, path, is_disc, meta): + def get_source(self, type, video, path, is_disc, meta, base_dir): + folder_id = meta['uuid'] + with open(f'{base_dir}/tmp/{folder_id}/MediaInfo.json', 'r', encoding='utf-8') as f: + mi = json.load(f) try: try: source = guessit(video)['source'] @@ -2319,6 +2323,13 @@ def get_source(self, type, video, path, is_disc, meta): source = "Web" if source == "Ultra HDTV": source = "UHDTV" + if type == "DVDRIP": + console.print("correct source type") + framerate = mi['media']['track'][1].get('FrameRate', '') + if framerate == 25 and framerate == 50: + source = "PAL" + else: + source = "NTSC" except Exception: console.print(traceback.format_exc()) source = "BluRay" @@ -2487,7 +2498,7 @@ def get_video_encode(self, mi, type, bdinfo): except Exception: format = bdinfo['video'][0]['codec'] format_profile = bdinfo['video'][0]['profile'] - if type in ("ENCODE", "WEBRIP"): # ENCODE or WEBRIP + if type in ("ENCODE", "WEBRIP", "DVDRIP"): # ENCODE or WEBRIP or DVDRIP if format == 'AVC': codec = 'x264' elif format == 'HEVC': diff --git a/src/trackers/LST.py b/src/trackers/LST.py index 09da2581a..ea25043c9 100644 --- a/src/trackers/LST.py +++ b/src/trackers/LST.py @@ -56,7 +56,8 @@ async def get_type_id(self, type): 'WEBDL': '4', 'WEBRIP': '5', 'HDTV': '6', - 'ENCODE': '3' + 'ENCODE': '3', + 'DVDRIP': '3' }.get(type, '0') return type_id diff --git a/src/trackers/OE.py b/src/trackers/OE.py index 7aec84b24..795501a51 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -51,6 +51,8 @@ async def upload(self, meta, disctype): await common.edit_torrent(meta, self.tracker, self.source_flag) await self.edit_desc(meta, self.tracker, self.signature) cat_id = await self.get_cat_id(meta['category']) + if meta.get('type') == "DVDRIP": + meta['type'] = "ENCODE" type_id = await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('video_codec'), meta.get('category', "")) resolution_id = await self.get_res_id(meta['resolution']) oe_name = await self.edit_name(meta) From e038c5b5fe8cd1a2d7d91e8d6cbd6f8310095135 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 10 Nov 2024 15:37:50 +1000 Subject: [PATCH 456/741] Banned groups --- src/trackers/AITHER.py | 8 +++++--- src/trackers/OE.py | 2 +- src/trackers/PTP.py | 2 +- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index a3005520f..c0f03ab21 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -29,9 +29,11 @@ def __init__(self, config): self.upload_url = 'https://aither.cc/api/torrents/upload' self.torrent_url = 'https://aither.cc/api/torrents/' self.signature = "\n[center][url=https://aither.cc/forums/topics/1349/posts/24958]Created by L4G's Upload Assistant[/url][/center]" - self.banned_groups = ['4K4U', 'AROMA', 'd3g', 'edge2020', 'EMBER', 'EVO', 'FGT', 'FreetheFish', 'Hi10', 'HiQVE', 'ION10', 'iVy', 'Judas', 'LAMA', 'MeGusta', 'nikt0', 'OEPlus', 'OFT', 'OsC', 'PYC', - 'QxR', 'Ralphy', 'RARBG', 'RetroPeeps', 'SAMPA', 'Sicario', 'Silence', 'SkipTT', 'SPDVD', 'STUTTERSHIT', 
'SWTYBLZ', 'TAoE', 'TGx', 'Tigole', 'TSP', 'TSPxL', 'VXT', 'Weasley[HONE]', - 'Will1869', 'x0r', 'YIFY'] + self.banned_groups = [ + '4K4U', 'afm72', 'AROMA', 'Bandi', 'BiTOR', 'Bluespots', 'Chivaman', 'd3g', 'edge2020', 'EMBER', 'EVO', 'FGT', 'FreetheFish', 'Garshasp', 'Ghost', 'Grym', 'Hi10', 'HiQVE', 'ImE', 'ION10', + 'iVy', 'Judas', 'LAMA', 'Langbard', 'LION', 'MeGusta', 'MONOLITH', 'Natty', 'nikt0', 'noxxus', 'OEPlus', 'OFT', 'OsC', 'Panda', 'PYC', 'QxR', 'r00t', 'Ralphy', 'RARBG', 'RCVR', 'RetroPeeps', + 'RZeroX', 'SAMPA', 'Sicario', 'Silence', 'SkipTT', 'SM737', 'SPDVD', 'STUTTERSHIT', 'SWTYBLZ', 't3nzin', 'TAoE', 'Telly', 'TGx', 'Tigole', 'TSP', 'TSPxL', 'VXT', 'Vyndros', 'Weasley[HONE]', + 'Will1869', 'x0r', 'YIFY'] pass async def upload(self, meta, disctype): diff --git a/src/trackers/OE.py b/src/trackers/OE.py index 7aec84b24..36d33739b 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -32,7 +32,7 @@ def __init__(self, config): self.banned_groups = [ '0neshot', '3LT0N', '4K4U', '4yEo', '$andra', '[Oj]', 'AFG', 'AkihitoSubs', 'AniHLS', 'Anime Time', 'AnimeRG', 'AniURL', 'AOC', 'AR', 'AROMA', 'ASW', 'aXXo', 'BakedFish', 'BiTOR', 'BRrip', 'bonkai', - 'Cleo', 'CM8', 'C4K', 'CrEwSaDe', 'core', 'd3g', 'DDR', 'DeadFish', 'DeeJayAhmed', 'DNL', 'ELiTE', + 'Cleo', 'CM8', 'C4K', 'CrEwSaDe', 'core', 'd3g', 'DDR', 'DE3PM', 'DeadFish', 'DeeJayAhmed', 'DNL', 'ELiTE', 'EMBER', 'eSc', 'EVO', 'EZTV', 'FaNGDiNG0', 'FGT', 'fenix', 'FUM', 'FRDS', 'FROZEN', 'GalaxyTV', 'GalaxyRG', 'GalaxyRG265', 'GERMini', 'Grym', 'GrymLegacy', 'HAiKU', 'HD2DVD', 'HDTime', 'Hi10', 'HiQVE', 'ION10', 'iPlanet', 'JacobSwaggedUp', 'JIVE', 'Judas', 'KiNGDOM', 'LAMA', 'Leffe', 'LiGaS', diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 344d59dac..926a348dc 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -35,7 +35,7 @@ def __init__(self, config): self.user_agent = f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' self.banned_groups = ['aXXo', 'BMDru', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'd3g', 'DNL', 'FaNGDiNG0', 'HD2DVD', 'HDTime', 'ION10', 'iPlanet', 'KiNGDOM', 'mHD', 'mSD', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'OFT', 'PRODJi', 'SANTi', 'SPiRiT', 'STUTTERSHIT', 'ViSION', 'VXT', - 'WAF', 'x0r', 'YIFY',] + 'WAF', 'x0r', 'YIFY', 'LAMA', 'WORLD'] self.sub_lang_map = { ("Arabic", "ara", "ar"): 22, From 8504f24ad54939c5eb44fa25f38c9d276cb62f6b Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 10 Nov 2024 17:15:55 +1000 Subject: [PATCH 457/741] Limit source mi to dvdrip --- src/prep.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index be8d4ffef..652a5cf8a 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2270,8 +2270,9 @@ def get_tag(self, video, meta): def get_source(self, type, video, path, is_disc, meta, base_dir): folder_id = meta['uuid'] - with open(f'{base_dir}/tmp/{folder_id}/MediaInfo.json', 'r', encoding='utf-8') as f: - mi = json.load(f) + if type == "DVDRIP": + with open(f'{base_dir}/tmp/{folder_id}/MediaInfo.json', 'r', encoding='utf-8') as f: + mi = json.load(f) try: try: source = guessit(video)['source'] From d4987580827d265b9ba435fc5799c86443998d48 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 10 Nov 2024 19:45:36 +1000 Subject: [PATCH 458/741] Rearrange dvdrip naming --- src/prep.py | 4 ++-- src/trackers/AITHER.py | 8 ++++++++ src/trackers/LST.py | 18 +++++++++++++++++- src/trackers/OE.py | 10 ++++++++++ 4 files changed, 37 insertions(+), 3 deletions(-) diff --git a/src/prep.py b/src/prep.py index 
652a5cf8a..7f59beda7 100644 --- a/src/prep.py +++ b/src/prep.py @@ -3183,7 +3183,7 @@ async def get_name(self, meta): name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {source} {audio} {video_encode}" potential_missing = [] elif type == "DVDRIP": - name = f"{title} {alt_title} {year} {resolution} DVDRip {audio} {video_encode}" + name = f"{title} {alt_title} {year} {source} {video_encode} DVDRip {audio}" potential_missing = [] elif meta['category'] == "TV": # TV SPECIFIC if type == "DISC": # Disk @@ -3215,7 +3215,7 @@ async def get_name(self, meta): name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {resolution} {source} {audio} {video_encode}" potential_missing = [] elif type == "DVDRIP": - name = f"{title} {alt_title} {season} {resolution} DVDRip {audio} {video_encode}" + name = f"{title} {alt_title} {season} {source} DVDRip {video_encode}" potential_missing = [] try: diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 51a3bf320..0c92c0458 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -134,9 +134,17 @@ async def edit_name(self, meta): media_info_tracks = meta.get('media_info_tracks', []) # noqa #F841 resolution = meta.get('resolution') video_codec = meta.get('video_codec') + video_encode = meta.get('video_encode') name_type = meta.get('type', "") source = meta.get('source', "") + if name_type == "DVDRIP": + if meta.get('category') == "MOVIE": + aither_name = aither_name.replace(f"{meta['source']}{meta['video_encode']}", f"{resolution}", 1) + aither_name = aither_name.replace((meta['audio']), f"{meta['audio']}{video_encode}", 1) + else: + aither_name = aither_name.replace(f"{meta['source']}", f"{resolution}", 1) + if not meta['is_disc']: def has_english_audio(tracks=None, media_info_text=None): diff --git a/src/trackers/LST.py b/src/trackers/LST.py index ea25043c9..5b74b49d4 100644 --- a/src/trackers/LST.py +++ b/src/trackers/LST.py @@ -85,6 +85,7 @@ async def upload(self, meta, disctype): resolution_id = await self.get_res_id(meta['resolution']) modq = await self.get_flag(meta, 'modq') draft = await self.get_flag(meta, 'draft') + name = await self.edit_name(meta) await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) @@ -116,7 +117,7 @@ async def upload(self, meta, disctype): if nfo_file: files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") data = { - 'name': meta['name'], + 'name': name, 'description': desc, 'mediainfo': mi_dump, 'bdinfo': bd_dump, @@ -173,6 +174,21 @@ async def upload(self, meta, disctype): console.print(data) open_torrent.close() + async def edit_name(self, meta): + lst_name = meta['name'] + resolution = meta.get('resolution') + video_encode = meta.get('video_encode') + name_type = meta.get('type', "") + + if name_type == "DVDRIP": + if meta.get('category') == "MOVIE": + lst_name = lst_name.replace(f"{meta['source']}{meta['video_encode']}", f"{resolution}", 1) + lst_name = lst_name.replace((meta['audio']), f"{meta['audio']}{video_encode}", 1) + else: + lst_name = lst_name.replace(f"{meta['source']}", f"{resolution}", 1) + + return lst_name + async def get_flag(self, meta, flag_name): config_flag = self.config['TRACKERS'][self.tracker].get(flag_name) if config_flag is not None: diff --git a/src/trackers/OE.py b/src/trackers/OE.py index 795501a51..352ea6c22 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ 
-132,6 +132,16 @@ async def upload(self, meta, disctype): async def edit_name(self, meta): oe_name = meta.get('name') media_info_tracks = meta.get('media_info_tracks', []) # noqa #F841 + resolution = meta.get('resolution') + video_encode = meta.get('video_encode') + name_type = meta.get('type', "") + + if name_type == "DVDRIP": + if meta.get('category') == "MOVIE": + oe_name = oe_name.replace(f"{meta['source']}{meta['video_encode']}", f"{resolution}", 1) + oe_name = oe_name.replace((meta['audio']), f"{meta['audio']}{video_encode}", 1) + else: + oe_name = oe_name.replace(f"{meta['source']}", f"{resolution}", 1) if not meta['is_disc']: def has_english_audio(media_info_text=None): From 594a5121b08afcdf7bb17aa5636be3040be8b4e1 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 10 Nov 2024 23:28:59 +1000 Subject: [PATCH 459/741] Repack dupe dismissal should only apply when the group matches --- src/trackers/COMMON.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index e6684e8b6..ecce6c2ec 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -687,10 +687,10 @@ async def filter_dupes(self, dupes, meta): console.log(f"[red]Excluding extra result with new type match: {each}") continue - # If "repack" is in the UUID, only keep results that also contain "repack" - if has_repack_in_uuid and "repack" not in each.lower(): + # Only check for "repack" if `meta['tag']` is in `each` + if meta['tag'] in each and has_repack_in_uuid and "repack" not in each.lower(): if meta['debug']: - console.log(f"[yellow]Excluding result because it lacks 'repack': {each}") + console.log(f"[yellow]Excluding result because it lacks 'repack' and matches tag '{meta['tag']}': {each}") continue for s in search_combos: From 757a5aea4c1a30d2d829b648784c3127536044a5 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 10 Nov 2024 23:42:21 +1000 Subject: [PATCH 460/741] Relax dupe exclusions --- src/trackers/COMMON.py | 64 +++++++++++++----------------------------- 1 file changed, 20 insertions(+), 44 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index ecce6c2ec..4dac78173 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -617,23 +617,32 @@ async def filter_dupes(self, dupes, meta): if meta['debug']: console.log("[cyan]Pre-filtered dupes") console.log(dupes) - new_dupes = [] + new_dupes = [] types_to_check = {'REMUX', 'WEBDL', 'WEBRip', 'HDTV'} # noqa F841 normalized_meta_type = {t.replace('-', '').upper() for t in meta['type']} if isinstance(meta['type'], list) else {meta['type'].replace('-', '').upper()} - file_type_present = {t for t in types_to_check if t in normalized_meta_type} has_repack_in_uuid = "repack" in meta['uuid'].lower() if meta.get('uuid') else False for each in dupes: - if meta.get('sd', 0) == 1: - remove_set = set() - else: - remove_set = set({meta['resolution']}) + remove_set = set({meta['resolution']}) normalized_each_type = each.replace('-', '').upper() - # console.log(f"normalized results: {normalized_each_type}") + # Check if types match loosely, based on core attributes (resolution, HDR, audio) + type_match = any(t in normalized_each_type for t in normalized_meta_type) or \ + (meta['resolution'] in each and meta['hdr'] in each and meta['audio'] in each) + + if not type_match: + if meta['debug']: + console.log(f"[yellow]Excluding result due to type mismatch: {each}") + continue + # Repack filtering if the tag matches + if
meta['tag'] in each and has_repack_in_uuid and "repack" not in each.lower(): + if meta['debug']: + console.log(f"[yellow]Excluding result because it lacks 'repack' and matches tag '{meta['tag']}': {each}") + continue + + # Define search combos for more nuanced matching search_combos = [ { 'search': meta['hdr'], @@ -661,38 +670,8 @@ async def filter_dupes(self, dupes, meta): 'update': {meta['season'], meta['episode']} } ] - search_matches = [ - { - 'if': {'REMUX', 'WEBDL', 'WEBRip', 'HDTV'}, - 'in': meta['type'] - } - ] - # Check if the type of the dupe matches or is sufficiently similar - dupe_type_matches = {t for t in types_to_check if t in normalized_each_type} - - if file_type_present: - if 'WEBDL' in normalized_meta_type and 'WEBDL' in normalized_each_type: - if meta['debug']: - console.log(f"[green]Allowing result we will catch later: {each}") - elif meta['resolution'] in each and meta['hdr'] in each and meta['audio'] in each: - if meta['debug']: - console.log(f"[green]Allowing result we will catch later: {each}") - else: - if meta['debug']: - console.log(f"[yellow]Excluding result due to type mismatch: {each}") - continue - else: - if dupe_type_matches: - if meta['debug']: - console.log(f"[red]Excluding extra result with new type match: {each}") - continue - - # Only check for "repack" if `meta['tag']` is in `each` - if meta['tag'] in each and has_repack_in_uuid and "repack" not in each.lower(): - if meta['debug']: - console.log(f"[yellow]Excluding result because it lacks 'repack' and matches tag '{meta['tag']}': {each}") - continue + # Apply search combos to refine remove_set for s in search_combos: if s.get('search_for') not in (None, ''): if any(re.search(x, s['search'], flags=re.IGNORECASE) for x in s['search_for']): @@ -700,12 +679,9 @@ async def filter_dupes(self, dupes, meta): if s.get('search_not') not in (None, ''): if not any(re.search(x, s['search'], flags=re.IGNORECASE) for x in s['search_not']): remove_set.update(s['update']) - for sm in search_matches: - for a in sm['if']: - if a in sm['in']: - remove_set.add(a) search = each.lower().replace('-', '').replace(' ', '').replace('.', '') + for x in remove_set.copy(): if "|" in x: look_for = x.split('|') From 29c3587a91b700aae27ffa705e49ae99db720ca5 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 11 Nov 2024 19:48:56 +1000 Subject: [PATCH 461/741] Add DVDRIP to type --- src/args.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/args.py b/src/args.py index 2c36bed47..40dbfebf0 100644 --- a/src/args.py +++ b/src/args.py @@ -24,7 +24,7 @@ def parse(self, args, meta): parser.add_argument('-s', '--screens', nargs='*', required=False, help="Number of screenshots", default=int(self.config['DEFAULT']['screens'])) parser.add_argument('-mf', '--manual_frames', required=False, help="Comma-separated frame numbers to use as screenshots", type=str, default=None) parser.add_argument('-c', '--category', nargs='*', required=False, help="Category [MOVIE, TV, FANRES]", choices=['movie', 'tv', 'fanres']) - parser.add_argument('-t', '--type', nargs='*', required=False, help="Type [DISC, REMUX, ENCODE, WEBDL, WEBRIP, HDTV]", choices=['disc', 'remux', 'encode', 'webdl', 'web-dl', 'webrip', 'hdtv']) + parser.add_argument('-t', '--type', nargs='*', required=False, help="Type [DISC, REMUX, ENCODE, WEBDL, WEBRIP, HDTV, DVDRIP]", choices=['disc', 'remux', 'encode', 'webdl', 'web-dl', 'webrip', 'hdtv', 'dvdrip']) parser.add_argument('--source', nargs='*', required=False, help="Source [Blu-ray, BluRay, DVD, HDDVD, WEB, HDTV, 
UHDTV, LaserDisc, DCP]", choices=['Blu-ray', 'BluRay', 'DVD', 'HDDVD', 'WEB', 'HDTV', 'UHDTV', 'LaserDisc', 'DCP'], dest="manual_source") parser.add_argument('-res', '--resolution', nargs='*', required=False, help="Resolution [2160p, 1080p, 1080i, 720p, 576p, 576i, 480p, 480i, 8640p, 4320p, OTHER]", choices=['2160p', '1080p', '1080i', '720p', '576p', '576i', '480p', '480i', '8640p', '4320p', 'other']) parser.add_argument('-tmdb', '--tmdb', nargs='*', required=False, help="TMDb ID", type=str, dest='tmdb_manual') From cd5565dca5c1b1a8b545bc674ec173af29d6c0cc Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 11 Nov 2024 20:00:41 +1000 Subject: [PATCH 462/741] Fix source detection for dvdrip --- src/prep.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/prep.py b/src/prep.py index 7f59beda7..3147be5da 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2325,11 +2325,17 @@ def get_source(self, type, video, path, is_disc, meta, base_dir): if source == "Ultra HDTV": source = "UHDTV" if type == "DVDRIP": - console.print("correct source type") - framerate = mi['media']['track'][1].get('FrameRate', '') - if framerate == 25 and framerate == 50: - source = "PAL" + framerate_str = mi['media']['track'][1].get('FrameRate', '') + match = re.match(r"(\d+(\.\d+)?)", framerate_str) + if match: + framerate = float(match.group(0)) + + if 24.9 <= framerate <= 25.1: + source = "PAL" + else: + source = "NTSC" else: + console.print("Invalid framerate format, using NTSC") source = "NTSC" except Exception: console.print(traceback.format_exc()) From eee46ea02adb9a40dcf53c925dbaa07e4857d1d5 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 11 Nov 2024 20:15:04 +1000 Subject: [PATCH 463/741] Use resolution instead --- src/prep.py | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/src/prep.py b/src/prep.py index 3147be5da..6c446fec6 100644 --- a/src/prep.py +++ b/src/prep.py @@ -751,9 +751,9 @@ async def gather_prep(self, meta, mode): meta['3D'] = self.is_3d(mi, bdinfo) if meta.get('manual_source', None): meta['source'] = meta['manual_source'] - _, meta['type'] = self.get_source(meta['type'], video, meta['path'], meta['is_disc'], meta, base_dir) + _, meta['type'] = self.get_source(meta['type'], video, meta['path'], meta['is_disc'], meta) else: - meta['source'], meta['type'] = self.get_source(meta['type'], video, meta['path'], meta['is_disc'], meta, base_dir) + meta['source'], meta['type'] = self.get_source(meta['type'], video, meta['path'], meta['is_disc'], meta) if meta.get('service', None) in (None, ''): meta['service'], meta['service_longname'] = self.get_service(video, meta.get('tag', ''), meta['audio'], meta['filename']) elif meta.get('service'): @@ -2268,11 +2268,8 @@ def get_tag(self, video, meta): tag = "" return tag - def get_source(self, type, video, path, is_disc, meta, base_dir): - folder_id = meta['uuid'] - if type == "DVDRIP": - with open(f'{base_dir}/tmp/{folder_id}/MediaInfo.json', 'r', encoding='utf-8') as f: - mi = json.load(f) + def get_source(self, type, video, path, is_disc, meta): + resolution = meta['resolution'] try: try: source = guessit(video)['source'] @@ -2325,17 +2322,9 @@ def get_source(self, type, video, path, is_disc, meta, base_dir): if source == "Ultra HDTV": source = "UHDTV" if type == "DVDRIP": - framerate_str = mi['media']['track'][1].get('FrameRate', '') - match = re.match(r"(\d+(\.\d+)?)", framerate_str) - if match: - framerate = float(match.group(0)) - - if 24.9 <= framerate <= 25.1: - source = "PAL" - 
else: - source = "NTSC" + if resolution in [540, 576]: + source = "PAL" else: - console.print("Invalid framerate format, using NTSC") source = "NTSC" except Exception: console.print(traceback.format_exc()) From f50d0386aa9b38d169e6c22dd0f5c6fe8124b5c5 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 11 Nov 2024 20:27:06 +1000 Subject: [PATCH 464/741] UNIT3D sites want audio with dvdrip --- src/trackers/AITHER.py | 3 ++- src/trackers/LST.py | 1 + src/trackers/OE.py | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 2adc246fa..45ec59f53 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -143,9 +143,10 @@ async def edit_name(self, meta): if name_type == "DVDRIP": if meta.get('category') == "MOVIE": aither_name = aither_name.replace(f"{meta['source']}{meta['video_encode']}", f"{resolution}", 1) - aither_name = aither_name.replace((meta['audio']), f"{meta['audio']}{video_encode}", 1) + aither_name = aither_name.replace((meta['audio']), f"{meta['audio']} {video_encode}", 1) else: aither_name = aither_name.replace(f"{meta['source']}", f"{resolution}", 1) + aither_name = aither_name.replace(f"{meta['video_codec']}", f"{meta['audio']} {meta['video_codec']}", 1) if not meta['is_disc']: diff --git a/src/trackers/LST.py b/src/trackers/LST.py index 5b74b49d4..b0c3b318d 100644 --- a/src/trackers/LST.py +++ b/src/trackers/LST.py @@ -186,6 +186,7 @@ async def edit_name(self, meta): lst_name = lst_name.replace((meta['audio']), f"{meta['audio']}{video_encode}", 1) else: lst_name = lst_name.replace(f"{meta['source']}", f"{resolution}", 1) + lst_name = lst_name.replace(f"{meta['video_codec']}", f"{meta['audio']} {meta['video_codec']}", 1) return lst_name diff --git a/src/trackers/OE.py b/src/trackers/OE.py index 8fc4ebbb3..b101d222a 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -142,6 +142,7 @@ async def edit_name(self, meta): oe_name = oe_name.replace((meta['audio']), f"{meta['audio']}{video_encode}", 1) else: oe_name = oe_name.replace(f"{meta['source']}", f"{resolution}", 1) + oe_name = oe_name.replace(f"{meta['video_codec']}", f"{meta['audio']} {meta['video_codec']}", 1) if not meta['is_disc']: def has_english_audio(media_info_text=None): From 92598fcc81383a0c49ee958ca89a22afd6112c5a Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 12 Nov 2024 15:27:39 +1000 Subject: [PATCH 465/741] ANT prohibit TV --- src/trackers/ANT.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index 83169b6ca..80f7e0f49 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -61,6 +61,9 @@ async def get_flags(self, meta): return flags async def upload(self, meta, disctype): + if meta.get('category') == "TV": + console.print('[bold red]This site only ALLOWS Movies.') + return common = COMMON(config=self.config) torrent_filename = "BASE" torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent" From 14b97364d977993821c81ed20bb69cf20fb24dee Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 12 Nov 2024 18:07:25 +1000 Subject: [PATCH 466/741] RTF quicker skip --- src/trackers/RTF.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/trackers/RTF.py b/src/trackers/RTF.py index b5ddf485f..5b4a67313 100644 --- a/src/trackers/RTF.py +++ b/src/trackers/RTF.py @@ -30,6 +30,9 @@ def __init__(self, config): pass async def upload(self, meta, disctype): + if datetime.date.today().year - meta['year'] <= 9: + console.print("[red]ERROR: Not 
uploading!\nMust be older than 10 Years as per rules") + return common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.forum_link) @@ -74,10 +77,6 @@ async def upload(self, meta, disctype): 'Authorization': self.config['TRACKERS'][self.tracker]['api_key'].strip(), } - if datetime.date.today().year - meta['year'] <= 9: - console.print("[red]ERROR: Not uploading!\nMust be older than 10 Years as per rules") - return - if meta['debug'] is False: response = requests.post(url=self.upload_url, json=json_data, headers=headers) try: From c88a36d16b122d64cb166f9db18525804caa2cd1 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 12 Nov 2024 19:28:20 +1000 Subject: [PATCH 467/741] Fix PTP skip ask with ua --- src/trackers/PTP.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 926a348dc..07316d3f3 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -209,7 +209,7 @@ async def get_ptp_description(self, ptp_torrent_id, meta, is_disc): console.print("[bold green]Successfully grabbed description from PTP") console.print(f"[cyan]Description after cleaning:[yellow]\n{desc[:1000]}...") # Show first 1000 characters for brevity - if not meta.get('skipit') or meta['unattended']: + if not meta.get('skipit') and not meta['unattended']: # Allow user to edit or discard the description console.print("[cyan]Do you want to edit, discard or keep the description?[/cyan]") edit_choice = input("Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: ") @@ -228,6 +228,9 @@ async def get_ptp_description(self, ptp_torrent_id, meta, is_disc): console.print("[green]Keeping the original description.[/green]") meta['description'] = ptp_desc meta['saved_description'] = True + else: + meta['description'] = ptp_desc + meta['saved_description'] = True return desc, imagelist From 7f7ffeca5c23123311197c620d664b0b74878598 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 12 Nov 2024 20:28:53 +1000 Subject: [PATCH 468/741] Pull PTP disc based screens --- src/bbcode.py | 30 +++++++++++++++++++------ src/prep.py | 51 ++++++++++++++++++++++++++---------------- src/trackers/COMMON.py | 2 +- src/trackers/PTP.py | 2 +- 4 files changed, 57 insertions(+), 28 deletions(-) diff --git a/src/bbcode.py b/src/bbcode.py index 07cb7ff1c..102dcff17 100644 --- a/src/bbcode.py +++ b/src/bbcode.py @@ -37,7 +37,7 @@ def __init__(self): pass def clean_ptp_description(self, desc, is_disc): - # console.print(f"[yellow]Cleaning PTP description...") + # console.print("[yellow]Cleaning PTP description...") # Convert Bullet Points to - desc = desc.replace("•", "-") @@ -61,18 +61,34 @@ def clean_ptp_description(self, desc, is_disc): desc = desc.replace('http://passthepopcorn.me', 'PTP').replace('https://passthepopcorn.me', 'PTP') desc = desc.replace('http://hdbits.org', 'HDB').replace('https://hdbits.org', 'HDB') - # Remove Mediainfo Tags / Attempt to regex out mediainfo - mediainfo_tags = re.findall(r"\[mediainfo\][\s\S]*?\[\/mediainfo\]", desc) - if mediainfo_tags: + if is_disc == "DVD": + desc = re.sub(r"\[mediainfo\][\s\S]*?\[\/mediainfo\]", "", desc) + + elif is_disc == "BDMV": + desc = re.sub(r"\[mediainfo\][\s\S]*?\[\/mediainfo\]", "", desc) + desc = re.sub(r"Disc Title:[\s\S]*?(\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"Disc Size:[\s\S]*?(\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"Protection:[\s\S]*?(\n\n|$)", "", desc, 
flags=re.IGNORECASE) + desc = re.sub(r"BD-Java:[\s\S]*?(\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"BDInfo:[\s\S]*?(\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"PLAYLIST REPORT:[\s\S]*?(?=\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"Name:[\s\S]*?(\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"Length:[\s\S]*?(\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"Size:[\s\S]*?(\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"Total Bitrate:[\s\S]*?(\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"VIDEO:[\s\S]*?(?=\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"AUDIO:[\s\S]*?(?=\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"SUBTITLES:[\s\S]*?(?=\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"Codec\s+Bitrate\s+Description[\s\S]*?(?=\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"Codec\s+Language\s+Bitrate\s+Description[\s\S]*?(?=\n\n|$)", "", desc, flags=re.IGNORECASE) + + else: desc = re.sub(r"\[mediainfo\][\s\S]*?\[\/mediainfo\]", "", desc) - elif is_disc != "BDMV": desc = re.sub(r"(^general\nunique)(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) desc = re.sub(r"(^general\ncomplete)(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) desc = re.sub(r"(^(Format[\s]{2,}:))(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) desc = re.sub(r"(^(video|audio|text)( #\d+)?\nid)(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) desc = re.sub(r"(^(menu)( #\d+)?\n)(.*?)^$", "", f"{desc}\n\n", flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) - elif any(x in is_disc for x in ["BDMV", "DVD"]): - return "", [] # Convert Quote tags: desc = re.sub(r"\[quote.*?\]", "[code]", desc) diff --git a/src/prep.py b/src/prep.py index 6c446fec6..24ae74b7a 100644 --- a/src/prep.py +++ b/src/prep.py @@ -258,12 +258,11 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: description.write((ptp_desc or "") + "\n") - if not meta['is_disc']: - if not meta.get('image_list'): # Only handle images if image_list is not already populated - valid_images = await self.check_images_concurrently(ptp_imagelist, meta) - if valid_images: - meta['image_list'] = valid_images - await self.handle_image_list(meta, tracker_name) + if not meta.get('image_list'): # Only handle images if image_list is not already populated + valid_images = await self.check_images_concurrently(ptp_imagelist, meta) + if valid_images: + meta['image_list'] = valid_images + await self.handle_image_list(meta, tracker_name) else: found_match = False @@ -276,11 +275,10 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met description.write((ptp_desc or "") + "\n") meta['saved_description'] = True - if not meta['is_disc']: - if not meta.get('image_list'): # Only handle images if image_list is not already populated - valid_images = await self.check_images_concurrently(ptp_imagelist) - if valid_images: - meta['image_list'] = valid_images + if not meta.get('image_list'): # Only handle images if image_list is not already populated + valid_images = await self.check_images_concurrently(ptp_imagelist) + if valid_images: + meta['image_list'] = valid_images else: console.print("[yellow]Skipping PTP as no match found[/yellow]") found_match = False @@ -299,12 +297,11 @@ async def update_metadata_from_tracker(self, 
tracker_name, tracker_instance, met with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: description.write(ptp_desc + "\n") meta['saved_description'] = True - if not meta['is_disc']: - if not meta.get('image_list'): # Only handle images if image_list is not already populated - valid_images = await self.check_images_concurrently(ptp_imagelist, meta) - if valid_images: - meta['image_list'] = valid_images - console.print("[green]PTP images added to metadata.[/green]") + if not meta.get('image_list'): # Only handle images if image_list is not already populated + valid_images = await self.check_images_concurrently(ptp_imagelist, meta) + if valid_images: + meta['image_list'] = valid_images + console.print("[green]PTP images added to metadata.[/green]") else: console.print(f"[yellow]Could not find IMDb ID using PTP ID: {ptp_torrent_id}[/yellow]") found_match = False @@ -676,7 +673,7 @@ async def gather_prep(self, meta, mode): else: use_vs = False try: - ds = multiprocessing.Process(target=self.disc_screenshots, args=(filename, bdinfo, meta['uuid'], base_dir, use_vs, meta.get('image_list', []), meta.get('ffdebug', False), None)) + ds = multiprocessing.Process(target=self.disc_screenshots, args=(meta, filename, bdinfo, meta['uuid'], base_dir, use_vs, meta.get('image_list', []), meta.get('ffdebug', False), None)) ds.start() while ds.is_alive() is True: await asyncio.sleep(1) @@ -1243,7 +1240,15 @@ def sanitize_filename(self, filename): # Replace invalid characters like colons with an underscore return re.sub(r'[<>:"/\\|?*]', '_', filename) - def disc_screenshots(self, filename, bdinfo, folder_id, base_dir, use_vs, image_list, ffdebug, num_screens=None): + def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs, image_list, ffdebug, num_screens=None): + if 'image_list' not in meta: + meta['image_list'] = [] + existing_images = [img for img in meta['image_list'] if isinstance(img, dict) and img.get('img_url', '').startswith('http')] + + if len(existing_images) >= 3: + console.print("[yellow]There are already at least 3 images in the image list. Skipping additional screenshots.") + return + if num_screens is None: num_screens = self.screens if num_screens == 0 or len(image_list) >= num_screens: @@ -1335,6 +1340,14 @@ def disc_screenshots(self, filename, bdinfo, folder_id, base_dir, use_vs, image_ os.remove(smallest) def dvd_screenshots(self, meta, disc_num, num_screens=None): + if 'image_list' not in meta: + meta['image_list'] = [] + existing_images = [img for img in meta['image_list'] if isinstance(img, dict) and img.get('img_url', '').startswith('http')] + + if len(existing_images) >= 3: + console.print("[yellow]There are already at least 3 images in the image list. 
Skipping additional screenshots.") + return + if num_screens is None: num_screens = self.screens if num_screens == 0 or (len(meta.get('image_list', [])) >= num_screens and disc_num == 0): diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 4dac78173..73664ca33 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -158,7 +158,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des # Run prep.screenshots if no screenshots are present if each['type'] == "BDMV": use_vs = meta.get('vapoursynth', False) - s = multiprocessing.Process(target=prep.disc_screenshots, args=(f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), multi_screens)) + s = multiprocessing.Process(target=prep.disc_screenshots, args=(meta, f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), multi_screens)) s.start() while s.is_alive(): await asyncio.sleep(1) diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 07316d3f3..764e07a45 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -689,7 +689,7 @@ async def edit_desc(self, meta): new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") if not new_screens: use_vs = meta.get('vapoursynth', False) - ds = multiprocessing.Process(target=prep.disc_screenshots, args=(f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), multi_screens)) + ds = multiprocessing.Process(target=prep.disc_screenshots, args=(meta, f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), multi_screens)) ds.start() while ds.is_alive() is True: await asyncio.sleep(1) From f8de6c3ffc8b7787c178eb3ffabcacecd6e44bce Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 12 Nov 2024 23:40:52 +1000 Subject: [PATCH 469/741] Check for Interlaced in scan type before framerate check --- data/example-config.py | 2 +- src/prep.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/data/example-config.py b/data/example-config.py index 7e9d9855d..0fbb5f2eb 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -59,7 +59,7 @@ "optimize_images": True, # Use only half available CPU cores to avoid memory allocation errors - # Only when usig lossless compression + # Only when using lossless compression "shared_seedbox": False, # The name of your default torrent client, set in the torrent client sections below diff --git a/src/prep.py b/src/prep.py index 24ae74b7a..4df9edb74 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1084,6 +1084,8 @@ def get_resolution(self, guess, folder_id, base_dir): scan = "Progressive" if scan == "Progressive": scan = "p" + elif scan == "Interlaced": + scan = 'i' elif framerate == "25.000": scan = "p" else: From 65d5ee2ae435d96f5cdadf5c94abadd86735ac6b Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 13 Nov 2024 19:23:36 +1000 Subject: [PATCH 470/741] Rely only on useAPI = true for auto searching Allows auto-searching a tracker without any other requirement.
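A minimal sketch of the gating this commit aims for, assuming the TRACKERS config layout from data/example-config.py and the existing update_metadata_from_tracker helper (the PTP call site is illustrative only):

    def use_api(config, tracker):
        # True only when the tracker section exists and useAPI is set to true
        return str(config['TRACKERS'].get(tracker, {}).get('useAPI', 'false')).lower() == 'true'

    # e.g. gate the PTP lookup purely on config, with no default_trackers check
    if use_api(self.config, 'PTP') and not found_match:
        meta, match = await self.update_metadata_from_tracker('PTP', ptp, meta, search_term, search_file_folder)
        if match:
            found_match = True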
--- src/prep.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/src/prep.py b/src/prep.py index 4df9edb74..ee73b05ca 100644 --- a/src/prep.py +++ b/src/prep.py @@ -565,10 +565,7 @@ async def gather_prep(self, meta, mode): found_match = True else: - # Process all trackers with API = true if no specific tracker is set in meta - default_trackers = self.config['TRACKERS'].get('default_trackers', "").split(", ") - - if "PTP" in default_trackers and not found_match: + if "PTP" not in found_match: if str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true": ptp = PTP(config=self.config) try: @@ -581,7 +578,7 @@ async def gather_prep(self, meta, mode): print(f"PTP tracker request failed due to connection error: {conn_err}") if not meta['is_disc']: - if "BLU" in default_trackers and not found_match: + if "BLU" not in found_match: if str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": blu = BLU(config=self.config) try: @@ -593,7 +590,7 @@ async def gather_prep(self, meta, mode): except requests.exceptions.ConnectionError as conn_err: print(f"BLU tracker request failed due to connection error: {conn_err}") - if "AITHER" in default_trackers and not found_match: + if "AITHER" not in found_match: if str(self.config['TRACKERS'].get('AITHER', {}).get('useAPI')).lower() == "true": aither = AITHER(config=self.config) try: @@ -605,7 +602,7 @@ async def gather_prep(self, meta, mode): except requests.exceptions.ConnectionError as conn_err: print(f"AITHER tracker request failed due to connection error: {conn_err}") - if "LST" in default_trackers and not found_match: + if "LST" not in found_match: if str(self.config['TRACKERS'].get('LST', {}).get('useAPI')).lower() == "true": lst = LST(config=self.config) try: @@ -617,7 +614,7 @@ async def gather_prep(self, meta, mode): except requests.exceptions.ConnectionError as conn_err: print(f"LST tracker request failed due to connection error: {conn_err}") - if "OE" in default_trackers and not found_match: + if "OE" not in found_match: if str(self.config['TRACKERS'].get('OE', {}).get('useAPI')).lower() == "true": oe = OE(config=self.config) try: @@ -629,7 +626,7 @@ async def gather_prep(self, meta, mode): except requests.exceptions.ConnectionError as conn_err: print(f"OE tracker request failed due to connection error: {conn_err}") - if "TIK" in default_trackers and not found_match: + if "TIK" not in found_match: if str(self.config['TRACKERS'].get('TIK', {}).get('useAPI')).lower() == "true": tik = TIK(config=self.config) try: @@ -641,7 +638,7 @@ async def gather_prep(self, meta, mode): except requests.exceptions.ConnectionError as conn_err: print(f"TIK tracker request failed due to connection error: {conn_err}") - if "HDB" in default_trackers and not found_match: + if "HDB" not in found_match: if str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": hdb = HDB(config=self.config) try: From b932faebd3d1c6a7c5dc05223a0a14429d5c5eac Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 13 Nov 2024 22:51:13 +1000 Subject: [PATCH 471/741] Revert "Rely only on useAPI = true for auto searching" This reverts commit 65d5ee2ae435d96f5cdadf5c94abadd86735ac6b. 
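The diff below restores the default_trackers gating because the reverted code compared tracker names directly against found_match, which gather_prep initializes as a boolean; a membership test on a bool fails at runtime, as a one-liner shows:

    found_match = False
    "PTP" not in found_match  # TypeError: argument of type 'bool' is not iterable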
--- src/prep.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/src/prep.py b/src/prep.py index ee73b05ca..4df9edb74 100644 --- a/src/prep.py +++ b/src/prep.py @@ -565,7 +565,10 @@ async def gather_prep(self, meta, mode): found_match = True else: - if "PTP" not in found_match: + # Process all trackers with API = true if no specific tracker is set in meta + default_trackers = self.config['TRACKERS'].get('default_trackers', "").split(", ") + + if "PTP" in default_trackers and not found_match: if str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true": ptp = PTP(config=self.config) try: @@ -578,7 +581,7 @@ async def gather_prep(self, meta, mode): print(f"PTP tracker request failed due to connection error: {conn_err}") if not meta['is_disc']: - if "BLU" not in found_match: + if "BLU" in default_trackers and not found_match: if str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": blu = BLU(config=self.config) try: @@ -590,7 +593,7 @@ async def gather_prep(self, meta, mode): except requests.exceptions.ConnectionError as conn_err: print(f"BLU tracker request failed due to connection error: {conn_err}") - if "AITHER" not in found_match: + if "AITHER" in default_trackers and not found_match: if str(self.config['TRACKERS'].get('AITHER', {}).get('useAPI')).lower() == "true": aither = AITHER(config=self.config) try: @@ -602,7 +605,7 @@ async def gather_prep(self, meta, mode): except requests.exceptions.ConnectionError as conn_err: print(f"AITHER tracker request failed due to connection error: {conn_err}") - if "LST" not in found_match: + if "LST" in default_trackers and not found_match: if str(self.config['TRACKERS'].get('LST', {}).get('useAPI')).lower() == "true": lst = LST(config=self.config) try: @@ -614,7 +617,7 @@ async def gather_prep(self, meta, mode): except requests.exceptions.ConnectionError as conn_err: print(f"LST tracker request failed due to connection error: {conn_err}") - if "OE" not in found_match: + if "OE" in default_trackers and not found_match: if str(self.config['TRACKERS'].get('OE', {}).get('useAPI')).lower() == "true": oe = OE(config=self.config) try: @@ -626,7 +629,7 @@ async def gather_prep(self, meta, mode): except requests.exceptions.ConnectionError as conn_err: print(f"OE tracker request failed due to connection error: {conn_err}") - if "TIK" not in found_match: + if "TIK" in default_trackers and not found_match: if str(self.config['TRACKERS'].get('TIK', {}).get('useAPI')).lower() == "true": tik = TIK(config=self.config) try: @@ -638,7 +641,7 @@ async def gather_prep(self, meta, mode): except requests.exceptions.ConnectionError as conn_err: print(f"TIK tracker request failed due to connection error: {conn_err}") - if "HDB" not in found_match: + if "HDB" in default_trackers and not found_match: if str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": hdb = HDB(config=self.config) try: From 654d56c2004f306178fa87d905b2c0623bc48138 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 14 Nov 2024 22:50:33 +1000 Subject: [PATCH 472/741] Add option to set ptp id from info hash --- src/args.py | 1 + src/clients.py | 38 ++++++++++++++++++++++++++++++++++++++ src/prep.py | 7 +++++++ 3 files changed, 46 insertions(+) diff --git a/src/args.py b/src/args.py index 40dbfebf0..2f49aed3c 100644 --- a/src/args.py +++ b/src/args.py @@ -94,6 +94,7 @@ def parse(self, args, meta): parser.add_argument('-vs', '--vapoursynth', action='store_true', required=False, help="Use vapoursynth for screens 
(requires vs install)") parser.add_argument('-cleanup', '--cleanup', action='store_true', required=False, help="Clean up tmp directory") parser.add_argument('-fl', '--freeleech', nargs='*', required=False, help="Freeleech Percentage", default=0, dest="freeleech") + parser.add_argument('--infohash', nargs='*', required=False, help="V1 Info Hash") args, before_args = parser.parse_known_args(input) args = vars(args) # console.print(args) diff --git a/src/clients.py b/src/clients.py index d0228fee7..fe28bac0e 100644 --- a/src/clients.py +++ b/src/clients.py @@ -13,6 +13,7 @@ import shutil import time from src.console import console +import re class Clients(): @@ -452,3 +453,40 @@ async def remote_path_map(self, meta): remote_path = remote_path + os.sep return local_path, remote_path + + async def get_ptp_from_hash(self, meta): + default_torrent_client = self.config['DEFAULT']['default_torrent_client'] + client = self.config['TORRENT_CLIENTS'][default_torrent_client] + qbt_client = qbittorrentapi.Client( + host=client['qbit_url'], + port=client['qbit_port'], + username=client['qbit_user'], + password=client['qbit_pass'], + VERIFY_WEBUI_CERTIFICATE=client.get('VERIFY_WEBUI_CERTIFICATE', True) + ) + + try: + qbt_client.auth_log_in() + except qbittorrentapi.LoginFailed as e: + console.print(f"[bold red]Login failed while trying to get info hash: {e}") + exit(1) + + info_hash_v1 = meta.get('infohash') + torrents = qbt_client.torrents_info() + found = False + + for torrent in torrents: + if torrent.get('infohash_v1') == info_hash_v1: + comment = torrent.get('comment', "") + match = re.search(r'torrentid=(\d+)', comment) + if match: + meta['ptp'] = match.group(1) + console.print(f"[bold cyan]meta['ptp'] set to torrentid: {meta['ptp']}") + else: + console.print("[bold red]No torrentid found in comment.") + found = True + break + + if not found: + console.print("[bold red]Torrent with the specified infohash_v1 not found.") + return meta diff --git a/src/prep.py b/src/prep.py index 4df9edb74..3b3638496 100644 --- a/src/prep.py +++ b/src/prep.py @@ -10,6 +10,8 @@ from src.trackers.HDB import HDB from src.trackers.TIK import TIK from src.trackers.COMMON import COMMON +from src.clients import Clients +from data.config import config try: import traceback @@ -496,6 +498,11 @@ async def gather_prep(self, meta, mode): with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: description.write(description_text) + client = Clients(config=config) + if meta.get('infohash') is not None: + meta = await client.get_ptp_from_hash(meta) + console.print("PTP meta:", meta['ptp']) + if not meta.get('image_list'): # Reuse information from trackers with fallback found_match = False From f31734459cd6f01e3881859ee670bdb8339f233d Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 14 Nov 2024 22:53:43 +1000 Subject: [PATCH 473/741] Remove some console --- src/prep.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index 3b3638496..3222a742a 100644 --- a/src/prep.py +++ b/src/prep.py @@ -501,7 +501,6 @@ async def gather_prep(self, meta, mode): client = Clients(config=config) if meta.get('infohash') is not None: meta = await client.get_ptp_from_hash(meta) - console.print("PTP meta:", meta['ptp']) if not meta.get('image_list'): # Reuse information from trackers with fallback @@ -527,7 +526,6 @@ async def gather_prep(self, meta, mode): # If a specific tracker is found, only process that one if specific_tracker: - console.print(f"[blue]Processing 
only the {specific_tracker} tracker based on meta.[/blue]") if specific_tracker == 'PTP': ptp = PTP(config=self.config) From 335f0d03935d74170b39a4939b31b8da24e20c2c Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 14 Nov 2024 23:05:08 +1000 Subject: [PATCH 474/741] Might as well make them all work --- src/clients.py | 38 ++++++++++++++++++++++++++++++++------ 1 file changed, 32 insertions(+), 6 deletions(-) diff --git a/src/clients.py b/src/clients.py index fe28bac0e..82e0acf9d 100644 --- a/src/clients.py +++ b/src/clients.py @@ -478,15 +478,41 @@ async def get_ptp_from_hash(self, meta): for torrent in torrents: if torrent.get('infohash_v1') == info_hash_v1: comment = torrent.get('comment', "") - match = re.search(r'torrentid=(\d+)', comment) - if match: - meta['ptp'] = match.group(1) - console.print(f"[bold cyan]meta['ptp'] set to torrentid: {meta['ptp']}") - else: - console.print("[bold red]No torrentid found in comment.") + + if "https://passthepopcorn.me" in comment: + match = re.search(r'torrentid=(\d+)', comment) + if match: + meta['ptp'] = match.group(1) + console.print(f"[bold cyan]meta['ptp'] set to torrentid: {meta['ptp']}") + + elif "https://aither.cc" in comment: + match = re.search(r'/(\d+)$', comment) + if match: + meta['aither'] = match.group(1) + console.print(f"[bold cyan]meta['aither'] set to ID: {meta['aither']}") + + elif "https://lst.gg" in comment: + match = re.search(r'/(\d+)$', comment) + if match: + meta['lst'] = match.group(1) + console.print(f"[bold cyan]meta['lst'] set to ID: {meta['lst']}") + + elif "https://onlyencodes.cc" in comment: + match = re.search(r'/(\d+)$', comment) + if match: + meta['oe'] = match.group(1) + console.print(f"[bold cyan]meta['oe'] set to ID: {meta['oe']}") + + elif "https://blutopia.cc" in comment: + match = re.search(r'/(\d+)$', comment) + if match: + meta['blu'] = match.group(1) + console.print(f"[bold cyan]meta['blu'] set to ID: {meta['blu']}") + found = True break if not found: console.print("[bold red]Torrent with the specified infohash_v1 not found.") + return meta From f7e30853efab5c5211e8e29239e6d1e99c7663a4 Mon Sep 17 00:00:00 2001 From: drd00m Date: Thu, 14 Nov 2024 10:26:42 -0700 Subject: [PATCH 475/741] Adding support for yoinked.org --- README.md | 2 +- data/example-config.py | 9 +++++++-- upload.py | 5 +++-- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 420004264..b932cc3a7 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ A simple tool to take the work out of uploading. - Can re-use existing torrents instead of hashing new - Generates proper name for your upload using Mediainfo/BDInfo and TMDb/IMDb conforming to site rules - Checks for existing releases already on site - - Uploads to ACM/Aither/AL/ANT/BHD/BHDTV/BLU/CBR/FNP/FL/HDB/HDT/HP/HUNO/JPTV/LCD/LST/LT/MTV/NBL/OE/OTW/PSS/PTP/PTER/RF/R4E(limited)/RTF/SHRI/SN/SPD/STC/STT/TLC/THR/TL/TVC/TTG/ULCX/UTP + - Uploads to ACM/Aither/AL/ANT/BHD/BHDTV/BLU/CBR/FNP/FL/HDB/HDT/HP/HUNO/JPTV/LCD/LST/LT/MTV/NBL/OE/OTW/PSS/PTP/PTER/RF/R4E(limited)/RTF/SHRI/SN/SPD/STC/STT/TLC/THR/TL/TVC/TTG/ULCX/UTP/YOINK - Adds to your client with fast resume, seeding instantly (rtorrent/qbittorrent/deluge/watch folder) - ALL WITH MINIMAL INPUT! - Currently works with .mkv/.mp4/Blu-ray/DVD/HD-DVDs diff --git a/data/example-config.py b/data/example-config.py index 0fbb5f2eb..b4c7cf04f 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -76,9 +76,9 @@ "TRACKERS": { # Which trackers do you want to upload to? 
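    # (registration note, inferred from this patch: a site listed here must also be
    # imported in upload.py and added to api_trackers and tracker_class_map, as done below)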
- # Available tracker: ACM, AITHER, AL, ANT, BHD, BHDTV, BLU, CBR, FNP, HDB, HDT, HP, HUNO, LCD, LST, LT, MTV, NBL, OE, OTW, PSS, PTER, PTP, R4E, RF, RTF, SN, STC, STT, THR, TIK, TL, ULCX, UTP + # Available tracker: ACM, AITHER, AL, ANT, BHD, BHDTV, BLU, CBR, FNP, HDB, HDT, HP, HUNO, LCD, LST, LT, MTV, NBL, OE, OTW, PSS, PTER, PTP, R4E, RF, RTF, SN, STC, STT, THR, TIK, TL, ULCX, UTP, YOINK # Remove the trackers from the default_trackers list that are not used, to save being asked everytime - "default_trackers": "ACM, AITHER, AL, ANT, BHD, BHDTV, BLU, CBR, FNP, HDB, HDT, HP, HUNO, LCD, LST, LT, MTV, NBL, OE, OTW, PSS, PTER, PTP, R4E, RF, RTF, SN, STC, STT, THR, TIK, TL, ULCX, UTP", + "default_trackers": "ACM, AITHER, AL, ANT, BHD, BHDTV, BLU, CBR, FNP, HDB, HDT, HP, HUNO, LCD, LST, LT, MTV, NBL, OE, OTW, PSS, PTER, PTP, R4E, RF, RTF, SN, STC, STT, THR, TIK, TL, ULCX, UTP, YOINK", "ACM": { "api_key": "ACM api key", @@ -324,6 +324,11 @@ "announce_url": "https://UTP/announce/customannounceurl", # "anon" : False }, + "YOINK": { + "api_key": "YOINK api key", + "announce_url": "https://yoinked.org/announce/customannounceurl", + # "anon" : "False" + }, }, # enable_search to True will automatically try and find a suitable hash to save having to rehash when creating torrents diff --git a/upload.py b/upload.py index 273ddb27c..d0897c2e8 100644 --- a/upload.py +++ b/upload.py @@ -45,6 +45,7 @@ from src.trackers.PSS import PSS from src.trackers.ULCX import ULCX from src.trackers.SPD import SPD +from src.trackers.YOINK import YOINK import json from pathlib import Path import asyncio @@ -257,7 +258,7 @@ async def do_the_thing(base_dir): common = COMMON(config=config) api_trackers = [ 'ACM', 'AITHER', 'AL', 'BHD', 'BLU', 'CBR', 'FNP', 'HUNO', 'JPTV', 'LCD', 'LST', 'LT', - 'OE', 'OTW', 'PSS', 'RF', 'R4E', 'SHRI', 'STC', 'STT', 'TIK', 'ULCX', 'UTP' + 'OE', 'OTW', 'PSS', 'RF', 'R4E', 'SHRI', 'STC', 'STT', 'TIK', 'ULCX', 'UTP', 'YOINK' ] other_api_trackers = [ 'ANT', 'BHDTV', 'NBL', 'RTF', 'SN', 'SPD', 'TL', 'TVC' @@ -270,7 +271,7 @@ async def do_the_thing(base_dir): 'FNP': FNP, 'FL': FL, 'HDB': HDB, 'HDT': HDT, 'HP': HP, 'HUNO': HUNO, 'JPTV': JPTV, 'LCD': LCD, 'LST': LST, 'LT': LT, 'MTV': MTV, 'NBL': NBL, 'OE': OE, 'OTW': OTW, 'PSS': PSS, 'PTP': PTP, 'PTER': PTER, 'R4E': R4E, 'RF': RF, 'RTF': RTF, 'SHRI': SHRI, 'SN': SN, 'SPD': SPD, 'STC': STC, 'STT': STT, 'THR': THR, - 'TIK': TIK, 'TL': TL, 'TVC': TVC, 'TTG': TTG, 'ULCX': ULCX, 'UTP': UTP + 'TIK': TIK, 'TL': TL, 'TVC': TVC, 'TTG': TTG, 'ULCX': ULCX, 'UTP': UTP, 'YOINK': YOINK, } tracker_capabilities = { From ad33dbb43e194aa63e5479667c9e8cd2c3cfa521 Mon Sep 17 00:00:00 2001 From: drd00m Date: Thu, 14 Nov 2024 10:35:40 -0700 Subject: [PATCH 476/741] Look how silly I am, I forgot the tracker class, here it is --- src/trackers/YOINK.py | 219 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 219 insertions(+) create mode 100644 src/trackers/YOINK.py diff --git a/src/trackers/YOINK.py b/src/trackers/YOINK.py new file mode 100644 index 000000000..67cec3c6c --- /dev/null +++ b/src/trackers/YOINK.py @@ -0,0 +1,219 @@ +# -*- coding: utf-8 -*- +# import discord +import asyncio +import requests +from str2bool import str2bool +import platform +import bencodepy +import os +import glob + +from src.trackers.COMMON import COMMON +from src.console import console + + +class YOINK(): + """ + Edit for Tracker: + Edit BASE.torrent with announce and source + Check for duplicates + Set type/category IDs + Upload + """ + + def __init__(self, config): + self.config = config 
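+        # UNIT3D boilerplate, mirroring the sibling tracker classes: source_flag is stamped
+        # into the torrent's source field by COMMON.edit_torrent, and api_key is later sent
+        # as the api_token parameter when hitting the upload/search endpoints below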
+ self.tracker = 'YOiNK' + self.source_flag = 'YOiNK' + self.upload_url = 'https://yoinked.org/api/torrents/upload' + self.search_url = 'https://yoinked.org/api/torrents/filter' + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" + self.banned_groups = ["YTS,YiFY,LAMA,MeGUSTA,NAHOM,GalaxyRG,RARBG"] + pass + + async def get_cat_id(self, category_name): + category_id = { + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '0') + return category_id + + async def get_type_id(self, type): + type_id = { + 'DISC': '1', + 'REMUX': '2', + 'WEBDL': '4', + 'WEBRIP': '5', + 'HDTV': '6', + 'ENCODE': '3' + }.get(type, '0') + return type_id + + async def get_res_id(self, resolution): + resolution_id = { + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', + '1080p': '3', + '1080i': '4', + '720p': '5', + '576p': '6', + '576i': '7', + '480p': '8', + '480i': '9' + }.get(resolution, '10') + return resolution_id + + async def upload(self, meta, disctype): + common = COMMON(config=self.config) + await common.edit_torrent(meta, self.tracker, self.source_flag) + cat_id = await self.get_cat_id(meta['category']) + type_id = await self.get_type_id(meta['type']) + resolution_id = await self.get_res_id(meta['resolution']) + await common.unit3d_edit_desc(meta, self.tracker, self.signature) + region_id = await common.unit3d_region_ids(meta.get('region')) + distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + anon = 0 + else: + anon = 1 + + if meta['bdinfo'] is not None: + mi_dump = None + bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + else: + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + bd_dump = None + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') + files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") + data = { + 'name': meta['name'], + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, + } + # Internal + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: + if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + data['internal'] = 1 + + if region_id != 0: + data['region_id'] = region_id + if distributor_id != 0: + data['distributor_id'] = distributor_id + if meta.get('category') == "TV": + data['season_number'] = meta.get('season_int', '0') + 
data['episode_number'] = meta.get('episode_int', '0') + headers = { + 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + } + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + } + + if meta['debug'] is False: + response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + try: + console.print(response.json()) + except Exception: + console.print("It may have uploaded, go check") + return + else: + console.print("[cyan]Request Data:") + console.print(data) + open_torrent.close() + + async def search_existing(self, meta, disctype): + dupes = [] + console.print("[yellow]Searching for existing torrents on site...") + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" + } + if meta.get('edition', "") != "": + params['name'] = params['name'] + f" {meta['edition']}" + try: + response = requests.get(url=self.search_url, params=params) + response = response.json() + for each in response['data']: + result = [each][0]['attributes']['name'] + # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() + # if difference >= 0.05: + dupes.append(result) + except Exception: + console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + await asyncio.sleep(5) + + return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None From 99b2a580ab4d00a2f7cd31122b8b8908373d5273 Mon Sep 17 00:00:00 2001 From: drd00m Date: Thu, 14 Nov 2024 10:39:49 -0700 Subject: [PATCH 477/741] I think case-sensitivity causes potential issues here so fixing --- src/trackers/YOINK.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/trackers/YOINK.py b/src/trackers/YOINK.py index 67cec3c6c..6370fa5c6 100644 --- a/src/trackers/YOINK.py +++ b/src/trackers/YOINK.py @@ -23,8 +23,8 @@ class YOINK(): def __init__(self, config): self.config = config - self.tracker = 'YOiNK' - self.source_flag = 'YOiNK' + self.tracker = 'YOINK' + self.source_flag = 'YOINK' self.upload_url = 'https://yoinked.org/api/torrents/upload' self.search_url = 'https://yoinked.org/api/torrents/filter' 
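The `search_torrent_page` method shown above ends by stamping the tracker's details link into the `.torrent` file itself. That round-trip is just a bencode decode/encode with one top-level key changed; a self-contained sketch (the file name and URL are placeholders):

```python
import bencodepy

# Write a tiny stand-in .torrent so the example runs on its own.
with open('example.torrent', 'wb') as f:
    f.write(bencodepy.encode({b'info': {b'name': b'example', b'piece length': 16384}}))

def set_torrent_comment(torrent_path, details_link):
    with open(torrent_path, 'rb') as f:
        torrent = bencodepy.decode(f.read())   # keys come back as bytes
    torrent[b'comment'] = details_link.encode('utf-8')
    with open(torrent_path, 'wb') as f:
        f.write(bencodepy.encode(torrent))     # re-encode and overwrite in place

set_torrent_comment('example.torrent', 'https://yoinked.org/torrents/12345')
print(bencodepy.decode(open('example.torrent', 'rb').read())[b'comment'])
```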
self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" From 8381280fcad36ecbb5699d45f644ef5134fbe5fb Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 16 Nov 2024 21:10:16 +1000 Subject: [PATCH 478/741] Rely only on useAPI for auto searching - attempt 2 --- src/prep.py | 196 ++++++++++++---------------------------------------- 1 file changed, 45 insertions(+), 151 deletions(-) diff --git a/src/prep.py b/src/prep.py index 3222a742a..77b966479 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2,13 +2,13 @@ from src.args import Args from src.console import console from src.exceptions import * # noqa: F403 -from src.trackers.PTP import PTP -from src.trackers.BLU import BLU -from src.trackers.AITHER import AITHER -from src.trackers.LST import LST -from src.trackers.OE import OE -from src.trackers.HDB import HDB -from src.trackers.TIK import TIK +from src.trackers.PTP import PTP # noqa F401 +from src.trackers.BLU import BLU # noqa F401 +from src.trackers.AITHER import AITHER # noqa F401 +from src.trackers.LST import LST # noqa F401 +from src.trackers.OE import OE # noqa F401 +from src.trackers.HDB import HDB # noqa F401 +from src.trackers.TIK import TIK # noqa F401 from src.trackers.COMMON import COMMON from src.clients import Clients from data.config import config @@ -507,161 +507,55 @@ async def gather_prep(self, meta, mode): found_match = False if search_term: - # Check if specific trackers are already set in meta - specific_tracker = None - if meta.get('ptp'): - specific_tracker = 'PTP' - elif meta.get('hdb'): - specific_tracker = 'HDB' - elif meta.get('blu'): - specific_tracker = 'BLU' - elif meta.get('aither'): - specific_tracker = 'AITHER' - elif meta.get('lst'): - specific_tracker = 'LST' - elif meta.get('oe'): - specific_tracker = 'OE' - elif meta.get('tik'): - specific_tracker = 'TIK' - - # If a specific tracker is found, only process that one - if specific_tracker: - - if specific_tracker == 'PTP': - ptp = PTP(config=self.config) - meta, match = await self.update_metadata_from_tracker('PTP', ptp, meta, search_term, search_file_folder) - if match: - found_match = True - - elif specific_tracker == 'BLU': - blu = BLU(config=self.config) - meta, match = await self.update_metadata_from_tracker('BLU', blu, meta, search_term, search_file_folder) - if match: - found_match = True - - elif specific_tracker == 'AITHER': - aither = AITHER(config=self.config) - meta, match = await self.update_metadata_from_tracker('AITHER', aither, meta, search_term, search_file_folder) - if match: - found_match = True - - elif specific_tracker == 'LST': - lst = LST(config=self.config) - meta, match = await self.update_metadata_from_tracker('LST', lst, meta, search_term, search_file_folder) - if match: - found_match = True - - elif specific_tracker == 'OE': - oe = OE(config=self.config) - meta, match = await self.update_metadata_from_tracker('OE', oe, meta, search_term, search_file_folder) - if match: - found_match = True + # Check if a specific tracker is already set in meta + tracker_keys = { + 'ptp': 'PTP', + 'hdb': 'HDB', + 'blu': 'BLU', + 'aither': 'AITHER', + 'lst': 'LST', + 'oe': 'OE', + 'tik': 'TIK', + } + specific_tracker = next((tracker_keys[key] for key in tracker_keys if meta.get(key)), None) - elif specific_tracker == 'TIK': - tik = TIK(config=self.config) - meta, match = await self.update_metadata_from_tracker('TIK', tik, meta, search_term, search_file_folder) - if match: - found_match = True + async def 
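The lookup-table-plus-`next()` idiom introduced above is what replaces the long `if meta.get('ptp') ... elif meta.get('hdb') ...` ladder. Isolated, with invented meta values:

```python
tracker_keys = {'ptp': 'PTP', 'hdb': 'HDB', 'blu': 'BLU', 'aither': 'AITHER'}

# Hypothetical meta: the user supplied a BLU id on the command line.
meta = {'ptp': None, 'hdb': None, 'blu': 123456, 'aither': None}

# First tracker whose meta key is truthy, else None - the same result as
# seven near-identical branches, in one expression.
specific_tracker = next(
    (tracker_keys[key] for key in tracker_keys if meta.get(key)), None
)
print(specific_tracker)  # -> 'BLU'
```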
process_tracker(tracker_name, meta): + nonlocal found_match + tracker_class = globals().get(tracker_name) + if tracker_class is None: + print(f"Tracker class for {tracker_name} not found.") + return meta - elif specific_tracker == 'HDB': - hdb = HDB(config=self.config) - meta, match = await self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder) + tracker_instance = tracker_class(config=self.config) + try: + updated_meta, match = await self.update_metadata_from_tracker( + tracker_name, tracker_instance, meta, search_term, search_file_folder + ) if match: found_match = True + console.print(f"[green]Match found on tracker: {tracker_name}[/green]") + return updated_meta + except aiohttp.ClientSSLError: + print(f"{tracker_name} tracker request failed due to SSL error.") + except requests.exceptions.ConnectionError as conn_err: + print(f"{tracker_name} tracker request failed due to connection error: {conn_err}") + return meta + # If a specific tracker is found, process only that tracker + if specific_tracker: + meta = await process_tracker(specific_tracker, meta) else: # Process all trackers with API = true if no specific tracker is set in meta - default_trackers = self.config['TRACKERS'].get('default_trackers', "").split(", ") + tracker_order = ["PTP", "BLU", "AITHER", "LST", "OE", "TIK", "HDB"] - if "PTP" in default_trackers and not found_match: - if str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true": - ptp = PTP(config=self.config) - try: - meta, match = await self.update_metadata_from_tracker('PTP', ptp, meta, search_term, search_file_folder) - if match: - found_match = True - except aiohttp.ClientSSLError: - print("PTP tracker request failed due to SSL error.") - except requests.exceptions.ConnectionError as conn_err: - print(f"PTP tracker request failed due to connection error: {conn_err}") - - if not meta['is_disc']: - if "BLU" in default_trackers and not found_match: - if str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": - blu = BLU(config=self.config) - try: - meta, match = await self.update_metadata_from_tracker('BLU', blu, meta, search_term, search_file_folder) - if match: - found_match = True - except aiohttp.ClientSSLError: - print("BLU tracker request failed due to SSL error.") - except requests.exceptions.ConnectionError as conn_err: - print(f"BLU tracker request failed due to connection error: {conn_err}") - - if "AITHER" in default_trackers and not found_match: - if str(self.config['TRACKERS'].get('AITHER', {}).get('useAPI')).lower() == "true": - aither = AITHER(config=self.config) - try: - meta, match = await self.update_metadata_from_tracker('AITHER', aither, meta, search_term, search_file_folder) - if match: - found_match = True - except aiohttp.ClientSSLError: - print("AITHER tracker request failed due to SSL error.") - except requests.exceptions.ConnectionError as conn_err: - print(f"AITHER tracker request failed due to connection error: {conn_err}") - - if "LST" in default_trackers and not found_match: - if str(self.config['TRACKERS'].get('LST', {}).get('useAPI')).lower() == "true": - lst = LST(config=self.config) - try: - meta, match = await self.update_metadata_from_tracker('LST', lst, meta, search_term, search_file_folder) - if match: - found_match = True - except aiohttp.ClientSSLError: - print("LST tracker request failed due to SSL error.") - except requests.exceptions.ConnectionError as conn_err: - print(f"LST tracker request failed due to connection error: {conn_err}") - - if "OE" in 
default_trackers and not found_match: - if str(self.config['TRACKERS'].get('OE', {}).get('useAPI')).lower() == "true": - oe = OE(config=self.config) - try: - meta, match = await self.update_metadata_from_tracker('OE', oe, meta, search_term, search_file_folder) - if match: - found_match = True - except aiohttp.ClientSSLError: - print("OE tracker request failed due to SSL error.") - except requests.exceptions.ConnectionError as conn_err: - print(f"OE tracker request failed due to connection error: {conn_err}") - - if "TIK" in default_trackers and not found_match: - if str(self.config['TRACKERS'].get('TIK', {}).get('useAPI')).lower() == "true": - tik = TIK(config=self.config) - try: - meta, match = await self.update_metadata_from_tracker('TIK', tik, meta, search_term, search_file_folder) - if match: - found_match = True - except aiohttp.ClientSSLError: - print("TIK tracker request failed due to SSL error.") - except requests.exceptions.ConnectionError as conn_err: - print(f"TIK tracker request failed due to connection error: {conn_err}") - - if "HDB" in default_trackers and not found_match: - if str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": - hdb = HDB(config=self.config) - try: - meta, match = await self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder) - if match: - found_match = True - except aiohttp.ClientSSLError: - print("HDB tracker request failed due to SSL error.") - except requests.exceptions.ConnectionError as conn_err: - print(f"HDB tracker request failed due to connection error: {conn_err}") + for tracker_name in tracker_order: + if not found_match: # Stop checking once a match is found + tracker_config = self.config['TRACKERS'].get(tracker_name, {}) + if str(tracker_config.get('useAPI', 'false')).lower() == "true": + meta = await process_tracker(tracker_name, meta) if not found_match: console.print("[yellow]No matches found on any trackers.[/yellow]") - else: - console.print(f"[green]Match found: {found_match}[/green]") else: console.print("[yellow]Warning: No valid search term available, skipping tracker updates.[/yellow]") else: From 10259ee123d01bd4369ab3415dbdedd5529dce40 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 17 Nov 2024 08:37:58 +1000 Subject: [PATCH 479/741] Update example url to correct domain --- data/example-config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/example-config.py b/data/example-config.py index b4c7cf04f..2279437d4 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -134,7 +134,7 @@ "FL": { "username": "FL username", "passkey": "FL passkey", - "uploader_name": "https://hdbits.org/announce/Custom_Announce_URL", + "uploader_name": "https://filelist.io/Custom_Announce_URL", # "anon": False, }, "FNP": { From 0dd5de9df01636b1212e97e9c6eaa8060246720e Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 18 Nov 2024 19:42:08 +1000 Subject: [PATCH 480/741] Clarify PTP config --- data/example-config.py | 4 ++++ src/trackers/PTP.py | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/data/example-config.py b/data/example-config.py index 2279437d4..74344f11a 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -31,8 +31,12 @@ # Number of screenshots to use for each (ALL) disc/episode when uploading packs to supported sites. # 0 equals old behavior where only the original description and images are added. + # This setting also effect PTP, however PTP requries at least 2 images for each. 
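That minimum is enforced in code as well: the PTP.py hunk just below clamps the configured value rather than trusting it. The clamp in isolation:

```python
def effective_ptp_screens(configured) -> int:
    # PTP needs at least 2 screenshots per disc/file in a pack, so the
    # configured multiScreens value (including the 0 "old behaviour"
    # setting) is raised to 2 when it falls short.
    multi_screens = int(configured)
    if multi_screens < 2:
        multi_screens = 2
    return multi_screens

print(effective_ptp_screens("0"))  # -> 2
print(effective_ptp_screens(4))    # -> 4
```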
+ # PTP will always use a *minimum* of 2, regardless of what is set here.
"multiScreens": "2",

+ # The below options for packed content do not affect PTP. PTP has a set standard.
+
# When uploading packs, you can specify a different screenshot thumbnail size, default 300.
"pack_thumb_size": "300",

diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py
index 764e07a45..fb8efcd72 100644
--- a/src/trackers/PTP.py
+++ b/src/trackers/PTP.py
@@ -619,9 +619,9 @@ async def edit_desc(self, meta):
prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config)
base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding="utf-8").read()
multi_screens = int(self.config['DEFAULT'].get('multiScreens', 2))
- if multi_screens == 0:
+ if multi_screens < 2:
multi_screens = 2
- console.print("[yellow]PTP requires screenshots for multi disc/file content, overriding config")
+ console.print("[yellow]PTP requires at least 2 screenshots for multi disc/file content, overriding config")
with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding="utf-8") as desc:
images = meta['image_list']

From 730fd447e7856e69a8d876e9be2ec816929bfdb3 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Wed, 20 Nov 2024 07:36:59 +1000
Subject: [PATCH 481/741] HUNO language

---
 src/trackers/HUNO.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py
index afd3d4633..9fe9526cf 100644
--- a/src/trackers/HUNO.py
+++ b/src/trackers/HUNO.py
@@ -119,7 +119,7 @@ def get_audio(self, meta):
if dual:
language = "DUAL"
elif 'mediainfo' in meta:
- language = next(x for x in meta["mediainfo"]["media"]["track"] if x["@type"] == "Audio").get('Language_String', "English")
+ language = next(x for x in meta["mediainfo"]["media"]["track"] if x["@type"] == "Audio").get('Language', "English")
language = re.sub(r'\(.+\)', '', language)
if language == "zxx":
language = "Silent"

From 8c6887492aa666ffcf4bfe8aa9e4b46bb732636a Mon Sep 17 00:00:00 2001
From: Audionut
Date: Wed, 20 Nov 2024 18:36:05 +1000
Subject: [PATCH 482/741] HUNO - use mediainfo text for audio language

---
 src/trackers/HUNO.py | 24 +++++++++++++++++++++---
 1 file changed, 21 insertions(+), 3 deletions(-)

diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py
index 9fe9526cf..08dd1ce44 100644
--- a/src/trackers/HUNO.py
+++ b/src/trackers/HUNO.py
@@ -118,11 +118,29 @@ def get_audio(self, meta):
if dual:
language = "DUAL"
- elif 'mediainfo' in meta:
- language = next(x for x in meta["mediainfo"]["media"]["track"] if x["@type"] == "Audio").get('Language', "English")
- language = re.sub(r'\(.+\)', '', language)
+ else:
+ media_info_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt"
+ with open(media_info_path, 'r', encoding='utf-8') as f:
+ media_info_text = f.read()
+
+ try:
+ audio_lines = [
+ line for line in media_info_text.splitlines() if "Audio" in line
+ ]
+ if audio_lines:
+ language_line = next(
+ (line for line in audio_lines if "Language" in line), None
+ )
+ if language_line:
+ language = re.search(r'Language\s*:\s*(.+)', language_line)
+ language = language.group(1) if language else "English"
+ language = re.sub(r'\(.+\)', '', language)
+ except StopIteration:
+ language = "English"
+
if language == "zxx":
language = "Silent"
+
return f'{codec} {channels} {language}'

def get_basename(self, meta):

From 17ddd571a88b017268c3e5cea6cbb475ca3eb690 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Fri, 22 Nov 2024 11:38:23 +1000
Subject: [PATCH
483/741] ULCX update banned groups --- src/trackers/ULCX.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/ULCX.py b/src/trackers/ULCX.py index cb5a3eed1..78a5d4938 100644 --- a/src/trackers/ULCX.py +++ b/src/trackers/ULCX.py @@ -23,7 +23,7 @@ def __init__(self, config): self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = ['Tigole', 'x0r', 'Judas', 'SPDVD', 'MeGusta', 'YIFY', 'SWTYBLZ', 'TAoE', 'TSP', 'TSPxL', 'LAMA', '4K4U', 'ION10', 'Will1869', 'TGx', 'Sicario', 'QxR', 'Hi10', 'EMBER', 'FGT', 'AROMA', 'd3g', 'nikt0', 'Grym', - 'RARBG', 'iVy', 'FnP', 'EDGE2020', 'NuBz', 'NAHOM'] + 'RARBG', 'iVy', 'FnP', 'EDGE2020', 'NuBz', 'NAHOM', 'Ralphy'] pass async def get_cat_id(self, category_name): From d9c71758ab03264a8cb6484292d383fbcf2db857 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 22 Nov 2024 19:46:03 +1000 Subject: [PATCH 484/741] Fix unit3d multi disc screens --- src/prep.py | 4 ++-- src/trackers/COMMON.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/prep.py b/src/prep.py index 77b966479..c6d6b36cc 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1141,12 +1141,12 @@ def sanitize_filename(self, filename): # Replace invalid characters like colons with an underscore return re.sub(r'[<>:"/\\|?*]', '_', filename) - def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs, image_list, ffdebug, num_screens=None): + def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs, image_list, ffdebug, num_screens=None, force_screenshots=False): if 'image_list' not in meta: meta['image_list'] = [] existing_images = [img for img in meta['image_list'] if isinstance(img, dict) and img.get('img_url', '').startswith('http')] - if len(existing_images) >= 3: + if len(existing_images) >= 3 and not force_screenshots: console.print("[yellow]There are already at least 3 images in the image list. 
Skipping additional screenshots.") return diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 73664ca33..9337c7fad 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -158,7 +158,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des # Run prep.screenshots if no screenshots are present if each['type'] == "BDMV": use_vs = meta.get('vapoursynth', False) - s = multiprocessing.Process(target=prep.disc_screenshots, args=(meta, f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), multi_screens)) + s = multiprocessing.Process(target=prep.disc_screenshots, args=(meta, f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), multi_screens, True)) s.start() while s.is_alive(): await asyncio.sleep(1) From cf8b8429291f133535f8286a160a6a885d84eb7b Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 22 Nov 2024 22:38:27 +1000 Subject: [PATCH 485/741] HUNO - fix language insertion fixes https://github.com/Audionut/Upload-Assistant/issues/128 --- src/trackers/HUNO.py | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index 08dd1ce44..6a93ace5f 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -119,27 +119,28 @@ def get_audio(self, meta): if dual: language = "DUAL" else: + # Read the MEDIAINFO.txt file media_info_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt" with open(media_info_path, 'r', encoding='utf-8') as f: media_info_text = f.read() - try: - audio_lines = [ - line for line in media_info_text.splitlines() if "Audio" in line - ] - if audio_lines: - language_line = next( - (line for line in audio_lines if "Language" in line), None - ) - if language_line: - language = re.search(r'Language\s*:\s*(.+)', language_line) - language = language.group(1) if language else "English" - language = re.sub(r'\(.+\)', '', language) - except StopIteration: - language = "English" + # Extract the first audio section + first_audio_section = re.search(r'Audio\s+ID\s+:\s+2(.*?)\n\n', media_info_text, re.DOTALL) + if not first_audio_section: # Fallback in case of a different structure + first_audio_section = re.search(r'Audio(.*?)Text', media_info_text, re.DOTALL) + + if first_audio_section: + # Extract language information from the first audio track + language_match = re.search(r'Language\s*:\s*(.+)', first_audio_section.group(1)) + if language_match: + language = language_match.group(1).strip() + language = re.sub(r'\(.+\)', '', language) # Remove text in parentheses + # Handle special cases if language == "zxx": language = "Silent" + elif not language: + language = "Unknown" # Default if no language is found return f'{codec} {channels} {language}' From 04d6a5ecb1ecdd510f24a3f28d3386b6314844e8 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 22 Nov 2024 22:53:00 +1000 Subject: [PATCH 486/741] Fix an error when description is discarded --- src/prep.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index c6d6b36cc..827753e6b 100644 --- a/src/prep.py +++ b/src/prep.py @@ -3545,7 +3545,8 @@ def clean_text(text): content_written = True if not content_written: - description.write(meta['description'] + "\n") + description_text = meta.get('description', '') or '' + description.write(description_text + "\n") description.write("\n") return meta From 8be5834523daf39a6abcc9c51b03afa1d73a6f1c Mon Sep 17 00:00:00 2001 
From: Audionut Date: Sat, 23 Nov 2024 10:06:48 +1000 Subject: [PATCH 487/741] Update DVD screens handling --- src/prep.py | 180 ++++++++++++++++++++++++++++++++-------------------- 1 file changed, 111 insertions(+), 69 deletions(-) diff --git a/src/prep.py b/src/prep.py index 827753e6b..5dd225384 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1185,8 +1185,11 @@ def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs, from src.vs import vs_screengn vs_screengn(source=file, encode=None, filter_b_frames=False, num=num_screens, dir=f"{base_dir}/tmp/{folder_id}/") else: - loglevel = 'verbose' if bool(ffdebug) else 'quiet' - debug = not ffdebug + if (meta.get('ffdebug', False)): + loglevel = 'verbose' + debug = False + else: + loglevel = 'quiet' with Progress( TextColumn("[bold green]Saving Screens..."), BarColumn(), @@ -1295,9 +1298,11 @@ def dvd_screenshots(self, meta, disc_num, num_screens=None): i = num_screens console.print('[bold green]Reusing screenshots') else: - if bool(meta.get('ffdebug', False)) is True: + if (meta.get('ffdebug', False)): loglevel = 'verbose' debug = False + else: + loglevel = 'quiet' looped = 0 retake = False with Progress( @@ -1314,94 +1319,132 @@ def dvd_screenshots(self, meta, disc_num, num_screens=None): if n >= num_screens: n -= num_screens image = f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][disc_num]['name']}-{i}.png" - if not os.path.exists(image) or retake is not False: + if not os.path.exists(image) or retake: retake = False - loglevel = 'quiet' - debug = True - if bool(meta.get('debug', False)): - loglevel = 'error' - debug = False def _is_vob_good(n, loops, num_screens): - voblength = 300 - vob_mi = MediaInfo.parse(f"{meta['discs'][disc_num]['path']}/VTS_{main_set[n]}", output='JSON') - vob_mi = json.loads(vob_mi) - try: - voblength = float(vob_mi['media']['track'][1]['Duration']) - return voblength, n - except Exception: + max_loops = 6 + fallback_duration = 300 + voblength = fallback_duration + + while loops < max_loops: try: - voblength = float(vob_mi['media']['track'][2]['Duration']) - return voblength, n - except Exception: - n += 1 - if n >= len(main_set): - n = 0 - if n >= num_screens: - n -= num_screens - if loops < 6: - loops = loops + 1 - voblength, n = _is_vob_good(n, loops, num_screens) - return voblength, n - else: - return 300, n + vob_mi = MediaInfo.parse( + f"{meta['discs'][disc_num]['path']}/VTS_{main_set[n]}", + output='JSON' + ) + vob_mi = json.loads(vob_mi) + if meta['debug']: + console.print("[yellow]Analyzing VOB file:[/yellow]", main_set[n]) + + for track in vob_mi.get('media', {}).get('track', []): + duration = track.get('Duration') + width = track.get('Width') + height = track.get('Height') + if meta['debug']: + console.print(f"Track {n}: Duration={duration}, Width={width}, Height={height}") + + if duration and width and height: + if float(width) > 0 and float(height) > 0: + voblength = float(duration) + if meta['debug']: + console.print(f"[green]Valid track found: voblength={voblength}, n={n}[/green]") + return voblength, n + + except Exception as e: + console.print(f"[red]Error parsing VOB {n}: {e}") + + n = (n + 1) % len(main_set) + if n >= num_screens: + n -= num_screens + loops += 1 + if meta['debug']: + console.print(f"[yellow]Retrying: loops={loops}, current voblength={voblength}[/yellow]") + if meta['debug']: + console.print(f"[red]Fallback triggered: returning fallback_duration={fallback_duration}[/red]") + return fallback_duration, n + try: voblength, n = _is_vob_good(n, 0, num_screens) - 
# img_time = random.randint(round(voblength/5), round(voblength - voblength/5)) ss_times = self.valid_ss_time(ss_times, num_screens + 1, voblength) - ff = ffmpeg.input(f"{meta['discs'][disc_num]['path']}/VTS_{main_set[n]}", ss=ss_times[i]) - if w_sar != 1 or h_sar != 1: - ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) - ( - ff - .output(image, vframes=1, pix_fmt="rgb24") - .overwrite_output() - .global_args('-loglevel', loglevel) - .run(quiet=debug) - ) - except Exception: + + if ss_times[i] < 0 or ss_times[i] > voblength: + raise ValueError(f"Invalid seek time: {ss_times[i]} for video length {voblength}") + + input_file = f"{meta['discs'][disc_num]['path']}/VTS_{main_set[n]}" + if not os.path.exists(input_file): + console.print(f"[red]Missing input file: {input_file}") + retake = True + continue + + # Run FFmpeg with timeout + try: + ff = ffmpeg.input(input_file, ss=ss_times[i]) + if w_sar != 1 or h_sar != 1: + ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) + + ff.output( + image, + vframes=1, + pix_fmt="rgb24" + ).overwrite_output().global_args('-loglevel', loglevel).run(quiet=debug) + + if not os.path.exists(image): + if meta['debug']: + console.print(f"[red]Image not created: {image}, retaking...") + retake = True + continue + + except ffmpeg.Error as e: + console.print(f"[red]FFmpeg error: {e.stderr}") + retake = True + continue + + except Exception as e: + console.print(f"[red]Error processing video file: {e}") console.print(traceback.format_exc()) + retake = True + continue + self.optimize_images(image) + progress.update(screen_task, advance=1) n += 1 try: - if os.path.getsize(Path(image)) <= 31000000 and self.img_host == "imgbb": + file_size = os.path.getsize(image) + if self.img_host == "imgbb" and file_size <= 31000000: i += 1 - elif os.path.getsize(Path(image)) <= 10000000 and self.img_host in ["imgbox", 'pixhost']: + elif self.img_host in ["imgbox", "pixhost"] and file_size <= 10000000: i += 1 - elif os.path.getsize(Path(image)) <= 75000: - console.print("[yellow]Image is incredibly small (and is most likely to be a single color), retaking") + elif file_size <= 75000: + console.print("[yellow]Image too small (likely a single color), retaking...") retake = True time.sleep(1) - elif self.img_host == "ptpimg": - i += 1 - elif self.img_host == "lensdump": - i += 1 - elif self.img_host == "ptscreens": - i += 1 - elif self.img_host == "oeimg": + elif self.img_host in ["ptpimg", "lensdump", "ptscreens", "oeimg"]: i += 1 else: - console.print("[red]Image too large for your image host, retaking") + console.print("[red]Image too large for image host, retaking...") retake = True time.sleep(1) + looped = 0 - except Exception: - if looped >= 25: - console.print('[red]Failed to take screenshots') - exit() + + except Exception as e: + console.print(f"[red]Error validating image file: {e}") looped += 1 - progress.advance(screen_task) - # remove smallest image + if looped >= 15: + console.print('[red]Failed to take screenshots after multiple attempts') + raise RuntimeError("Screenshot process failed") + smallest = None - smallestsize = 99**99 + smallest_size = float('inf') for screens in glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}/", f"{meta['discs'][disc_num]['name']}-*"): screen_path = os.path.join(f"{meta['base_dir']}/tmp/{meta['uuid']}/", screens) - screensize = os.path.getsize(screen_path) - if screensize < smallestsize: - smallestsize = screensize + screen_size = os.path.getsize(screen_path) + if screen_size < 
smallest_size: + smallest_size = screen_size smallest = screen_path - if smallest is not None: + if smallest: os.remove(smallest) def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=None, force_screenshots=False, manual_frames=None): @@ -1442,12 +1485,11 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non length = round(float(length)) os.chdir(f"{base_dir}/tmp/{folder_id}") i = 0 - - loglevel = 'quiet' - debug = True - if bool(meta.get('ffdebug', False)) is True: + if (meta.get('ffdebug', False)): loglevel = 'verbose' debug = False + else: + loglevel = 'quiet' retake = False with Progress( From a5ed7d7e6e40c1f48b4937dc38b4297a03d4c636 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 23 Nov 2024 10:48:55 +1000 Subject: [PATCH 488/741] Pass option from PTP also --- src/trackers/PTP.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index fb8efcd72..8ca791501 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -689,7 +689,7 @@ async def edit_desc(self, meta): new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") if not new_screens: use_vs = meta.get('vapoursynth', False) - ds = multiprocessing.Process(target=prep.disc_screenshots, args=(meta, f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), multi_screens)) + ds = multiprocessing.Process(target=prep.disc_screenshots, args=(meta, f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), multi_screens, True)) ds.start() while ds.is_alive() is True: await asyncio.sleep(1) From 20949b5764fba2ddeb4fd9ccd96f100f97e37005 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 23 Nov 2024 12:45:07 +1000 Subject: [PATCH 489/741] Fix one, break one --- src/prep.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/src/prep.py b/src/prep.py index 5dd225384..75be11573 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1187,7 +1187,6 @@ def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs, else: if (meta.get('ffdebug', False)): loglevel = 'verbose' - debug = False else: loglevel = 'quiet' with Progress( @@ -1208,7 +1207,7 @@ def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs, .output(image, vframes=1, pix_fmt="rgb24") .overwrite_output() .global_args('-loglevel', loglevel) - .run(quiet=debug) + .run() ) except Exception: console.print(traceback.format_exc()) @@ -1300,7 +1299,6 @@ def dvd_screenshots(self, meta, disc_num, num_screens=None): else: if (meta.get('ffdebug', False)): loglevel = 'verbose' - debug = False else: loglevel = 'quiet' looped = 0 @@ -1387,7 +1385,7 @@ def _is_vob_good(n, loops, num_screens): image, vframes=1, pix_fmt="rgb24" - ).overwrite_output().global_args('-loglevel', loglevel).run(quiet=debug) + ).overwrite_output().global_args('-loglevel', loglevel).run() if not os.path.exists(image): if meta['debug']: @@ -1487,7 +1485,6 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non i = 0 if (meta.get('ffdebug', False)): loglevel = 'verbose' - debug = False else: loglevel = 'quiet' @@ -1539,7 +1536,7 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non .output(image_path, vframes=1, pix_fmt="rgb24") .overwrite_output() .global_args('-loglevel', loglevel) - .run(quiet=debug) + .run() ) except Exception as e: From 72b6f2791e098982acd6e76cf833983ce2f9cadf Mon Sep 
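The DVD rework above leans on two primitives: probing VOBs with MediaInfo until one reports a sane duration, and grabbing a single frame with ffmpeg-python. The frame grab, reduced to a sketch (input path and seek time are placeholders; an `ffmpeg` binary must be on PATH):

```python
import ffmpeg  # ffmpeg-python

def grab_frame(video_path: str, seconds: float, out_image: str, verbose: bool = False) -> None:
    # Mirror the verbose/quiet switch: -loglevel verbose when debugging, quiet otherwise.
    loglevel = 'verbose' if verbose else 'quiet'
    (
        ffmpeg
        .input(video_path, ss=seconds)                   # seek before decode
        .output(out_image, vframes=1, pix_fmt='rgb24')   # exactly one RGB frame
        .overwrite_output()
        .global_args('-loglevel', loglevel)
        .run()
    )

grab_frame('example.mkv', 120.0, 'screenshot-0.png')
```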
17 00:00:00 2001 From: Audionut Date: Sat, 23 Nov 2024 15:14:20 +1000 Subject: [PATCH 490/741] Get imgbb response --- src/prep.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/prep.py b/src/prep.py index 75be11573..c9ce35100 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2881,6 +2881,8 @@ def exponential_backoff(retry_count, initial_timeout): } response = requests.post(url, data=data, timeout=timeout) response = response.json() + if meta.get('debug'): + console.print('imgbb API response', response) img_url = response['data'].get('medium', response['data']['image'])['url'] raw_url = response['data']['image']['url'] web_url = response['data']['url_viewer'] From 3b8a99cdeb331e184d4acd77826354b8a9199c01 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 23 Nov 2024 20:58:27 +1000 Subject: [PATCH 491/741] Make keep-folder work again --- src/clients.py | 136 ++++++++++++++++++++++++++++++++----------------- src/prep.py | 22 ++++---- upload.py | 2 +- 3 files changed, 101 insertions(+), 59 deletions(-) diff --git a/src/clients.py b/src/clients.py index 82e0acf9d..704bcaebe 100644 --- a/src/clients.py +++ b/src/clients.py @@ -68,41 +68,42 @@ async def find_existing_torrent(self, meta): if meta.get('client', None) == 'none' or default_torrent_client == 'none': return None client = self.config['TORRENT_CLIENTS'][default_torrent_client] - torrent_storage_dir = client.get('torrent_storage_dir', None) - torrent_client = client.get('torrent_client', None).lower() + torrent_storage_dir = client.get('torrent_storage_dir') + torrent_client = client.get('torrent_client', '').lower() + if torrent_storage_dir is None and torrent_client != "watch": console.print(f'[bold red]Missing torrent_storage_dir for {default_torrent_client}') return None - elif not os.path.exists(str(torrent_storage_dir)) and torrent_client != "watch": + if not os.path.exists(str(torrent_storage_dir)) and torrent_client != "watch": console.print(f"[bold red]Invalid torrent_storage_dir path: [bold yellow]{torrent_storage_dir}") + torrenthash = None - if torrent_storage_dir is not None and os.path.exists(torrent_storage_dir): - if meta.get('torrenthash', None) is not None: - console.print("torrenthash:", torrenthash) - valid, torrent_path = await self.is_valid_torrent(meta, f"{torrent_storage_dir}/{meta['torrenthash']}.torrent", meta['torrenthash'], torrent_client, print_err=True) - if valid: - torrenthash = meta['torrenthash'] - elif meta.get('ext_torrenthash', None) is not None: - console.print("ext_torrenthash:", meta.get('ext_torrenthash')) - valid, torrent_path = await self.is_valid_torrent(meta, f"{torrent_storage_dir}/{meta['ext_torrenthash']}.torrent", meta['ext_torrenthash'], torrent_client, print_err=True) + for hash_key in ['torrenthash', 'ext_torrenthash']: + hash_value = meta.get(hash_key) + if hash_value: + valid, torrent_path = await self.is_valid_torrent( + meta, f"{torrent_storage_dir}/{hash_value}.torrent", + hash_value, torrent_client, print_err=True + ) if valid: - torrenthash = meta['ext_torrenthash'] - if torrent_client == 'qbit' and torrenthash is None and client.get('enable_search') is True: - torrenthash = await self.search_qbit_for_torrent(meta, client) - if not torrenthash: - console.print("[bold yellow]No Valid .torrent found") - if not torrenthash: - console.print("No torrenthash in find_existing") - return None + torrenthash = hash_value + break + + if torrent_client == 'qbit' and not torrenthash and client.get('enable_search'): + torrenthash = await self.search_qbit_for_torrent(meta, client) + + if 
torrenthash: torrent_path = f"{torrent_storage_dir}/{torrenthash}.torrent" - valid2, torrent_path = await self.is_valid_torrent(meta, torrent_path, torrenthash, torrent_client, print_err=False) + valid2, torrent_path = await self.is_valid_torrent( + meta, torrent_path, torrenthash, torrent_client, print_err=False + ) if valid2: return torrent_path + console.print("[bold yellow]No Valid .torrent found") return None async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client, print_err=False): - console.print("We've moved into torrent validation") valid = False wrong_file = False @@ -322,50 +323,89 @@ def rtorrent(self, path, torrent_path, torrent, meta, local_path, remote_path, c return async def qbittorrent(self, path, torrent, local_path, remote_path, client, is_disc, filelist, meta): - # infohash = torrent.infohash # Remote path mount - isdir = os.path.isdir(path) - if not isdir and len(filelist) == 1: - path = os.path.dirname(path) - if len(filelist) != 1: + if meta.get('keep_folder'): + # Keep only the root folder (e.g., "D:\\Movies") path = os.path.dirname(path) - if local_path.lower() in path.lower() and local_path.lower() != remote_path.lower(): - path = path.replace(local_path, remote_path) + else: + # Adjust path based on filelist and directory status + isdir = os.path.isdir(path) + if len(filelist) != 1 or not isdir: + path = os.path.dirname(path) + + # Ensure remote path replacement and normalization + if path.startswith(local_path) and local_path.lower() != remote_path.lower(): + path = path.replace(local_path, remote_path, 1) path = path.replace(os.sep, '/') - if not path.endswith(os.sep): - path = f"{path}/" - qbt_client = qbittorrentapi.Client(host=client['qbit_url'], port=client['qbit_port'], username=client['qbit_user'], password=client['qbit_pass'], VERIFY_WEBUI_CERTIFICATE=client.get('VERIFY_WEBUI_CERTIFICATE', True)) + + # Ensure trailing slash for qBittorrent + if not path.endswith('/'): + path += '/' + + console.print("[red]Final Path:", path) + + # Initialize qBittorrent client + qbt_client = qbittorrentapi.Client( + host=client['qbit_url'], + port=client['qbit_port'], + username=client['qbit_user'], + password=client['qbit_pass'], + VERIFY_WEBUI_CERTIFICATE=client.get('VERIFY_WEBUI_CERTIFICATE', True) + ) console.print("[bold yellow]Adding and rechecking torrent") + try: qbt_client.auth_log_in() except qbittorrentapi.LoginFailed: console.print("[bold red]INCORRECT QBIT LOGIN CREDENTIALS") return + + # Check for automatic management auto_management = False am_config = client.get('automatic_management_paths', '') if isinstance(am_config, list): - for each in am_config: - if os.path.normpath(each).lower() in os.path.normpath(path).lower(): - auto_management = True - else: - if os.path.normpath(am_config).lower() in os.path.normpath(path).lower() and am_config.strip() != "": - auto_management = True - qbt_category = client.get("qbit_cat") if not meta.get("qbit_cat") else meta.get('qbit_cat') - + auto_management = any( + os.path.normpath(each).lower() in os.path.normpath(path).lower() + for each in am_config + ) + elif am_config.strip(): + auto_management = os.path.normpath(am_config).lower() in os.path.normpath(path).lower() + + # Set qBittorrent category and content layout + qbt_category = meta.get("qbit_cat", client.get("qbit_cat")) content_layout = client.get('content_layout', 'Original') - qbt_client.torrents_add(torrent_files=torrent.dump(), save_path=path, use_auto_torrent_management=auto_management, is_skip_checking=True, 
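The 30-iteration poll above exists because qBittorrent's WebUI API can acknowledge `torrents_add` before the torrent is actually queryable. A condensed add-then-confirm sketch using qbittorrent-api (host and credentials are placeholders):

```python
import asyncio
import qbittorrentapi

async def add_and_confirm(torrent_bytes: bytes, infohash: str, save_path: str) -> bool:
    client = qbittorrentapi.Client(host='localhost', port=8080,
                                   username='admin', password='adminadmin')
    client.auth_log_in()
    client.torrents_add(torrent_files=torrent_bytes, save_path=save_path,
                        is_skip_checking=True)

    # Poll for up to 30s; the API may return before the torrent is registered.
    for _ in range(30):
        if client.torrents_info(torrent_hashes=infohash):
            client.torrents_resume(torrent_hashes=infohash)
            return True
        await asyncio.sleep(1)
    return False
```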
content_layout=content_layout, category=qbt_category) - # Wait for up to 30 seconds for qbit to actually return the download - # there's an async race conditiion within qbt that it will return ok before the torrent is actually added - for _ in range(0, 30): + # Add the torrent + try: + qbt_client.torrents_add( + torrent_files=torrent.dump(), + save_path=path, + use_auto_torrent_management=auto_management, + is_skip_checking=True, + content_layout=content_layout, + category=qbt_category + ) + except qbittorrentapi.APIConnectionError as e: + console.print(f"[red]Failed to add torrent: {e}") + return + + # Wait for torrent to be added + timeout = 30 + for _ in range(timeout): if len(qbt_client.torrents_info(torrent_hashes=torrent.infohash)) > 0: break await asyncio.sleep(1) + else: + console.print("[red]Torrent addition timed out.") + return + + # Resume and tag torrent qbt_client.torrents_resume(torrent.infohash) - if client.get('qbit_tag', None) is not None: - qbt_client.torrents_add_tags(tags=client.get('qbit_tag'), torrent_hashes=torrent.infohash) - if meta.get('qbit_tag') is not None: - qbt_client.torrents_add_tags(tags=meta.get('qbit_tag'), torrent_hashes=torrent.infohash) + if client.get('qbit_tag'): + qbt_client.torrents_add_tags(tags=client['qbit_tag'], torrent_hashes=torrent.infohash) + if meta.get('qbit_tag'): + qbt_client.torrents_add_tags(tags=meta['qbit_tag'], torrent_hashes=torrent.infohash) + console.print(f"Added to: {path}") def deluge(self, path, torrent_path, torrent, local_path, remote_path, client, meta): diff --git a/src/prep.py b/src/prep.py index c9ce35100..0d7730f67 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2681,14 +2681,18 @@ def validate_piece_size(self): def create_torrent(self, meta, path, output_filename): # Handle directories and file inclusion logic if meta['isdir']: - os.chdir(path) - globs = glob.glob1(path, "*.mkv") + glob.glob1(path, "*.mp4") + glob.glob1(path, "*.ts") - no_sample_globs = [] - for file in globs: - if not file.lower().endswith('sample.mkv') or "!sample" in file.lower(): - no_sample_globs.append(os.path.abspath(f"{path}{os.sep}{file}")) - if len(no_sample_globs) == 1: - path = meta['filelist'][0] + if meta['keep_folder']: + cli_ui.info('--keep-folder was specified. 
Using complete folder for torrent creation.') + path = path + else: + os.chdir(path) + globs = glob.glob1(path, "*.mkv") + glob.glob1(path, "*.mp4") + glob.glob1(path, "*.ts") + no_sample_globs = [] + for file in globs: + if not file.lower().endswith('sample.mkv') or "!sample" in file.lower(): + no_sample_globs.append(os.path.abspath(f"{path}{os.sep}{file}")) + if len(no_sample_globs) == 1: + path = meta['filelist'][0] if meta['is_disc']: include, exclude = "", "" else: @@ -2881,8 +2885,6 @@ def exponential_backoff(retry_count, initial_timeout): } response = requests.post(url, data=data, timeout=timeout) response = response.json() - if meta.get('debug'): - console.print('imgbb API response', response) img_url = response['data'].get('medium', response['data']['image'])['url'] raw_url = response['data']['image']['url'] web_url = response['data']['url_viewer'] diff --git a/upload.py b/upload.py index d0897c2e8..71b436719 100644 --- a/upload.py +++ b/upload.py @@ -574,7 +574,7 @@ def get_confirmation(meta): cli_ui.info(ring_the_bell) # Handle the 'keep_folder' logic based on 'is disc' and 'isdir' - if meta.get('is disc', False) is False: + if meta.get('is disc', False) is True: meta['keep_folder'] = False # Ensure 'keep_folder' is False if 'is disc' is True if meta.get('keep_folder'): From 0537f09b852695ae3fc01f9bec1864aa5df21c7a Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 24 Nov 2024 00:17:26 +1000 Subject: [PATCH 492/741] Missed a meta call --- src/prep.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index 0d7730f67..b6c99962a 100644 --- a/src/prep.py +++ b/src/prep.py @@ -278,7 +278,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met meta['saved_description'] = True if not meta.get('image_list'): # Only handle images if image_list is not already populated - valid_images = await self.check_images_concurrently(ptp_imagelist) + valid_images = await self.check_images_concurrently(ptp_imagelist, meta) if valid_images: meta['image_list'] = valid_images else: From 12cb2bda8dda23f99d4ded3792483dd225bf1314 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 24 Nov 2024 00:28:58 +1000 Subject: [PATCH 493/741] Remove pointless console --- src/clients.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/clients.py b/src/clients.py index 704bcaebe..08f0bbab6 100644 --- a/src/clients.py +++ b/src/clients.py @@ -342,8 +342,6 @@ async def qbittorrent(self, path, torrent, local_path, remote_path, client, is_d if not path.endswith('/'): path += '/' - console.print("[red]Final Path:", path) - # Initialize qBittorrent client qbt_client = qbittorrentapi.Client( host=client['qbit_url'], From 41548a42baf34ca2f0dd46b3cad2d20ddd26c0d1 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 24 Nov 2024 00:44:18 +1000 Subject: [PATCH 494/741] Always https for ptpimg urls --- src/prep.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/prep.py b/src/prep.py index b6c99962a..a12e70847 100644 --- a/src/prep.py +++ b/src/prep.py @@ -98,6 +98,11 @@ async def check_and_collect(image_dict): if not img_url: return None + if "ptpimg.me" in img_url and img_url.startswith("http://"): + img_url = img_url.replace("http://", "https://") + image_dict['raw_url'] = img_url + image_dict['web_url'] = img_url + # Verify the image link if await self.check_image_link(img_url): # Check if the image is hosted on an approved image host From bd4f025178c18e59e42ab43eb7e9787cb5b585a4 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 24 Nov 2024 
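Stepping back to the keep-folder change above: what it really decides is the root of the `.torrent`, the whole directory versus a lone video file discovered by globbing. Roughly, under the same assumptions as `create_torrent` (sample filtering simplified):

```python
import glob
import os

def choose_torrent_root(path: str, keep_folder: bool) -> str:
    """Pick what the .torrent should contain, echoing create_torrent()'s logic."""
    if keep_folder:
        return path  # hash the complete folder as-is

    # Otherwise collect video files, ignoring sample files.
    videos = []
    for pattern in ('*.mkv', '*.mp4', '*.ts'):
        videos.extend(glob.glob(os.path.join(path, pattern)))
    videos = [v for v in videos if not v.lower().endswith('sample.mkv')]

    # A folder holding exactly one video collapses to that single file.
    return videos[0] if len(videos) == 1 else path

print(choose_torrent_root('.', keep_folder=True))
```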
01:06:09 +1000 Subject: [PATCH 495/741] ULCX - skip concerts todo: fix other sites --- src/trackers/ULCX.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/trackers/ULCX.py b/src/trackers/ULCX.py index 78a5d4938..489ee64ea 100644 --- a/src/trackers/ULCX.py +++ b/src/trackers/ULCX.py @@ -61,6 +61,9 @@ async def get_res_id(self, resolution): return resolution_id async def upload(self, meta, disctype): + if 'concert' in meta['keywords']: + console.print('[bold red]Concerts not allowed.') + return common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta['category']) From 8c0f315acbf31b1927304f556d8b26259257eee8 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 24 Nov 2024 01:17:19 +1000 Subject: [PATCH 496/741] Disable markup for ptp description --- src/trackers/PTP.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 8ca791501..5d5307816 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -207,7 +207,7 @@ async def get_ptp_description(self, ptp_torrent_id, meta, is_disc): desc, imagelist = bbcode.clean_ptp_description(ptp_desc, is_disc) console.print("[bold green]Successfully grabbed description from PTP") - console.print(f"[cyan]Description after cleaning:[yellow]\n{desc[:1000]}...") # Show first 1000 characters for brevity + console.print(f"[cyan]Description after cleaning:[yellow]\n{desc[:1000]}...", markup=False) # Show first 1000 characters for brevity if not meta.get('skipit') and not meta['unattended']: # Allow user to edit or discard the description From 5c0db2b4f5573ae2cb6dc3bb9c770dbf8fe69a98 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 24 Nov 2024 01:35:24 +1000 Subject: [PATCH 497/741] ULCX - skip when resolution < 720p --- src/trackers/ULCX.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/trackers/ULCX.py b/src/trackers/ULCX.py index 489ee64ea..9b66c6851 100644 --- a/src/trackers/ULCX.py +++ b/src/trackers/ULCX.py @@ -45,6 +45,8 @@ async def get_type_id(self, type): return type_id async def get_res_id(self, resolution): + if resolution not in ['8640p', '4320p', '2160p', '1440p', '1080p', '1080i', '720p']: + return None resolution_id = { '8640p': '10', '4320p': '1', @@ -69,6 +71,9 @@ async def upload(self, meta, disctype): cat_id = await self.get_cat_id(meta['category']) type_id = await self.get_type_id(meta['type']) resolution_id = await self.get_res_id(meta['resolution']) + if resolution_id is None: + console.print("Resolution is below 720p; skipping.") + return await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) From 5478cf32a13b33e4269973dbb0f8d3c91ede2b16 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 24 Nov 2024 01:58:23 +1000 Subject: [PATCH 498/741] Check image resolution pulled from description Skip placeholder images returned by some hosts --- src/prep.py | 65 +++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 56 insertions(+), 9 deletions(-) diff --git a/src/prep.py b/src/prep.py index a12e70847..3c94d8d95 100644 --- a/src/prep.py +++ b/src/prep.py @@ -51,6 +51,7 @@ import aiohttp from PIL import Image import io + from io import BytesIO import sys except ModuleNotFoundError: console.print(traceback.print_exc()) @@ -92,7 +93,31 @@ async def check_images_concurrently(self, imagelist, meta): if 'image_sizes' not in 
meta: meta['image_sizes'] = {} - # Function to check each image's URL, host, and log size + # Map fixed resolution names to vertical resolutions + resolution_map = { + '8640p': 8640, + '4320p': 4320, + '2160p': 2160, + '1440p': 1440, + '1080p': 1080, + '1080i': 1080, + '720p': 720, + '576p': 576, + '576i': 576, + '480p': 480, + '480i': 480, + } + + # Get expected vertical resolution + expected_resolution_name = meta.get('resolution', None) + expected_vertical_resolution = resolution_map.get(expected_resolution_name, None) + + # If no valid resolution is found, skip processing + if expected_vertical_resolution is None: + console.print("[red]Meta resolution is invalid or missing. Skipping all images.[/red]") + return [] + + # Function to check each image's URL, host, and log resolution async def check_and_collect(image_dict): img_url = image_dict.get('raw_url') if not img_url: @@ -110,17 +135,37 @@ async def check_and_collect(image_dict): nonlocal invalid_host_found invalid_host_found = True # Mark that we found an invalid host - # Download the image to check its size async with aiohttp.ClientSession() as session: async with session.get(img_url) as response: if response.status == 200: - image_content = await response.read() # Download the entire image content - image_size = len(image_content) # Calculate the size in bytes - # Store the image size in meta['image_sizes'] - meta['image_sizes'][img_url] = image_size - console.print(f"Size of {img_url}: {image_size / 1024:.2f} KiB") + image_content = await response.read() + + try: + image = Image.open(BytesIO(image_content)) + vertical_resolution = image.height + lower_bound = expected_vertical_resolution * 0.70 # 30% below + upper_bound = expected_vertical_resolution * 1.00 + + if not (lower_bound <= vertical_resolution <= upper_bound): + console.print( + f"[red]Image {img_url} resolution ({vertical_resolution}p) " + f"is outside the allowed range ({int(lower_bound)}-{int(upper_bound)}p). Skipping.[/red]" + ) + return None + + meta['image_sizes'][img_url] = { + "size": len(image_content), + "resolution": f"{image.width}x{image.height}", + } + console.print( + f"Valid image {img_url} with resolution {image.width}x{image.height} " + f"and size {len(image_content) / 1024:.2f} KiB" + ) + except Exception as e: + console.print(f"[red]Failed to process image {img_url}: {e}") + return None else: - console.print(f"[red]Failed to get size for {img_url}. Skipping.") + console.print(f"[red]Failed to fetch image {img_url}. Skipping.") return image_dict else: @@ -143,7 +188,9 @@ async def check_and_collect(image_dict): meta['trackers'] = [tracker.strip() for tracker in meta['trackers'].split(',')] if 'MTV' in meta.get('trackers', []): if invalid_host_found: - console.print("[red]Warning: Some images are not hosted on an MTV-approved image host. MTV will need new images later.[/red]") + console.print( + "[red]Warning: Some images are not hosted on an MTV-approved image host. 
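The net effect of the band check above: a description image may be up to 30% shorter than the release's nominal height (placeholder images served by dead hosts are typically far smaller) but never taller. The check in isolation, exercised with an in-memory test image:

```python
from io import BytesIO
from PIL import Image

RESOLUTION_MAP = {'2160p': 2160, '1080p': 1080, '1080i': 1080, '720p': 720}

def image_matches_release(image_bytes: bytes, resolution_name: str) -> bool:
    expected = RESOLUTION_MAP.get(resolution_name)
    if expected is None:
        return False  # unknown/missing meta resolution: skip everything

    height = Image.open(BytesIO(image_bytes)).height
    # Accept 30% below the nominal height, reject anything above it.
    return 0.70 * expected <= height <= 1.00 * expected

# Tiny in-memory probe: a 16x1080 image should pass for a 1080p release.
buf = BytesIO()
Image.new('RGB', (16, 1080)).save(buf, format='PNG')
print(image_matches_release(buf.getvalue(), '1080p'))  # -> True
```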
MTV will need new images later.[/red]" + ) # Issue warning if any valid image is on an unapproved host and MTV is in the trackers list elif 'MTV' in trackers_list: if invalid_host_found: From 3734c662ee825f4df38c20f40a7e469e62998472 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 24 Nov 2024 02:07:36 +1000 Subject: [PATCH 499/741] Capture PTP urls not in tags --- src/bbcode.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/bbcode.py b/src/bbcode.py index 102dcff17..8ed1a386e 100644 --- a/src/bbcode.py +++ b/src/bbcode.py @@ -47,7 +47,11 @@ def clean_ptp_description(self, desc, is_disc): desc = desc.replace('\r\n', '\n') # Remove url tags with PTP/HDB links - url_tags = re.findall(r"(\[url[\=\]]https?:\/\/passthepopcorn\.m[^\]]+)([^\[]+)(\[\/url\])?", desc, flags=re.IGNORECASE) + url_tags = re.findall( + r"(?:\[url(?:=|\])[^\]]*https?:\/\/passthepopcorn\.m[^\]]*\]|\bhttps?:\/\/passthepopcorn\.m[^\s]+)", + desc, + flags=re.IGNORECASE, + ) url_tags += re.findall(r"(\[url[\=\]]https?:\/\/hdbits\.o[^\]]+)([^\[]+)(\[\/url\])?", desc, flags=re.IGNORECASE) if url_tags: for url_tag in url_tags: From 6e8e0fabcee2024d8802d6ad82f7c32f8ef833d1 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 24 Nov 2024 11:58:32 +1000 Subject: [PATCH 500/741] Add better queue support --- src/args.py | 1 + src/prep.py | 2 + upload.py | 383 ++++++++++++++++++++++++++++++++++------------------ 3 files changed, 258 insertions(+), 128 deletions(-) diff --git a/src/args.py b/src/args.py index 2f49aed3c..720be7197 100644 --- a/src/args.py +++ b/src/args.py @@ -21,6 +21,7 @@ def parse(self, args, meta): parser = argparse.ArgumentParser() parser.add_argument('path', nargs='*', help="Path to file/directory") + parser.add_argument('--queue', nargs='*', required=False, help="(--queue queue_name) Process an entire folder (files/subfolders) in a queue") parser.add_argument('-s', '--screens', nargs='*', required=False, help="Number of screenshots", default=int(self.config['DEFAULT']['screens'])) parser.add_argument('-mf', '--manual_frames', required=False, help="Comma-separated frame numbers to use as screenshots", type=str, default=None) parser.add_argument('-c', '--category', nargs='*', required=False, help="Category [MOVIE, TV, FANRES]", choices=['movie', 'tv', 'fanres']) diff --git a/src/prep.py b/src/prep.py index 3c94d8d95..8ccf885c1 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1142,6 +1142,8 @@ def is_scene(self, video, meta, imdb=None): meta['scene_name'] = first_result['release'] video = f"{first_result['release']}.mkv" scene = True + if meta['is_dir']: + meta['keep_folder'] = True # NFO Download Handling if first_result.get("hasNFO") == "yes": diff --git a/upload.py b/upload.py index 71b436719..28ab2182e 100644 --- a/upload.py +++ b/upload.py @@ -56,6 +56,7 @@ import glob import cli_ui import traceback +import click from src.console import console from rich.markdown import Markdown @@ -81,147 +82,269 @@ parser = Args(config) +def get_log_file(base_dir, queue_name): + """ + Returns the path to the log file for the given base directory and queue name. + """ + safe_queue_name = queue_name.replace(" ", "_") + return os.path.join(base_dir, "tmp", f"{safe_queue_name}_processed_files.log") + + +def load_processed_files(log_file): + """ + Loads the list of processed files from the log file. + """ + if os.path.exists(log_file): + with open(log_file, "r") as f: + return set(json.load(f)) + return set() + + +def save_processed_file(log_file, file_path): + """ + Adds a processed file to the log. 
+ """ + processed_files = load_processed_files(log_file) + processed_files.add(file_path) + with open(log_file, "w") as f: + json.dump(list(processed_files), f, indent=4) + + +def gather_files_recursive(path, allowed_extensions=None): + """ + Gather files and first-level subfolders. + Each subfolder is treated as a single unit, without exploring deeper. + """ + queue = [] + if os.path.isdir(path): + for entry in os.scandir(path): + if entry.is_dir(): + queue.append(entry.path) + elif entry.is_file() and (allowed_extensions is None or entry.name.lower().endswith(tuple(allowed_extensions))): + queue.append(entry.path) + elif os.path.isfile(path): + if allowed_extensions is None or path.lower().endswith(tuple(allowed_extensions)): + queue.append(path) + else: + console.print(f"[red]Invalid path: {path}") + return queue + + +def resolve_queue_with_glob_or_split(path, paths, allowed_extensions=None): + """ + Handle glob patterns and split path resolution. + Treat subfolders as single units and filter files by allowed_extensions. + """ + queue = [] + if os.path.exists(os.path.dirname(path)) and len(paths) <= 1: + escaped_path = path.replace('[', '[[]') + queue = [ + file for file in glob.glob(escaped_path) + if os.path.isdir(file) or (os.path.isfile(file) and (allowed_extensions is None or file.lower().endswith(tuple(allowed_extensions)))) + ] + if queue: + display_queue(queue) + elif os.path.exists(os.path.dirname(path)) and len(paths) > 1: + queue = [ + file for file in paths + if os.path.isdir(file) or (os.path.isfile(file) and (allowed_extensions is None or file.lower().endswith(tuple(allowed_extensions)))) + ] + display_queue(queue) + elif not os.path.exists(os.path.dirname(path)): + queue = [ + file for file in resolve_split_path(path) # noqa F8221 + if os.path.isdir(file) or (os.path.isfile(file) and (allowed_extensions is None or file.lower().endswith(tuple(allowed_extensions)))) + ] + display_queue(queue) + return queue + + +def merge_meta(meta, saved_meta): + """Merges saved metadata with the current meta, respecting overwrite rules.""" + overwrite_list = [ + 'trackers', 'dupe', 'debug', 'anon', 'category', 'type', 'screens', 'nohash', 'manual_edition', 'imdb', 'tmdb_manual', 'mal', 'manual', + 'hdb', 'ptp', 'blu', 'no_season', 'no_aka', 'no_year', 'no_dub', 'no_tag', 'no_seed', 'client', 'desclink', 'descfile', 'desc', 'draft', + 'modq', 'region', 'freeleech', 'personalrelease', 'unattended', 'manual_season', 'manual_episode', 'torrent_creation', 'qbit_tag', 'qbit_cat', + 'skip_imghost_upload', 'imghost', 'manual_source', 'webdv', 'hardcoded-subs', 'dual_audio' + ] + sanitized_meta = {} + for key, value in saved_meta.items(): + clean_key = key.strip().strip("'").strip('"') + if clean_key in overwrite_list and meta.get(clean_key) is not None: + sanitized_meta[clean_key] = meta[clean_key] + else: + sanitized_meta[clean_key] = value + return sanitized_meta + + +def display_queue(queue, base_dir, queue_name, save_to_log=True): + """Displays the queued files in markdown format and optionally saves them to a log file in the tmp directory.""" + md_text = "\n - ".join(queue) + console.print("\n[bold green]Queuing these files:[/bold green]", end='') + console.print(Markdown(f"- {md_text.rstrip()}\n\n", style=Style(color='cyan'))) + console.print("\n\n") + + if save_to_log: + tmp_dir = os.path.join(base_dir, "tmp") + os.makedirs(tmp_dir, exist_ok=True) + log_file = os.path.join(tmp_dir, f"{queue_name}_queue.log") + + try: + with open(log_file, 'w') as f: + json.dump(queue, f, indent=4) + 
console.print(f"[bold green]Queue successfully saved to log file: {log_file}") + except Exception as e: + console.print(f"[bold red]Failed to save queue to log file: {e}") + + +async def process_meta(meta, base_dir): + """Process the metadata for each queued path.""" + + if meta['imghost'] is None: + meta['imghost'] = config['DEFAULT']['img_host_1'] + + if not meta['unattended']: + ua = config['DEFAULT'].get('auto_mode', False) + if str(ua).lower() == "true": + meta['unattended'] = True + console.print("[yellow]Running in Auto Mode") + + prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config) + meta = await prep.gather_prep(meta=meta, mode='cli') + meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await prep.get_name(meta) + + if meta.get('image_list', False) in (False, []) and meta.get('skip_imghost_upload', False) is False: + return_dict = {} + meta['image_list'], dummy_var = prep.upload_screens(meta, meta['screens'], 1, 0, meta['screens'], [], return_dict) + meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json" + with open(meta_filename, 'w') as f: + json.dump(meta, f, indent=4) + + if meta.get('debug', False): + console.print(meta['image_list']) + + elif meta.get('skip_imghost_upload', False) is True and meta.get('image_list', False) is False: + meta['image_list'] = [] + + torrent_path = os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") + if not os.path.exists(torrent_path): + reuse_torrent = None + if meta.get('rehash', False) is False: + reuse_torrent = await client.find_existing_torrent(meta) + if reuse_torrent is not None: + prep.create_base_from_existing_torrent(reuse_torrent, meta['base_dir'], meta['uuid']) + + if meta['nohash'] is False and reuse_torrent is None: + prep.create_torrent(meta, Path(meta['path']), "BASE") + if meta['nohash']: + meta['client'] = "none" + + elif os.path.exists(torrent_path) and meta.get('rehash', False) is True and meta['nohash'] is False: + prep.create_torrent(meta, Path(meta['path']), "BASE") + + if int(meta.get('randomized', 0)) >= 1: + prep.create_random_torrents(meta['base_dir'], meta['uuid'], meta['randomized'], meta['path']) + + async def do_the_thing(base_dir): - meta = dict() - meta['base_dir'] = base_dir + meta = {'base_dir': base_dir} paths = [] + prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config) for each in sys.argv[1:]: if os.path.exists(each): paths.append(os.path.abspath(each)) else: break + meta, help, before_args = parser.parse(tuple(' '.join(sys.argv[1:]).split(' ')), meta) - if meta['cleanup'] and os.path.exists(f"{base_dir}/tmp"): + if meta.get('cleanup') and os.path.exists(f"{base_dir}/tmp"): shutil.rmtree(f"{base_dir}/tmp") - console.print("[bold green]Sucessfully emptied tmp directory") - if not meta['path']: + console.print("[bold green]Successfully emptied tmp directory") + + if not meta.get('path'): exit(0) - path = meta['path'] - path = os.path.abspath(path) - if path.endswith('"'): - path = path[:-1] + + path = os.path.abspath(meta['path'].strip('"')) queue = [] - if os.path.exists(path): - meta, help, before_args = parser.parse(tuple(' '.join(sys.argv[1:]).split(' ')), meta) - queue = [path] - else: - # Search glob if dirname exists - if os.path.exists(os.path.dirname(path)) and len(paths) <= 1: - escaped_path = path.replace('[', '[[]') - globs = glob.glob(escaped_path) - queue = globs - if len(queue) != 0: - md_text = "\n - ".join(queue) - console.print("\n[bold green]Queuing these files:[/bold green]", 
end='') - console.print(Markdown(f"- {md_text.rstrip()}\n\n", style=Style(color='cyan'))) - console.print("\n\n") + + allowed_extensions = ['.mkv', '.mp4', '.ts'] + if meta.get('queue'): + log_file = os.path.join(base_dir, "tmp", f"{meta['queue']}_queue.log") + if os.path.exists(log_file): + with open(log_file, 'r') as f: + existing_queue = json.load(f) + console.print(f"[bold yellow]Found an existing queue log file: {log_file}[/bold yellow]") + console.print(f"[cyan]The queue log contains {len(existing_queue)} items.[/cyan]") + console.print("[cyan]Do you want to edit, discard, or keep the existing queue?[/cyan]") + edit_choice = input("Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: ").strip().lower() + + if edit_choice == 'e': + edited_content = click.edit(json.dumps(existing_queue, indent=4)) + if edited_content: + try: + queue = json.loads(edited_content.strip()) + console.print("[bold green]Successfully updated the queue from the editor.") + except json.JSONDecodeError as e: + console.print(f"[bold red]Failed to parse the edited content: {e}. Using the original queue.") + queue = existing_queue + else: + console.print("[bold red]No changes were made. Using the original queue.") + queue = existing_queue + elif edit_choice == 'd': + console.print("[bold yellow]Discarding the existing queue log. Creating a new queue.") + queue = [] else: - console.print(f"[red]Path: [bold red]{path}[/bold red] does not exist") - - elif os.path.exists(os.path.dirname(path)) and len(paths) != 1: - queue = paths - md_text = "\n - ".join(queue) - console.print("\n[bold green]Queuing these files:[/bold green]", end='') - console.print(Markdown(f"- {md_text.rstrip()}\n\n", style=Style(color='cyan'))) - console.print("\n\n") - elif not os.path.exists(os.path.dirname(path)): - split_path = path.split() - p1 = split_path[0] - for i, each in enumerate(split_path): - try: - if os.path.exists(p1) and not os.path.exists(f"{p1} {split_path[i + 1]}"): - queue.append(p1) - p1 = split_path[i + 1] - else: - p1 += f" {split_path[i + 1]}" - except IndexError: - if os.path.exists(p1): - queue.append(p1) - else: - console.print(f"[red]Path: [bold red]{p1}[/bold red] does not exist") - if len(queue) >= 1: - md_text = "\n - ".join(queue) - console.print("\n[bold green]Queuing these files:[/bold green]", end='') - console.print(Markdown(f"- {md_text.rstrip()}\n\n", style=Style(color='cyan'))) - console.print("\n\n") + console.print("[bold green]Keeping the existing queue as is.") + queue = existing_queue + else: + if os.path.exists(path): + queue = gather_files_recursive(path, allowed_extensions=allowed_extensions) + else: + queue = resolve_queue_with_glob_or_split(path, paths, allowed_extensions=allowed_extensions) + else: + queue = [path] + + if not queue: + console.print(f"[red]No valid files or directories found for path: {path}") + exit(1) + + if meta.get('queue'): + queue_name = meta['queue'] + if 'queue' in meta: + log_file = get_log_file(base_dir, meta['queue']) + processed_files = load_processed_files(log_file) + queue = [file for file in queue if file not in processed_files] + if not queue: + console.print(f"[bold yellow]All files in the {meta['queue']} queue have already been processed.") + exit(0) + + display_queue(queue, base_dir, queue_name, save_to_log=True) + total_files = len(queue) + processed_files_count = 0 else: - # Add Search Here - console.print("[red]There was an issue with your input. 
If you think this was not an issue, please make a report that includes the full command used.") - exit() + console.print("[bold yellow]Processing all files without a log file.") + display_queue(queue, base_dir, queue_name, save_to_log=True) + total_files = len(queue) + processed_files_count = 0 base_meta = {k: v for k, v in meta.items()} for path in queue: - meta = {k: v for k, v in base_meta.items()} + meta = base_meta.copy() meta['path'] = path meta['uuid'] = None + try: - with open(f"{base_dir}/tmp/{os.path.basename(path)}/meta.json") as f: - saved_meta = json.load(f) - - # Define the list of keys that can be overwritten - overwrite_list = [ - 'trackers', 'dupe', 'debug', 'anon', 'category', 'type', 'screens', 'nohash', 'manual_edition', 'imdb', 'tmdb_manual', 'mal', 'manual', - 'hdb', 'ptp', 'blu', 'no_season', 'no_aka', 'no_year', 'no_dub', 'no_tag', 'no_seed', 'client', 'desclink', 'descfile', 'desc', 'draft', - 'modq', 'region', 'freeleech', 'personalrelease', 'unattended', 'manual_season', 'manual_episode', 'torrent_creation', 'qbit_tag', 'qbit_cat', - 'skip_imghost_upload', 'imghost', 'manual_source', 'webdv', 'hardcoded-subs', 'dual_audio' - ] - - sanitized_saved_meta = {} - for key, value in saved_meta.items(): - clean_key = key.strip().strip("'").strip('"') - - if clean_key in overwrite_list: - if clean_key in meta and meta.get(clean_key) is not None: - sanitized_saved_meta[clean_key] = meta[clean_key] - if meta['debug']: - console.print(f"Overriding {clean_key} with meta value:", meta[clean_key]) - else: - sanitized_saved_meta[clean_key] = value - else: - sanitized_saved_meta[clean_key] = value - meta.update(sanitized_saved_meta) - f.close() - except FileNotFoundError: - pass + meta_file = f"{base_dir}/tmp/{os.path.basename(path)}/meta.json" + if os.path.exists(meta_file): + with open(meta_file) as f: + saved_meta = json.load(f) + meta.update(merge_meta(meta, saved_meta)) + except Exception as e: + console.print(f"[red]Failed to load metadata for {path}: {e}") + console.print(f"[green]Gathering info for {os.path.basename(path)}") - if meta['imghost'] is None: - meta['imghost'] = config['DEFAULT']['img_host_1'] - if not meta['unattended']: - ua = config['DEFAULT'].get('auto_mode', False) - if str(ua).lower() == "true": - meta['unattended'] = True - console.print("[yellow]Running in Auto Mode") - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config) - meta = await prep.gather_prep(meta=meta, mode='cli') - meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await prep.get_name(meta) - - if meta.get('image_list', False) in (False, []) and meta.get('skip_imghost_upload', False) is False: - return_dict = {} - meta['image_list'], dummy_var = prep.upload_screens(meta, meta['screens'], 1, 0, meta['screens'], [], return_dict) - meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json" - with open(meta_filename, 'w') as f: - json.dump(meta, f, indent=4) - if meta['debug']: - console.print(meta['image_list']) - # meta['uploaded_screens'] = True - elif meta.get('skip_imghost_upload', False) is True and meta.get('image_list', False) is False: - meta['image_list'] = [] - - if not os.path.exists(os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent")): - reuse_torrent = None - if meta.get('rehash', False) is False: - reuse_torrent = await client.find_existing_torrent(meta) - if reuse_torrent is not None: - prep.create_base_from_existing_torrent(reuse_torrent, meta['base_dir'], meta['uuid']) - if meta['nohash'] is 
False and reuse_torrent is None: - prep.create_torrent(meta, Path(meta['path']), "BASE") - if meta['nohash']: - meta['client'] = "none" - elif os.path.exists(os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent")) and meta.get('rehash', False) is True and meta['nohash'] is False: - prep.create_torrent(meta, Path(meta['path']), "BASE") - if int(meta.get('randomized', 0)) >= 1: - prep.create_random_torrents(meta['base_dir'], meta['uuid'], meta['randomized'], meta['path']) + await process_meta(meta, base_dir) if meta.get('trackers', None) is not None: trackers = meta['trackers'] @@ -231,23 +354,20 @@ async def do_the_thing(base_dir): trackers = trackers.split(',') with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: json.dump(meta, f, indent=4) - f.close() confirm = get_confirmation(meta) while confirm is False: - # help.print_help() - editargs = cli_ui.ask_string("Input args that need correction e.g.(--tag NTb --category tv --tmdb 12345)") + editargs = cli_ui.ask_string("Input args that need correction e.g. (--tag NTb --category tv --tmdb 12345)") editargs = (meta['path'],) + tuple(editargs.split()) - if meta['debug']: - editargs = editargs + ("--debug",) + if meta.get('debug', False): + editargs += ("--debug",) meta, help, before_args = parser.parse(editargs, meta) - # meta = await prep.tmdb_other_meta(meta) meta['edit'] = True meta = await prep.gather_prep(meta=meta, mode='cli') meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await prep.get_name(meta) confirm = get_confirmation(meta) - if isinstance(trackers, list) is False: - trackers = [trackers] + if isinstance(trackers, str): + trackers = trackers.split(',') trackers = [s.strip().upper() for s in trackers] if meta.get('manual', False): trackers.insert(0, "MANUAL") @@ -530,6 +650,13 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): except Exception: console.print(traceback.print_exc()) + # Update progress tracking + processed_files_count += 1 + console.print(f"[cyan]Processed {processed_files_count}/{total_files} files.") + # Add file to processed log + if 'queue' in meta: + save_processed_file(log_file, path) + def get_confirmation(meta): if meta['debug'] is True: From 4bd549d113abc2099994333ddf43f43075ba88e2 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 24 Nov 2024 12:19:23 +1000 Subject: [PATCH 501/741] Keyword blocking --- src/trackers/OE.py | 3 +++ src/trackers/RF.py | 4 ++++ src/trackers/RTF.py | 4 ++++ 3 files changed, 11 insertions(+) diff --git a/src/trackers/OE.py b/src/trackers/OE.py index b101d222a..d71d7b26a 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -47,6 +47,9 @@ def __init__(self, config): pass async def upload(self, meta, disctype): + if 'concert' in meta['keywords']: + console.print('[bold red]Concerts not allowed.') + return common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await self.edit_desc(meta, self.tracker, self.signature) diff --git a/src/trackers/RF.py b/src/trackers/RF.py index 64f73a4a3..3c0dc9937 100644 --- a/src/trackers/RF.py +++ b/src/trackers/RF.py @@ -33,6 +33,10 @@ def __init__(self, config): pass async def upload(self, meta, disctype): + disallowed_keywords = {'XXX', 'Erotic'} + if any(keyword in meta['keywords'] for keyword in disallowed_keywords): + console.print('[bold red]Concerts not allowed.') + return if meta.get('category') == "TV": console.print('[bold red]This site only ALLOWS Movies.') return diff --git a/src/trackers/RTF.py 
b/src/trackers/RTF.py index 5b4a67313..485c886cc 100644 --- a/src/trackers/RTF.py +++ b/src/trackers/RTF.py @@ -30,6 +30,10 @@ def __init__(self, config): pass async def upload(self, meta, disctype): + disallowed_keywords = {'XXX', 'Erotic'} + if any(keyword in meta['keywords'] for keyword in disallowed_keywords): + console.print('[bold red]Concerts not allowed.') + return if datetime.date.today().year - meta['year'] <= 9: console.print("[red]ERROR: Not uploading!\nMust be older than 10 Years as per rules") return From 2cf919f7aff8875cbd9539eda2b1f1133ae9c551 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 24 Nov 2024 14:03:40 +1000 Subject: [PATCH 502/741] fixes --- src/prep.py | 2 +- upload.py | 84 +++++++++++++++++++++++++++++++++-------------------- 2 files changed, 54 insertions(+), 32 deletions(-) diff --git a/src/prep.py b/src/prep.py index 8ccf885c1..9833fac8b 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1142,7 +1142,7 @@ def is_scene(self, video, meta, imdb=None): meta['scene_name'] = first_result['release'] video = f"{first_result['release']}.mkv" scene = True - if meta['is_dir']: + if scene and meta.get('isdir', False) and meta.get('queue') is not None: meta['keep_folder'] = True # NFO Download Handling diff --git a/upload.py b/upload.py index 28ab2182e..d5b089404 100644 --- a/upload.py +++ b/upload.py @@ -159,22 +159,31 @@ def resolve_queue_with_glob_or_split(path, paths, allowed_extensions=None): return queue -def merge_meta(meta, saved_meta): +def merge_meta(meta, saved_meta, path): """Merges saved metadata with the current meta, respecting overwrite rules.""" - overwrite_list = [ - 'trackers', 'dupe', 'debug', 'anon', 'category', 'type', 'screens', 'nohash', 'manual_edition', 'imdb', 'tmdb_manual', 'mal', 'manual', - 'hdb', 'ptp', 'blu', 'no_season', 'no_aka', 'no_year', 'no_dub', 'no_tag', 'no_seed', 'client', 'desclink', 'descfile', 'desc', 'draft', - 'modq', 'region', 'freeleech', 'personalrelease', 'unattended', 'manual_season', 'manual_episode', 'torrent_creation', 'qbit_tag', 'qbit_cat', - 'skip_imghost_upload', 'imghost', 'manual_source', 'webdv', 'hardcoded-subs', 'dual_audio' - ] - sanitized_meta = {} - for key, value in saved_meta.items(): - clean_key = key.strip().strip("'").strip('"') - if clean_key in overwrite_list and meta.get(clean_key) is not None: - sanitized_meta[clean_key] = meta[clean_key] - else: - sanitized_meta[clean_key] = value - return sanitized_meta + with open(f"{base_dir}/tmp/{os.path.basename(path)}/meta.json") as f: + saved_meta = json.load(f) + overwrite_list = [ + 'trackers', 'dupe', 'debug', 'anon', 'category', 'type', 'screens', 'nohash', 'manual_edition', 'imdb', 'tmdb_manual', 'mal', 'manual', + 'hdb', 'ptp', 'blu', 'no_season', 'no_aka', 'no_year', 'no_dub', 'no_tag', 'no_seed', 'client', 'desclink', 'descfile', 'desc', 'draft', + 'modq', 'region', 'freeleech', 'personalrelease', 'unattended', 'manual_season', 'manual_episode', 'torrent_creation', 'qbit_tag', 'qbit_cat', + 'skip_imghost_upload', 'imghost', 'manual_source', 'webdv', 'hardcoded-subs', 'dual_audio' + ] + sanitized_saved_meta = {} + for key, value in saved_meta.items(): + clean_key = key.strip().strip("'").strip('"') + if clean_key in overwrite_list: + if clean_key in meta and meta.get(clean_key) is not None: + sanitized_saved_meta[clean_key] = meta[clean_key] + if meta['debug']: + console.print(f"Overriding {clean_key} with meta value:", meta[clean_key]) + else: + sanitized_saved_meta[clean_key] = value + else: + sanitized_saved_meta[clean_key] = value + 
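# Worked example of the overwrite rule (all values invented): given a saved
# run with {"'trackers'": "BLU", "uuid": "old-run"} and a current CLI run
# where meta = {"trackers": "OE,RF"}, the stray quotes are stripped from the
# saved key, "trackers" is in overwrite_list with a non-None CLI value, so
# "OE,RF" wins; "uuid" is not in overwrite_list, so the saved "old-run" is
# kept. Result: {"trackers": "OE,RF", "uuid": "old-run"}.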
meta.update(sanitized_saved_meta) + f.close() + return sanitized_saved_meta def display_queue(queue, base_dir, queue_name, save_to_log=True): @@ -211,13 +220,14 @@ async def process_meta(meta, base_dir): prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config) meta = await prep.gather_prep(meta=meta, mode='cli') + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: + json.dump(meta, f, indent=4) meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await prep.get_name(meta) if meta.get('image_list', False) in (False, []) and meta.get('skip_imghost_upload', False) is False: return_dict = {} meta['image_list'], dummy_var = prep.upload_screens(meta, meta['screens'], 1, 0, meta['screens'], [], return_dict) - meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json" - with open(meta_filename, 'w') as f: + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: json.dump(meta, f, indent=4) if meta.get('debug', False): @@ -249,7 +259,6 @@ async def process_meta(meta, base_dir): async def do_the_thing(base_dir): meta = {'base_dir': base_dir} paths = [] - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config) for each in sys.argv[1:]: if os.path.exists(each): paths.append(os.path.abspath(each)) @@ -264,11 +273,15 @@ async def do_the_thing(base_dir): if not meta.get('path'): exit(0) - path = os.path.abspath(meta['path'].strip('"')) + path = meta['path'] + path = os.path.abspath(path) + if path.endswith('"'): + path = path[:-1] queue = [] allowed_extensions = ['.mkv', '.mp4', '.ts'] if meta.get('queue'): + meta, help, before_args = parser.parse(tuple(' '.join(sys.argv[1:]).split(' ')), meta) log_file = os.path.join(base_dir, "tmp", f"{meta['queue']}_queue.log") if os.path.exists(log_file): with open(log_file, 'r') as f: @@ -302,6 +315,7 @@ async def do_the_thing(base_dir): else: queue = resolve_queue_with_glob_or_split(path, paths, allowed_extensions=allowed_extensions) else: + meta, help, before_args = parser.parse(tuple(' '.join(sys.argv[1:]).split(' ')), meta) queue = [path] if not queue: @@ -330,30 +344,37 @@ async def do_the_thing(base_dir): base_meta = {k: v for k, v in meta.items()} for path in queue: - meta = base_meta.copy() - meta['path'] = path - meta['uuid'] = None - try: - meta_file = f"{base_dir}/tmp/{os.path.basename(path)}/meta.json" + meta = base_meta.copy() + meta['path'] = path + meta['uuid'] = None + + if not path: + raise ValueError("The 'path' variable is not defined or is empty.") + + meta_file = os.path.join(base_dir, "tmp", os.path.basename(path), "meta.json") + if os.path.exists(meta_file): - with open(meta_file) as f: + with open(meta_file, "r") as f: saved_meta = json.load(f) - meta.update(merge_meta(meta, saved_meta)) + meta.update(merge_meta(meta, saved_meta, path)) + else: + console.print(f"[yellow]No metadata file found at {meta_file}") + except Exception as e: - console.print(f"[red]Failed to load metadata for {path}: {e}") + import traceback + traceback.print_exc() + console.print(f"[red]Failed to load metadata for path '{path}': {e}") console.print(f"[green]Gathering info for {os.path.basename(path)}") await process_meta(meta, base_dir) - + prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config) if meta.get('trackers', None) is not None: trackers = meta['trackers'] else: trackers = config['TRACKERS']['default_trackers'] if "," in trackers: trackers = trackers.split(',') - with 
open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: - json.dump(meta, f, indent=4) confirm = get_confirmation(meta) while confirm is False: editargs = cli_ui.ask_string("Input args that need correction e.g. (--tag NTb --category tv --tmdb 12345)") @@ -363,6 +384,8 @@ async def do_the_thing(base_dir): meta, help, before_args = parser.parse(editargs, meta) meta['edit'] = True meta = await prep.gather_prep(meta=meta, mode='cli') + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: + json.dump(meta, f, indent=4) meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await prep.get_name(meta) confirm = get_confirmation(meta) @@ -371,7 +394,6 @@ async def do_the_thing(base_dir): trackers = [s.strip().upper() for s in trackers] if meta.get('manual', False): trackers.insert(0, "MANUAL") - #################################### ####### Upload to Trackers ####### # noqa #F266 #################################### From cb5447fbe18100e1eb56be548fa5ef96177aad14 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 24 Nov 2024 14:10:31 +1000 Subject: [PATCH 503/741] also allow edit initial queue --- upload.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/upload.py b/upload.py index d5b089404..5d57b0fa2 100644 --- a/upload.py +++ b/upload.py @@ -314,6 +314,27 @@ async def do_the_thing(base_dir): queue = gather_files_recursive(path, allowed_extensions=allowed_extensions) else: queue = resolve_queue_with_glob_or_split(path, paths, allowed_extensions=allowed_extensions) + + console.print(f"[cyan]A new queue log file will be created: {log_file}[/cyan]") + console.print(f"[cyan]The new queue will contain {len(queue)} items.[/cyan]") + console.print("[cyan]Do you want to edit the initial queue before saving?[/cyan]") + edit_choice = input("Enter 'e' to edit, or press Enter to save as is: ").strip().lower() + + if edit_choice == 'e': + edited_content = click.edit(json.dumps(queue, indent=4)) + if edited_content: + try: + queue = json.loads(edited_content.strip()) + console.print("[bold green]Successfully updated the queue from the editor.") + except json.JSONDecodeError as e: + console.print(f"[bold red]Failed to parse the edited content: {e}. Using the original queue.") + else: + console.print("[bold red]No changes were made. 
Using the original queue.") + + # Save the queue to the log file + with open(log_file, 'w') as f: + json.dump(queue, f, indent=4) + console.print(f"[bold green]Queue log file created: {log_file}[/bold green]") else: meta, help, before_args = parser.parse(tuple(' '.join(sys.argv[1:]).split(' ')), meta) queue = [path] From 73ad2a4963620d69d059e9a09138e8b008467d77 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 24 Nov 2024 14:13:13 +1000 Subject: [PATCH 504/741] Lint --- upload.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/upload.py b/upload.py index 5d57b0fa2..6e76fd8cf 100644 --- a/upload.py +++ b/upload.py @@ -314,7 +314,7 @@ async def do_the_thing(base_dir): queue = gather_files_recursive(path, allowed_extensions=allowed_extensions) else: queue = resolve_queue_with_glob_or_split(path, paths, allowed_extensions=allowed_extensions) - + console.print(f"[cyan]A new queue log file will be created: {log_file}[/cyan]") console.print(f"[cyan]The new queue will contain {len(queue)} items.[/cyan]") console.print("[cyan]Do you want to edit the initial queue before saving?[/cyan]") @@ -330,7 +330,7 @@ async def do_the_thing(base_dir): console.print(f"[bold red]Failed to parse the edited content: {e}. Using the original queue.") else: console.print("[bold red]No changes were made. Using the original queue.") - + # Save the queue to the log file with open(log_file, 'w') as f: json.dump(queue, f, indent=4) From 380fd4e175a79f93f5009b742027d42aae364f84 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 24 Nov 2024 18:35:40 +1000 Subject: [PATCH 505/741] Progress queue counter only if queue --- upload.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/upload.py b/upload.py index 6e76fd8cf..fa6388650 100644 --- a/upload.py +++ b/upload.py @@ -693,11 +693,9 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): except Exception: console.print(traceback.print_exc()) - # Update progress tracking - processed_files_count += 1 - console.print(f"[cyan]Processed {processed_files_count}/{total_files} files.") - # Add file to processed log - if 'queue' in meta: + if meta.get('queue') is not None: + processed_files_count += 1 + console.print(f"[cyan]Processed {processed_files_count}/{total_files} files.") save_processed_file(log_file, path) From 66bd0aa0db8eff36c28ca7da9f7400edcd8e003b Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 24 Nov 2024 19:27:54 +1000 Subject: [PATCH 506/741] Faster skips --- src/trackers/ANT.py | 7 ++++--- src/trackers/OE.py | 7 ++++--- src/trackers/RF.py | 19 +++++++++--------- src/trackers/RTF.py | 16 ++++++++------- src/trackers/ULCX.py | 7 ++++--- upload.py | 47 ++++++++++++++++++++++++-------------------- 6 files changed, 56 insertions(+), 47 deletions(-) diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index 80f7e0f49..c6c8300c9 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -61,9 +61,6 @@ async def get_flags(self, meta): return flags async def upload(self, meta, disctype): - if meta.get('category') == "TV": - console.print('[bold red]This site only ALLOWS Movies.') - return common = COMMON(config=self.config) torrent_filename = "BASE" torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent" @@ -145,6 +142,10 @@ async def edit_desc(self, meta): return async def search_existing(self, meta, disctype): + if meta.get('category') == "TV": + console.print('[bold red]This site only ALLOWS Movies.') + meta['skipping'] = "ANT" + return dupes = [] console.print("[yellow]Searching for 
existing torrents on site...") params = { diff --git a/src/trackers/OE.py b/src/trackers/OE.py index d71d7b26a..f4d118a03 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -47,9 +47,6 @@ def __init__(self, config): pass async def upload(self, meta, disctype): - if 'concert' in meta['keywords']: - console.print('[bold red]Concerts not allowed.') - return common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await self.edit_desc(meta, self.tracker, self.signature) @@ -314,6 +311,10 @@ def process_languages(tracks): return async def search_existing(self, meta, disctype): + if 'concert' in meta['keywords']: + console.print('[bold red]Concerts not allowed.') + meta['skipping'] = "OE" + return dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { diff --git a/src/trackers/RF.py b/src/trackers/RF.py index 3c0dc9937..a9078754b 100644 --- a/src/trackers/RF.py +++ b/src/trackers/RF.py @@ -33,13 +33,6 @@ def __init__(self, config): pass async def upload(self, meta, disctype): - disallowed_keywords = {'XXX', 'Erotic'} - if any(keyword in meta['keywords'] for keyword in disallowed_keywords): - console.print('[bold red]Concerts not allowed.') - return - if meta.get('category') == "TV": - console.print('[bold red]This site only ALLOWS Movies.') - return common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.forum_link) @@ -169,6 +162,15 @@ async def get_res_id(self, resolution): return resolution_id async def search_existing(self, meta, disctype): + disallowed_keywords = {'XXX', 'Erotic'} + if any(keyword in meta['keywords'] for keyword in disallowed_keywords): + console.print('[bold red]Erotic not allowed.') + meta['skipping'] = "RF" + return + if meta.get('category') == "TV": + console.print('[bold red]This site only ALLOWS Movies.') + meta['skipping'] = "RF" + return dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { @@ -179,9 +181,6 @@ async def search_existing(self, meta, disctype): 'resolutions[]': await self.get_res_id(meta['resolution']), 'name': "" } - if meta['category'] == 'TV': - console.print('[bold red]Unable to search site for TV as this site only ALLOWS Movies') - # params['name'] = f"{meta.get('season', '')}{meta.get('episode', '')}" if meta.get('edition', "") != "": params['name'] = params['name'] + meta['edition'] try: diff --git a/src/trackers/RTF.py b/src/trackers/RTF.py index 485c886cc..cb146e58c 100644 --- a/src/trackers/RTF.py +++ b/src/trackers/RTF.py @@ -30,13 +30,6 @@ def __init__(self, config): pass async def upload(self, meta, disctype): - disallowed_keywords = {'XXX', 'Erotic'} - if any(keyword in meta['keywords'] for keyword in disallowed_keywords): - console.print('[bold red]Concerts not allowed.') - return - if datetime.date.today().year - meta['year'] <= 9: - console.print("[red]ERROR: Not uploading!\nMust be older than 10 Years as per rules") - return common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.forum_link) @@ -97,6 +90,15 @@ async def upload(self, meta, disctype): console.print(json_data) async def search_existing(self, meta, disctype): + disallowed_keywords = {'XXX', 'Erotic'} + if any(keyword in meta['keywords'] for keyword in disallowed_keywords): + console.print('[bold red]XXX not allowed.') + meta['skipping'] = "RTF" + return + 
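# The checks above and below follow one shared shape, applied tracker by
# tracker in this patch: rule checks move out of upload() and into
# search_existing(), which sets meta['skipping'] so upload.py can bypass
# dupe filtering and move on. A generic sketch (rule_violated and reason
# are placeholders, not real helpers):
#
#     async def search_existing(self, meta, disctype):
#         if rule_violated(meta):
#             console.print(f'[bold red]{reason}')
#             meta['skipping'] = self.tracker
#             return
#         ...  # normal dupe search continues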
if datetime.date.today().year - meta['year'] <= 9: + console.print("[red]ERROR: Not uploading!\nMust be older than 10 Years as per rules") + meta['skipping'] = "RTF" + return dupes = [] console.print("[yellow]Searching for existing torrents on site...") headers = { diff --git a/src/trackers/ULCX.py b/src/trackers/ULCX.py index 9b66c6851..1db2d1f74 100644 --- a/src/trackers/ULCX.py +++ b/src/trackers/ULCX.py @@ -63,9 +63,6 @@ async def get_res_id(self, resolution): return resolution_id async def upload(self, meta, disctype): - if 'concert' in meta['keywords']: - console.print('[bold red]Concerts not allowed.') - return common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta['category']) @@ -156,6 +153,10 @@ async def upload(self, meta, disctype): open_torrent.close() async def search_existing(self, meta, disctype): + if 'concert' in meta['keywords']: + console.print('[bold red]Concerts not allowed.') + meta['skipping'] = "ULCX" + return dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { diff --git a/upload.py b/upload.py index fa6388650..718572bab 100644 --- a/upload.py +++ b/upload.py @@ -507,19 +507,21 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): continue dupes = await tracker_class.search_existing(meta, disctype) - dupes = await common.filter_dupes(dupes, meta) - meta = dupe_check(dupes, meta) - - # Proceed with upload if the meta is set to upload - if meta.get('upload', False): - await tracker_class.upload(meta, disctype) - perm = config['DEFAULT'].get('get_permalink', False) - if perm: - # need a wait so we don't race the api - await asyncio.sleep(5) - await tracker_class.search_torrent_page(meta, disctype) - await asyncio.sleep(0.5) - await client.add_to_client(meta, tracker_class.tracker) + if meta['skipping'] is None: + dupes = await common.filter_dupes(dupes, meta) + meta = dupe_check(dupes, meta) + + # Proceed with upload if the meta is set to upload + if meta.get('upload', False): + await tracker_class.upload(meta, disctype) + perm = config['DEFAULT'].get('get_permalink', False) + if perm: + # need a wait so we don't race the api + await asyncio.sleep(5) + await tracker_class.search_torrent_page(meta, disctype) + await asyncio.sleep(0.5) + await client.add_to_client(meta, tracker_class.tracker) + meta['skipping'] = None if tracker in other_api_trackers: tracker_class = tracker_class_map[tracker](config=config) @@ -557,15 +559,18 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): await tracker_class.api_test(meta) dupes = await tracker_class.search_existing(meta, disctype) - dupes = await common.filter_dupes(dupes, meta) - meta = dupe_check(dupes, meta) + if meta['skipping'] is None: + dupes = await common.filter_dupes(dupes, meta) + meta = dupe_check(dupes, meta) - # Proceed with upload if the meta is set to upload - if tracker == "TL" or meta.get('upload', False): - await tracker_class.upload(meta, disctype) - if tracker == 'SN': - await asyncio.sleep(16) - await client.add_to_client(meta, tracker_class.tracker) + if meta['skipping'] is None: + # Proceed with upload if the meta is set to upload + if tracker == "TL" or meta.get('upload', False): + await tracker_class.upload(meta, disctype) + if tracker == 'SN': + await asyncio.sleep(16) + await client.add_to_client(meta, tracker_class.tracker) + meta['skipping'] = None if tracker in http_trackers: tracker_class = tracker_class_map[tracker](config=config) From 
dba2e9d8585f4fd3f61de42627f9f36dc8f4535e Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 24 Nov 2024 19:30:25 +1000 Subject: [PATCH 507/741] NBL skip --- src/trackers/NBL.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/trackers/NBL.py b/src/trackers/NBL.py index 7587f7706..4ee317b50 100644 --- a/src/trackers/NBL.py +++ b/src/trackers/NBL.py @@ -44,9 +44,6 @@ async def edit_desc(self, meta): return async def upload(self, meta, disctype): - if meta['category'] != 'TV': - console.print("[red]Only TV Is allowed at NBL") - return common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) @@ -83,6 +80,10 @@ async def upload(self, meta, disctype): open_torrent.close() async def search_existing(self, meta, disctype): + if meta['category'] != 'TV': + console.print("[red]Only TV Is allowed at NBL") + meta['skipping'] = "NBL" + return dupes = [] console.print("[yellow]Searching for existing torrents on site...") if int(meta.get('tvmaze_id', 0)) != 0: From 381330ef5f1b677079a219505cd8be76d8615ace Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 24 Nov 2024 19:59:17 +1000 Subject: [PATCH 508/741] skipping might not be present --- upload.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/upload.py b/upload.py index 718572bab..ac2155cfd 100644 --- a/upload.py +++ b/upload.py @@ -507,7 +507,7 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): continue dupes = await tracker_class.search_existing(meta, disctype) - if meta['skipping'] is None: + if 'skipping' not in meta or meta['skipping'] is None: dupes = await common.filter_dupes(dupes, meta) meta = dupe_check(dupes, meta) @@ -559,11 +559,11 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): await tracker_class.api_test(meta) dupes = await tracker_class.search_existing(meta, disctype) - if meta['skipping'] is None: + if 'skipping' not in meta or meta['skipping'] is None: dupes = await common.filter_dupes(dupes, meta) meta = dupe_check(dupes, meta) - if meta['skipping'] is None: + if 'skipping' not in meta or meta['skipping'] is None: # Proceed with upload if the meta is set to upload if tracker == "TL" or meta.get('upload', False): await tracker_class.upload(meta, disctype) From cfc82004e0ff8d90cd6d5ea963b620dea9d60040 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 24 Nov 2024 20:16:52 +1000 Subject: [PATCH 509/741] Auto banned group skip if -ua --- upload.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/upload.py b/upload.py index ac2155cfd..470c573e3 100644 --- a/upload.py +++ b/upload.py @@ -821,7 +821,10 @@ def check_banned_group(tracker, banned_group_list, meta): console.print(f"[bold yellow]{meta['tag'][1:]}[/bold yellow][bold red] was found on [bold yellow]{tracker}'s[/bold yellow] list of banned groups.") q = True if q: - if not cli_ui.ask_yes_no(cli_ui.red, "Upload Anyways?", default=False): + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): + if not cli_ui.ask_yes_no(cli_ui.red, "Upload Anyways?", default=False): + return True + else: return True return False From b6e425d22a89f1d0704fb1c5cc7c2b2226b6d9cc Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 24 Nov 2024 21:16:11 +1000 Subject: [PATCH 510/741] Improve log file management --- upload.py | 29 ++++++++++++----------------- 1 file changed, 12 insertions(+), 17 deletions(-) diff --git a/upload.py b/upload.py index 470c573e3..169102646 100644 --- a/upload.py +++ b/upload.py 
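Summarising the unattended behaviour introduced in PATCH 509, as read from its diff ('unattended-confirm' comes from the parsed arguments):

    unattended   unattended-confirm   result when a banned group matches
    ----------   ------------------   ----------------------------------
    False        any                  prompt "Upload Anyways?"
    True         True                 prompt "Upload Anyways?"
    True         False or unset       skip the tracker automatically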
@@ -297,6 +297,8 @@ async def do_the_thing(base_dir): try: queue = json.loads(edited_content.strip()) console.print("[bold green]Successfully updated the queue from the editor.") + with open(log_file, 'w') as f: + json.dump(queue, f, indent=4) except json.JSONDecodeError as e: console.print(f"[bold red]Failed to parse the edited content: {e}. Using the original queue.") queue = existing_queue @@ -345,23 +347,16 @@ async def do_the_thing(base_dir): if meta.get('queue'): queue_name = meta['queue'] - if 'queue' in meta: - log_file = get_log_file(base_dir, meta['queue']) - processed_files = load_processed_files(log_file) - queue = [file for file in queue if file not in processed_files] - if not queue: - console.print(f"[bold yellow]All files in the {meta['queue']} queue have already been processed.") - exit(0) - - display_queue(queue, base_dir, queue_name, save_to_log=True) - total_files = len(queue) - processed_files_count = 0 - - else: - console.print("[bold yellow]Processing all files without a log file.") - display_queue(queue, base_dir, queue_name, save_to_log=True) - total_files = len(queue) - processed_files_count = 0 + log_file = get_log_file(base_dir, meta['queue']) + processed_files = load_processed_files(log_file) + queue = [file for file in queue if file not in processed_files] + if not queue: + console.print(f"[bold yellow]All files in the {meta['queue']} queue have already been processed.") + exit(0) + if meta['debug']: + display_queue(queue, base_dir, queue_name, save_to_log=False) + total_files = len(queue) + processed_files_count = 0 base_meta = {k: v for k, v in meta.items()} for path in queue: From c434aebabe400197402a3b41d317f0f58bce25e1 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 24 Nov 2024 23:16:30 +1000 Subject: [PATCH 511/741] Find and remove old mediainfo descriptions --- src/bbcode.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/src/bbcode.py b/src/bbcode.py index 8ed1a386e..fa013f6cb 100644 --- a/src/bbcode.py +++ b/src/bbcode.py @@ -94,6 +94,34 @@ def clean_ptp_description(self, desc, is_disc): desc = re.sub(r"(^(video|audio|text)( #\d+)?\nid)(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) desc = re.sub(r"(^(menu)( #\d+)?\n)(.*?)^$", "", f"{desc}\n\n", flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) + desc = re.sub( + r"\[b\](.*?)(Matroska|DTS|AVC|x264|Progressive|23\.976 fps|16:9|[0-9]+x[0-9]+|[0-9]+ MiB|[0-9]+ Kbps|[0-9]+ bits|cabac=.*?/ aq=.*?|\d+\.\d+ Mbps)\[/b\]", + "", + desc, + flags=re.IGNORECASE | re.DOTALL, + ) + desc = re.sub( + r"(Matroska|DTS|AVC|x264|Progressive|23\.976 fps|16:9|[0-9]+x[0-9]+|[0-9]+ MiB|[0-9]+ Kbps|[0-9]+ bits|cabac=.*?/ aq=.*?|\d+\.\d+ Mbps|[0-9]+\s+channels|[0-9]+\.[0-9]+\s+KHz|[0-9]+ KHz|[0-9]+\s+bits)", + "", + desc, + flags=re.IGNORECASE | re.DOTALL, + ) + desc = re.sub( + r"\[u\](Format|Bitrate|Channels|Sampling Rate|Resolution):\[/u\]\s*\d*.*?", + "", + desc, + flags=re.IGNORECASE, + ) + desc = re.sub( + r"^\s*\d+\s*(channels|KHz|bits)\s*$", + "", + desc, + flags=re.MULTILINE | re.IGNORECASE, + ) + + desc = re.sub(r"^\s+$", "", desc, flags=re.MULTILINE) + desc = re.sub(r"\n{2,}", "\n", desc) + # Convert Quote tags: desc = re.sub(r"\[quote.*?\]", "[code]", desc) desc = desc.replace("[/quote]", "[/code]") From dc4c7427c8cc76f464ad82fc3e108b51fe1eabef Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 25 Nov 2024 15:56:30 +1000 Subject: [PATCH 512/741] Only store image size in image_size meta --- src/prep.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff 
--git a/src/prep.py b/src/prep.py index 3c94d8d95..dcf9d1800 100644 --- a/src/prep.py +++ b/src/prep.py @@ -153,10 +153,7 @@ async def check_and_collect(image_dict): ) return None - meta['image_sizes'][img_url] = { - "size": len(image_content), - "resolution": f"{image.width}x{image.height}", - } + meta['image_sizes'][img_url] = len(image_content) console.print( f"Valid image {img_url} with resolution {image.width}x{image.height} " f"and size {len(image_content) / 1024:.2f} KiB" From ea336a7b85fad8f87d28c2ee60e0a142696bedc7 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 25 Nov 2024 19:30:19 +1000 Subject: [PATCH 513/741] Revert some client.py --- src/clients.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/src/clients.py b/src/clients.py index 08f0bbab6..c6e1521a7 100644 --- a/src/clients.py +++ b/src/clients.py @@ -334,8 +334,8 @@ async def qbittorrent(self, path, torrent, local_path, remote_path, client, is_d path = os.path.dirname(path) # Ensure remote path replacement and normalization - if path.startswith(local_path) and local_path.lower() != remote_path.lower(): - path = path.replace(local_path, remote_path, 1) + if local_path.lower() in path.lower() and local_path.lower() != remote_path.lower(): + path = path.replace(local_path, remote_path) path = path.replace(os.sep, '/') # Ensure trailing slash for qBittorrent @@ -362,15 +362,13 @@ async def qbittorrent(self, path, torrent, local_path, remote_path, client, is_d auto_management = False am_config = client.get('automatic_management_paths', '') if isinstance(am_config, list): - auto_management = any( - os.path.normpath(each).lower() in os.path.normpath(path).lower() - for each in am_config - ) - elif am_config.strip(): - auto_management = os.path.normpath(am_config).lower() in os.path.normpath(path).lower() - - # Set qBittorrent category and content layout - qbt_category = meta.get("qbit_cat", client.get("qbit_cat")) + for each in am_config: + if os.path.normpath(each).lower() in os.path.normpath(path).lower(): + auto_management = True + else: + if os.path.normpath(am_config).lower() in os.path.normpath(path).lower() and am_config.strip() != "": + auto_management = True + qbt_category = client.get("qbit_cat") if not meta.get("qbit_cat") else meta.get('qbit_cat') content_layout = client.get('content_layout', 'Original') # Add the torrent From ac29c15166dc989c4f3d239425f10367b46fab50 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 26 Nov 2024 07:41:30 +1000 Subject: [PATCH 514/741] Remove unused arg --- src/args.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/args.py b/src/args.py index 2f49aed3c..9cd3e75fd 100644 --- a/src/args.py +++ b/src/args.py @@ -45,7 +45,6 @@ def parse(self, args, meta): parser.add_argument('--no-edition', dest='no_edition', action='store_true', required=False, help="Remove Edition from title") parser.add_argument('--dual-audio', dest='dual_audio', action='store_true', required=False, help="Add Dual-Audio to the title") parser.add_argument('-ns', '--no-seed', action='store_true', required=False, help="Do not add torrent to the client") - parser.add_argument('-year', '--year', dest='manual_year', nargs='?', required=False, help="Year", type=int, default=0) parser.add_argument('-ptp', '--ptp', nargs='*', required=False, help="PTP torrent id/permalink", type=str) parser.add_argument('-blu', '--blu', nargs='*', required=False, help="BLU torrent id/link", type=str) parser.add_argument('-aither', '--aither', nargs='*', required=False, help="Aither torrent 
id/link", type=str) From ea809ec3129352cf82b86e05e7ad5df1ed7cfeed Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 26 Nov 2024 16:30:28 +1000 Subject: [PATCH 515/741] Separate data from text in print --- upload.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/upload.py b/upload.py index 169102646..cb7665607 100644 --- a/upload.py +++ b/upload.py @@ -286,7 +286,7 @@ async def do_the_thing(base_dir): if os.path.exists(log_file): with open(log_file, 'r') as f: existing_queue = json.load(f) - console.print(f"[bold yellow]Found an existing queue log file: {log_file}[/bold yellow]") + console.print(f"[bold yellow]Found an existing queue log file:[/bold yellow] [green]{log_file}[/green]") console.print(f"[cyan]The queue log contains {len(existing_queue)} items.[/cyan]") console.print("[cyan]Do you want to edit, discard, or keep the existing queue?[/cyan]") edit_choice = input("Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: ").strip().lower() @@ -317,7 +317,7 @@ async def do_the_thing(base_dir): else: queue = resolve_queue_with_glob_or_split(path, paths, allowed_extensions=allowed_extensions) - console.print(f"[cyan]A new queue log file will be created: {log_file}[/cyan]") + console.print(f"[cyan]A new queue log file will be created:[/cyan] [green]{log_file}[/green]") console.print(f"[cyan]The new queue will contain {len(queue)} items.[/cyan]") console.print("[cyan]Do you want to edit the initial queue before saving?[/cyan]") edit_choice = input("Enter 'e' to edit, or press Enter to save as is: ").strip().lower() @@ -375,7 +375,8 @@ async def do_the_thing(base_dir): saved_meta = json.load(f) meta.update(merge_meta(meta, saved_meta, path)) else: - console.print(f"[yellow]No metadata file found at {meta_file}") + if meta['debug']: + console.print(f"[yellow]No metadata file found at {meta_file}") except Exception as e: import traceback From 46ca89c1804aaa422f9ec0ab962c06805f6d86ec Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 26 Nov 2024 17:15:02 +1000 Subject: [PATCH 516/741] comparison=True --- src/trackers/LST.py | 2 +- src/trackers/OE.py | 3 +-- src/trackers/PSS.py | 2 +- src/trackers/RF.py | 4 ++-- src/trackers/ULCX.py | 2 +- src/trackers/YOINK.py | 2 +- 6 files changed, 7 insertions(+), 8 deletions(-) diff --git a/src/trackers/LST.py b/src/trackers/LST.py index b0c3b318d..2b02b7e45 100644 --- a/src/trackers/LST.py +++ b/src/trackers/LST.py @@ -86,7 +86,7 @@ async def upload(self, meta, disctype): modq = await self.get_flag(meta, 'modq') draft = await self.get_flag(meta, 'draft') name = await self.edit_name(meta) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) + await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: diff --git a/src/trackers/OE.py b/src/trackers/OE.py index f4d118a03..768e41ab4 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -292,8 +292,7 @@ def process_languages(tracks): desc = base desc = bbcode.convert_pre_to_code(desc) desc = bbcode.convert_hide_to_spoiler(desc) - if comparison is False: - desc = bbcode.convert_comparison_to_collapse(desc, 1000) + desc = bbcode.convert_comparison_to_collapse(desc, 1000) desc = desc.replace('[img]', '[img=300]') descfile.write(desc) diff --git a/src/trackers/PSS.py 
b/src/trackers/PSS.py index a25c89d01..eb59d2ae4 100644 --- a/src/trackers/PSS.py +++ b/src/trackers/PSS.py @@ -73,7 +73,7 @@ async def upload(self, meta, disctype): cat_id = await self.get_cat_id(meta['category']) type_id = await self.get_type_id(meta['type']) resolution_id = await self.get_res_id(meta['resolution']) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) + await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: diff --git a/src/trackers/RF.py b/src/trackers/RF.py index a9078754b..688fb10c4 100644 --- a/src/trackers/RF.py +++ b/src/trackers/RF.py @@ -28,14 +28,14 @@ def __init__(self, config): self.source_flag = 'ReelFliX' self.upload_url = 'https://reelflix.xyz/api/torrents/upload' self.search_url = 'https://reelflix.xyz/api/torrents/filter' - self.forum_link = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" self.banned_groups = [""] pass async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - await common.unit3d_edit_desc(meta, self.tracker, self.forum_link) + await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) cat_id = await self.get_cat_id(meta['category']) diff --git a/src/trackers/ULCX.py b/src/trackers/ULCX.py index 1db2d1f74..76ec1c345 100644 --- a/src/trackers/ULCX.py +++ b/src/trackers/ULCX.py @@ -71,7 +71,7 @@ async def upload(self, meta, disctype): if resolution_id is None: console.print("Resolution is below 720p; skipping.") return - await common.unit3d_edit_desc(meta, self.tracker, self.signature) + await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: diff --git a/src/trackers/YOINK.py b/src/trackers/YOINK.py index 6370fa5c6..58af6d7af 100644 --- a/src/trackers/YOINK.py +++ b/src/trackers/YOINK.py @@ -71,7 +71,7 @@ async def upload(self, meta, disctype): cat_id = await self.get_cat_id(meta['category']) type_id = await self.get_type_id(meta['type']) resolution_id = await self.get_res_id(meta['resolution']) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) + await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: From 7c8f5b02c037b753bb42003fcdd2593a6970c0f0 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 26 Nov 2024 18:24:28 +1000 Subject: [PATCH 517/741] HUNO - faster skip --- src/trackers/HUNO.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 
deletions(-) diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index 6a93ace5f..3c1049fb1 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -42,11 +42,6 @@ async def upload(self, meta, disctype): else: anon = 1 - # adding logic to check if its an encode or webrip and not HEVC as only HEVC encodes and webrips are allowed - if meta['video_codec'] != "HEVC" and (meta['type'] == "ENCODE" or meta['type'] == "WEBRIP"): - console.print('[bold red]Only x265/HEVC encodes are allowed') - return - if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() @@ -277,6 +272,10 @@ async def is_plex_friendly(self, meta): return 0 async def search_existing(self, meta, disctype): + if meta['video_codec'] != "HEVC" and (meta['type'] == "ENCODE" or meta['type'] == "WEBRIP"): + console.print('[bold red]Only x265/HEVC encodes are allowed') + meta['skipping'] = "HUNO" + return dupes = [] console.print("[yellow]Searching for existing torrents on site...") From 43bfafc8586bd60387078368aafefc0940b27cf4 Mon Sep 17 00:00:00 2001 From: lewler Date: Tue, 26 Nov 2024 09:54:09 +0100 Subject: [PATCH 518/741] Add manual year override option to arguments --- src/args.py | 1 + src/prep.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/src/args.py b/src/args.py index 9cd3e75fd..625781720 100644 --- a/src/args.py +++ b/src/args.py @@ -45,6 +45,7 @@ def parse(self, args, meta): parser.add_argument('--no-edition', dest='no_edition', action='store_true', required=False, help="Remove Edition from title") parser.add_argument('--dual-audio', dest='dual_audio', action='store_true', required=False, help="Add Dual-Audio to the title") parser.add_argument('-ns', '--no-seed', action='store_true', required=False, help="Do not add torrent to the client") + parser.add_argument('-year', '--year', dest='manual_year', nargs='?', required=False, help="Override the year found", type=int, default=0) parser.add_argument('-ptp', '--ptp', nargs='*', required=False, help="PTP torrent id/permalink", type=str) parser.add_argument('-blu', '--blu', nargs='*', required=False, help="BLU torrent id/link", type=str) parser.add_argument('-aither', '--aither', nargs='*', required=False, help="Aither torrent id/link", type=str) diff --git a/src/prep.py b/src/prep.py index dcf9d1800..3b9ce0900 100644 --- a/src/prep.py +++ b/src/prep.py @@ -3105,6 +3105,8 @@ async def get_name(self, meta): title = meta.get('title', "") alt_title = meta.get('aka', "") year = meta.get('year', "") + if meta.get('manual_year') > 0: + year = meta.get('manual_year') resolution = meta.get('resolution', "") if resolution == "OTHER": resolution = "" From 022a7f0c3b9747d040c7bfaa8e6823679791bcb1 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 26 Nov 2024 21:43:15 +1000 Subject: [PATCH 519/741] OE - NOGRP tag --- src/trackers/OE.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/trackers/OE.py b/src/trackers/OE.py index d71d7b26a..84ddb61f7 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -138,6 +138,8 @@ async def edit_name(self, meta): resolution = meta.get('resolution') video_encode = meta.get('video_encode') name_type = meta.get('type', "") + tag_lower = meta['tag'].lower() + invalid_tags = ["nogrp", "nogroup", "unknown", "-unk-"] if name_type == "DVDRIP": if meta.get('category') == "MOVIE": @@ -176,6 +178,11 @@ def get_audio_lang(media_info_text=None): except (FileNotFoundError, KeyError) as e: print(f"Error processing MEDIAINFO.txt: {e}") + 
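# Intended effect of the tag normalisation below (release names invented):
#   "Title.2020.1080p.WEB-DL.x265-NOGROUP" -> "Title.2020.1080p.WEB-DL.x265-NOGRP"
#   "Title.2020.1080p.WEB-DL.x265-UNKNOWN" -> "Title.2020.1080p.WEB-DL.x265-NOGRP"
#   "Title.2020.1080p.WEB-DL.x265"         -> "Title.2020.1080p.WEB-DL.x265-NOGRP"  (empty tag)
#   "Title.2020.1080p.WEB-DL.x265-NTb"     -> unchanged (valid group tag)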
if meta['tag'] == "" or any(invalid_tag in tag_lower for invalid_tag in invalid_tags): + for invalid_tag in invalid_tags: + oe_name = re.sub(f"-{invalid_tag}", "", oe_name, flags=re.IGNORECASE) + oe_name = f"{oe_name}-NOGRP" + return oe_name async def get_cat_id(self, category_name): From 14d03029929d6532d28e504b46e0508a80083aa7 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 26 Nov 2024 22:41:57 +1000 Subject: [PATCH 520/741] Update dupe checking, again --- src/trackers/COMMON.py | 188 +++++++++++++++++++++++++---------------- 1 file changed, 115 insertions(+), 73 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 9337c7fad..ff383f6e7 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -614,96 +614,138 @@ async def ptgen(self, meta, ptgen_site="", ptgen_retry=3): return ptgen async def filter_dupes(self, dupes, meta): + """ + Allowed entries are returned as dupes. + """ if meta['debug']: console.log("[cyan]Pre-filtered dupes") console.log(dupes) new_dupes = [] - types_to_check = {'REMUX', 'WEBDL', 'WEBRip', 'HDTV'} # noqa F841 - normalized_meta_type = {t.replace('-', '').upper() for t in meta['type']} if isinstance(meta['type'], list) else {meta['type'].replace('-', '').upper()} - has_repack_in_uuid = "repack" in meta['uuid'].lower() if meta.get('uuid') else False + normalized_meta_type = ( + {t.replace('-', '').upper() for t in meta['type']} + if isinstance(meta['type'], list) + else {meta['type'].replace('-', '').upper()} + ) + + has_repack_in_uuid = "repack" in meta.get('uuid', '').lower() + + attribute_checks = [ + { + "key": "repack", + "uuid_flag": has_repack_in_uuid, + "condition": lambda each: meta['tag'] in each and has_repack_in_uuid and "repack" not in each.lower(), + "exclude_msg": lambda each: f"Excluding result because it lacks 'repack' and matches tag '{meta['tag']}': {each}" + }, + { + "key": "remux", + "uuid_flag": "remux" in meta.get('uuid', '').lower(), + "condition": lambda each: "remux" in each.lower(), + "exclude_msg": lambda each: f"Excluding result due to 'remux' mismatch: {each}" + }, + { + "key": "uhd", + "uuid_flag": "uhd" in meta.get('uuid', '').lower(), + "condition": lambda each: "uhd" in each.lower(), + "exclude_msg": lambda each: f"Excluding result due to 'UHD' mismatch: {each}" + }, + { + "key": "webdl", + "uuid_flag": "webdl" in meta.get('uuid', '').lower(), + "condition": lambda each: "webdl" in each.lower(), + "exclude_msg": lambda each: f"Excluding result due to 'WEBDL' mismatch: {each}" + }, + { + "key": "hdtv", + "uuid_flag": "hdtv" in meta.get('uuid', '').lower(), + "condition": lambda each: "hdtv" in each.lower(), + "exclude_msg": lambda each: f"Excluding result due to 'HDTV' mismatch: {each}" + }, + { + "key": "blu-ray", + "uuid_flag": "blu-ray" in meta.get('uuid', '').lower(), + "condition": lambda each: "blu-ray" in each.lower(), + "exclude_msg": lambda each: f"Excluding result due to 'Blu-Ray' mismatch: {each}" + } + ] for each in dupes: - remove_set = set({meta['resolution']}) + remove_set = {meta['resolution']} normalized_each_type = each.replace('-', '').upper() - # Check if types match loosely, based on core attributes (resolution, HDR, audio) - type_match = any(t in normalized_each_type for t in normalized_meta_type) or \ - (meta['resolution'] in each and meta['hdr'] in each and meta['audio'] in each) - + # Type matching logic + type_match = ( + any(t in normalized_each_type for t in normalized_meta_type) or + (meta['resolution'] in each) + ) if not type_match: if meta['debug']: 
console.log(f"[yellow]Excluding result due to type mismatch: {each}") continue - # Repack filtering if the tag matches - if meta['tag'] in each and has_repack_in_uuid and "repack" not in each.lower(): - if meta['debug']: - console.log(f"[yellow]Excluding result because it lacks 'repack' and matches tag '{meta['tag']}': {each}") - continue - - # Define search combos for more nuanced matching - search_combos = [ - { - 'search': meta['hdr'], - 'search_for': {'HDR', 'PQ10'}, - 'update': {'HDR|PQ10'} - }, - { - 'search': meta['hdr'], - 'search_for': {'DV'}, - 'update': {'DV|DoVi'} - }, - { - 'search': meta['hdr'], - 'search_not': {'DV', 'DoVi', 'HDR', 'PQ10'}, - 'update': {'!(DV)|(DoVi)|(HDR)|(PQ10)'} - }, - { - 'search': str(meta.get('tv_pack', 0)), - 'search_for': '1', - 'update': {rf"{meta['season']}(?!E\d+)"} - }, - { - 'search': meta['episode'], - 'search_for': meta['episode'], - 'update': {meta['season'], meta['episode']} - } - ] - - # Apply search combos to refine remove_set - for s in search_combos: - if s.get('search_for') not in (None, ''): - if any(re.search(x, s['search'], flags=re.IGNORECASE) for x in s['search_for']): - remove_set.update(s['update']) - if s.get('search_not') not in (None, ''): - if not any(re.search(x, s['search'], flags=re.IGNORECASE) for x in s['search_not']): - remove_set.update(s['update']) - - search = each.lower().replace('-', '').replace(' ', '').replace('.', '') - - for x in remove_set.copy(): - if "|" in x: - look_for = x.split('|') - for y in look_for: - if y.lower() in search: - if x in remove_set: - remove_set.remove(x) - remove_set.add(y) - - allow = True - for x in remove_set: - if not x.startswith("!"): - if not re.search(x, search, flags=re.I): - allow = False - else: - if re.search(x.replace("!", "", 1), search, flags=re.I) not in (None, False): - allow = False - if allow and each not in new_dupes: - new_dupes.append(each) + for check in attribute_checks: + if check["key"] == "repack" and check["condition"](each): + if meta['debug']: + console.log(f"[yellow]{check['exclude_msg'](each)}") + break + elif check["uuid_flag"] != check["condition"](each): + if meta['debug']: + console.log(f"[yellow]{check['exclude_msg'](each)}") + break + else: + search_combos = [ + {'search': meta['hdr'], 'search_for': {'HDR', 'PQ10'}, 'update': {'HDR|PQ10'}}, + {'search': meta['hdr'], 'search_for': {'DV'}, 'update': {'DV|DoVi'}}, + {'search': meta['hdr'], 'search_not': {'DV', 'DoVi', 'HDR', 'PQ10'}, 'update': {'!(DV)|(DoVi)|(HDR)|(PQ10)'}}, + {'search': str(meta.get('tv_pack', 0)), 'search_for': '1', 'update': {rf"{meta['season']}(?!E\d+)"}}, + {'search': meta['episode'], 'search_for': meta['episode'], 'update': {meta['season'], meta['episode']}}, + ] + + for combo in search_combos: + search = combo.get('search', '') + if search: + if combo.get('search_for') and any(re.search(x, search, flags=re.IGNORECASE) for x in combo['search_for']): + remove_set.update(combo['update']) + elif meta['debug']: + console.log(f"[yellow]Skipping update for '{combo['update']}' because none of '{combo['search_for']}' matched in '{search}'") + + remove_set = self.refine_remove_set(remove_set) + + search_normalized = each.lower().replace('-', '').replace(' ', '').replace('.', '') + if self.should_allow_entry(search_normalized, remove_set, meta): + new_dupes.append(each) return new_dupes + def refine_remove_set(self, remove_set): + """ + Process remove_set to split "|" options into individual entries. 
+ """ + refined_set = set() + for item in remove_set: + if "|" in item: + refined_set.update(item.split('|')) + else: + refined_set.add(item) + return refined_set + + def should_allow_entry(self, search, remove_set, meta): + """ + Determine if an entry should be allowed based on the remove_set. + """ + for item in remove_set: + if not item.startswith("!"): + if not re.search(item, search, flags=re.I): + if meta['debug']: + console.log(f"[yellow]Excluding result because '{item}' not found in: {search}") + return False + else: + if re.search(item[1:], search, flags=re.I): + if meta['debug']: + console.log(f"[yellow]Excluding result because '{item[1:]}' found in: {search}") + return False + return True + class MediaInfoParser: # Language to ISO country code mapping LANGUAGE_CODE_MAP = { From 6bf6610b57a0eabaa2a49f0b31c859bba55dc367 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 27 Nov 2024 00:41:52 +1000 Subject: [PATCH 521/741] Clean logging --- src/trackers/COMMON.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index ff383f6e7..403bc6efe 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -680,7 +680,7 @@ async def filter_dupes(self, dupes, meta): ) if not type_match: if meta['debug']: - console.log(f"[yellow]Excluding result due to type mismatch: {each}") + console.log(f"[yellow]Excluding result due to resolution mismatch: {each}") continue for check in attribute_checks: @@ -706,8 +706,6 @@ async def filter_dupes(self, dupes, meta): if search: if combo.get('search_for') and any(re.search(x, search, flags=re.IGNORECASE) for x in combo['search_for']): remove_set.update(combo['update']) - elif meta['debug']: - console.log(f"[yellow]Skipping update for '{combo['update']}' because none of '{combo['search_for']}' matched in '{search}'") remove_set = self.refine_remove_set(remove_set) From 9735b8e401d831d043ab59410580787535e46dc8 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 27 Nov 2024 00:59:43 +1000 Subject: [PATCH 522/741] Don't error when processing single file from existing queue --- upload.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/upload.py b/upload.py index cb7665607..38310e8a3 100644 --- a/upload.py +++ b/upload.py @@ -355,11 +355,11 @@ async def do_the_thing(base_dir): exit(0) if meta['debug']: display_queue(queue, base_dir, queue_name, save_to_log=False) - total_files = len(queue) - processed_files_count = 0 base_meta = {k: v for k, v in meta.items()} for path in queue: + total_files = len(queue) + processed_files_count = 0 try: meta = base_meta.copy() meta['path'] = path @@ -697,7 +697,9 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): if meta.get('queue') is not None: processed_files_count += 1 console.print(f"[cyan]Processed {processed_files_count}/{total_files} files.") - save_processed_file(log_file, path) + if not meta['debug']: + if log_file: + save_processed_file(log_file, path) def get_confirmation(meta): From 5ecfd926d41ea1aab04fe1863a89db3596891be8 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 27 Nov 2024 01:04:35 +1000 Subject: [PATCH 523/741] Try some disc matching that will actually work --- src/trackers/COMMON.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 403bc6efe..50efae843 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -662,10 +662,10 @@ async def filter_dupes(self, dupes, meta): "exclude_msg": lambda 
each: f"Excluding result due to 'HDTV' mismatch: {each}" }, { - "key": "blu-ray", - "uuid_flag": "blu-ray" in meta.get('uuid', '').lower(), - "condition": lambda each: "blu-ray" in each.lower(), - "exclude_msg": lambda each: f"Excluding result due to 'Blu-Ray' mismatch: {each}" + "key": "disc_check", + "uuid_flag": meta.get('is_disc', False), + "condition": lambda each: not re.search(r'\.\w{2,4}$', each), + "exclude_msg": lambda each: f"Excluding result because it has a file extension but meta['is_disc'] is True: {each}" } ] From 7b95c0c918d2f752542f6ed981ea83a3b560ef6b Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 27 Nov 2024 01:09:38 +1000 Subject: [PATCH 524/741] Fix header again --- data/example-config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/example-config.py b/data/example-config.py index 74344f11a..b6342c583 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -57,7 +57,7 @@ "processLimit": "10", # Providing the option to add a header, in bbcode, above the screenshot section where supported - # "screenshot_header": "[centers] SCREENSHOTS [/center]", + # "screenshot_header": "[center] SCREENSHOTS [/center]", # Enable lossless PNG Compression (True/False) "optimize_images": True, From dc5351f76813b5917108d8e62111feedd92d3bec Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 27 Nov 2024 17:40:57 +1000 Subject: [PATCH 525/741] Correct is_disc checking --- src/trackers/COMMON.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 50efae843..fed68663c 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -629,6 +629,7 @@ async def filter_dupes(self, dupes, meta): ) has_repack_in_uuid = "repack" in meta.get('uuid', '').lower() + has_is_disc = bool(meta.get('is_disc', False)) attribute_checks = [ { @@ -661,12 +662,6 @@ async def filter_dupes(self, dupes, meta): "condition": lambda each: "hdtv" in each.lower(), "exclude_msg": lambda each: f"Excluding result due to 'HDTV' mismatch: {each}" }, - { - "key": "disc_check", - "uuid_flag": meta.get('is_disc', False), - "condition": lambda each: not re.search(r'\.\w{2,4}$', each), - "exclude_msg": lambda each: f"Excluding result because it has a file extension but meta['is_disc'] is True: {each}" - } ] for each in dupes: @@ -683,6 +678,11 @@ async def filter_dupes(self, dupes, meta): console.log(f"[yellow]Excluding result due to resolution mismatch: {each}") continue + if has_is_disc and re.search(r'\.\w{2,4}$', each): + if meta['debug']: + console.log(f"[yellow]Excluding result because it has a file extension but meta['is_disc'] is True: {each}") + continue + for check in attribute_checks: if check["key"] == "repack" and check["condition"](each): if meta['debug']: From 5c3552ca92e8d4ecc2f3143db6ede8e034862ae1 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 27 Nov 2024 21:40:19 +1000 Subject: [PATCH 526/741] I think I hate this thing, but it maybe works --- src/trackers/COMMON.py | 173 ++++++++++++++++++++++++++--------------- 1 file changed, 110 insertions(+), 63 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index fed68663c..a94c98de8 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -615,21 +615,26 @@ async def ptgen(self, meta, ptgen_site="", ptgen_retry=3): async def filter_dupes(self, dupes, meta): """ - Allowed entries are returned as dupes. + Filter duplicates by applying exclusion rules. Only non-excluded entries are returned. 
""" if meta['debug']: console.log("[cyan]Pre-filtered dupes") console.log(dupes) new_dupes = [] + normalized_meta_type = ( - {t.replace('-', '').upper() for t in meta['type']} + {t.replace("-", "").upper() for t in meta['type']} if isinstance(meta['type'], list) - else {meta['type'].replace('-', '').upper()} + else {meta['type'].replace("-", "").upper()} ) has_repack_in_uuid = "repack" in meta.get('uuid', '').lower() has_is_disc = bool(meta.get('is_disc', False)) + target_hdr = self.refine_hdr_terms(meta.get("hdr")) + target_season = meta.get("season") + target_episode = meta.get("episode") + target_resolution = meta.get("resolution") attribute_checks = [ { @@ -664,86 +669,128 @@ async def filter_dupes(self, dupes, meta): }, ] - for each in dupes: - remove_set = {meta['resolution']} - normalized_each_type = each.replace('-', '').upper() - - # Type matching logic - type_match = ( - any(t in normalized_each_type for t in normalized_meta_type) or - (meta['resolution'] in each) - ) - if not type_match: - if meta['debug']: - console.log(f"[yellow]Excluding result due to resolution mismatch: {each}") - continue + def log_exclusion(reason, item): + if meta['debug']: + console.log(f"[yellow]Excluding result due to {reason}: {item}") + + def process_exclusion(each): + """ + Determine if an entry should be excluded. + Returns True if the entry should be excluded, otherwise allowed as dupe. + """ + normalized = self.normalize_filename(each) + file_hdr = self.refine_hdr_terms(normalized) + + if meta['debug']: + console.log(f"[debug] Evaluating dupe: {each}") + console.log(f"[debug] Normalized dupe: {normalized}") + console.log(f"[debug] File HDR terms: {file_hdr}") + console.log(f"[debug] Target HDR terms: {target_hdr}") if has_is_disc and re.search(r'\.\w{2,4}$', each): - if meta['debug']: - console.log(f"[yellow]Excluding result because it has a file extension but meta['is_disc'] is True: {each}") - continue + log_exclusion("file extension mismatch (is_disc=True)", each) + return True + + if target_resolution and target_resolution not in each: + log_exclusion(f"resolution '{target_resolution}' mismatch", each) + return True for check in attribute_checks: if check["key"] == "repack" and check["condition"](each): if meta['debug']: console.log(f"[yellow]{check['exclude_msg'](each)}") - break + return True elif check["uuid_flag"] != check["condition"](each): if meta['debug']: console.log(f"[yellow]{check['exclude_msg'](each)}") - break - else: - search_combos = [ - {'search': meta['hdr'], 'search_for': {'HDR', 'PQ10'}, 'update': {'HDR|PQ10'}}, - {'search': meta['hdr'], 'search_for': {'DV'}, 'update': {'DV|DoVi'}}, - {'search': meta['hdr'], 'search_not': {'DV', 'DoVi', 'HDR', 'PQ10'}, 'update': {'!(DV)|(DoVi)|(HDR)|(PQ10)'}}, - {'search': str(meta.get('tv_pack', 0)), 'search_for': '1', 'update': {rf"{meta['season']}(?!E\d+)"}}, - {'search': meta['episode'], 'search_for': meta['episode'], 'update': {meta['season'], meta['episode']}}, - ] - - for combo in search_combos: - search = combo.get('search', '') - if search: - if combo.get('search_for') and any(re.search(x, search, flags=re.IGNORECASE) for x in combo['search_for']): - remove_set.update(combo['update']) - - remove_set = self.refine_remove_set(remove_set) - - search_normalized = each.lower().replace('-', '').replace(' ', '').replace('.', '') - if self.should_allow_entry(search_normalized, remove_set, meta): - new_dupes.append(each) + return True + + if not self.has_matching_hdr(file_hdr, target_hdr): + log_exclusion(f"HDR mismatch: Expected 
{target_hdr}, got {file_hdr}", each) + return True + + season_episode_match = self.is_season_episode_match(normalized, target_season, target_episode) + if meta['debug']: + console.log(f"[debug] Season/Episode match result: {season_episode_match}") + if not season_episode_match: + log_exclusion("season/episode mismatch", each) + return True + + console.log(f"[debug] Passed all checks: {each}") + return False + + for each in dupes: + console.log(f"[debug] Evaluating dupe: {each}") + if not process_exclusion(each): + new_dupes.append(each) + + if meta['debug']: + console.log(f"[cyan]Final dupes: {new_dupes}") return new_dupes - def refine_remove_set(self, remove_set): + def normalize_filename(self, filename): """ - Process remove_set to split "|" options into individual entries. + Normalize a filename for easier matching. + Retain season/episode information in the format SxxExx. """ - refined_set = set() - for item in remove_set: - if "|" in item: - refined_set.update(item.split('|')) - else: - refined_set.add(item) - return refined_set + normalized = filename.lower().replace("-", "").replace(" ", "").replace(".", "") + + return normalized - def should_allow_entry(self, search, remove_set, meta): + def is_season_episode_match(self, filename, target_season, target_episode): """ - Determine if an entry should be allowed based on the remove_set. + Check if the filename matches the given season and episode. """ - for item in remove_set: - if not item.startswith("!"): - if not re.search(item, search, flags=re.I): - if meta['debug']: - console.log(f"[yellow]Excluding result because '{item}' not found in: {search}") - return False - else: - if re.search(item[1:], search, flags=re.I): - if meta['debug']: - console.log(f"[yellow]Excluding result because '{item[1:]}' found in: {search}") - return False + if target_season: + target_season = int(str(target_season).lstrip('sS')) + if target_episode: + target_episode = int(str(target_episode).lstrip('eE')) + + season_pattern = f"s{target_season:02}" if target_season else None + episode_pattern = f"e{target_episode:02}" if target_episode else None + + if season_pattern and episode_pattern: + return season_pattern in filename and episode_pattern in filename + if season_pattern: + return season_pattern in filename + if episode_pattern: + return episode_pattern in filename return True + def refine_hdr_terms(self, hdr): + """ + Normalize HDR terms for consistent comparison. + Simplifies all HDR entries to 'HDR' and DV entries to 'DV'. + """ + if hdr is None: + return set() + hdr = hdr.upper() + terms = set() + if "DV" in hdr or "DOVI" in hdr: + terms.add("DV") + if "HDR" in hdr: # Any HDR-related term is normalized to 'HDR' + terms.add("HDR") + return terms + + def has_matching_hdr(self, file_hdr, target_hdr): + """ + Check if the HDR terms match or are compatible. 
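+        For example, {"HDR10+"} simplifies to {"HDR"} and matches {"HDR"},
+        while {"DV"} alone does not match {"HDR"}.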
+ """ + def simplify_hdr(hdr_set): + """Simplify HDR terms to just HDR and DV.""" + simplified = set() + if any(h in hdr_set for h in {"HDR", "HDR10", "HDR10+"}): + simplified.add("HDR") + if "DV" in hdr_set or "DOVI" in hdr_set: + simplified.add("DV") + return simplified + + file_hdr_simple = simplify_hdr(file_hdr) + target_hdr_simple = simplify_hdr(target_hdr) + + return file_hdr_simple == target_hdr_simple + class MediaInfoParser: # Language to ISO country code mapping LANGUAGE_CODE_MAP = { From 1ebad27be2a53ed729f6c7360cc7feef62734910 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 27 Nov 2024 21:42:23 +1000 Subject: [PATCH 527/741] Redundant --- src/trackers/COMMON.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index a94c98de8..2fd3b58e5 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -623,12 +623,6 @@ async def filter_dupes(self, dupes, meta): new_dupes = [] - normalized_meta_type = ( - {t.replace("-", "").upper() for t in meta['type']} - if isinstance(meta['type'], list) - else {meta['type'].replace("-", "").upper()} - ) - has_repack_in_uuid = "repack" in meta.get('uuid', '').lower() has_is_disc = bool(meta.get('is_disc', False)) target_hdr = self.refine_hdr_terms(meta.get("hdr")) From 20e1780eccafbeffbf071bb90907a0542794d071 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 27 Nov 2024 22:02:26 +1000 Subject: [PATCH 528/741] blu-ray matching --- src/trackers/COMMON.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 2fd3b58e5..67b13f7cb 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -661,6 +661,12 @@ async def filter_dupes(self, dupes, meta): "condition": lambda each: "hdtv" in each.lower(), "exclude_msg": lambda each: f"Excluding result due to 'HDTV' mismatch: {each}" }, + { + "key": "bluray", + "uuid_flag": "blu-ray" in meta.get('name', '').lower(), + "condition": lambda each: "blu-ray" in each.lower(), + "exclude_msg": lambda each: f"Excluding result due to 'Blu-ray' mismatch: {each}" + }, ] def log_exclusion(reason, item): @@ -681,6 +687,9 @@ def process_exclusion(each): console.log(f"[debug] File HDR terms: {file_hdr}") console.log(f"[debug] Target HDR terms: {target_hdr}") + if has_is_disc and each.lower().endswith(".m2ts"): + return False + if has_is_disc and re.search(r'\.\w{2,4}$', each): log_exclusion("file extension mismatch (is_disc=True)", each) return True From ed0cb391d7b1fc293ccc1a1004a0bb6cb2fcf89a Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 28 Nov 2024 00:10:47 +1000 Subject: [PATCH 529/741] Add encoder checking --- src/trackers/COMMON.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 67b13f7cb..6a720bb84 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -624,6 +624,7 @@ async def filter_dupes(self, dupes, meta): new_dupes = [] has_repack_in_uuid = "repack" in meta.get('uuid', '').lower() + has_encoder_in_name = meta.get("video_encode").lower() has_is_disc = bool(meta.get('is_disc', False)) target_hdr = self.refine_hdr_terms(meta.get("hdr")) target_season = meta.get("season") @@ -667,6 +668,12 @@ async def filter_dupes(self, dupes, meta): "condition": lambda each: "blu-ray" in each.lower(), "exclude_msg": lambda each: f"Excluding result due to 'Blu-ray' mismatch: {each}" }, + { + "key": "encoder", + "uuid_flag": has_encoder_in_name, + "condition": lambda each: has_encoder_in_name in each.lower(), + 
"exclude_msg": lambda each: f"Excluding result due to 'Encoder' mismatch: {each}" + }, ] def log_exclusion(reason, item): From f6689d4ddca626df1ae7c32db8fe103b5df32a47 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 28 Nov 2024 00:17:18 +1000 Subject: [PATCH 530/741] Change log file scope --- upload.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/upload.py b/upload.py index 38310e8a3..2bb42bf29 100644 --- a/upload.py +++ b/upload.py @@ -279,10 +279,10 @@ async def do_the_thing(base_dir): path = path[:-1] queue = [] + log_file = os.path.join(base_dir, "tmp", f"{meta['queue']}_queue.log") allowed_extensions = ['.mkv', '.mp4', '.ts'] if meta.get('queue'): meta, help, before_args = parser.parse(tuple(' '.join(sys.argv[1:]).split(' ')), meta) - log_file = os.path.join(base_dir, "tmp", f"{meta['queue']}_queue.log") if os.path.exists(log_file): with open(log_file, 'r') as f: existing_queue = json.load(f) From a57c7ebbd93592aabcc6cf776601319a5b91e483 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 28 Nov 2024 00:37:17 +1000 Subject: [PATCH 531/741] Fix image upload when less than 3 images returned from description --- upload.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/upload.py b/upload.py index 2bb42bf29..afa68c6d3 100644 --- a/upload.py +++ b/upload.py @@ -224,9 +224,11 @@ async def process_meta(meta, base_dir): json.dump(meta, f, indent=4) meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await prep.get_name(meta) - if meta.get('image_list', False) in (False, []) and meta.get('skip_imghost_upload', False) is False: + if len(meta.get('image_list', [])) < 3 and meta.get('skip_imghost_upload', False) is False: return_dict = {} - meta['image_list'], dummy_var = prep.upload_screens(meta, meta['screens'], 1, 0, meta['screens'], [], return_dict) + new_images, dummy_var = prep.upload_screens(meta, meta['screens'], 1, 0, meta['screens'], [], return_dict=return_dict) + meta['image_list'].extend(new_images) + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: json.dump(meta, f, indent=4) From 2e618c4668925afe746309ebebbada17571f732a Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 28 Nov 2024 09:35:39 +1000 Subject: [PATCH 532/741] Remove image upload backoff --- src/prep.py | 38 +++++++++++--------------------------- 1 file changed, 11 insertions(+), 27 deletions(-) diff --git a/src/prep.py b/src/prep.py index 3b9ce0900..f932fdc3d 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2810,14 +2810,13 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i nest_asyncio.apply() os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") initial_img_host = self.config['DEFAULT'][f'img_host_{img_host_num}'] - img_host = meta['imghost'] # Use the correctly updated image host from meta + img_host = meta['imghost'] using_custom_img_list = bool(custom_img_list) image_list = [] - successfully_uploaded = set() # Track successfully uploaded images - initial_timeout = 10 # Set the initial timeout for backoff + successfully_uploaded = set() + initial_timeout = 10 - # Initialize the meta key for image sizes if not already present if 'image_sizes' not in meta: meta['image_sizes'] = {} @@ -2834,16 +2833,6 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i console.print(f"[yellow]Skipping upload because images are already uploaded to {img_host}. 
Existing images: {len(existing_images)}, Required: {total_screens}") return existing_images, total_screens - def exponential_backoff(retry_count, initial_timeout): - # Exponential backoff logic with jitter - if retry_count == 1: - backoff_time = initial_timeout - else: - backoff_time = initial_timeout * (1.5 ** (retry_count - 1)) - backoff_time += random.uniform(0, 1) - time.sleep(backoff_time) - return backoff_time - while True: remaining_images = [img for img in image_glob[-screens:] if img not in successfully_uploaded] @@ -2855,20 +2844,17 @@ def exponential_backoff(retry_count, initial_timeout): ) as progress: upload_task = progress.add_task("[green]Uploading Screens...", total=len(remaining_images)) console.print(f"[cyan]Uploading screens to {img_host}...") - # console.print(f"[debug] Remaining images to upload: {remaining_images}") + for image in remaining_images: retry_count = 0 upload_success = False - # Ensure the correct image path is assigned here image_path = os.path.normpath(os.path.join(os.getcwd(), image)) # noqa F841 - # console.print(f"[debug] Normalized image path: {image_path}") while retry_count < max_retries and not upload_success: try: - timeout = exponential_backoff(retry_count + 1, initial_timeout) + timeout = initial_timeout - # Add imgbox handling here if img_host == "imgbox": try: # console.print("[blue]Uploading images to imgbox...") @@ -3021,22 +3007,21 @@ def exponential_backoff(retry_count, initial_timeout): 'web_url': web_url } image_list.append(image_dict) - successfully_uploaded.add(image) # Track the uploaded image + successfully_uploaded.add(image) # Store size in meta, indexed by the img_url # Storing image_sizes for any multi disc/files will probably break something, so lets not do that. if not using_custom_img_list: - meta['image_sizes'][img_url] = image_size # Keep sizes separate in meta['image_sizes'] + meta['image_sizes'][img_url] = image_size progress.advance(upload_task) - i += 1 # Increment the image counter only after success + i += 1 return_dict['image_list'] = image_list - break # Break retry loop after a successful upload + break except Exception as e: retry_count += 1 console.print(f"[yellow]Failed to upload {image} to {img_host}. Attempt {retry_count}/{max_retries}. 
Exception: {str(e)}") - exponential_backoff(retry_count, initial_timeout) if retry_count >= max_retries: next_img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num + 1}', 'No more hosts') @@ -3048,11 +3033,10 @@ def exponential_backoff(retry_count, initial_timeout): return image_list, i break - # Exit the loop after switching hosts if img_host_num > 1 and not upload_success: - continue # Continue to the next host + continue else: - break # Break if upload was successful + break return image_list, i From dee0167103efd356a4df076653e7da75b30d1971 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 28 Nov 2024 15:08:46 +1000 Subject: [PATCH 533/741] Fix DVD remux naming --- src/prep.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index f932fdc3d..c161a16f7 100644 --- a/src/prep.py +++ b/src/prep.py @@ -3186,7 +3186,7 @@ async def get_name(self, meta): elif type == "REMUX" and source in ("BluRay", "HDDVD"): # BluRay Remux name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {three_d} {edition} {repack} {resolution} {uhd} {source} REMUX {hdr} {video_codec} {audio}" # SOURCE potential_missing = ['edition', 'description'] - elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD"): # DVD Remux + elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD"): # DVD Remux name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {source} REMUX {audio}" # SOURCE potential_missing = ['edition', 'description'] elif type == "ENCODE": # Encode From 1e27585d86ead764b013968b0c1c0a4e3c2398a3 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 28 Nov 2024 17:33:26 +1000 Subject: [PATCH 534/741] Fix encoder checking with H.xxx --- src/trackers/COMMON.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 6a720bb84..334f3a0ad 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -616,6 +616,7 @@ async def ptgen(self, meta, ptgen_site="", ptgen_retry=3): async def filter_dupes(self, dupes, meta): """ Filter duplicates by applying exclusion rules. Only non-excluded entries are returned. + Everything is a dupe, until it matches a criteria to be excluded. 
""" if meta['debug']: console.log("[cyan]Pre-filtered dupes") @@ -625,6 +626,7 @@ async def filter_dupes(self, dupes, meta): has_repack_in_uuid = "repack" in meta.get('uuid', '').lower() has_encoder_in_name = meta.get("video_encode").lower() + normalized_encoder = self.normalize_filename(has_encoder_in_name) has_is_disc = bool(meta.get('is_disc', False)) target_hdr = self.refine_hdr_terms(meta.get("hdr")) target_season = meta.get("season") @@ -668,12 +670,6 @@ async def filter_dupes(self, dupes, meta): "condition": lambda each: "blu-ray" in each.lower(), "exclude_msg": lambda each: f"Excluding result due to 'Blu-ray' mismatch: {each}" }, - { - "key": "encoder", - "uuid_flag": has_encoder_in_name, - "condition": lambda each: has_encoder_in_name in each.lower(), - "exclude_msg": lambda each: f"Excluding result due to 'Encoder' mismatch: {each}" - }, ] def log_exclusion(reason, item): @@ -725,6 +721,10 @@ def process_exclusion(each): if not season_episode_match: log_exclusion("season/episode mismatch", each) return True + + if normalized_encoder and normalized_encoder not in normalized: + log_exclusion(f"Encoder '{has_encoder_in_name}' mismatch", each) + return True console.log(f"[debug] Passed all checks: {each}") return False From 29f35b6e6f4651af012bf22af902cdd7de2327ae Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 28 Nov 2024 17:46:27 +1000 Subject: [PATCH 535/741] Create empty image_list as needed --- upload.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/upload.py b/upload.py index afa68c6d3..dd508977f 100644 --- a/upload.py +++ b/upload.py @@ -226,6 +226,8 @@ async def process_meta(meta, base_dir): if len(meta.get('image_list', [])) < 3 and meta.get('skip_imghost_upload', False) is False: return_dict = {} + if 'image_list' not in meta: + meta['image_list'] = [] new_images, dummy_var = prep.upload_screens(meta, meta['screens'], 1, 0, meta['screens'], [], return_dict=return_dict) meta['image_list'].extend(new_images) From dd481f9fba6a119a7b815594ead18d5bb1ed8468 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 28 Nov 2024 18:20:53 +1000 Subject: [PATCH 536/741] Debugging queue Will be used later to process a specific queue for debugging --- upload.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/upload.py b/upload.py index dd508977f..0d08466dd 100644 --- a/upload.py +++ b/upload.py @@ -285,6 +285,19 @@ async def do_the_thing(base_dir): log_file = os.path.join(base_dir, "tmp", f"{meta['queue']}_queue.log") allowed_extensions = ['.mkv', '.mp4', '.ts'] + + if path.endswith('.log') and meta['debug']: + console.print(f"[bold yellow]Processing debugging queue:[/bold yellow] [bold green{path}[/bold green]") + if os.path.exists(path): + log_file = path + with open(path, 'r') as f: + queue = json.load(f) + meta['queue'] = "debugging" + + else: + console.print(f"[bold red]Log file not found: {path}. Exiting.[/bold red]") + exit(1) + if meta.get('queue'): meta, help, before_args = parser.parse(tuple(' '.join(sys.argv[1:]).split(' ')), meta) if os.path.exists(log_file): From 9e9f5ee8c343c2b201f6656dfd2ab0fa9ba7c59f Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 29 Nov 2024 17:38:49 +1000 Subject: [PATCH 537/741] BHD - add internal release skipping Their API is a little slow to update, which means existing dupe checking does not work. This will brute force ban internal releases, and prevent auto uploading scripts from re-uploading recently released internal releases. 
--- src/trackers/BHD.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index 0c34476b2..404a5a1c2 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -216,6 +216,11 @@ async def edit_desc(self, meta): return async def search_existing(self, meta, disctype): + bhd_name = await self.edit_name(meta) + if any(phrase in bhd_name.lower() for phrase in ("-framestor", "-bhdstudio", "-bmf", "-decibel", "-d-zone", "-hifi", "-ncmt", "-tdd", "-flux", "-crfw", "-sonny", "-zr-", "-mkvultra", "-rpg", "-w4nk3r", "-irobot", "-beyondhd")): + console.print("[bold red]This is an internal BHD release, skipping upload[/bold red]") + meta['skipping'] = "BHD" + return dupes = [] console.print("[yellow]Searching for existing torrents on site...") category = meta['category'] From 95da4c7bc59113f1201cbef023f10e31bb4f2ef5 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 29 Nov 2024 19:38:52 +1000 Subject: [PATCH 538/741] Retain existing queue management --- upload.py | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 74 insertions(+), 3 deletions(-) diff --git a/upload.py b/upload.py index 0d08466dd..0c044cb4d 100644 --- a/upload.py +++ b/upload.py @@ -286,7 +286,31 @@ async def do_the_thing(base_dir): log_file = os.path.join(base_dir, "tmp", f"{meta['queue']}_queue.log") allowed_extensions = ['.mkv', '.mp4', '.ts'] - if path.endswith('.log') and meta['debug']: + if path.endswith('.txt') and meta.get('unit3d'): + console.print(f"[bold yellow]Detected a text file for queue input: {path}[/bold yellow]") + if os.path.exists(path): + safe_file_locations = extract_safe_file_locations(path) + if safe_file_locations: + console.print(f"[cyan]Extracted {len(safe_file_locations)} safe file locations from the text file.[/cyan]") + queue = safe_file_locations + meta['queue'] = "unit3d" + + # Save the queue to the log file + try: + with open(log_file, 'w') as f: + json.dump(queue, f, indent=4) + console.print(f"[bold green]Queue log file saved successfully: {log_file}[/bold green]") + except IOError as e: + console.print(f"[bold red]Failed to save the queue log file: {e}[/bold red]") + exit(1) + else: + console.print("[bold red]No safe file locations found in the text file. Exiting.[/bold red]") + exit(1) + else: + console.print(f"[bold red]Text file not found: {path}. Exiting.[/bold red]") + exit(1) + + elif path.endswith('.log') and meta['debug']: console.print(f"[bold yellow]Processing debugging queue:[/bold yellow] [bold green{path}[/bold green]") if os.path.exists(path): log_file = path @@ -298,7 +322,7 @@ async def do_the_thing(base_dir): console.print(f"[bold red]Log file not found: {path}. 
Exiting.[/bold red]") exit(1) - if meta.get('queue'): + elif meta.get('queue'): meta, help, before_args = parser.parse(tuple(' '.join(sys.argv[1:]).split(' ')), meta) if os.path.exists(log_file): with open(log_file, 'r') as f: @@ -354,10 +378,57 @@ async def do_the_thing(base_dir): with open(log_file, 'w') as f: json.dump(queue, f, indent=4) console.print(f"[bold green]Queue log file created: {log_file}[/bold green]") - else: + + elif os.path.exists(path): meta, help, before_args = parser.parse(tuple(' '.join(sys.argv[1:]).split(' ')), meta) queue = [path] + else: + # Search glob if dirname exists + if os.path.exists(os.path.dirname(path)) and len(paths) <= 1: + escaped_path = path.replace('[', '[[]') + globs = glob.glob(escaped_path) + queue = globs + if len(queue) != 0: + md_text = "\n - ".join(queue) + console.print("\n[bold green]Queuing these files:[/bold green]", end='') + console.print(Markdown(f"- {md_text.rstrip()}\n\n", style=Style(color='cyan'))) + console.print("\n\n") + else: + console.print(f"[red]Path: [bold red]{path}[/bold red] does not exist") + + elif os.path.exists(os.path.dirname(path)) and len(paths) != 1: + queue = paths + md_text = "\n - ".join(queue) + console.print("\n[bold green]Queuing these files:[/bold green]", end='') + console.print(Markdown(f"- {md_text.rstrip()}\n\n", style=Style(color='cyan'))) + console.print("\n\n") + elif not os.path.exists(os.path.dirname(path)): + split_path = path.split() + p1 = split_path[0] + for i, each in enumerate(split_path): + try: + if os.path.exists(p1) and not os.path.exists(f"{p1} {split_path[i + 1]}"): + queue.append(p1) + p1 = split_path[i + 1] + else: + p1 += f" {split_path[i + 1]}" + except IndexError: + if os.path.exists(p1): + queue.append(p1) + else: + console.print(f"[red]Path: [bold red]{p1}[/bold red] does not exist") + if len(queue) >= 1: + md_text = "\n - ".join(queue) + console.print("\n[bold green]Queuing these files:[/bold green]", end='') + console.print(Markdown(f"- {md_text.rstrip()}\n\n", style=Style(color='cyan'))) + console.print("\n\n") + + else: + # Add Search Here + console.print("[red]There was an issue with your input. If you think this was not an issue, please make a report that includes the full command used.") + exit() + if not queue: console.print(f"[red]No valid files or directories found for path: {path}") exit(1) From 0e4a1d39720769189fe35746c9add74fbfe24388 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 29 Nov 2024 19:40:46 +1000 Subject: [PATCH 539/741] Merge checker branch changes Last commit had half the changes included --- upload.py | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/upload.py b/upload.py index 0c044cb4d..8362f0014 100644 --- a/upload.py +++ b/upload.py @@ -57,6 +57,7 @@ import cli_ui import traceback import click +import re from src.console import console from rich.markdown import Markdown @@ -159,6 +160,36 @@ def resolve_queue_with_glob_or_split(path, paths, allowed_extensions=None): return queue +def extract_safe_file_locations(log_file): + """ + Parse the log file to extract file locations under the 'safe' header. + + :param log_file: Path to the log file to parse. + :return: List of file paths from the 'safe' section. 
+ """ + safe_section = False + safe_file_locations = [] + + with open(log_file, 'r') as f: + for line in f: + line = line.strip() + + # Detect the start and end of 'safe' sections + if line.lower() == "safe": + safe_section = True + continue + elif line.lower() in {"danger", "risky"}: + safe_section = False + + # Extract 'File Location' if in a 'safe' section + if safe_section and line.startswith("File Location:"): + match = re.search(r"File Location:\s*(.+)", line) + if match: + safe_file_locations.append(match.group(1).strip()) + + return safe_file_locations + + def merge_meta(meta, saved_meta, path): """Merges saved metadata with the current meta, respecting overwrite rules.""" with open(f"{base_dir}/tmp/{os.path.basename(path)}/meta.json") as f: From 67639a9d3783d6e653fc0d04631a501c04b9f4f3 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 29 Nov 2024 19:42:24 +1000 Subject: [PATCH 540/741] Add the checker argument --- src/args.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/args.py b/src/args.py index 21a497b66..c4057e767 100644 --- a/src/args.py +++ b/src/args.py @@ -22,6 +22,7 @@ def parse(self, args, meta): parser.add_argument('path', nargs='*', help="Path to file/directory") parser.add_argument('--queue', nargs='*', required=False, help="(--queue queue_name) Process an entire folder (files/subfolders) in a queue") + parser.add_argument('--unit3d', action='store_true', required=False, help="[parse a txt output file from UNIT3D-Upload-Checker]") parser.add_argument('-s', '--screens', nargs='*', required=False, help="Number of screenshots", default=int(self.config['DEFAULT']['screens'])) parser.add_argument('-mf', '--manual_frames', required=False, help="Comma-separated frame numbers to use as screenshots", type=str, default=None) parser.add_argument('-c', '--category', nargs='*', required=False, help="Category [MOVIE, TV, FANRES]", choices=['movie', 'tv', 'fanres']) From 98bbee060f10f74c8f6d7b8bef882244c4c505fa Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 29 Nov 2024 19:45:47 +1000 Subject: [PATCH 541/741] Lint --- src/trackers/COMMON.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 334f3a0ad..2c5b475be 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -721,10 +721,10 @@ def process_exclusion(each): if not season_episode_match: log_exclusion("season/episode mismatch", each) return True - + if normalized_encoder and normalized_encoder not in normalized: log_exclusion(f"Encoder '{has_encoder_in_name}' mismatch", each) - return True + return True console.log(f"[debug] Passed all checks: {each}") return False From ce2395a22ecb10bf7fffa831a3373ca389fb1604 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 29 Nov 2024 20:53:56 +1000 Subject: [PATCH 542/741] Bump version in headers --- src/trackers/ACM.py | 2 +- src/trackers/AITHER.py | 2 +- src/trackers/AL.py | 2 +- src/trackers/ANT.py | 4 ++-- src/trackers/BHD.py | 2 +- src/trackers/BLU.py | 2 +- src/trackers/CBR.py | 2 +- src/trackers/FNP.py | 2 +- src/trackers/HP.py | 2 +- src/trackers/HUNO.py | 2 +- src/trackers/JPTV.py | 2 +- src/trackers/LCD.py | 2 +- src/trackers/LST.py | 2 +- src/trackers/LT.py | 2 +- src/trackers/OE.py | 2 +- src/trackers/OTW.py | 2 +- src/trackers/PSS.py | 2 +- src/trackers/R4E.py | 2 +- src/trackers/RF.py | 2 +- src/trackers/SHRI.py | 2 +- src/trackers/STC.py | 2 +- src/trackers/STT.py | 2 +- src/trackers/THR.py | 2 +- src/trackers/TIK.py | 2 +- src/trackers/TL.py | 2 +- src/trackers/ULCX.py | 2 +- 
src/trackers/UNIT3D_TEMPLATE.py | 2 +- src/trackers/UTP.py | 2 +- src/trackers/YOINK.py | 2 +- 29 files changed, 30 insertions(+), 30 deletions(-) diff --git a/src/trackers/ACM.py b/src/trackers/ACM.py index 71e3d3c32..520ee3db6 100644 --- a/src/trackers/ACM.py +++ b/src/trackers/ACM.py @@ -249,7 +249,7 @@ async def upload(self, meta, disctype): data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 45ec59f53..1a3f145cc 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -95,7 +95,7 @@ async def upload(self, meta, disctype): 'mod_queue_opt_in': modq, } headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() diff --git a/src/trackers/AL.py b/src/trackers/AL.py index a0b45caad..66e3949fc 100644 --- a/src/trackers/AL.py +++ b/src/trackers/AL.py @@ -142,7 +142,7 @@ async def upload(self, meta, disctype): data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index c6c8300c9..8a5cba97c 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -113,12 +113,12 @@ async def upload(self, meta, disctype): 'media': 'Blu-ray', 'releasegroup': str(meta['tag'])[1:], 'release_desc': bd_dump, - 'flagchangereason': "BDMV Uploaded with L4G's Upload Assistant"}) + 'flagchangereason': "BDMV Uploaded with Upload Assistant"}) if meta['scene']: # ID of "Scene?" 
checkbox on upload form is actually "censored" data['censored'] = 1 headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } try: diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index 404a5a1c2..206913162 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -96,7 +96,7 @@ async def upload(self, meta, disctype): if len(tags) > 0: data['tags'] = ','.join(tags) headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } url = self.upload_url + self.config['TRACKERS'][self.tracker]['api_key'].strip() diff --git a/src/trackers/BLU.py b/src/trackers/BLU.py index b0bddc66f..ec4a130ad 100644 --- a/src/trackers/BLU.py +++ b/src/trackers/BLU.py @@ -117,7 +117,7 @@ async def upload(self, meta, disctype): data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() diff --git a/src/trackers/CBR.py b/src/trackers/CBR.py index 79d98d003..b4e54e66d 100644 --- a/src/trackers/CBR.py +++ b/src/trackers/CBR.py @@ -101,7 +101,7 @@ async def upload(self, meta, disctype): data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() diff --git a/src/trackers/FNP.py b/src/trackers/FNP.py index 9802d9508..498b8ea39 100644 --- a/src/trackers/FNP.py +++ b/src/trackers/FNP.py @@ -134,7 +134,7 @@ async def upload(self, meta, disctype): data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() diff --git a/src/trackers/HP.py b/src/trackers/HP.py index 609fa066b..771949a94 100644 --- a/src/trackers/HP.py +++ b/src/trackers/HP.py @@ -123,7 +123,7 @@ async def upload(self, meta, disctype): data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index 3c1049fb1..623ed3f7c 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -84,7 +84,7 @@ async def upload(self, meta, disctype): data['internal'] = 0 headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { 'api_token': tracker_config['api_key'].strip() diff --git a/src/trackers/JPTV.py b/src/trackers/JPTV.py index 
fe28e3df7..1b079e2ba 100644 --- a/src/trackers/JPTV.py +++ b/src/trackers/JPTV.py @@ -130,7 +130,7 @@ async def upload(self, meta, disctype): data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() diff --git a/src/trackers/LCD.py b/src/trackers/LCD.py index 450b05140..2bcaaee3f 100644 --- a/src/trackers/LCD.py +++ b/src/trackers/LCD.py @@ -101,7 +101,7 @@ async def upload(self, meta, disctype): data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() diff --git a/src/trackers/LST.py b/src/trackers/LST.py index 2b02b7e45..aace023d6 100644 --- a/src/trackers/LST.py +++ b/src/trackers/LST.py @@ -156,7 +156,7 @@ async def upload(self, meta, disctype): data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() diff --git a/src/trackers/LT.py b/src/trackers/LT.py index f748af0ee..4cd5e7c94 100644 --- a/src/trackers/LT.py +++ b/src/trackers/LT.py @@ -157,7 +157,7 @@ async def upload(self, meta, disctype): data['season_number'] = int(meta.get('season_int', '0')) data['episode_number'] = int(meta.get('episode_int', '0')) headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() diff --git a/src/trackers/OE.py b/src/trackers/OE.py index 83fcb11d6..59d3d035c 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -109,7 +109,7 @@ async def upload(self, meta, disctype): data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() diff --git a/src/trackers/OTW.py b/src/trackers/OTW.py index 16e00cfcd..3e9259359 100644 --- a/src/trackers/OTW.py +++ b/src/trackers/OTW.py @@ -134,7 +134,7 @@ async def upload(self, meta, disctype): data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() diff --git a/src/trackers/PSS.py b/src/trackers/PSS.py index eb59d2ae4..97f377bfb 100644 --- a/src/trackers/PSS.py +++ b/src/trackers/PSS.py @@ -136,7 +136,7 @@ async def upload(self, meta, disctype): 
data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() diff --git a/src/trackers/R4E.py b/src/trackers/R4E.py index 6d6717c64..deaf0b095 100644 --- a/src/trackers/R4E.py +++ b/src/trackers/R4E.py @@ -83,7 +83,7 @@ async def upload(self, meta, disctype): # 'sticky' : 0, } headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } url = f"https://racing4everyone.eu/api/torrents/upload?api_token={self.config['TRACKERS']['R4E']['api_key'].strip()}" if meta.get('category') == "TV": diff --git a/src/trackers/RF.py b/src/trackers/RF.py index 688fb10c4..9e01187c1 100644 --- a/src/trackers/RF.py +++ b/src/trackers/RF.py @@ -98,7 +98,7 @@ async def upload(self, meta, disctype): if distributor_id != 0: data['distributor_id'] = distributor_id headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() diff --git a/src/trackers/SHRI.py b/src/trackers/SHRI.py index e93837956..7d79fc731 100644 --- a/src/trackers/SHRI.py +++ b/src/trackers/SHRI.py @@ -134,7 +134,7 @@ async def upload(self, meta, disctype): data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() diff --git a/src/trackers/STC.py b/src/trackers/STC.py index 6acdf103d..f5e2b7ee8 100644 --- a/src/trackers/STC.py +++ b/src/trackers/STC.py @@ -92,7 +92,7 @@ async def upload(self, meta, disctype): data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() diff --git a/src/trackers/STT.py b/src/trackers/STT.py index 4715864e8..7da801754 100644 --- a/src/trackers/STT.py +++ b/src/trackers/STT.py @@ -85,7 +85,7 @@ async def upload(self, meta, disctype): 'sticky': 0, } headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() diff --git a/src/trackers/THR.py b/src/trackers/THR.py index 4a91b66e7..f36556978 100644 --- a/src/trackers/THR.py +++ b/src/trackers/THR.py @@ -81,7 +81,7 @@ async def upload(self, session, meta, disctype): 'tube': meta.get('youtube', '') } headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } # If pronfo fails, put mediainfo into THR parser if meta.get('is_disc', '') != 
'BDMV':
diff --git a/src/trackers/TIK.py b/src/trackers/TIK.py
index 20659b141..2fc467d04 100644
--- a/src/trackers/TIK.py
+++ b/src/trackers/TIK.py
@@ -120,7 +120,7 @@ async def upload(self, meta, disctype):
         data['season_number'] = meta.get('season_int', '0')
         data['episode_number'] = meta.get('episode_int', '0')
         headers = {
-            'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})'
+            'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})'
         }
         params = {
             'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip()
diff --git a/src/trackers/TL.py b/src/trackers/TL.py
index 234dcf2b3..e662f2913 100644
--- a/src/trackers/TL.py
+++ b/src/trackers/TL.py
@@ -100,7 +100,7 @@ async def upload(self, meta, disctype):
             'category': cat_id
         }
         headers = {
-            'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})'
+            'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})'
         }
 
         if meta['debug'] is False:
diff --git a/src/trackers/ULCX.py b/src/trackers/ULCX.py
index 76ec1c345..b9cdacca8 100644
--- a/src/trackers/ULCX.py
+++ b/src/trackers/ULCX.py
@@ -134,7 +134,7 @@ async def upload(self, meta, disctype):
         data['season_number'] = meta.get('season_int', '0')
         data['episode_number'] = meta.get('episode_int', '0')
         headers = {
-            'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})'
+            'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})'
         }
         params = {
             'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip()
diff --git a/src/trackers/UNIT3D_TEMPLATE.py b/src/trackers/UNIT3D_TEMPLATE.py
index 4c242f346..60a7b6297 100644
--- a/src/trackers/UNIT3D_TEMPLATE.py
+++ b/src/trackers/UNIT3D_TEMPLATE.py
@@ -133,7 +133,7 @@ async def upload(self, meta, disctype):
         data['season_number'] = meta.get('season_int', '0')
         data['episode_number'] = meta.get('episode_int', '0')
         headers = {
-            'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})'
+            'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})'
         }
         params = {
             'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip()
diff --git a/src/trackers/UTP.py b/src/trackers/UTP.py
index b59993b5d..86f645d78 100644
--- a/src/trackers/UTP.py
+++ b/src/trackers/UTP.py
@@ -100,7 +100,7 @@ async def upload(self, meta, disctype):
         data['season_number'] = meta.get('season_int', '0')
         data['episode_number'] = meta.get('episode_int', '0')
         headers = {
-            'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})'
+            'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})'
         }
         params = {
             'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip()
diff --git a/src/trackers/YOINK.py b/src/trackers/YOINK.py
index 58af6d7af..90003c132 100644
--- a/src/trackers/YOINK.py
+++ b/src/trackers/YOINK.py
@@ -134,7 +134,7 @@ async def upload(self, meta, disctype):
         data['season_number'] = meta.get('season_int', '0')
         data['episode_number'] = meta.get('episode_int', '0')
         headers = {
-            'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})'
+            'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})'
         }
         params = {
             'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip()

From 36251021dbd053f204a806ba464e7004e151b717 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Sat, 30 Nov 2024 12:04:10 +1000
Subject: [PATCH 543/741] Add dev branch to docker build

---
 .github/workflows/docker-image.yml | 1
+ 1 file changed, 1 insertion(+) diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index f6facbeb7..aa49b2e40 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -4,6 +4,7 @@ on: push: branches: - master + - development workflow_dispatch: env: From 7b3752fedd3245814ce31da0bba82b52b3accd11 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 30 Nov 2024 12:06:46 +1000 Subject: [PATCH 544/741] Don't overwrite traceback https://github.com/Audionut/Upload-Assistant/issues/136 --- upload.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/upload.py b/upload.py index 8362f0014..fde9401b8 100644 --- a/upload.py +++ b/upload.py @@ -498,8 +498,6 @@ async def do_the_thing(base_dir): console.print(f"[yellow]No metadata file found at {meta_file}") except Exception as e: - import traceback - traceback.print_exc() console.print(f"[red]Failed to load metadata for path '{path}': {e}") console.print(f"[green]Gathering info for {os.path.basename(path)}") @@ -767,7 +765,7 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): await thr.upload(session, meta, disctype) await client.add_to_client(meta, "THR") except Exception: - console.print(traceback.print_exc()) + console.print(traceback.format_exc()) if tracker == "PTP": if meta['unattended']: @@ -811,7 +809,7 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): await asyncio.sleep(5) await client.add_to_client(meta, "PTP") except Exception: - console.print(traceback.print_exc()) + console.print(traceback.format_exc()) if meta.get('queue') is not None: processed_files_count += 1 From e7412a3233add17b339602a59c3d58dbe506207b Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 30 Nov 2024 21:06:05 +1000 Subject: [PATCH 545/741] Remove blu-ray from dupe filtering --- src/trackers/COMMON.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 2c5b475be..09c8fe94f 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -664,12 +664,6 @@ async def filter_dupes(self, dupes, meta): "condition": lambda each: "hdtv" in each.lower(), "exclude_msg": lambda each: f"Excluding result due to 'HDTV' mismatch: {each}" }, - { - "key": "bluray", - "uuid_flag": "blu-ray" in meta.get('name', '').lower(), - "condition": lambda each: "blu-ray" in each.lower(), - "exclude_msg": lambda each: f"Excluding result due to 'Blu-ray' mismatch: {each}" - }, ] def log_exclusion(reason, item): From 62bd6e93f2f728bd95e3f763e5a05c5692ad19a7 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 30 Nov 2024 21:27:52 +1000 Subject: [PATCH 546/741] Fix queue counter --- upload.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/upload.py b/upload.py index fde9401b8..d7894ebeb 100644 --- a/upload.py +++ b/upload.py @@ -475,10 +475,10 @@ async def do_the_thing(base_dir): if meta['debug']: display_queue(queue, base_dir, queue_name, save_to_log=False) + processed_files_count = 0 base_meta = {k: v for k, v in meta.items()} for path in queue: total_files = len(queue) - processed_files_count = 0 try: meta = base_meta.copy() meta['path'] = path From 8090b927eb31387ea977a4aa728e1c2435019579 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 30 Nov 2024 21:37:37 +1000 Subject: [PATCH 547/741] Catch lack of proper dv/hdr tagging --- src/trackers/COMMON.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 09c8fe94f..34875faf6 100644 --- 
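The traceback fix above works because traceback.print_exc() writes straight to stderr and returns None, so console.print(traceback.print_exc()) printed the traceback followed by a stray "None". traceback.format_exc() instead returns the formatted text for the caller to print. A standalone sketch of the difference:

    import traceback

    try:
        1 / 0
    except ZeroDivisionError:
        # format_exc() returns the traceback as a string, so it can be
        # passed to any printer; print_exc() prints and returns None.
        tb_text = traceback.format_exc()
        print(tb_text)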
a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -788,6 +788,8 @@ def simplify_hdr(hdr_set): simplified.add("HDR") if "DV" in hdr_set or "DOVI" in hdr_set: simplified.add("DV") + if "framestor" in meta['tag'].lower(): + simplified.add("HDR") return simplified file_hdr_simple = simplify_hdr(file_hdr) From d1dde04fd0523d4e9cf261b27a2b9a8b853ae48e Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 30 Nov 2024 21:40:42 +1000 Subject: [PATCH 548/741] Include meta --- src/trackers/COMMON.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 34875faf6..8a12ae967 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -705,7 +705,7 @@ def process_exclusion(each): console.log(f"[yellow]{check['exclude_msg'](each)}") return True - if not self.has_matching_hdr(file_hdr, target_hdr): + if not self.has_matching_hdr(file_hdr, target_hdr, meta): log_exclusion(f"HDR mismatch: Expected {target_hdr}, got {file_hdr}", each) return True @@ -777,7 +777,7 @@ def refine_hdr_terms(self, hdr): terms.add("HDR") return terms - def has_matching_hdr(self, file_hdr, target_hdr): + def has_matching_hdr(self, file_hdr, target_hdr, meta): """ Check if the HDR terms match or are compatible. """ From b9f8f1a296184d9a2087bc33c205f35b1be41910 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 1 Dec 2024 10:32:16 +1000 Subject: [PATCH 549/741] Handle lack of video_encode --- src/trackers/COMMON.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 8a12ae967..693bca215 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -625,8 +625,12 @@ async def filter_dupes(self, dupes, meta): new_dupes = [] has_repack_in_uuid = "repack" in meta.get('uuid', '').lower() - has_encoder_in_name = meta.get("video_encode").lower() - normalized_encoder = self.normalize_filename(has_encoder_in_name) + video_encode = meta.get("video_encode") + if video_encode is not None: + has_encoder_in_name = video_encode.lower() + normalized_encoder = self.normalize_filename(has_encoder_in_name) + else: + normalized_encoder = False has_is_disc = bool(meta.get('is_disc', False)) target_hdr = self.refine_hdr_terms(meta.get("hdr")) target_season = meta.get("season") From 591b5fb4f177f9ae4173269c19e2628481b98d04 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 1 Dec 2024 16:41:33 +1000 Subject: [PATCH 550/741] Multiprocess screenshots --- src/prep.py | 201 +++++++++++++++++++++++----------------- 1 file changed, 89 insertions(+), 112 deletions(-) diff --git a/src/prep.py b/src/prep.py index 6eae6076e..e59305f23 100644 --- a/src/prep.py +++ b/src/prep.py @@ -17,6 +17,8 @@ import traceback from src.discparse import DiscParse import multiprocessing + from multiprocessing import Pool + from tqdm import tqdm import os import re import math @@ -1497,6 +1499,8 @@ def _is_vob_good(n, loops, num_screens): os.remove(smallest) def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=None, force_screenshots=False, manual_frames=None): + if meta['debug']: + start_time = time.time() if 'image_list' not in meta: meta['image_list'] = [] @@ -1532,109 +1536,59 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non sar = w_sar = par h_sar = 1 length = round(float(length)) - os.chdir(f"{base_dir}/tmp/{folder_id}") - i = 0 - if (meta.get('ffdebug', False)): - loglevel = 'verbose' - else: - loglevel = 'quiet' - - retake = False - with Progress(
- TextColumn("[bold green]Saving Screens..."), - BarColumn(), - "[cyan]{task.completed}/{task.total}", - TimeRemainingColumn() - ) as progress: - ss_times = [] - screen_task = progress.add_task("[green]Saving Screens...", total=num_screens) - if manual_frames: - if isinstance(manual_frames, str): - manual_frames = [frame.strip() for frame in manual_frames.split(',') if frame.strip().isdigit()] - elif isinstance(manual_frames, list): - manual_frames = [frame for frame in manual_frames if isinstance(frame, int) or frame.isdigit()] - - # Convert to integers - manual_frames = [int(frame) for frame in manual_frames] - ss_times = [frame / frame_rate for frame in manual_frames] - - # If not enough manual frames, fill in with random frames - if len(ss_times) < num_screens: - console.print(f"[yellow]Not enough manual frames provided. Using random frames for remaining {num_screens - len(ss_times)} screenshots.") - random_times = self.valid_ss_time(ss_times, num_screens - len(ss_times), length) - ss_times.extend(random_times) - - else: - # No manual frames provided, generate random times - # console.print("[yellow]No manual frames provided. Generating random frames.") - ss_times = self.valid_ss_time(ss_times, num_screens, length) - - for i in range(num_screens): - image_path = os.path.abspath(f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png") - if not os.path.exists(image_path) or retake is not False: - retake = False - try: - # console.print(f"Taking screenshot at time (s): {ss_times[i]}") - ff = ffmpeg.input(path, ss=ss_times[i]) - - if w_sar != 1 or h_sar != 1: - ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) - - # console.print(f"Saving screenshot to {image_path}") - ( - ff - .output(image_path, vframes=1, pix_fmt="rgb24") - .overwrite_output() - .global_args('-loglevel', loglevel) - .run() - ) + if (meta.get('ffdebug', False)): + loglevel = 'verbose' + else: + loglevel = 'quiet' - except Exception as e: - console.print(f"[red]Error during screenshot capture: {e}") - sys.exit(1) + os.chdir(f"{base_dir}/tmp/{folder_id}") - self.optimize_images(image_path) - if not manual_frames: - if os.path.getsize(Path(image_path)) <= 75000: - console.print("[yellow]Image is incredibly small, retaking") - retake = True - time.sleep(1) - if os.path.getsize(Path(image_path)) <= 31000000 and self.img_host == "imgbb" and retake is False: - i += 1 - elif os.path.getsize(Path(image_path)) <= 10000000 and self.img_host in ["imgbox", 'pixhost'] and retake is False: - i += 1 - elif self.img_host in ["ptpimg", "lensdump", "ptscreens", "oeimg"] and retake is False: - i += 1 - elif self.img_host == "freeimage.host": - console.print("[bold red]Support for freeimage.host has been removed. 
Please remove from your config") - exit() - elif retake is True: - pass - else: - console.print("[red]Image too large for your image host, retaking") - retake = True - time.sleep(1) + if manual_frames: + manual_frames = [int(frame) for frame in manual_frames] + ss_times = [frame / frame_rate for frame in manual_frames] - progress.advance(screen_task) + if len(ss_times) < num_screens: + random_times = self.valid_ss_time(ss_times, num_screens - len(ss_times), length) + ss_times.extend(random_times) + else: + ss_times = self.valid_ss_time([], num_screens, length) + + # Prepare tasks for screenshot capture + capture_tasks = [] + for i in range(num_screens): + image_path = os.path.abspath(f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png") + capture_tasks.append((path, ss_times[i], image_path, width, height, w_sar, h_sar, loglevel)) + + # Capture screenshots in parallel with feedback + with Pool(processes=num_screens) as pool: + capture_results = list( + tqdm(pool.imap_unordered(self.capture_screenshot, capture_tasks), total=num_screens, desc="Capturing Screenshots") + ) + + # Filter out errors and prepare optimization tasks + optimize_tasks = [(result, self.config) for result in capture_results if "Error" not in result] + + # Optimize images in parallel with feedback + with Pool(processes=len(optimize_tasks)) as pool: + optimize_results = list( + tqdm(pool.imap_unordered(self.optimize_image_task, optimize_tasks), total=len(optimize_tasks), desc="Optimizing Images") + ) + + for image_path in optimize_results: + if "Error" not in image_path: + img_dict = { + 'img_url': image_path, + 'raw_url': image_path, + 'web_url': image_path + } + meta['image_list'].append(img_dict) + else: + console.print(f"[red]{image_path}") - new_images = glob.glob(f"{filename}-*.png") - for image in new_images: - img_dict = { - 'img_url': image, - 'raw_url': image, - 'web_url': image - } - meta['image_list'].append(img_dict) - - if len(meta['image_list']) > self.screens: - local_images = [img for img in meta['image_list'] if not img['img_url'].startswith('http')] - if local_images: - smallest = min(local_images, key=lambda x: os.path.getsize(x['img_url'])) - os.remove(smallest['img_url']) - meta['image_list'].remove(smallest) - else: - console.print("[yellow]No local images found to remove.") + if meta['debug']: + finish_time = time.time() + print(f"Screenshots processed in {finish_time - start_time:.4f} seconds") def valid_ss_time(self, ss_times, num_screens, length, manual_frames=None): if manual_frames: @@ -1656,17 +1610,40 @@ def valid_ss_time(self, ss_times, num_screens, length, manual_frames=None): return ss_times - def optimize_images(self, image): - if self.config['DEFAULT'].get('optimize_images', True) is True: - if self.config['DEFAULT'].get('shared_seedbox', True) is True: - # Get number of CPU cores - num_cores = multiprocessing.cpu_count() - # Limit the number of threads based on half available cores - max_threads = num_cores // 2 - # Set cores for oxipng usage - os.environ['RAYON_NUM_THREADS'] = str(max_threads) - if os.path.exists(image): - try: + def capture_screenshot(self, args): + path, ss_time, image_path, width, height, w_sar, h_sar, loglevel = args + try: + ff = ffmpeg.input(path, ss=ss_time) + + if w_sar != 1 or h_sar != 1: + ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) + + ( + ff + .output(image_path, vframes=1, pix_fmt="rgb24") + .overwrite_output() + .global_args('-loglevel', loglevel) + .run() + ) + return image_path + except Exception as e: + return f"Error: 
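The rewrite above leans on one pattern throughout: a multiprocessing.Pool consumed through tqdm so each finished task advances a progress bar. A minimal, self-contained sketch of that pattern, with work() as a stand-in for capture_screenshot:

    from multiprocessing import Pool
    from tqdm import tqdm

    def work(n):
        # Kept at module level so the pool can pickle it for the workers
        return n * n

    if __name__ == "__main__":
        tasks = list(range(8))
        with Pool(processes=4) as pool:
            # imap_unordered yields results as workers finish, not in task order
            results = list(tqdm(pool.imap_unordered(work, tasks), total=len(tasks), desc="Capturing"))
        print(sorted(results))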
{e}" + + def optimize_image_task(self, args): + image, config = args + try: + # Extract shared_seedbox and optimize_images from config + optimize_images = config['DEFAULT'].get('optimize_images', True) + shared_seedbox = config['DEFAULT'].get('shared_seedbox', True) + + if optimize_images: + if shared_seedbox: + # Limit the number of threads for oxipng + num_cores = multiprocessing.cpu_count() + max_threads = num_cores // 2 + os.environ['RAYON_NUM_THREADS'] = str(max_threads) + + if os.path.exists(image): pyver = platform.python_version_tuple() if int(pyver[0]) == 3 and int(pyver[1]) >= 7: import oxipng @@ -1674,9 +1651,9 @@ def optimize_images(self, image): oxipng.optimize(image, level=6) else: oxipng.optimize(image, level=3) - except (KeyboardInterrupt, Exception): - sys.exit(1) - return + return image # Return image path if successful + except (KeyboardInterrupt, Exception) as e: + return f"Error: {e}" # Return error message """ Get type and category From fd1f93e4c6576d9b177862f39d94f226b6b95e7b Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 1 Dec 2024 18:21:49 +1000 Subject: [PATCH 551/741] Multiprocess uploads --- src/prep.py | 390 ++++++++++++++++++++++++---------------------------- upload.py | 6 +- 2 files changed, 178 insertions(+), 218 deletions(-) diff --git a/src/prep.py b/src/prep.py index 6eae6076e..3e5ba147b 100644 --- a/src/prep.py +++ b/src/prep.py @@ -17,6 +17,8 @@ import traceback from src.discparse import DiscParse import multiprocessing + from multiprocessing import Pool + from tqdm import tqdm import os import re import math @@ -2807,240 +2809,202 @@ def create_base_from_existing_torrent(self, torrentpath, base_dir, uuid): """ Upload Screenshots """ + def upload_image_task(self, args): + image, img_host, config, meta = args + try: + timeout = 10 # Default timeout + img_url, raw_url, web_url = None, None, None + + if img_host == "imgbox": + loop = asyncio.get_event_loop() + image_list = loop.run_until_complete( + self.imgbox_upload(os.getcwd(), [image], {}, {}) + ) + if image_list and all( + 'img_url' in img and 'raw_url' in img and 'web_url' in img for img in image_list + ): + img_url = image_list[0]['img_url'] + raw_url = image_list[0]['raw_url'] + web_url = image_list[0]['web_url'] + + elif img_host == "ptpimg": + payload = { + 'format': 'json', + 'api_key': config['DEFAULT']['ptpimg_api'] + } + files = [('file-upload[0]', open(image, 'rb'))] + headers = {'referer': 'https://ptpimg.me/index.php'} + response = requests.post( + "https://ptpimg.me/upload.php", headers=headers, data=payload, files=files, timeout=timeout + ) + response_data = response.json() + if response_data: + code = response_data[0]['code'] + ext = response_data[0]['ext'] + img_url = f"https://ptpimg.me/{code}.{ext}" + raw_url = img_url + web_url = img_url + + elif img_host == "imgbb": + url = "https://api.imgbb.com/1/upload" + data = { + 'key': config['DEFAULT']['imgbb_api'], + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + response = requests.post(url, data=data, timeout=timeout) + response_data = response.json() + img_url = response_data['data']['url'] + raw_url = response_data['data']['image']['url'] + web_url = response_data['data']['url_viewer'] + + elif img_host == "ptscreens": + url = "https://ptscreens.com/api/1/upload" + data = { + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + headers = { + 'X-API-Key': config['DEFAULT']['ptscreens_api'] + } + response = requests.post(url, data=data, headers=headers, timeout=timeout) + response_data = 
response.json() + if response_data.get('status_code') == 200: + img_url = response_data['data']['image']['url'] + raw_url = img_url + web_url = img_url + + elif img_host == "oeimg": + url = "https://imgoe.download/api/1/upload" + data = { + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + headers = { + 'X-API-Key': config['DEFAULT']['oeimg_api'] + } + response = requests.post(url, data=data, headers=headers, timeout=timeout) + response_data = response.json() + if response_data.get('status_code') == 200: + img_url = response_data['data']['image']['url'] + raw_url = img_url + web_url = response_data['data']['url_viewer'] + + elif img_host == "pixhost": + url = "https://api.pixhost.to/images" + data = { + 'content_type': '0', + 'max_th_size': 350 + } + files = { + 'img': ('file-upload[0]', open(image, 'rb')) + } + response = requests.post(url, data=data, files=files, timeout=timeout) + response_data = response.json() + if response.status_code == 200: + raw_url = response_data['th_url'].replace('https://t', 'https://img').replace('/thumbs/', '/images/') + img_url = response_data['th_url'] + web_url = response_data['show_url'] + + elif img_host == "lensdump": + url = "https://lensdump.com/api/1/upload" + data = { + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + headers = { + 'X-API-Key': config['DEFAULT']['lensdump_api'] + } + response = requests.post(url, data=data, headers=headers, timeout=timeout) + response_data = response.json() + if response_data.get('status_code') == 200: + img_url = response_data['data']['image']['url'] + raw_url = img_url + web_url = response_data['data']['url_viewer'] + + if img_url and raw_url and web_url: + return { + 'status': 'success', + 'img_url': img_url, + 'raw_url': raw_url, + 'web_url': web_url, + 'local_file_path': image + } + else: + return { + 'status': 'failed', + 'reason': f"Failed to upload image to {img_host}. No URLs received." + } + + except Exception as e: + return { + 'status': 'failed', + 'reason': str(e) + } + def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=False, max_retries=3): + if meta['debug']: + upload_start_time = time.time() import nest_asyncio nest_asyncio.apply() os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") initial_img_host = self.config['DEFAULT'][f'img_host_{img_host_num}'] img_host = meta['imghost'] using_custom_img_list = bool(custom_img_list) - - image_list = [] - successfully_uploaded = set() - initial_timeout = 10 + meta['image_list'] = list({image['img_url']: image for image in meta['image_list']}.values()) + existing_urls = {image['img_url'] for image in meta['image_list']} if 'image_sizes' not in meta: meta['image_sizes'] = {} if custom_img_list: - image_glob = custom_img_list - existing_images = [] + image_glob = list(set(custom_img_list)) else: image_glob = glob.glob("*.png") if 'POSTER.png' in image_glob: image_glob.remove('POSTER.png') - existing_images = meta.get('image_list', []) - - if len(existing_images) >= total_screens and not retry_mode and img_host == initial_img_host: - console.print(f"[yellow]Skipping upload because images are already uploaded to {img_host}. 
Existing images: {len(existing_images)}, Required: {total_screens}") - return existing_images, total_screens - - while True: - remaining_images = [img for img in image_glob[-screens:] if img not in successfully_uploaded] - - with Progress( - TextColumn("[bold green]Uploading Screens..."), - BarColumn(), - "[cyan]{task.completed}/{task.total}", - TimeRemainingColumn() - ) as progress: - upload_task = progress.add_task("[green]Uploading Screens...", total=len(remaining_images)) - console.print(f"[cyan]Uploading screens to {img_host}...") - - for image in remaining_images: - retry_count = 0 - upload_success = False - - image_path = os.path.normpath(os.path.join(os.getcwd(), image)) # noqa F841 + image_glob = list(set(image_glob)) - while retry_count < max_retries and not upload_success: - try: - timeout = initial_timeout - - if img_host == "imgbox": - try: - # console.print("[blue]Uploading images to imgbox...") - - # Use the current event loop to run imgbox_upload - loop = asyncio.get_event_loop() - - # Run the imgbox upload in the current event loop - image_list = loop.run_until_complete(self.imgbox_upload(os.getcwd(), image_glob, meta, return_dict)) # Pass all images - - # Ensure the image_list contains valid URLs before continuing - if image_list and all('img_url' in img and 'raw_url' in img and 'web_url' in img for img in image_list): - # console.print(f"[green]Successfully uploaded all images to imgbox.") - upload_success = True - - # Track the successfully uploaded images without appending again to image_list - for img in image_glob: - successfully_uploaded.add(img) # Track the uploaded images - - # Exit the loop after a successful upload - return image_list, i - - else: - console.print("[red]Imgbox upload failed, moving to the next image host.") - img_host_num += 1 - next_img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num + 1}', 'No more hosts') - console.print(f"[blue]Moving to next image host: {next_img_host}.") - img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num}') - - if not img_host: - console.print("[red]All image hosts failed. Unable to complete uploads.") - return image_list, i - - except Exception as e: - console.print(f"[yellow]Failed to upload images to imgbox. Exception: {str(e)}") - img_host_num += 1 - img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num}') - if not img_host: - console.print("[red]All image hosts failed. 
Unable to complete uploads.") - return image_list, i - - elif img_host == "ptpimg": - payload = { - 'format': 'json', - 'api_key': self.config['DEFAULT']['ptpimg_api'] - } - files = [('file-upload[0]', open(image, 'rb'))] - headers = {'referer': 'https://ptpimg.me/index.php'} - response = requests.post("https://ptpimg.me/upload.php", headers=headers, data=payload, files=files, timeout=timeout) - response = response.json() - ptpimg_code = response[0]['code'] - ptpimg_ext = response[0]['ext'] - img_url = f"https://ptpimg.me/{ptpimg_code}.{ptpimg_ext}" - raw_url = img_url - web_url = img_url - upload_success = True - - elif img_host == "imgbb": - url = "https://api.imgbb.com/1/upload" - data = { - 'key': self.config['DEFAULT']['imgbb_api'], - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - response = requests.post(url, data=data, timeout=timeout) - response = response.json() - img_url = response['data'].get('medium', response['data']['image'])['url'] - raw_url = response['data']['image']['url'] - web_url = response['data']['url_viewer'] - upload_success = True - - elif img_host == "ptscreens": - url = "https://ptscreens.com/api/1/upload" - data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - headers = { - 'X-API-Key': self.config['DEFAULT']['ptscreens_api'], - } - response = requests.post(url, data=data, headers=headers, timeout=timeout) - response = response.json() - if response.get('status_code') != 200: - console.print("[yellow]PT Screens failed, trying next image host") - break - img_url = response['data']['image']['url'] - raw_url = img_url - web_url = img_url - upload_success = True - - elif img_host == "oeimg": - url = "https://imgoe.download/api/1/upload" - data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - headers = { - 'X-API-Key': self.config['DEFAULT']['oeimg_api'], - } - response = requests.post(url, data=data, headers=headers, timeout=timeout) - response = response.json() - if response.get('status_code') != 200: - console.print("[yellow]OEimg failed, trying next image host") - break - img_url = response['data']['image']['url'] - raw_url = response['data']['image']['url'] - web_url = response['data']['url_viewer'] - upload_success = True - - elif img_host == "pixhost": - url = "https://api.pixhost.to/images" - data = { - 'content_type': '0', - 'max_th_size': 350, - } - files = { - 'img': ('file-upload[0]', open(image, 'rb')), - } - response = requests.post(url, data=data, files=files, timeout=timeout) - if response.status_code != 200: - console.print("[yellow]Pixhost failed, trying next image host") - break - response = response.json() - raw_url = response['th_url'].replace('https://t', 'https://img').replace('/thumbs/', '/images/') - img_url = response['th_url'] - web_url = response['show_url'] - upload_success = True - - elif img_host == "lensdump": - url = "https://lensdump.com/api/1/upload" - data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - headers = { - 'X-API-Key': self.config['DEFAULT']['lensdump_api'], - } - response = requests.post(url, data=data, headers=headers, timeout=timeout) - response = response.json() - if response.get('status_code') != 200: - console.print("[yellow]Lensdump failed, trying next image host") - break - img_url = response['data']['image']['url'] - raw_url = img_url - web_url = response['data']['url_viewer'] - upload_success = True - - # Only increment `i` after a successful upload - if upload_success: - image_size = 
os.path.getsize(image) # Get the image size in bytes - image_dict = { - 'img_url': img_url, - 'raw_url': raw_url, - 'web_url': web_url - } - image_list.append(image_dict) - successfully_uploaded.add(image) + if len(meta['image_list']) >= total_screens and not retry_mode and img_host == initial_img_host: + console.print(f"[yellow]Skipping upload because images are already uploaded to {img_host}. Existing images: {len(meta['image_list'])}, Required: {total_screens}") + return meta['image_list'], total_screens - # Store size in meta, indexed by the img_url - # Storing image_sizes for any multi disc/files will probably break something, so lets not do that. - if not using_custom_img_list: - meta['image_sizes'][img_url] = image_size + upload_tasks = [(image, img_host, self.config, meta) for image in image_glob[-screens:]] - progress.advance(upload_task) - i += 1 - return_dict['image_list'] = image_list - break + with Pool(processes=min(len(upload_tasks), os.cpu_count())) as pool: + results = list( + tqdm( + pool.imap_unordered(self.upload_image_task, upload_tasks), + total=len(upload_tasks), + desc="Uploading Images" + ) + ) - except Exception as e: - retry_count += 1 - console.print(f"[yellow]Failed to upload {image} to {img_host}. Attempt {retry_count}/{max_retries}. Exception: {str(e)}") - - if retry_count >= max_retries: - next_img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num + 1}', 'No more hosts') - console.print(f"[red]Max retries reached for {img_host}. Moving to next image host: {next_img_host}.") - img_host_num += 1 - img_host = self.config['DEFAULT'].get(f'img_host_{img_host_num}') - if not img_host: - console.print("[red]All image hosts failed. Unable to complete uploads.") - return image_list, i - break - - if img_host_num > 1 and not upload_success: - continue - else: - break + successfully_uploaded = [] + for result in results: + if result['status'] == 'success': + successfully_uploaded.append(result) + else: + console.print(f"[yellow]Failed to upload: {result.get('reason', 'Unknown error')}") - return image_list, i + for upload in successfully_uploaded: + img_url = upload['img_url'] + if img_url not in existing_urls: + if meta['debug']: + console.print(f"[blue]Adding {img_url} to image_list") + meta['image_list'].append({ + 'img_url': img_url, + 'raw_url': upload['raw_url'], + 'web_url': upload['web_url'] + }) + existing_urls.add(img_url) + if not using_custom_img_list: + local_file_path = upload.get('local_file_path') + if local_file_path: + image_size = os.path.getsize(local_file_path) + meta['image_sizes'][img_url] = image_size + + console.print(f"[green]Successfully uploaded {len(successfully_uploaded)} images.") + if meta['debug']: + upload_finish_time = time.time() + print(f"Screenshot uploads processed in {upload_finish_time - upload_start_time:.4f} seconds") + return meta['image_list'], len(successfully_uploaded) async def imgbox_upload(self, chdir, image_glob, meta, return_dict): try: diff --git a/upload.py b/upload.py index d7894ebeb..d34bc4d06 100644 --- a/upload.py +++ b/upload.py @@ -256,18 +256,14 @@ async def process_meta(meta, base_dir): meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await prep.get_name(meta) if len(meta.get('image_list', [])) < 3 and meta.get('skip_imghost_upload', False) is False: - return_dict = {} if 'image_list' not in meta: meta['image_list'] = [] + return_dict = {} new_images, dummy_var = prep.upload_screens(meta, meta['screens'], 1, 0, meta['screens'], [], return_dict=return_dict) - 
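A small but load-bearing line in the new upload_screens() is the dict-comprehension dedupe of meta['image_list'] keyed on img_url. It works because later duplicates overwrite earlier ones and Python dicts preserve insertion order; a reduced sketch:

    images = [
        {"img_url": "a.png", "web_url": "v1"},
        {"img_url": "b.png", "web_url": "v2"},
        {"img_url": "a.png", "web_url": "v3"},  # duplicate key, last entry wins
    ]
    deduped = list({img["img_url"]: img for img in images}.values())
    print(deduped)
    # [{'img_url': 'a.png', 'web_url': 'v3'}, {'img_url': 'b.png', 'web_url': 'v2'}]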
meta['image_list'].extend(new_images) with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: json.dump(meta, f, indent=4) - if meta.get('debug', False): - console.print(meta['image_list']) - elif meta.get('skip_imghost_upload', False) is True and meta.get('image_list', False) is False: meta['image_list'] = [] From 4b45d3327bd281a4f2254000bdb32adcfa514379 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 1 Dec 2024 20:29:01 +1000 Subject: [PATCH 552/741] Skip screens if already exist --- src/prep.py | 52 +++++++++++++++++++++++++++------------------------- 1 file changed, 27 insertions(+), 25 deletions(-) diff --git a/src/prep.py b/src/prep.py index e59305f23..d0e963e48 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1554,37 +1554,39 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non else: ss_times = self.valid_ss_time([], num_screens, length) - # Prepare tasks for screenshot capture capture_tasks = [] for i in range(num_screens): image_path = os.path.abspath(f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png") - capture_tasks.append((path, ss_times[i], image_path, width, height, w_sar, h_sar, loglevel)) - - # Capture screenshots in parallel with feedback - with Pool(processes=num_screens) as pool: - capture_results = list( - tqdm(pool.imap_unordered(self.capture_screenshot, capture_tasks), total=num_screens, desc="Capturing Screenshots") - ) + if not os.path.exists(image_path) or meta.get('retake', False): + capture_tasks.append((path, ss_times[i], image_path, width, height, w_sar, h_sar, loglevel)) + else: + if meta['debug']: + console.print(f"[yellow]Skipping existing screenshot: {image_path}") - # Filter out errors and prepare optimization tasks - optimize_tasks = [(result, self.config) for result in capture_results if "Error" not in result] + if not capture_tasks: + console.print("[yellow]All screenshots already exist. 
Skipping capture process.") + else: + with Pool(processes=len(capture_tasks)) as pool: + capture_results = list( + tqdm(pool.imap_unordered(self.capture_screenshot, capture_tasks), total=len(capture_tasks), desc="Capturing Screenshots") + ) - # Optimize images in parallel with feedback - with Pool(processes=len(optimize_tasks)) as pool: - optimize_results = list( - tqdm(pool.imap_unordered(self.optimize_image_task, optimize_tasks), total=len(optimize_tasks), desc="Optimizing Images") - ) + optimize_tasks = [(result, self.config) for result in capture_results if "Error" not in result] + with Pool(processes=len(optimize_tasks)) as pool: + optimize_results = list( + tqdm(pool.imap_unordered(self.optimize_image_task, optimize_tasks), total=len(optimize_tasks), desc="Optimizing Images") + ) - for image_path in optimize_results: - if "Error" not in image_path: - img_dict = { - 'img_url': image_path, - 'raw_url': image_path, - 'web_url': image_path - } - meta['image_list'].append(img_dict) - else: - console.print(f"[red]{image_path}") + for image_path in optimize_results: + if "Error" not in image_path: + img_dict = { + 'img_url': image_path, + 'raw_url': image_path, + 'web_url': image_path + } + meta['image_list'].append(img_dict) + else: + console.print(f"[red]{image_path}") if meta['debug']: finish_time = time.time() From b53ba0069301070c3fb8ca5507ccc5be73303262 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 1 Dec 2024 22:40:59 +1000 Subject: [PATCH 553/741] imgbox and option to limit concurrent uploads --- src/prep.py | 153 +++++++++++++++++++++++++++++----------------------- 1 file changed, 85 insertions(+), 68 deletions(-) diff --git a/src/prep.py b/src/prep.py index 3e5ba147b..65386e8b6 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2816,16 +2816,28 @@ def upload_image_task(self, args): img_url, raw_url, web_url = None, None, None if img_host == "imgbox": - loop = asyncio.get_event_loop() - image_list = loop.run_until_complete( - self.imgbox_upload(os.getcwd(), [image], {}, {}) - ) - if image_list and all( - 'img_url' in img and 'raw_url' in img and 'web_url' in img for img in image_list - ): - img_url = image_list[0]['img_url'] - raw_url = image_list[0]['raw_url'] - web_url = image_list[0]['web_url'] + try: + # Call the asynchronous imgbox_upload function + loop = asyncio.get_event_loop() + image_list = loop.run_until_complete( + self.imgbox_upload(os.getcwd(), [image], meta, return_dict={}) + ) + if image_list and all( + 'img_url' in img and 'raw_url' in img and 'web_url' in img for img in image_list + ): + img_url = image_list[0]['img_url'] + raw_url = image_list[0]['raw_url'] + web_url = image_list[0]['web_url'] + else: + return { + 'status': 'failed', + 'reason': "Imgbox upload failed. No valid URLs returned." 
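Because the pool workers are synchronous, the async pyimgbox client above is driven by running its coroutine to completion on an event loop. A standalone sketch of that sync-over-async bridge, using a stub coroutine in place of the real gallery upload and asyncio.run() for brevity (the codebase itself reuses a loop via run_until_complete under nest_asyncio):

    import asyncio

    async def fake_imgbox_upload(path):
        await asyncio.sleep(0.1)  # stands in for the network round-trip
        return {"img_url": path, "raw_url": path, "web_url": path}

    # A worker process has no running loop, so the coroutine can simply
    # be run to completion:
    result = asyncio.run(fake_imgbox_upload("screen-0.png"))
    print(result)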
+ } + except Exception as e: + return { + 'status': 'failed', + 'reason': f"Error during Imgbox upload: {str(e)}" + } elif img_host == "ptpimg": payload = { @@ -2947,34 +2959,44 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i initial_img_host = self.config['DEFAULT'][f'img_host_{img_host_num}'] img_host = meta['imghost'] using_custom_img_list = bool(custom_img_list) - meta['image_list'] = list({image['img_url']: image for image in meta['image_list']}.values()) - existing_urls = {image['img_url'] for image in meta['image_list']} if 'image_sizes' not in meta: meta['image_sizes'] = {} - if custom_img_list: - image_glob = list(set(custom_img_list)) - else: - image_glob = glob.glob("*.png") - if 'POSTER.png' in image_glob: - image_glob.remove('POSTER.png') - image_glob = list(set(image_glob)) + image_glob = list(set(custom_img_list)) if using_custom_img_list else glob.glob("*.png") + if 'POSTER.png' in image_glob: + image_glob.remove('POSTER.png') + image_glob = list(set(image_glob)) if len(meta['image_list']) >= total_screens and not retry_mode and img_host == initial_img_host: console.print(f"[yellow]Skipping upload because images are already uploaded to {img_host}. Existing images: {len(meta['image_list'])}, Required: {total_screens}") return meta['image_list'], total_screens + # Define host-specific limits + host_limits = { + "imgbox": 6, + # Other hosts can use the default pool size + } + + default_pool_size = os.cpu_count() + pool_size = host_limits.get(img_host, default_pool_size) + upload_tasks = [(image, img_host, self.config, meta) for image in image_glob[-screens:]] - with Pool(processes=min(len(upload_tasks), os.cpu_count())) as pool: - results = list( - tqdm( - pool.imap_unordered(self.upload_image_task, upload_tasks), - total=len(upload_tasks), - desc="Uploading Images" + try: + with Pool(processes=min(len(upload_tasks), pool_size)) as pool: + results = list( + tqdm( + pool.imap_unordered(self.upload_image_task, upload_tasks), + total=len(upload_tasks), + desc=f"Uploading Images to {img_host}" + ) ) - ) + except KeyboardInterrupt: + console.print("[red]Upload process interrupted by user. 
Exiting...") + pool.terminate() + pool.join() + return meta['image_list'], len(meta['image_list']) successfully_uploaded = [] for result in results: @@ -2983,27 +3005,32 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i else: console.print(f"[yellow]Failed to upload: {result.get('reason', 'Unknown error')}") + new_images = [] for upload in successfully_uploaded: - img_url = upload['img_url'] - if img_url not in existing_urls: + raw_url = upload['raw_url'] + new_image = { + 'img_url': upload['img_url'], + 'raw_url': raw_url, + 'web_url': upload['web_url'] + } + new_images.append(new_image) + if not using_custom_img_list and raw_url not in {img['raw_url'] for img in meta['image_list']}: if meta['debug']: - console.print(f"[blue]Adding {img_url} to image_list") - meta['image_list'].append({ - 'img_url': img_url, - 'raw_url': upload['raw_url'], - 'web_url': upload['web_url'] - }) - existing_urls.add(img_url) - if not using_custom_img_list: - local_file_path = upload.get('local_file_path') - if local_file_path: - image_size = os.path.getsize(local_file_path) - meta['image_sizes'][img_url] = image_size - - console.print(f"[green]Successfully uploaded {len(successfully_uploaded)} images.") + console.print(f"[blue]Adding {raw_url} to image_list") + meta['image_list'].append(new_image) + local_file_path = upload.get('local_file_path') + if local_file_path: + image_size = os.path.getsize(local_file_path) + meta['image_sizes'][raw_url] = image_size + + console.print(f"[green]Successfully uploaded {len(new_images)} images.") if meta['debug']: upload_finish_time = time.time() print(f"Screenshot uploads processed in {upload_finish_time - upload_start_time:.4f} seconds") + + if using_custom_img_list: + return new_images, len(new_images) + return meta['image_list'], len(successfully_uploaded) async def imgbox_upload(self, chdir, image_glob, meta, return_dict): @@ -3011,44 +3038,34 @@ async def imgbox_upload(self, chdir, image_glob, meta, return_dict): os.chdir(chdir) image_list = [] - console.print(f"[debug] Starting upload of {len(image_glob)} images to imgbox...") async with pyimgbox.Gallery(thumb_width=350, square_thumbs=False) as gallery: for image in image_glob: try: async for submission in gallery.add([image]): if not submission['success']: - console.print(f"[red]There was an error uploading to imgbox: [yellow]{submission['error']}[/yellow][/red]") - return [] # Return empty list in case of failure + console.print(f"[red]Error uploading to imgbox: [yellow]{submission['error']}[/yellow][/red]") else: - # Add the uploaded image info to image_list - image_dict = { - 'web_url': submission['web_url'], - 'img_url': submission['thumbnail_url'], - 'raw_url': submission['image_url'] - } - image_list.append(image_dict) - - console.print(f"[green]Successfully uploaded image: {image}") - + web_url = submission.get('web_url') + img_url = submission.get('thumbnail_url') + raw_url = submission.get('image_url') + if web_url and img_url and raw_url: + image_dict = { + 'web_url': web_url, + 'img_url': img_url, + 'raw_url': raw_url + } + image_list.append(image_dict) + else: + console.print(f"[red]Incomplete URLs received for image: {image}") except Exception as e: console.print(f"[red]Error during upload for {image}: {str(e)}") - return [] # Return empty list in case of error - - # After uploading all images, validate URLs and get sizes - valid_images = await self.check_images_concurrently(image_list, meta) - if valid_images: - console.print(f"[yellow]Successfully uploaded and 
validated {len(valid_images)} images.") - return_dict['image_list'] = valid_images # Set the validated images in return_dict - else: - console.print("[red]Failed to validate any images.") - return [] # Return empty list if no valid images - - return valid_images # Return the valid image list after validation + return_dict['image_list'] = image_list + return image_list except Exception as e: console.print(f"[red]An error occurred while uploading images to imgbox: {str(e)}") - return [] # Return empty list in case of an unexpected failure + return [] async def get_name(self, meta): type = meta.get('type', "") From fb58ee008013e7d18a57980f981921e9064afc79 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 1 Dec 2024 22:43:52 +1000 Subject: [PATCH 554/741] MTV - update to work with new image changes --- src/trackers/MTV.py | 69 ++++++++------------------------------------- 1 file changed, 12 insertions(+), 57 deletions(-) diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 667edca47..189abda5d 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -42,91 +42,51 @@ def __init__(self, config): async def upload(self, meta, disctype): common = COMMON(config=self.config) cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/MTV.pkl") - - # Initiate the upload with retry logic await self.upload_with_retry(meta, cookiefile, common) async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): approved_image_hosts = ['ptpimg', 'imgbox'] - total_size_limit = 25 * 1024 * 1024 # 25 MiB in bytes images_reuploaded = False - valid_images = [] - - # Helper function to calculate total size of the images - def calculate_total_size(image_list, image_sizes): - total_size = 0 - for image in image_list: - img_url = image['raw_url'] - size = image_sizes.get(img_url, 0) # Get size from meta['image_sizes'], default to 0 if not found - total_size += size - return total_size - - # Helper function to remove images until the total size is under the limit - def enforce_size_limit(image_list, image_sizes): - total_size = calculate_total_size(image_list, image_sizes) - - for image in image_list: - if total_size <= total_size_limit: - valid_images.append(image) - else: - img_url = image['raw_url'] - size = image_sizes.get(img_url, 0) - total_size -= size # Subtract size of the removed image - console.print(f"[red]Removed {img_url} to stay within the 25 MiB limit.") - - return valid_images - image_list = meta['image_list'] - # Check if the images are already hosted on an approved image host + if all(any(host in image['raw_url'] for host in approved_image_hosts) for image in meta['image_list']): console.print("[green]Images are already hosted on an approved image host. Skipping re-upload.") - # Enforce the total size limit on the existing image list - image_list = enforce_size_limit(image_list, meta['image_sizes']) + image_list = meta['image_list'] else: - # Proceed with the retry logic if images are not hosted on an approved image host images_reuploaded = False while img_host_index <= len(approved_image_hosts): - # Call handle_image_upload and pass the updated meta with the current image host index image_list, retry_mode, images_reuploaded = await self.handle_image_upload(meta, img_host_index, approved_image_hosts) - # If retry_mode is True, switch to the next host if retry_mode: console.print(f"[yellow]Switching to the next image host. 
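The MTV gate above is a nested all()/any(): re-upload is skipped only if every image's raw_url contains at least one approved host substring. Reduced to its essentials:

    approved_image_hosts = ["ptpimg", "imgbox"]
    image_list = [
        {"raw_url": "https://ptpimg.me/abcd.png"},
        {"raw_url": "https://images2.imgbox.com/ef/gh/xyz.png"},
    ]
    already_approved = all(
        any(host in image["raw_url"] for host in approved_image_hosts)
        for image in image_list
    )
    print(already_approved)  # True, so the existing URLs are reused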
Current index: {img_host_index}") img_host_index += 1 continue - # If we successfully uploaded images, enforce the size limit and break out of the loop if image_list is not None: - # Enforce the total size limit on the newly uploaded images - image_list = enforce_size_limit(image_list, meta['image_sizes']) + image_list = meta['new_images_key'] break if image_list is None: console.print("[red]All image hosts failed. Please check your configuration.") return - # Proceed with the rest of the upload process torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" if not os.path.exists(torrent_file_path): torrent_filename = "BASE" torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent" torrent = Torrent.read(torrent_path) - if torrent.piece_size > 8388608: # 8 MiB in bytes + if torrent.piece_size > 8388608: console.print("[red]Piece size is OVER 8M and does not work on MTV. Generating a new .torrent") - # Override the max_piece_size to 8 MiB - meta['max_piece_size'] = '8' # 8 MiB, to ensure the new torrent adheres to this limit - - # Determine include and exclude patterns based on whether it's a disc or not + meta['max_piece_size'] = '8' if meta['is_disc']: - include = [] # Adjust as needed for disc-specific inclusions, make sure it's a list - exclude = [] # Adjust as needed for disc-specific exclusions, make sure it's a list + include = [] + exclude = [] else: include = ["*.mkv", "*.mp4", "*.ts"] exclude = ["*.*", "*sample.mkv", "!sample*.*"] - # Create a new torrent with piece size explicitly set to 8 MiB from src.prep import Prep prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) new_torrent = prep.CustomTorrent( @@ -142,7 +102,6 @@ def enforce_size_limit(image_list, image_sizes): created_by="L4G's Upload Assistant" ) - # Validate and write the new torrent new_torrent.piece_size = 8 * 1024 * 1024 new_torrent.validate_piece_size() new_torrent.generate(callback=prep.torf_cb, interval=5) @@ -157,9 +116,7 @@ def enforce_size_limit(image_list, image_sizes): source_id = await self.get_source_id(meta) origin_id = await self.get_origin_id(meta) des_tags = await self.get_tags(meta) - - # Edit description and other details - await self.edit_desc(meta, images_reuploaded, valid_images) + await self.edit_desc(meta, images_reuploaded, image_list) group_desc = await self.edit_group_desc(meta) mtv_name = await self.edit_name(meta) @@ -312,7 +269,9 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts if uploaded_images: meta[new_images_key] = uploaded_images - + if meta['debug']: + for image in uploaded_images: + console.print(f"[debug] Response in upload_image_task: {image['img_url']}, {image['raw_url']}, {image['web_url']}") if not all(any(x in image['raw_url'] for x in approved_image_hosts) for image in meta.get(new_images_key, [])): console.print("[red]Unsupported image host detected, please use one of the approved image hosts") return meta[new_images_key], True, images_reuploaded # Trigger retry_mode if switching hosts @@ -343,11 +302,7 @@ async def edit_desc(self, meta, images_reuploaded, valid_images): for image in images: raw_url = image['raw_url'] img_url = image['img_url'] - - if images_reuploaded: - desc.write(f"[url={raw_url}][img=250]{img_url}[/img][/url]\n") - else: - desc.write(f"[img={raw_url}][/img]\n") + desc.write(f"[url={raw_url}][img=250]{img_url}[/img][/url]\n") desc.write(f"\n\n{base}") desc.close() From d79afcf6622ac6087854bf5dc0d37fff11caa4bc Mon Sep 17 
00:00:00 2001 From: Audionut Date: Sun, 1 Dec 2024 22:51:45 +1000 Subject: [PATCH 555/741] Fix host urls --- src/prep.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/prep.py b/src/prep.py index 65386e8b6..59cc7977d 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2865,7 +2865,7 @@ def upload_image_task(self, args): } response = requests.post(url, data=data, timeout=timeout) response_data = response.json() - img_url = response_data['data']['url'] + img_url = response_data['data'].get('medium', response_data['data']['image'])['url'] raw_url = response_data['data']['image']['url'] web_url = response_data['data']['url_viewer'] @@ -2880,9 +2880,9 @@ def upload_image_task(self, args): response = requests.post(url, data=data, headers=headers, timeout=timeout) response_data = response.json() if response_data.get('status_code') == 200: - img_url = response_data['data']['image']['url'] - raw_url = img_url - web_url = img_url + img_url = response_data['data'].get('medium', response_data['data']['image'])['url'] + raw_url = response_data['data']['image']['url'] + web_url = response_data['data']['url_viewer'] elif img_host == "oeimg": url = "https://imgoe.download/api/1/upload" @@ -2895,8 +2895,8 @@ def upload_image_task(self, args): response = requests.post(url, data=data, headers=headers, timeout=timeout) response_data = response.json() if response_data.get('status_code') == 200: - img_url = response_data['data']['image']['url'] - raw_url = img_url + img_url = response_data['data']['image']['url'] + raw_url = response_data['data']['image']['url'] web_url = response_data['data']['url_viewer'] @@ -2926,8 +2926,8 @@ def upload_image_task(self, args): response = requests.post(url, data=data, headers=headers, timeout=timeout) response_data = response.json() if response_data.get('status_code') == 200: - img_url = response_data['data']['image']['url'] - raw_url = img_url + img_url = response_data['data'].get('medium', response_data['data']['image'])['url'] + raw_url = response_data['data']['image']['url'] web_url = response_data['data']['url_viewer'] From af495947b841d4909440aac330d97e1ca40b6f52 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 1 Dec 2024 23:00:23 +1000 Subject: [PATCH 556/741] Only upload needed images to hit required amount --- src/prep.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/prep.py b/src/prep.py index 59cc7977d..afabd2a03 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2967,22 +2967,24 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i if 'POSTER.png' in image_glob: image_glob.remove('POSTER.png') image_glob = list(set(image_glob)) + existing_images = [img for img in meta['image_list'] if img.get('img_url') and img.get('web_url')] + existing_count = len(existing_images) + images_needed = max(0, total_screens - existing_count) - if len(meta['image_list']) >= total_screens and not retry_mode and img_host == initial_img_host: - console.print(f"[yellow]Skipping upload because images are already uploaded to {img_host}. Existing images: {len(meta['image_list'])}, Required: {total_screens}") + if existing_count >= total_screens and not retry_mode and img_host == initial_img_host: + console.print(f"[yellow]Skipping upload because enough images are already uploaded to {img_host}.
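These hunks pick different fields out of each host's parsed JSON payload: the medium-size rendition for img_url when one exists, the full image for raw_url, and the viewer page for web_url. An offline sketch of the imgbb-style lookup, with a hand-built dict standing in for response.json() (the key names follow the hunks above):

    # Assumed payload shape; there is no "medium" key here, so the full
    # image dict is the fallback returned by .get()
    response_data = {
        "data": {
            "image": {"url": "https://host/full.png"},
            "url_viewer": "https://host/view",
        }
    }
    img_url = response_data["data"].get("medium", response_data["data"]["image"])["url"]
    web_url = response_data["data"]["url_viewer"]
    print(img_url, web_url)  # https://host/full.png https://host/view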
Existing images: {existing_count}, Required: {total_screens}") return meta['image_list'], total_screens + upload_tasks = [(image, img_host, self.config, meta) for image in image_glob[:images_needed]] + # Define host-specific limits host_limits = { "imgbox": 6, # Other hosts can use the default pool size } - default_pool_size = os.cpu_count() pool_size = host_limits.get(img_host, default_pool_size) - upload_tasks = [(image, img_host, self.config, meta) for image in image_glob[-screens:]] - try: with Pool(processes=min(len(upload_tasks), pool_size)) as pool: results = list( @@ -3033,6 +3035,7 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i return meta['image_list'], len(successfully_uploaded) + async def imgbox_upload(self, chdir, image_glob, meta, return_dict): try: os.chdir(chdir) From f1423551783dcf6633bf1d3cc1dc734e4d8f0164 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 1 Dec 2024 23:08:08 +1000 Subject: [PATCH 557/741] lint --- src/prep.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index f034caaab..c1d9a2157 100644 --- a/src/prep.py +++ b/src/prep.py @@ -3012,7 +3012,6 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i return meta['image_list'], len(successfully_uploaded) - async def imgbox_upload(self, chdir, image_glob, meta, return_dict): try: os.chdir(chdir) From 7eab505a6872774bfe80228d02d0d80b635d94df Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 2 Dec 2024 06:36:26 +1000 Subject: [PATCH 558/741] Fix when description_text is none --- src/prep.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/prep.py b/src/prep.py index c1d9a2157..d77b9cb0f 100644 --- a/src/prep.py +++ b/src/prep.py @@ -546,6 +546,8 @@ async def gather_prep(self, meta, mode): meta['description'] = "" description_text = meta.get('description', '') + if description_text is None: + description_text = "" with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: description.write(description_text) From 178337a549d8553bc1fb17ad03648474a9747bb6 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 2 Dec 2024 06:52:13 +1000 Subject: [PATCH 559/741] DVD multi processing --- src/prep.py | 244 ++++++++++++++++++++-------------------------------- 1 file changed, 94 insertions(+), 150 deletions(-) diff --git a/src/prep.py b/src/prep.py index d77b9cb0f..f1880fc9b 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1310,25 +1310,27 @@ def dvd_screenshots(self, meta, disc_num, num_screens=None): num_screens = self.screens if num_screens == 0 or (len(meta.get('image_list', [])) >= num_screens and disc_num == 0): return + + if len(glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][disc_num]['name']}-*.png")) >= num_screens: + i = num_screens + console.print('[bold green]Reusing screenshots') + return + ifo_mi = MediaInfo.parse(f"{meta['discs'][disc_num]['path']}/VTS_{meta['discs'][disc_num]['main_set'][0][:2]}_0.IFO", mediainfo_options={'inform_version': '1'}) sar = 1 for track in ifo_mi.tracks: if track.track_type == "Video": if isinstance(track.duration, str): - # If the duration is a string, split and find the longest duration durations = [float(d) for d in track.duration.split(' / ')] - length = max(durations) / 1000 # Use the longest duration + length = max(durations) / 1000 else: - # If the duration is already an int or float, use it directly length = float(track.duration) / 1000 # noqa #F841 # Convert to seconds - # Proceed as usual for other 
fields par = float(track.pixel_aspect_ratio) dar = float(track.display_aspect_ratio) width = float(track.width) height = float(track.height) if par < 1: - # multiply that dar by the height and then do a simple width / height new_height = dar * height sar = width / new_height w_sar = 1 @@ -1338,168 +1340,110 @@ def dvd_screenshots(self, meta, disc_num, num_screens=None): w_sar = sar h_sar = 1 - main_set_length = len(meta['discs'][disc_num]['main_set']) - if main_set_length >= 3: - main_set = meta['discs'][disc_num]['main_set'][1:-1] - elif main_set_length == 2: - main_set = meta['discs'][disc_num]['main_set'][1:] - elif main_set_length == 1: - main_set = meta['discs'][disc_num]['main_set'] - n = 0 - os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") - i = 0 - if len(glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][disc_num]['name']}-*.png")) >= num_screens: - i = num_screens - console.print('[bold green]Reusing screenshots') - else: - if (meta.get('ffdebug', False)): - loglevel = 'verbose' - else: - loglevel = 'quiet' - looped = 0 - retake = False - with Progress( - TextColumn("[bold green]Saving Screens..."), - BarColumn(), - "[cyan]{task.completed}/{task.total}", - TimeRemainingColumn() - ) as progress: - screen_task = progress.add_task("[green]Saving Screens...", total=num_screens + 1) - ss_times = [] - for i in range(num_screens + 1): - if n >= len(main_set): - n = 0 - if n >= num_screens: - n -= num_screens - image = f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][disc_num]['name']}-{i}.png" - if not os.path.exists(image) or retake: - retake = False - - def _is_vob_good(n, loops, num_screens): - max_loops = 6 - fallback_duration = 300 - voblength = fallback_duration - - while loops < max_loops: - try: - vob_mi = MediaInfo.parse( - f"{meta['discs'][disc_num]['path']}/VTS_{main_set[n]}", - output='JSON' - ) - vob_mi = json.loads(vob_mi) - if meta['debug']: - console.print("[yellow]Analyzing VOB file:[/yellow]", main_set[n]) - - for track in vob_mi.get('media', {}).get('track', []): - duration = track.get('Duration') - width = track.get('Width') - height = track.get('Height') - if meta['debug']: - console.print(f"Track {n}: Duration={duration}, Width={width}, Height={height}") - - if duration and width and height: - if float(width) > 0 and float(height) > 0: - voblength = float(duration) - if meta['debug']: - console.print(f"[green]Valid track found: voblength={voblength}, n={n}[/green]") - return voblength, n - - except Exception as e: - console.print(f"[red]Error parsing VOB {n}: {e}") - - n = (n + 1) % len(main_set) - if n >= num_screens: - n -= num_screens - loops += 1 - if meta['debug']: - console.print(f"[yellow]Retrying: loops={loops}, current voblength={voblength}[/yellow]") - if meta['debug']: - console.print(f"[red]Fallback triggered: returning fallback_duration={fallback_duration}[/red]") - return fallback_duration, n + def _is_vob_good(n, loops, num_screens): + max_loops = 6 + fallback_duration = 300 + voblength = fallback_duration - try: - voblength, n = _is_vob_good(n, 0, num_screens) - ss_times = self.valid_ss_time(ss_times, num_screens + 1, voblength) + while loops < max_loops: + try: + vob_mi = MediaInfo.parse( + f"{meta['discs'][disc_num]['path']}/VTS_{main_set[n]}", + output='JSON' + ) + vob_mi = json.loads(vob_mi) + if meta['debug']: + console.print("[yellow]Analyzing VOB file:[/yellow]", main_set[n]) + + for track in vob_mi.get('media', {}).get('track', []): + duration = track.get('Duration') + width = track.get('Width') + height = 
track.get('Height') + if meta['debug']: + console.print(f"Track {n}: Duration={duration}, Width={width}, Height={height}") - if ss_times[i] < 0 or ss_times[i] > voblength: - raise ValueError(f"Invalid seek time: {ss_times[i]} for video length {voblength}") + if duration and width and height: + if float(width) > 0 and float(height) > 0: + voblength = float(duration) + if meta['debug']: + console.print(f"[green]Valid track found: voblength={voblength}, n={n}[/green]") + return voblength, n - input_file = f"{meta['discs'][disc_num]['path']}/VTS_{main_set[n]}" - if not os.path.exists(input_file): - console.print(f"[red]Missing input file: {input_file}") - retake = True - continue + except Exception as e: + console.print(f"[red]Error parsing VOB {n}: {e}") - # Run FFmpeg with timeout - try: - ff = ffmpeg.input(input_file, ss=ss_times[i]) - if w_sar != 1 or h_sar != 1: - ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) - - ff.output( - image, - vframes=1, - pix_fmt="rgb24" - ).overwrite_output().global_args('-loglevel', loglevel).run() - - if not os.path.exists(image): - if meta['debug']: - console.print(f"[red]Image not created: {image}, retaking...") - retake = True - continue - - except ffmpeg.Error as e: - console.print(f"[red]FFmpeg error: {e.stderr}") - retake = True - continue + n = (n + 1) % len(main_set) + if n >= num_screens: + n -= num_screens + loops += 1 + if meta['debug']: + console.print(f"[yellow]Retrying: loops={loops}, current voblength={voblength}[/yellow]") - except Exception as e: - console.print(f"[red]Error processing video file: {e}") - console.print(traceback.format_exc()) - retake = True - continue + if meta['debug']: + console.print(f"[red]Fallback triggered: returning fallback_duration={fallback_duration}[/red]") + return fallback_duration, n - self.optimize_images(image) - progress.update(screen_task, advance=1) - n += 1 - try: - file_size = os.path.getsize(image) - if self.img_host == "imgbb" and file_size <= 31000000: - i += 1 - elif self.img_host in ["imgbox", "pixhost"] and file_size <= 10000000: - i += 1 - elif file_size <= 75000: - console.print("[yellow]Image too small (likely a single color), retaking...") - retake = True - time.sleep(1) - elif self.img_host in ["ptpimg", "lensdump", "ptscreens", "oeimg"]: - i += 1 - else: - console.print("[red]Image too large for image host, retaking...") - retake = True - time.sleep(1) + main_set = meta['discs'][disc_num]['main_set'][1:] if len(meta['discs'][disc_num]['main_set']) > 1 else meta['discs'][disc_num]['main_set'] + os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") - looped = 0 + voblength, n = _is_vob_good(0, 0, num_screens) + ss_times = self.valid_ss_time([], num_screens + 1, voblength) + tasks = [] + for i in range(num_screens + 1): + image = f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][disc_num]['name']}-{i}.png" + input_file = f"{meta['discs'][disc_num]['path']}/VTS_{main_set[i % len(main_set)]}" + tasks.append((input_file, image, ss_times[i], meta, width, height, w_sar, h_sar)) - except Exception as e: - console.print(f"[red]Error validating image file: {e}") - looped += 1 - if looped >= 15: - console.print('[red]Failed to take screenshots after multiple attempts') - raise RuntimeError("Screenshot process failed") + with Pool(processes=min(num_screens + 1, os.cpu_count())) as pool: + results = list(tqdm(pool.imap_unordered(self.capture_dvd_screenshot, tasks), total=len(tasks), desc="Capturing Screenshots")) + if 
len(glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}/", f"{meta['discs'][disc_num]['name']}-*")) > num_screens: smallest = None smallest_size = float('inf') for screens in glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}/", f"{meta['discs'][disc_num]['name']}-*"): screen_path = os.path.join(f"{meta['base_dir']}/tmp/{meta['uuid']}/", screens) - screen_size = os.path.getsize(screen_path) - if screen_size < smallest_size: - smallest_size = screen_size - smallest = screen_path + try: + screen_size = os.path.getsize(screen_path) + if screen_size < smallest_size: + smallest_size = screen_size + smallest = screen_path + except FileNotFoundError: + console.print(f"[red]File not found: {screen_path}[/red]") + continue if smallest: + console.print(f"[yellow]Removing smallest image: {smallest} ({smallest_size} bytes)[/yellow]") os.remove(smallest) + optimize_tasks = [(image, self.config) for image in results if image and os.path.exists(image)] + + with Pool(processes=min(len(optimize_tasks), os.cpu_count())) as pool: + optimize_results = list( # noqa F841 + tqdm( + pool.imap_unordered(self.optimize_image_task, optimize_tasks), + total=len(optimize_tasks), + desc="Optimizing Images" + ) + ) + + valid_results_count = len([r for r in results if r]) + console.print(f"[green]Successfully captured {valid_results_count - 1} screenshots.") + + def capture_dvd_screenshot(self, task): + input_file, image, seek_time, meta, width, height, w_sar, h_sar = task + if os.path.exists(image): + return image + try: + loglevel = 'verbose' if meta.get('ffdebug', False) else 'quiet' + ff = ffmpeg.input(input_file, ss=seek_time) + if w_sar != 1 or h_sar != 1: + ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) + ff.output(image, vframes=1, pix_fmt="rgb24").overwrite_output().global_args('-loglevel', loglevel).run() + return image if os.path.exists(image) else None + except Exception as e: + console.print(f"[red]Error capturing screenshot for {input_file}: {str(e)}") + return None + def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=None, force_screenshots=False, manual_frames=None): if meta['debug']: start_time = time.time() From 99b9ee3da25dfa4c3a23a48c37c4daafb9dc636f Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 2 Dec 2024 07:27:37 +1000 Subject: [PATCH 560/741] Add back image size checking for hosts And +1 then remove smallest --- src/prep.py | 92 ++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 66 insertions(+), 26 deletions(-) diff --git a/src/prep.py b/src/prep.py index f1880fc9b..9f1fef9f5 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1322,7 +1322,7 @@ def dvd_screenshots(self, meta, disc_num, num_screens=None): if track.track_type == "Video": if isinstance(track.duration, str): durations = [float(d) for d in track.duration.split(' / ')] - length = max(durations) / 1000 + length = max(durations) / 1000 # Use the longest duration else: length = float(track.duration) / 1000 # noqa #F841 # Convert to seconds @@ -1385,7 +1385,6 @@ def _is_vob_good(n, loops, num_screens): main_set = meta['discs'][disc_num]['main_set'][1:] if len(meta['discs'][disc_num]['main_set']) > 1 else meta['discs'][disc_num]['main_set'] os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") - voblength, n = _is_vob_good(0, 0, num_screens) ss_times = self.valid_ss_time([], num_screens + 1, voblength) tasks = [] @@ -1408,11 +1407,12 @@ def _is_vob_good(n, loops, num_screens): smallest_size = screen_size smallest = screen_path except FileNotFoundError: - 
console.print(f"[red]File not found: {screen_path}[/red]") + console.print(f"[red]File not found: {screen_path}[/red]") # Handle potential edge cases continue if smallest: - console.print(f"[yellow]Removing smallest image: {smallest} ({smallest_size} bytes)[/yellow]") + if meta['debug']: + console.print(f"[yellow]Removing smallest image: {smallest} ({smallest_size} bytes)[/yellow]") os.remove(smallest) optimize_tasks = [(image, self.config) for image in results if image and os.path.exists(image)] @@ -1483,7 +1483,7 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non h_sar = 1 length = round(float(length)) - if (meta.get('ffdebug', False)): + if meta.get('ffdebug', False): loglevel = 'verbose' else: loglevel = 'quiet' @@ -1498,10 +1498,10 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non random_times = self.valid_ss_time(ss_times, num_screens - len(ss_times), length) ss_times.extend(random_times) else: - ss_times = self.valid_ss_time([], num_screens, length) + ss_times = self.valid_ss_time([], num_screens + 1, length) capture_tasks = [] - for i in range(num_screens): + for i in range(num_screens + 1): image_path = os.path.abspath(f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png") if not os.path.exists(image_path) or meta.get('retake', False): capture_tasks.append((path, ss_times[i], image_path, width, height, w_sar, h_sar, loglevel)) @@ -1512,31 +1512,71 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non if not capture_tasks: console.print("[yellow]All screenshots already exist. Skipping capture process.") else: + capture_results = [] with Pool(processes=len(capture_tasks)) as pool: - capture_results = list( - tqdm(pool.imap_unordered(self.capture_screenshot, capture_tasks), total=len(capture_tasks), desc="Capturing Screenshots") - ) + for result in tqdm(pool.imap_unordered(self.capture_screenshot, capture_tasks), total=len(capture_tasks), desc="Capturing Screenshots"): + capture_results.append(result) - optimize_tasks = [(result, self.config) for result in capture_results if "Error" not in result] - with Pool(processes=len(optimize_tasks)) as pool: - optimize_results = list( - tqdm(pool.imap_unordered(self.optimize_image_task, optimize_tasks), total=len(optimize_tasks), desc="Optimizing Images") - ) + if len(capture_results) > num_screens: + smallest = min(capture_results, key=os.path.getsize) + if meta['debug']: + console.print(f"[yellow]Removing smallest image: {smallest} ({os.path.getsize(smallest)} bytes)[/yellow]") + os.remove(smallest) + capture_results.remove(smallest) + + optimize_tasks = [(result, self.config) for result in capture_results if "Error" not in result] + optimize_results = [] + with Pool(processes=len(optimize_tasks)) as pool: + for result in tqdm(pool.imap_unordered(self.optimize_image_task, optimize_tasks), total=len(optimize_tasks), desc="Optimizing Images"): + optimize_results.append(result) + + valid_results = [] + for image_path in optimize_results: + if "Error" in image_path: + console.print(f"[red]{image_path}") + continue + + retake = False + image_size = os.path.getsize(image_path) + if not manual_frames: + if image_size <= 75000: + console.print(f"[yellow]Image {image_path} is incredibly small, retaking.") + retake = True + time.sleep(1) + elif image_size <= 31000000 and self.img_host == "imgbb" and not retake: + pass + elif image_size <= 10000000 and self.img_host in ["imgbox", "pixhost"] and not retake: + pass + elif self.img_host in ["ptpimg", "lensdump", 
"ptscreens", "oeimg"] and not retake: + pass + elif self.img_host == "freeimage.host": + console.print("[bold red]Support for freeimage.host has been removed. Please remove it from your config.") + exit() + elif not retake: + console.print("[red]Image too large for your image host, retaking.") + retake = True + time.sleep(1) + + if retake: + console.print(f"[yellow]Retaking screenshot for: {image_path}[/yellow]") + capture_tasks.append(image_path) + else: + valid_results.append(image_path) - for image_path in optimize_results: - if "Error" not in image_path: - img_dict = { - 'img_url': image_path, - 'raw_url': image_path, - 'web_url': image_path - } - meta['image_list'].append(img_dict) - else: - console.print(f"[red]{image_path}") + for image_path in valid_results: + img_dict = { + 'img_url': image_path, + 'raw_url': image_path, + 'web_url': image_path + } + meta['image_list'].append(img_dict) + + valid_results_count = len(valid_results) + console.print(f"[green]Successfully captured {valid_results_count} screenshots.") if meta['debug']: finish_time = time.time() - print(f"Screenshots processed in {finish_time - start_time:.4f} seconds") + console.print(f"Screenshots processed in {finish_time - start_time:.4f} seconds") def valid_ss_time(self, ss_times, num_screens, length, manual_frames=None): if manual_frames: From 3c086c782bd2fbdee93d6f20b6fb5c8d4c21c53a Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 2 Dec 2024 08:22:17 +1000 Subject: [PATCH 561/741] Disc multi processing --- src/prep.py | 174 +++++++++++++++++++++++++++++++--------------------- 1 file changed, 104 insertions(+), 70 deletions(-) diff --git a/src/prep.py b/src/prep.py index 9f1fef9f5..8ecad84de 100644 --- a/src/prep.py +++ b/src/prep.py @@ -48,7 +48,7 @@ from imdb import Cinemagoer import itertools import cli_ui - from rich.progress import Progress, TextColumn, BarColumn, TimeRemainingColumn + from rich.progress import Progress, TextColumn, BarColumn, TimeRemainingColumn # noqa F401 import platform import aiohttp from PIL import Image @@ -1210,11 +1210,9 @@ def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs, if num_screens == 0 or len(image_list) >= num_screens: return - # Sanitize the filename sanitized_filename = self.sanitize_filename(filename) - - # Get longest m2ts length = 0 + file = None for each in bdinfo['files']: int_length = sum(int(float(x)) * 60 ** i for i, x in enumerate(reversed(each['length'].split(':')))) if int_length > length: @@ -1222,80 +1220,116 @@ def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs, for root, dirs, files in os.walk(bdinfo['path']): for name in files: if name.lower() == each['file'].lower(): - file = f"{root}/{name}" + file = os.path.join(root, name) - if "VC-1" in bdinfo['video'][0]['codec'] or bdinfo['video'][0]['hdr_dv'] != "": - keyframe = 'nokey' - else: - keyframe = 'none' + keyframe = 'nokey' if "VC-1" in bdinfo['video'][0]['codec'] or bdinfo['video'][0]['hdr_dv'] != "" else 'none' os.chdir(f"{base_dir}/tmp/{folder_id}") - i = len(glob.glob(f"{sanitized_filename}-*.png")) - if i >= num_screens: - i = num_screens + existing_screens = glob.glob(f"{sanitized_filename}-*.png") + if len(existing_screens) >= num_screens: console.print('[bold green]Reusing screenshots') + return + + console.print("[bold yellow]Saving Screens...") + + if use_vs: + from src.vs import vs_screengn + vs_screengn(source=file, encode=None, filter_b_frames=False, num=num_screens, dir=f"{base_dir}/tmp/{folder_id}/") else: - console.print("[bold 
yellow]Saving Screens...") - if use_vs is True: - from src.vs import vs_screengn - vs_screengn(source=file, encode=None, filter_b_frames=False, num=num_screens, dir=f"{base_dir}/tmp/{folder_id}/") + if meta.get('ffdebug', False): + loglevel = 'verbose' else: - if (meta.get('ffdebug', False)): - loglevel = 'verbose' + loglevel = 'quiet' + + ss_times = self.valid_ss_time([], num_screens + 1, length) + capture_tasks = [ + ( + file, + ss_times[i], + os.path.abspath(f"{base_dir}/tmp/{folder_id}/{sanitized_filename}-{i}.png"), + keyframe, + loglevel + ) + for i in range(num_screens + 1) + ] + + with Pool(processes=min(len(capture_tasks), os.cpu_count())) as pool: + capture_results = list( + tqdm( + pool.imap_unordered(self.capture_disc_task, capture_tasks), + total=len(capture_tasks), + desc="Capturing Screenshots" + ) + ) + + if len(capture_results) > num_screens: + smallest = min(capture_results, key=os.path.getsize) + if meta['debug']: + console.print(f"[yellow]Removing smallest image: {smallest} ({os.path.getsize(smallest)} bytes)[/yellow]") + os.remove(smallest) + capture_results.remove(smallest) + + optimize_tasks = [(result, self.config) for result in capture_results if result and os.path.exists(result)] + with Pool(processes=min(len(optimize_tasks), os.cpu_count())) as pool: + optimized_results = list( + tqdm( + pool.imap_unordered(self.optimize_image_task, optimize_tasks), + total=len(optimize_tasks), + desc="Optimizing Images" + ) + ) + + valid_results = [] + for image_path in optimized_results: + retake = False + if not os.path.exists(image_path): + continue + + image_size = os.path.getsize(image_path) + if image_size <= 75000: + console.print(f"[yellow]Image {image_path} is incredibly small, retaking.") + retake = True + elif image_size <= 31000000 and self.img_host == "imgbb": + pass + elif image_size <= 10000000 and self.img_host in ["imgbox", "pixhost"]: + pass + elif self.img_host in ["ptpimg", "lensdump", "ptscreens", "oeimg"]: + pass else: - loglevel = 'quiet' - with Progress( - TextColumn("[bold green]Saving Screens..."), - BarColumn(), - "[cyan]{task.completed}/{task.total}", - TimeRemainingColumn() - ) as progress: - screen_task = progress.add_task("[green]Saving Screens...", total=num_screens + 1) - ss_times = [] - for i in range(num_screens + 1): - image = f"{base_dir}/tmp/{folder_id}/{sanitized_filename}-{i}.png" - try: - ss_times = self.valid_ss_time(ss_times, num_screens + 1, length) - ( - ffmpeg - .input(file, ss=ss_times[i], skip_frame=keyframe) - .output(image, vframes=1, pix_fmt="rgb24") - .overwrite_output() - .global_args('-loglevel', loglevel) - .run() - ) - except Exception: - console.print(traceback.format_exc()) - - self.optimize_images(image) - if os.path.getsize(Path(image)) <= 31000000 and self.img_host == "imgbb": - i += 1 - elif os.path.getsize(Path(image)) <= 10000000 and self.img_host in ["imgbox", 'pixhost']: - i += 1 - elif os.path.getsize(Path(image)) <= 75000: - console.print("[bold yellow]Image is incredibly small, retaking") - time.sleep(1) - elif self.img_host == "ptpimg": - i += 1 - elif self.img_host == "lensdump": - i += 1 - else: - console.print("[red]Image too large for your image host, retaking") - time.sleep(1) - progress.advance(screen_task) - - # Remove the smallest image - smallest = None - smallestsize = float('inf') - for screens in glob.glob1(f"{base_dir}/tmp/{folder_id}/", f"{sanitized_filename}-*"): - screen_path = os.path.join(f"{base_dir}/tmp/{folder_id}/", screens) - screensize = os.path.getsize(screen_path) - if screensize < 
smallestsize: - smallestsize = screensize - smallest = screen_path + console.print("[red]Image too large for your image host, retaking.") + retake = True - if smallest is not None: - os.remove(smallest) + if retake: + console.print(f"[yellow]Retaking screenshot for: {image_path}[/yellow]") + capture_tasks.append((file, None, image_path, keyframe, loglevel)) + else: + valid_results.append(image_path) + + for image_path in valid_results: + img_dict = { + 'img_url': image_path, + 'raw_url': image_path, + 'web_url': image_path + } + meta['image_list'].append(img_dict) + + console.print(f"[green]Successfully captured {len(meta['image_list'])} screenshots.") + + def capture_disc_task(self, task): + file, ss_time, image_path, keyframe, loglevel = task + try: + ( + ffmpeg + .input(file, ss=ss_time, skip_frame=keyframe) + .output(image_path, vframes=1, pix_fmt="rgb24") + .overwrite_output() + .global_args('-loglevel', loglevel) + .run() + ) + return image_path + except Exception as e: + console.print(f"[red]Error capturing screenshot: {e}[/red]") + return None def dvd_screenshots(self, meta, disc_num, num_screens=None): if 'image_list' not in meta: From ae3c75123a52ab855b757bbc9d3577b956d5c417 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 2 Dec 2024 16:43:40 +1000 Subject: [PATCH 562/741] HUNO - make discs work fixes https://github.com/Audionut/Upload-Assistant/issues/143 --- src/trackers/HUNO.py | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index 623ed3f7c..aa8b0b97f 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -114,22 +114,23 @@ def get_audio(self, meta): if dual: language = "DUAL" else: - # Read the MEDIAINFO.txt file - media_info_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt" - with open(media_info_path, 'r', encoding='utf-8') as f: - media_info_text = f.read() - - # Extract the first audio section - first_audio_section = re.search(r'Audio\s+ID\s+:\s+2(.*?)\n\n', media_info_text, re.DOTALL) - if not first_audio_section: # Fallback in case of a different structure - first_audio_section = re.search(r'Audio(.*?)Text', media_info_text, re.DOTALL) - - if first_audio_section: - # Extract language information from the first audio track - language_match = re.search(r'Language\s*:\s*(.+)', first_audio_section.group(1)) - if language_match: - language = language_match.group(1).strip() - language = re.sub(r'\(.+\)', '', language) # Remove text in parentheses + if not meta['is_disc']: + # Read the MEDIAINFO.txt file + media_info_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt" + with open(media_info_path, 'r', encoding='utf-8') as f: + media_info_text = f.read() + + # Extract the first audio section + first_audio_section = re.search(r'Audio\s+ID\s+:\s+2(.*?)\n\n', media_info_text, re.DOTALL) + if not first_audio_section: # Fallback in case of a different structure + first_audio_section = re.search(r'Audio(.*?)Text', media_info_text, re.DOTALL) + + if first_audio_section: + # Extract language information from the first audio track + language_match = re.search(r'Language\s*:\s*(.+)', first_audio_section.group(1)) + if language_match: + language = language_match.group(1).strip() + language = re.sub(r'\(.+\)', '', language) # Remove text in parentheses # Handle special cases if language == "zxx": @@ -235,7 +236,7 @@ async def get_cat_id(self, category_name): async def get_type_id(self, meta): type = meta['type'] - video_encode = meta['video_encode'] + 
video_encode = meta.get('video_encode') if type == 'REMUX': return '2' From f0d6b2c47f16b2d61d5d8ee19736be6eb3943ead Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 2 Dec 2024 19:54:40 +1000 Subject: [PATCH 563/741] MTV again --- src/prep.py | 11 +++++++++-- src/trackers/MTV.py | 21 +++++---------------- 2 files changed, 14 insertions(+), 18 deletions(-) diff --git a/src/prep.py b/src/prep.py index 8ecad84de..5fb543672 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2955,18 +2955,25 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") initial_img_host = self.config['DEFAULT'][f'img_host_{img_host_num}'] img_host = meta['imghost'] - using_custom_img_list = bool(custom_img_list) + using_custom_img_list = custom_img_list is not None if 'image_sizes' not in meta: meta['image_sizes'] = {} image_glob = list(set(custom_img_list)) if using_custom_img_list else glob.glob("*.png") + + # Exclude 'POSTER.png' from the list if 'POSTER.png' in image_glob: image_glob.remove('POSTER.png') + + # Ensure uniqueness in the image list image_glob = list(set(image_glob)) existing_images = [img for img in meta['image_list'] if img.get('img_url') and img.get('web_url')] existing_count = len(existing_images) - images_needed = max(0, total_screens - existing_count) + if not retry_mode: + images_needed = max(0, total_screens - existing_count) + else: + images_needed = total_screens if existing_count >= total_screens and not retry_mode and img_host == initial_img_host: console.print(f"[yellow]Skipping upload because enough images are already uploaded to {img_host}. Existing images: {existing_count}, Required: {total_screens}") diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 189abda5d..9d8cbe842 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -62,8 +62,9 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): img_host_index += 1 continue + new_images_key = f'mtv_images_key_{img_host_index}' if image_list is not None: - image_list = meta['new_images_key'] + image_list = meta[new_images_key] break if image_list is None: @@ -195,8 +196,7 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts folder_id = meta['uuid'] from src.prep import Prep prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) - if new_images_key not in meta: - meta[new_images_key] = [] + meta[new_images_key] = [] screenshots_dir = os.path.join(base_dir, 'tmp', folder_id) all_screenshots = [] @@ -236,7 +236,6 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts all_screenshots.extend(existing_screens) if all_screenshots: - return_dict = {} while True: current_img_host_key = f'img_host_{img_host_index}' current_img_host = self.config.get('DEFAULT', {}).get(current_img_host_key) @@ -254,18 +253,8 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts else: meta['imghost'] = current_img_host console.print(f"[green]Uploading to approved host '{current_img_host}'.") - break # Exit loop when a valid host is found - - uploaded_images, _ = prep.upload_screens( - meta, - screens=multi_screens, - img_host_num=img_host_index, - i=0, - total_screens=multi_screens, - custom_img_list=all_screenshots, - return_dict=return_dict, - retry_mode=retry_mode - ) + break + uploaded_images, _ = prep.upload_screens(meta, multi_screens, img_host_index, 0, multi_screens, all_screenshots, {new_images_key: meta[new_images_key]}, 
retry_mode) if uploaded_images: meta[new_images_key] = uploaded_images From 0bff43072aba03f9b0d1972d7504910695c99d57 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 2 Dec 2024 19:56:49 +1000 Subject: [PATCH 564/741] Revert to oxipng level 2 The compression benefit of 3 doesn't seem to outweigh the additional compute cost --- src/prep.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index 5fb543672..b95fa864f 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1672,7 +1672,7 @@ def optimize_image_task(self, args): if os.path.getsize(image) >= 16000000: oxipng.optimize(image, level=6) else: - oxipng.optimize(image, level=3) + oxipng.optimize(image, level=2) return image # Return image path if successful except (KeyboardInterrupt, Exception) as e: return f"Error: {e}" # Return error message From c02ace13e21cfc38ad89f656837080389d6bb707 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 2 Dec 2024 20:43:24 +1000 Subject: [PATCH 565/741] reuploading when packs --- src/prep.py | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/src/prep.py b/src/prep.py index b95fa864f..ae04ca52c 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2955,27 +2955,32 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") initial_img_host = self.config['DEFAULT'][f'img_host_{img_host_num}'] img_host = meta['imghost'] - using_custom_img_list = custom_img_list is not None + using_custom_img_list = isinstance(custom_img_list, list) and bool(custom_img_list) if 'image_sizes' not in meta: meta['image_sizes'] = {} - image_glob = list(set(custom_img_list)) if using_custom_img_list else glob.glob("*.png") + if using_custom_img_list: + image_glob = custom_img_list + existing_images = [] + existing_count = 0 + else: + image_glob = glob.glob("*.png") + if 'POSTER.png' in image_glob: + image_glob.remove('POSTER.png') + image_glob = list(set(image_glob)) + if meta['debug']: + console.print("image globs:", image_glob) - # Exclude 'POSTER.png' from the list - if 'POSTER.png' in image_glob: - image_glob.remove('POSTER.png') + existing_images = [img for img in meta['image_list'] if img.get('img_url') and img.get('web_url')] + existing_count = len(existing_images) - # Ensure uniqueness in the image list - image_glob = list(set(image_glob)) - existing_images = [img for img in meta['image_list'] if img.get('img_url') and img.get('web_url')] - existing_count = len(existing_images) if not retry_mode: images_needed = max(0, total_screens - existing_count) else: images_needed = total_screens - if existing_count >= total_screens and not retry_mode and img_host == initial_img_host: + if existing_count >= total_screens and not retry_mode and img_host == initial_img_host and not using_custom_img_list: console.print(f"[yellow]Skipping upload because enough images are already uploaded to {img_host}. 
Existing images: {existing_count}, Required: {total_screens}") return meta['image_list'], total_screens @@ -2990,7 +2995,7 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i pool_size = host_limits.get(img_host, default_pool_size) try: - with Pool(processes=min(len(upload_tasks), pool_size)) as pool: + with Pool(processes=max(1, min(len(upload_tasks), pool_size))) as pool: results = list( tqdm( pool.imap_unordered(self.upload_image_task, upload_tasks), From c1313d5cc0f51949ac63f2bea35e7d6feecbec5e Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 2 Dec 2024 21:16:20 +1000 Subject: [PATCH 566/741] add tqdm to requirements --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 743fbd895..2797cdc55 100644 --- a/requirements.txt +++ b/requirements.txt @@ -23,3 +23,4 @@ str2bool click aiohttp Pillow +tqdm From fee84b8aa9c6fa060d91f9d06578bdee4f07cd3f Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 2 Dec 2024 21:16:45 +1000 Subject: [PATCH 567/741] HUNO - DVD remux naming fix --- src/trackers/HUNO.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index aa8b0b97f..07a4db262 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -214,7 +214,7 @@ async def get_name(self, meta): name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} {source} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" elif type == "REMUX" and source == "BluRay": # BluRay Remux name = f"{title} ({search_year}) {season}{episode} {three_d} {edition} ({resolution} {uhd} {source} {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" # SOURCE - elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD"): # DVD Remux + elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD"): # DVD Remux name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} DVD {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" # SOURCE elif type == "ENCODE": # Encode name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} {scale} {uhd} {source} {hybrid} {video_encode} {hdr} {audio} {tag}) {repack}" # SOURCE From bb736fb7faa1e7b9e8ecadd0a63700936ec6f8e3 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 2 Dec 2024 22:11:41 +1000 Subject: [PATCH 568/741] HUNO - dvd remux fix fixes https://github.com/Audionut/Upload-Assistant/issues/146 --- src/trackers/HUNO.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index 07a4db262..7a4c39952 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -196,7 +196,7 @@ async def get_name(self, meta): name = f"{title} ({year}) {edition} ({resolution} {source} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" elif type == "REMUX" and source == "BluRay": # BluRay Remux name = f"{title} ({year}) {three_d} {edition} ({resolution} {uhd} {source} {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" - elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD"): # DVD Remux + elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD"): # DVD Remux name = f"{title} ({year}) {edition} (DVD {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" elif type == "ENCODE": # Encode name = f"{title} ({year}) {edition} ({resolution} {scale} {uhd} {source} {hybrid} {video_encode} {hdr} {audio} {tag}) {repack}" From b31ccac0dc309d011aa3c7505261b5cb0d051986 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 2 Dec 
2024 22:46:48 +1000 Subject: [PATCH 569/741] HUNO add resolution to dvd remux --- src/trackers/HUNO.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index 7a4c39952..d951e6dae 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -197,7 +197,7 @@ async def get_name(self, meta): elif type == "REMUX" and source == "BluRay": # BluRay Remux name = f"{title} ({year}) {three_d} {edition} ({resolution} {uhd} {source} {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD"): # DVD Remux - name = f"{title} ({year}) {edition} (DVD {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" + name = f"{title} ({year}) {edition} ({resolution} DVD {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" elif type == "ENCODE": # Encode name = f"{title} ({year}) {edition} ({resolution} {scale} {uhd} {source} {hybrid} {video_encode} {hdr} {audio} {tag}) {repack}" elif type in ("WEBDL", "WEBRIP"): # WEB From cf785155953c2ed4562539c51b3b59111818b0d5 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 3 Dec 2024 11:44:58 +1000 Subject: [PATCH 570/741] Fix using wrong nfo data with scene auto nfo --- src/prep.py | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/src/prep.py b/src/prep.py index ae04ca52c..e5e74a61f 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1164,6 +1164,7 @@ def is_scene(self, video, meta, imdb=None): with open(nfo_file_path, 'wb') as f: f.write(nfo_response.content) meta['nfo'] = True + meta['auto_nfo'] = True console.print(f"[green]NFO downloaded to {nfo_file_path}") else: console.print("[yellow]NFO file not available for download.") @@ -3562,25 +3563,34 @@ def clean_text(text): uuid = meta['uuid'] current_dir_path = "*.nfo" specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") - + if meta['debug']: + console.print(f"specified_dir_path: {specified_dir_path}") if meta.get('nfo') and not content_written: - nfo_files = glob.glob(current_dir_path) - if not nfo_files: + if meta['auto_nfo'] is True: nfo_files = glob.glob(specified_dir_path) scene_nfo = True + else: + nfo_files = glob.glob(current_dir_path) + if meta['debug']: + console.print(f"Glob current_dir_path matches: {glob.glob(current_dir_path)}") + console.print(f"Glob specified_dir_path matches: {glob.glob(specified_dir_path)}") + if not nfo_files: + console.print("NFO was set but no nfo file was found") + description.write("\n") + return meta if nfo_files: - console.print("We found nfo") nfo = nfo_files[0] try: with open(nfo, 'r', encoding="utf-8") as nfo_file: nfo_content = nfo_file.read() - console.print("NFO content read with utf-8 encoding.") + if meta['debug']: + console.print("NFO content read with utf-8 encoding.") except UnicodeDecodeError: - console.print("utf-8 decoding failed, trying latin1.") + if meta['debug']: + console.print("utf-8 decoding failed, trying latin1.") with open(nfo, 'r', encoding="latin1") as nfo_file: nfo_content = nfo_file.read() - console.print("NFO content read with latin1 encoding.") if scene_nfo is True: description.write(f"[center][spoiler=Scene NFO:][code]{nfo_content}[/code][/spoiler][/center]\n") From 6898d2ecdcf8851ddaee9a5c3e3594e2dbe97f89 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 3 Dec 2024 13:14:24 +1000 Subject: [PATCH 571/741] Add new config "cutoff screens" --- data/example-config.py | 5 +++++ src/prep.py | 35 ++++++++++++++++++++--------------- upload.py | 4 ++-- 3 files changed, 27 
insertions(+), 17 deletions(-) diff --git a/data/example-config.py b/data/example-config.py index b6342c583..557d83c0b 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -25,6 +25,11 @@ # Number of screenshots to capture "screens": "6", + # Number of cutoff screenshots + # If there are at least this many screenshots already, perhaps pulled from existing + # description, skip creating and uploading any further screenshots. + "cutoff_screens": "3", + # Providing the option to change the size of the screenshot thumbnails where supported. # Default is 350, ie [img=350] "thumbnail_size": "350", diff --git a/src/prep.py b/src/prep.py index e5e74a61f..491240d41 100644 --- a/src/prep.py +++ b/src/prep.py @@ -419,6 +419,7 @@ async def handle_image_list(self, meta, tracker_name): console.print(f"[green]Images retained from {tracker_name}.") async def gather_prep(self, meta, mode): + meta['cutoff'] = int(self.config['DEFAULT'].get('cutoff_screens', 3)) meta['mode'] = mode base_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) meta['isdir'] = os.path.isdir(meta['path']) @@ -1202,8 +1203,8 @@ def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs, meta['image_list'] = [] existing_images = [img for img in meta['image_list'] if isinstance(img, dict) and img.get('img_url', '').startswith('http')] - if len(existing_images) >= 3 and not force_screenshots: - console.print("[yellow]There are already at least 3 images in the image list. Skipping additional screenshots.") + if len(existing_images) >= meta.get('cutoff') and not force_screenshots: + console.print("[yellow]There are already at least {} images in the image list. Skipping additional screenshots.".format(meta.get('cutoff'))) return if num_screens is None: @@ -1232,7 +1233,7 @@ def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs, return console.print("[bold yellow]Saving Screens...") - + capture_results = [] if use_vs: from src.vs import vs_screengn vs_screengn(source=file, encode=None, filter_b_frames=False, num=num_screens, dir=f"{base_dir}/tmp/{folder_id}/") @@ -1337,8 +1338,8 @@ def dvd_screenshots(self, meta, disc_num, num_screens=None): meta['image_list'] = [] existing_images = [img for img in meta['image_list'] if isinstance(img, dict) and img.get('img_url', '').startswith('http')] - if len(existing_images) >= 3: - console.print("[yellow]There are already at least 3 images in the image list. Skipping additional screenshots.") + if len(existing_images) >= meta.get('cutoff'): + console.print("[yellow]There are already at least {} images in the image list. Skipping additional screenshots.".format(meta.get('cutoff'))) return if num_screens is None: @@ -1487,8 +1488,8 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non existing_images = [img for img in meta['image_list'] if isinstance(img, dict) and img.get('img_url', '').startswith('http')] - if len(existing_images) >= 3 and not force_screenshots: - console.print("[yellow]There are already at least 3 images in the image list. Skipping additional screenshots.") + if len(existing_images) >= meta.get('cutoff') and not force_screenshots: + console.print("[yellow]There are already at least {} images in the image list. 
Skipping additional screenshots.".format(meta.get('cutoff'))) return if num_screens is None: @@ -1536,6 +1537,8 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non ss_times = self.valid_ss_time([], num_screens + 1, length) capture_tasks = [] + capture_results = [] + for i in range(num_screens + 1): image_path = os.path.abspath(f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png") if not os.path.exists(image_path) or meta.get('retake', False): @@ -1547,17 +1550,19 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non if not capture_tasks: console.print("[yellow]All screenshots already exist. Skipping capture process.") else: - capture_results = [] with Pool(processes=len(capture_tasks)) as pool: - for result in tqdm(pool.imap_unordered(self.capture_screenshot, capture_tasks), total=len(capture_tasks), desc="Capturing Screenshots"): + for result in tqdm(pool.imap_unordered(self.capture_screenshot, capture_tasks), + total=len(capture_tasks), + desc="Capturing Screenshots"): capture_results.append(result) - if len(capture_results) > num_screens: - smallest = min(capture_results, key=os.path.getsize) - if meta['debug']: - console.print(f"[yellow]Removing smallest image: {smallest} ({os.path.getsize(smallest)} bytes)[/yellow]") - os.remove(smallest) - capture_results.remove(smallest) + if capture_results: + if len(capture_results) > num_screens: + smallest = min(capture_results, key=os.path.getsize) + if meta.get('debug', False): + console.print(f"[yellow]Removing smallest image: {smallest} ({os.path.getsize(smallest)} bytes)[/yellow]") + os.remove(smallest) + capture_results.remove(smallest) optimize_tasks = [(result, self.config) for result in capture_results if "Error" not in result] optimize_results = [] diff --git a/upload.py b/upload.py index d34bc4d06..0fbd12981 100644 --- a/upload.py +++ b/upload.py @@ -254,8 +254,8 @@ async def process_meta(meta, base_dir): with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: json.dump(meta, f, indent=4) meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await prep.get_name(meta) - - if len(meta.get('image_list', [])) < 3 and meta.get('skip_imghost_upload', False) is False: + meta['cutoff'] = int(config['DEFAULT'].get('cutoff_screens', 3)) + if len(meta.get('image_list', [])) < meta.get('cutoff') and meta.get('skip_imghost_upload', False) is False: if 'image_list' not in meta: meta['image_list'] = [] return_dict = {} From d2d39d474ce1645fe2e147f1e091b8cfa07d5a17 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 3 Dec 2024 13:17:37 +1000 Subject: [PATCH 572/741] remove smallest only if capture results --- src/prep.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/prep.py b/src/prep.py index 491240d41..18230bb58 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1263,13 +1263,13 @@ def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs, desc="Capturing Screenshots" ) ) - - if len(capture_results) > num_screens: - smallest = min(capture_results, key=os.path.getsize) - if meta['debug']: - console.print(f"[yellow]Removing smallest image: {smallest} ({os.path.getsize(smallest)} bytes)[/yellow]") - os.remove(smallest) - capture_results.remove(smallest) + if capture_results: + if len(capture_results) > num_screens: + smallest = min(capture_results, key=os.path.getsize) + if meta['debug']: + console.print(f"[yellow]Removing smallest image: {smallest} ({os.path.getsize(smallest)} bytes)[/yellow]") 
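A rough standalone sketch of the over-capture heuristic these patches settle on ("+1 then remove smallest"): capture one screenshot more than needed, then discard the smallest file, since near-solid frames such as fades and black frames compress to tiny PNGs. The helper and argument names below are illustrative, not part of the codebase:

    import os

    def drop_smallest_screenshot(paths, keep):
        """Keep the `keep` largest PNGs from `paths` and delete the rest.

        Assumes `paths` is a list of existing screenshot files; file size is
        used as a cheap proxy for how much visual content a frame holds.
        """
        survivors = sorted(paths, key=os.path.getsize, reverse=True)
        for path in survivors[keep:]:
            os.remove(path)  # the smallest capture is the most likely dud
        return survivors[:keep]

Pruning after the fact costs one extra ffmpeg capture per run, but it avoids re-seeking and retaking when a single capture happens to land on a fade-in.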
+ os.remove(smallest) + capture_results.remove(smallest) optimize_tasks = [(result, self.config) for result in capture_results if result and os.path.exists(result)] with Pool(processes=min(len(optimize_tasks), os.cpu_count())) as pool: From 12e29df3268fd42e170462cee87e92753d020e9c Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 3 Dec 2024 14:05:52 +1000 Subject: [PATCH 573/741] Add new config "task limit" --- data/example-config.py | 5 +++++ src/prep.py | 37 +++++++++++++++++++++++-------------- 2 files changed, 28 insertions(+), 14 deletions(-) diff --git a/data/example-config.py b/data/example-config.py index 557d83c0b..fd55bf3ab 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -30,6 +30,11 @@ # description, skip creating and uploading any further screenshots. "cutoff_screens": "3", + # multi processing task limit + # When capturing/optimizing images, limit to this many concurrent tasks + # defaults to 'os.cpu_count()' + # "task_limit": "2", + # Providing the option to change the size of the screenshot thumbnails where supported. # Default is 350, ie [img=350] "thumbnail_size": "350", diff --git a/src/prep.py b/src/prep.py index 18230bb58..2511f0cbc 100644 --- a/src/prep.py +++ b/src/prep.py @@ -420,6 +420,9 @@ async def handle_image_list(self, meta, tracker_name): async def gather_prep(self, meta, mode): meta['cutoff'] = int(self.config['DEFAULT'].get('cutoff_screens', 3)) + task_limit = self.config['DEFAULT'].get('task_limit', "0") + if int(task_limit) > 0: + meta['task_limit'] = task_limit meta['mode'] = mode base_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) meta['isdir'] = os.path.isdir(meta['path']) @@ -1234,6 +1237,8 @@ def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs, console.print("[bold yellow]Saving Screens...") capture_results = [] + task_limit = int(meta.get('task_limit', os.cpu_count())) + if use_vs: from src.vs import vs_screengn vs_screengn(source=file, encode=None, filter_b_frames=False, num=num_screens, dir=f"{base_dir}/tmp/{folder_id}/") @@ -1255,7 +1260,7 @@ def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs, for i in range(num_screens + 1) ] - with Pool(processes=min(len(capture_tasks), os.cpu_count())) as pool: + with Pool(processes=min(len(capture_tasks), task_limit)) as pool: capture_results = list( tqdm( pool.imap_unordered(self.capture_disc_task, capture_tasks), @@ -1272,7 +1277,7 @@ def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs, capture_results.remove(smallest) optimize_tasks = [(result, self.config) for result in capture_results if result and os.path.exists(result)] - with Pool(processes=min(len(optimize_tasks), os.cpu_count())) as pool: + with Pool(processes=min(len(optimize_tasks), task_limit)) as pool: optimized_results = list( tqdm( pool.imap_unordered(self.optimize_image_task, optimize_tasks), @@ -1424,12 +1429,13 @@ def _is_vob_good(n, loops, num_screens): voblength, n = _is_vob_good(0, 0, num_screens) ss_times = self.valid_ss_time([], num_screens + 1, voblength) tasks = [] + task_limit = int(meta.get('task_limit', os.cpu_count())) for i in range(num_screens + 1): image = f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][disc_num]['name']}-{i}.png" input_file = f"{meta['discs'][disc_num]['path']}/VTS_{main_set[i % len(main_set)]}" tasks.append((input_file, image, ss_times[i], meta, width, height, w_sar, h_sar)) - with Pool(processes=min(num_screens + 1, os.cpu_count())) as pool: + with Pool(processes=min(num_screens + 
1, task_limit)) as pool: results = list(tqdm(pool.imap_unordered(self.capture_dvd_screenshot, tasks), total=len(tasks), desc="Capturing Screenshots")) if len(glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}/", f"{meta['discs'][disc_num]['name']}-*")) > num_screens: @@ -1453,7 +1459,7 @@ def _is_vob_good(n, loops, num_screens): optimize_tasks = [(image, self.config) for image in results if image and os.path.exists(image)] - with Pool(processes=min(len(optimize_tasks), os.cpu_count())) as pool: + with Pool(processes=min(len(optimize_tasks), task_limit)) as pool: optimize_results = list( # noqa F841 tqdm( pool.imap_unordered(self.optimize_image_task, optimize_tasks), @@ -1538,37 +1544,40 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non capture_tasks = [] capture_results = [] + task_limit = int(meta.get('task_limit', os.cpu_count())) + capture_tasks = [] for i in range(num_screens + 1): image_path = os.path.abspath(f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png") if not os.path.exists(image_path) or meta.get('retake', False): capture_tasks.append((path, ss_times[i], image_path, width, height, w_sar, h_sar, loglevel)) - else: - if meta['debug']: - console.print(f"[yellow]Skipping existing screenshot: {image_path}") + elif meta['debug']: + console.print(f"[yellow]Skipping existing screenshot: {image_path}") if not capture_tasks: console.print("[yellow]All screenshots already exist. Skipping capture process.") else: - with Pool(processes=len(capture_tasks)) as pool: + with Pool(processes=min(len(capture_tasks), task_limit)) as pool: for result in tqdm(pool.imap_unordered(self.capture_screenshot, capture_tasks), total=len(capture_tasks), desc="Capturing Screenshots"): capture_results.append(result) - if capture_results: - if len(capture_results) > num_screens: + if capture_results and len(capture_results) > num_screens: smallest = min(capture_results, key=os.path.getsize) - if meta.get('debug', False): + if meta['debug']: console.print(f"[yellow]Removing smallest image: {smallest} ({os.path.getsize(smallest)} bytes)[/yellow]") os.remove(smallest) capture_results.remove(smallest) optimize_tasks = [(result, self.config) for result in capture_results if "Error" not in result] optimize_results = [] - with Pool(processes=len(optimize_tasks)) as pool: - for result in tqdm(pool.imap_unordered(self.optimize_image_task, optimize_tasks), total=len(optimize_tasks), desc="Optimizing Images"): - optimize_results.append(result) + if optimize_tasks: + with Pool(processes=min(len(optimize_tasks), task_limit)) as pool: + for result in tqdm(pool.imap_unordered(self.optimize_image_task, optimize_tasks), + total=len(optimize_tasks), + desc="Optimizing Images"): + optimize_results.append(result) valid_results = [] for image_path in optimize_results: From e1ee8f96c4cd88ad935a94533c0a18f8a483e9d8 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 3 Dec 2024 18:21:01 +1000 Subject: [PATCH 574/741] Manual type fix fixes https://github.com/Audionut/Upload-Assistant/issues/150 --- src/args.py | 2 +- src/prep.py | 43 +++++++++++++++++++++++-------------------- upload.py | 2 +- 3 files changed, 25 insertions(+), 22 deletions(-) diff --git a/src/args.py b/src/args.py index c4057e767..5c348cfcf 100644 --- a/src/args.py +++ b/src/args.py @@ -26,7 +26,7 @@ def parse(self, args, meta): parser.add_argument('-s', '--screens', nargs='*', required=False, help="Number of screenshots", default=int(self.config['DEFAULT']['screens'])) parser.add_argument('-mf', '--manual_frames', required=False, 
help="Comma-separated frame numbers to use as screenshots", type=str, default=None) parser.add_argument('-c', '--category', nargs='*', required=False, help="Category [MOVIE, TV, FANRES]", choices=['movie', 'tv', 'fanres']) - parser.add_argument('-t', '--type', nargs='*', required=False, help="Type [DISC, REMUX, ENCODE, WEBDL, WEBRIP, HDTV, DVDRIP]", choices=['disc', 'remux', 'encode', 'webdl', 'web-dl', 'webrip', 'hdtv', 'dvdrip']) + parser.add_argument('-t', '--type', nargs='*', required=False, help="Type [DISC, REMUX, ENCODE, WEBDL, WEBRIP, HDTV, DVDRIP]", choices=['disc', 'remux', 'encode', 'webdl', 'web-dl', 'webrip', 'hdtv', 'dvdrip'], dest="manual_type") parser.add_argument('--source', nargs='*', required=False, help="Source [Blu-ray, BluRay, DVD, HDDVD, WEB, HDTV, UHDTV, LaserDisc, DCP]", choices=['Blu-ray', 'BluRay', 'DVD', 'HDDVD', 'WEB', 'HDTV', 'UHDTV', 'LaserDisc', 'DCP'], dest="manual_source") parser.add_argument('-res', '--resolution', nargs='*', required=False, help="Resolution [2160p, 1080p, 1080i, 720p, 576p, 576i, 480p, 480i, 8640p, 4320p, OTHER]", choices=['2160p', '1080p', '1080i', '720p', '576p', '576i', '480p', '480i', '8640p', '4320p', 'other']) parser.add_argument('-tmdb', '--tmdb', nargs='*', required=False, help="TMDb ID", type=str, dest='tmdb_manual') diff --git a/src/prep.py b/src/prep.py index 2511f0cbc..cd11d0a52 100644 --- a/src/prep.py +++ b/src/prep.py @@ -660,7 +660,7 @@ async def process_tracker(tracker_name, meta): meta['tmdb'] = meta.get('tmdb_manual', None) if meta.get('type', None) is None: - meta['type'] = self.get_type(video, meta['scene'], meta['is_disc']) + meta['type'] = self.get_type(video, meta['scene'], meta['is_disc'], meta) if meta.get('category', None) is None: meta['category'] = self.get_cat(video) else: @@ -1696,25 +1696,28 @@ def optimize_image_task(self, args): Get type and category """ - def get_type(self, video, scene, is_disc): - filename = os.path.basename(video).lower() - if "remux" in filename: - type = "REMUX" - elif any(word in filename for word in [" web ", ".web.", "web-dl"]): - type = "WEBDL" - elif "webrip" in filename: - type = "WEBRIP" - # elif scene == True: - # type = "ENCODE" - elif "hdtv" in filename: - type = "HDTV" - elif is_disc is not None: - type = "DISC" - elif "dvdrip" in filename: - type = "DVDRIP" - # exit() + def get_type(self, video, scene, is_disc, meta): + if meta.get('manual_type'): + type = meta.get('manual_type') else: - type = "ENCODE" + filename = os.path.basename(video).lower() + if "remux" in filename: + type = "REMUX" + elif any(word in filename for word in [" web ", ".web.", "web-dl"]): + type = "WEBDL" + elif "webrip" in filename: + type = "WEBRIP" + # elif scene == True: + # type = "ENCODE" + elif "hdtv" in filename: + type = "HDTV" + elif is_disc is not None: + type = "DISC" + elif "dvdrip" in filename: + type = "DVDRIP" + # exit() + else: + type = "ENCODE" return type def get_cat(self, video): @@ -3094,7 +3097,7 @@ async def imgbox_upload(self, chdir, image_glob, meta, return_dict): return [] async def get_name(self, meta): - type = meta.get('type', "") + type = meta.get('type', "").upper() title = meta.get('title', "") alt_title = meta.get('aka', "") year = meta.get('year', "") diff --git a/upload.py b/upload.py index 0fbd12981..38a69c598 100644 --- a/upload.py +++ b/upload.py @@ -198,7 +198,7 @@ def merge_meta(meta, saved_meta, path): 'trackers', 'dupe', 'debug', 'anon', 'category', 'type', 'screens', 'nohash', 'manual_edition', 'imdb', 'tmdb_manual', 'mal', 'manual', 'hdb', 'ptp', 'blu', 
'no_season', 'no_aka', 'no_year', 'no_dub', 'no_tag', 'no_seed', 'client', 'desclink', 'descfile', 'desc', 'draft', 'modq', 'region', 'freeleech', 'personalrelease', 'unattended', 'manual_season', 'manual_episode', 'torrent_creation', 'qbit_tag', 'qbit_cat', - 'skip_imghost_upload', 'imghost', 'manual_source', 'webdv', 'hardcoded-subs', 'dual_audio' + 'skip_imghost_upload', 'imghost', 'manual_source', 'webdv', 'hardcoded-subs', 'dual_audio', 'manual_type' ] sanitized_saved_meta = {} for key, value in saved_meta.items(): From 00a293e91a6431c04ab959a35e0dadf0e95650fe Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 3 Dec 2024 19:25:38 +1000 Subject: [PATCH 575/741] MTV cleanup - fixes manual mode --- src/trackers/MTV.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 9d8cbe842..84f402b46 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -62,7 +62,7 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): img_host_index += 1 continue - new_images_key = f'mtv_images_key_{img_host_index}' + new_images_key = 'mtv_images_key' if image_list is not None: image_list = meta[new_images_key] break @@ -117,7 +117,7 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): source_id = await self.get_source_id(meta) origin_id = await self.get_origin_id(meta) des_tags = await self.get_tags(meta) - await self.edit_desc(meta, images_reuploaded, image_list) + await self.edit_desc(meta) group_desc = await self.edit_group_desc(meta) mtv_name = await self.edit_name(meta) @@ -183,7 +183,7 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts retry_mode = False images_reuploaded = False - new_images_key = f'mtv_images_key_{img_host_index}' + new_images_key = 'mtv_images_key' discs = meta.get('discs', []) # noqa F841 filelist = meta.get('video', []) filename = meta['filename'] @@ -267,7 +267,7 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts return meta[new_images_key], False, images_reuploaded # Return retry_mode and images_reuploaded - async def edit_desc(self, meta, images_reuploaded, valid_images): + async def edit_desc(self, meta): base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as desc: @@ -283,8 +283,8 @@ async def edit_desc(self, meta, images_reuploaded, valid_images): elif mi_dump: desc.write("[mediainfo]" + mi_dump + "[/mediainfo]\n\n") - if valid_images: - images = valid_images + if meta['mtv_images_key']: + images = meta['mtv_images_key'] else: images = meta['image_list'] if len(images) > 0: From e39df52d5f9398d310f39e880b8aa58e84db6eaa Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 3 Dec 2024 19:38:34 +1000 Subject: [PATCH 576/741] Add HDB rehost to config --- data/example-config.py | 1 + 1 file changed, 1 insertion(+) diff --git a/data/example-config.py b/data/example-config.py index fd55bf3ab..3e42b8a7d 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -162,6 +162,7 @@ "passkey": "HDB passkey", "announce_url": "https://hdbits.org/announce/Custom_Announce_URL", # "anon": False, + "img_rehost": True, }, "HDT": { "username": "username", From 6cf06bcb21401709feb2dfe9cbf9ccc359bfbda8 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 3 Dec 2024 19:40:14 +1000 Subject: [PATCH 577/741] HDB default rehost to true, the safe 
option --- src/trackers/HDB.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py index a59c42e84..e7f0b21fd 100644 --- a/src/trackers/HDB.py +++ b/src/trackers/HDB.py @@ -22,7 +22,7 @@ def __init__(self, config): self.source_flag = 'HDBits' self.username = config['TRACKERS']['HDB'].get('username', '').strip() self.passkey = config['TRACKERS']['HDB'].get('passkey', '').strip() - self.rehost_images = config['TRACKERS']['HDB'].get('img_rehost', False) + self.rehost_images = config['TRACKERS']['HDB'].get('img_rehost', True) self.signature = None self.banned_groups = [""] From eed6b28161763acf13ac3fd97275374a050ebf0e Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 3 Dec 2024 20:38:10 +1000 Subject: [PATCH 578/741] Add webdl to filename catch list --- src/prep.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index cd11d0a52..b25a8daa2 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1703,7 +1703,7 @@ def get_type(self, video, scene, is_disc, meta): filename = os.path.basename(video).lower() if "remux" in filename: type = "REMUX" - elif any(word in filename for word in [" web ", ".web.", "web-dl"]): + elif any(word in filename for word in [" web ", ".web.", "web-dl", "webdl"]): type = "WEBDL" elif "webrip" in filename: type = "WEBRIP" From 96fbd38ba408248a7079c5192dc565c4d8a87ced Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 4 Dec 2024 17:51:44 +1000 Subject: [PATCH 579/741] set task_limit enabled and 1 for testing --- data/example-config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/example-config.py b/data/example-config.py index 3e42b8a7d..989c2d542 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -33,7 +33,7 @@ # multi processing task limit # When capturing/optimizing images, limit to this many concurrent tasks # defaults to 'os.cpu_count()' - # "task_limit": "2", + "task_limit": "1", # Providing the option to change the size of the screenshot thumbnails where supported. 
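A minimal sketch of how a string-valued "task_limit" like the one above can be folded into a worker-pool size, mirroring the clamping the multiprocessing patches apply elsewhere (never more workers than tasks, default to all cores). The function name is illustrative, and the config shape is assumed from this example file:

    import os

    def resolve_pool_size(config, num_tasks):
        # Config values are stored as strings; treat missing or invalid as "no limit".
        try:
            limit = int(config['DEFAULT'].get('task_limit', '0'))
        except (TypeError, ValueError):
            limit = 0
        if limit <= 0:
            limit = os.cpu_count() or 1
        # Clamp: at least one worker, never more workers than tasks.
        return max(1, min(num_tasks, limit))

    # Usage with multiprocessing.Pool, assuming `tasks` holds capture/optimize jobs:
    # from multiprocessing import Pool
    # with Pool(processes=resolve_pool_size(config, len(tasks))) as pool:
    #     results = list(pool.imap_unordered(worker, tasks))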
# Default is 350, ie [img=350] From ffa7bd98aee2fce49dc17f1bdf82558238fd8ffd Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 4 Dec 2024 19:10:33 +1000 Subject: [PATCH 580/741] type override cleanup --- src/prep.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index b25a8daa2..2965c29ee 100644 --- a/src/prep.py +++ b/src/prep.py @@ -659,8 +659,7 @@ async def process_tracker(tracker_name, meta): s.terminate() meta['tmdb'] = meta.get('tmdb_manual', None) - if meta.get('type', None) is None: - meta['type'] = self.get_type(video, meta['scene'], meta['is_disc'], meta) + meta['type'] = self.get_type(video, meta['scene'], meta['is_disc'], meta) if meta.get('category', None) is None: meta['category'] = self.get_cat(video) else: @@ -3224,6 +3223,7 @@ async def get_name(self, meta): console.print(f"--category [yellow]{meta['category']}") console.print(f"--type [yellow]{meta['type']}") console.print(f"--source [yellow]{meta['source']}") + console.print("[bold green]If you specified type, try also specifying source") exit() name_notag = name From b06178c1bfac215729697b9594c386975933a945 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 4 Dec 2024 19:39:46 +1000 Subject: [PATCH 581/741] Set imgbb uploading limit --- src/prep.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index 2965c29ee..43a7916a4 100644 --- a/src/prep.py +++ b/src/prep.py @@ -3005,7 +3005,7 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i # Define host-specific limits host_limits = { - "imgbox": 6, + "imgbb": 1, # Other hosts can use the default pool size } default_pool_size = os.cpu_count() From f17af1c87593c0f6dfbaeed2d99c73f23d169c66 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 4 Dec 2024 20:09:36 +1000 Subject: [PATCH 582/741] Ugly abort if image host fail Prevent upload with no images --- src/prep.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/prep.py b/src/prep.py index 43a7916a4..c00f1243e 100644 --- a/src/prep.py +++ b/src/prep.py @@ -3033,6 +3033,10 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i else: console.print(f"[yellow]Failed to upload: {result.get('reason', 'Unknown error')}") + if len(successfully_uploaded) < 3 and not retry_mode and img_host == initial_img_host and not using_custom_img_list: + console.print("[red]Less than 3 images were successfully uploaded. 
Aborting upload process.") + return + new_images = [] for upload in successfully_uploaded: raw_url = upload['raw_url'] From a05338bbc42e32440cf024df7c6a4d584d8912ab Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 4 Dec 2024 21:49:00 +1000 Subject: [PATCH 583/741] Don't set ptp id and imdb if not keeping --- src/prep.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/src/prep.py b/src/prep.py index c00f1243e..c6f9869e6 100644 --- a/src/prep.py +++ b/src/prep.py @@ -291,27 +291,23 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met found_match = False elif tracker_name == "PTP": - imdb_id = None # Ensure imdb_id is defined - # Check if the PTP ID is already in meta + imdb_id = None if meta.get('ptp') is None: - # No PTP ID in meta, search by search term imdb_id, ptp_torrent_id, ptp_torrent_hash = await tracker_instance.get_ptp_id_imdb(search_term, search_file_folder, meta) if ptp_torrent_id: - meta['ptp'] = ptp_torrent_id meta['imdb'] = str(imdb_id).zfill(7) if imdb_id else None - console.print(f"[green]{tracker_name} IMDb ID found: tt{meta['imdb']}[/green]") + if not meta['unattended']: if await self.prompt_user_for_confirmation("Do you want to use this ID data from PTP?"): found_match = True - - # Retrieve PTP description and image list + meta['ptp'] = ptp_torrent_id ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(ptp_torrent_id, meta, meta.get('is_disc', False)) meta['description'] = ptp_desc with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: description.write((ptp_desc or "") + "\n") - if not meta.get('image_list'): # Only handle images if image_list is not already populated + if not meta.get('image_list'): valid_images = await self.check_images_concurrently(ptp_imagelist, meta) if valid_images: meta['image_list'] = valid_images @@ -319,6 +315,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met else: found_match = False + meta['imdb'] = None else: found_match = True @@ -328,7 +325,7 @@ async def update_metadata_from_tracker(self, tracker_name, tracker_instance, met description.write((ptp_desc or "") + "\n") meta['saved_description'] = True - if not meta.get('image_list'): # Only handle images if image_list is not already populated + if not meta.get('image_list'): valid_images = await self.check_images_concurrently(ptp_imagelist, meta) if valid_images: meta['image_list'] = valid_images From 3975c92c73976d1edaf43bbba6dcb86d375c2734 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 4 Dec 2024 22:10:35 +1000 Subject: [PATCH 584/741] Fix image hosts --- src/prep.py | 121 ++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 88 insertions(+), 33 deletions(-) diff --git a/src/prep.py b/src/prep.py index c6f9869e6..c140dea8a 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2823,7 +2823,7 @@ def create_base_from_existing_torrent(self, torrentpath, base_dir, uuid): def upload_image_task(self, args): image, img_host, config, meta = args try: - timeout = 10 # Default timeout + timeout = 40 # Default timeout img_url, raw_url, web_url = None, None, None if img_host == "imgbox": @@ -2870,45 +2870,97 @@ def upload_image_task(self, args): elif img_host == "imgbb": url = "https://api.imgbb.com/1/upload" - data = { - 'key': config['DEFAULT']['imgbb_api'], - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - response = requests.post(url, data=data, timeout=timeout) - response_data = 
response.json() - img_url = response['data'].get('medium', response['data']['image'])['url'] - raw_url = response_data['data']['image']['url'] - web_url = response_data['data']['url_viewer'] + try: + data = { + 'key': config['DEFAULT']['imgbb_api'], + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + response = requests.post(url, data=data, timeout=timeout) + if meta['debug']: + console.print(f"[yellow]Response status code: {response.status_code}") + console.print(f"[yellow]Response content: {response.content.decode('utf-8')}") + + response_data = response.json() + if response_data.get('status_code') != 200: + console.print("[yellow]imgbb failed, trying next image host") + return {'status': 'failed', 'reason': 'imgbb upload failed'} + + img_url = response_data['data']['image']['url'] + raw_url = response_data['data']['image']['url'] + web_url = response_data['data']['url_viewer'] + if meta['debug']: + console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") + + except requests.exceptions.Timeout: + console.print("[red]Request timed out. The server took too long to respond.") + return {'status': 'failed', 'reason': 'Request timed out'} + except requests.exceptions.RequestException as e: + console.print(f"[red]Request failed with error: {e}") + return {'status': 'failed', 'reason': str(e)} elif img_host == "ptscreens": url = "https://ptscreens.com/api/1/upload" - data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - headers = { - 'X-API-Key': config['DEFAULT']['ptscreens_api'] - } - response = requests.post(url, data=data, headers=headers, timeout=timeout) - response_data = response.json() - if response_data.get('status_code') == 200: - img_url = response['data'].get('medium', response['data']['image'])['url'] - raw_url = response['data']['image']['url'] - web_url = response['data']['url_viewer'] + try: + data = { + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + headers = { + 'X-API-Key': config['DEFAULT']['ptscreens_api'] + } + response = requests.post(url, data=data, headers=headers, timeout=timeout) + if meta['debug']: + console.print(f"[yellow]Response status code: {response.status_code}") + console.print(f"[yellow]Response content: {response.content.decode('utf-8')}") + + response_data = response.json() + if response_data.get('status_code') != 200: + console.print("[yellow]ptscreens failed, trying next image host") + return {'status': 'failed', 'reason': 'ptscreens upload failed'} + + img_url = response_data['data']['image']['url'] + raw_url = response_data['data']['image']['url'] + web_url = response_data['data']['url_viewer'] + if meta['debug']: + console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") + + except requests.exceptions.Timeout: + console.print("[red]Request timed out. 
The server took too long to respond.") + return {'status': 'failed', 'reason': 'Request timed out'} + except requests.exceptions.RequestException as e: + console.print(f"[red]Request failed with error: {e}") + return {'status': 'failed', 'reason': str(e)} elif img_host == "oeimg": url = "https://imgoe.download/api/1/upload" - data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - headers = { - 'X-API-Key': config['DEFAULT']['oeimg_api'] - } - response = requests.post(url, data=data, headers=headers, timeout=timeout) - response_data = response.json() - if response_data.get('status_code') == 200: - img_url = response['data']['image']['url'] - raw_url = response['data']['image']['url'] + try: + data = { + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + headers = { + 'X-API-Key': self.config['DEFAULT']['oeimg_api'], + } + response = requests.post(url, data=data, headers=headers, timeout=timeout) + if meta['debug']: + console.print(f"[yellow]Response status code: {response.status_code}") + console.print(f"[yellow]Response content: {response.content.decode('utf-8')}") + + response_data = response.json() + if response_data.get('status_code') != 200: + console.print("[yellow]OEimg failed, trying next image host") + return {'status': 'failed', 'reason': 'OEimg upload failed'} + + img_url = response_data['data']['image']['url'] + raw_url = response_data['data']['image']['url'] web_url = response_data['data']['url_viewer'] + if meta['debug']: + console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") + + except requests.exceptions.Timeout: + console.print("[red]Request timed out. The server took too long to respond.") + return {'status': 'failed', 'reason': 'Request timed out'} + except requests.exceptions.RequestException as e: + console.print(f"[red]Request failed with error: {e}") + return {'status': 'failed', 'reason': str(e)} elif img_host == "pixhost": url = "https://api.pixhost.to/images" @@ -3002,6 +3054,9 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i # Define host-specific limits host_limits = { + "oeimg": 1, + "ptscreens": 1, + "lensdump": 1, "imgbb": 1, # Other hosts can use the default pool size } From 4d9aa7afe137fae33aa2d7439d9e269cc2cb17a1 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 4 Dec 2024 22:11:01 +1000 Subject: [PATCH 585/741] Revert "Fix image hosts" This reverts commit 3975c92c73976d1edaf43bbba6dcb86d375c2734. 
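For reference, the shape that the reverted commit (and its re-application in PATCH 586) gives every host handler: one try/except per host, a status dict instead of an uncaught exception, and explicit timeout handling so the caller can rotate to the next host. A minimal sketch of that pattern, assuming a generic JSON API; the endpoint and response keys below are placeholders, not any specific host's real API:

    import base64
    import requests

    def upload_one(image_path, api_url, api_key, timeout=40):
        # Placeholder endpoint/field names; only the guard pattern matters.
        try:
            with open(image_path, "rb") as f:
                data = {"key": api_key,
                        "image": base64.b64encode(f.read()).decode("utf8")}
            response = requests.post(api_url, data=data, timeout=timeout)
            response_data = response.json()
            if response.status_code != 200 or not response_data.get("success"):
                # Tell the caller to try the next configured image host.
                return {"status": "failed", "reason": "upload rejected"}
            return {"status": "success",
                    "raw_url": response_data["data"]["image"]["url"],
                    "web_url": response_data["data"]["url_viewer"]}
        except requests.exceptions.Timeout:
            return {"status": "failed", "reason": "request timed out"}
        except (requests.exceptions.RequestException, ValueError) as e:
            # ValueError covers a response body that fails to parse as JSON.
            return {"status": "failed", "reason": str(e)}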
--- src/prep.py | 121 ++++++++++++++-------------------------------------- 1 file changed, 33 insertions(+), 88 deletions(-) diff --git a/src/prep.py b/src/prep.py index c140dea8a..c6f9869e6 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2823,7 +2823,7 @@ def create_base_from_existing_torrent(self, torrentpath, base_dir, uuid): def upload_image_task(self, args): image, img_host, config, meta = args try: - timeout = 40 # Default timeout + timeout = 10 # Default timeout img_url, raw_url, web_url = None, None, None if img_host == "imgbox": @@ -2870,97 +2870,45 @@ def upload_image_task(self, args): elif img_host == "imgbb": url = "https://api.imgbb.com/1/upload" - try: - data = { - 'key': config['DEFAULT']['imgbb_api'], - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - response = requests.post(url, data=data, timeout=timeout) - if meta['debug']: - console.print(f"[yellow]Response status code: {response.status_code}") - console.print(f"[yellow]Response content: {response.content.decode('utf-8')}") - - response_data = response.json() - if response_data.get('status_code') != 200: - console.print("[yellow]imgbb failed, trying next image host") - return {'status': 'failed', 'reason': 'imgbb upload failed'} - - img_url = response_data['data']['image']['url'] - raw_url = response_data['data']['image']['url'] - web_url = response_data['data']['url_viewer'] - if meta['debug']: - console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") - - except requests.exceptions.Timeout: - console.print("[red]Request timed out. The server took too long to respond.") - return {'status': 'failed', 'reason': 'Request timed out'} - except requests.exceptions.RequestException as e: - console.print(f"[red]Request failed with error: {e}") - return {'status': 'failed', 'reason': str(e)} + data = { + 'key': config['DEFAULT']['imgbb_api'], + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + response = requests.post(url, data=data, timeout=timeout) + response_data = response.json() + img_url = response['data'].get('medium', response['data']['image'])['url'] + raw_url = response_data['data']['image']['url'] + web_url = response_data['data']['url_viewer'] elif img_host == "ptscreens": url = "https://ptscreens.com/api/1/upload" - try: - data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - headers = { - 'X-API-Key': config['DEFAULT']['ptscreens_api'] - } - response = requests.post(url, data=data, headers=headers, timeout=timeout) - if meta['debug']: - console.print(f"[yellow]Response status code: {response.status_code}") - console.print(f"[yellow]Response content: {response.content.decode('utf-8')}") - - response_data = response.json() - if response_data.get('status_code') != 200: - console.print("[yellow]ptscreens failed, trying next image host") - return {'status': 'failed', 'reason': 'ptscreens upload failed'} - - img_url = response_data['data']['image']['url'] - raw_url = response_data['data']['image']['url'] - web_url = response_data['data']['url_viewer'] - if meta['debug']: - console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") - - except requests.exceptions.Timeout: - console.print("[red]Request timed out. 
The server took too long to respond.") - return {'status': 'failed', 'reason': 'Request timed out'} - except requests.exceptions.RequestException as e: - console.print(f"[red]Request failed with error: {e}") - return {'status': 'failed', 'reason': str(e)} + data = { + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + headers = { + 'X-API-Key': config['DEFAULT']['ptscreens_api'] + } + response = requests.post(url, data=data, headers=headers, timeout=timeout) + response_data = response.json() + if response_data.get('status_code') == 200: + img_url = response['data'].get('medium', response['data']['image'])['url'] + raw_url = response['data']['image']['url'] + web_url = response['data']['url_viewer'] elif img_host == "oeimg": url = "https://imgoe.download/api/1/upload" - try: - data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - headers = { - 'X-API-Key': self.config['DEFAULT']['oeimg_api'], - } - response = requests.post(url, data=data, headers=headers, timeout=timeout) - if meta['debug']: - console.print(f"[yellow]Response status code: {response.status_code}") - console.print(f"[yellow]Response content: {response.content.decode('utf-8')}") - - response_data = response.json() - if response_data.get('status_code') != 200: - console.print("[yellow]OEimg failed, trying next image host") - return {'status': 'failed', 'reason': 'OEimg upload failed'} - - img_url = response_data['data']['image']['url'] - raw_url = response_data['data']['image']['url'] + data = { + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + headers = { + 'X-API-Key': config['DEFAULT']['oeimg_api'] + } + response = requests.post(url, data=data, headers=headers, timeout=timeout) + response_data = response.json() + if response_data.get('status_code') == 200: + img_url = response['data']['image']['url'] + raw_url = response['data']['image']['url'] web_url = response_data['data']['url_viewer'] - if meta['debug']: - console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") - - except requests.exceptions.Timeout: - console.print("[red]Request timed out. 
The server took too long to respond.") - return {'status': 'failed', 'reason': 'Request timed out'} - except requests.exceptions.RequestException as e: - console.print(f"[red]Request failed with error: {e}") - return {'status': 'failed', 'reason': str(e)} elif img_host == "pixhost": url = "https://api.pixhost.to/images" @@ -3054,9 +3002,6 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i # Define host-specific limits host_limits = { - "oeimg": 1, - "ptscreens": 1, - "lensdump": 1, "imgbb": 1, # Other hosts can use the default pool size } From 62bc6f84afcd9eb310af1c9ed537815b3b7f9c72 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 4 Dec 2024 22:11:42 +1000 Subject: [PATCH 586/741] Fix image hosts --- src/prep.py | 121 ++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 88 insertions(+), 33 deletions(-) diff --git a/src/prep.py b/src/prep.py index c6f9869e6..c140dea8a 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2823,7 +2823,7 @@ def create_base_from_existing_torrent(self, torrentpath, base_dir, uuid): def upload_image_task(self, args): image, img_host, config, meta = args try: - timeout = 10 # Default timeout + timeout = 40 # Default timeout img_url, raw_url, web_url = None, None, None if img_host == "imgbox": @@ -2870,45 +2870,97 @@ def upload_image_task(self, args): elif img_host == "imgbb": url = "https://api.imgbb.com/1/upload" - data = { - 'key': config['DEFAULT']['imgbb_api'], - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - response = requests.post(url, data=data, timeout=timeout) - response_data = response.json() - img_url = response['data'].get('medium', response['data']['image'])['url'] - raw_url = response_data['data']['image']['url'] - web_url = response_data['data']['url_viewer'] + try: + data = { + 'key': config['DEFAULT']['imgbb_api'], + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + response = requests.post(url, data=data, timeout=timeout) + if meta['debug']: + console.print(f"[yellow]Response status code: {response.status_code}") + console.print(f"[yellow]Response content: {response.content.decode('utf-8')}") + + response_data = response.json() + if response_data.get('status_code') != 200: + console.print("[yellow]imgbb failed, trying next image host") + return {'status': 'failed', 'reason': 'imgbb upload failed'} + + img_url = response_data['data']['image']['url'] + raw_url = response_data['data']['image']['url'] + web_url = response_data['data']['url_viewer'] + if meta['debug']: + console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") + + except requests.exceptions.Timeout: + console.print("[red]Request timed out. 
The server took too long to respond.") + return {'status': 'failed', 'reason': 'Request timed out'} + except requests.exceptions.RequestException as e: + console.print(f"[red]Request failed with error: {e}") + return {'status': 'failed', 'reason': str(e)} elif img_host == "ptscreens": url = "https://ptscreens.com/api/1/upload" - data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - headers = { - 'X-API-Key': config['DEFAULT']['ptscreens_api'] - } - response = requests.post(url, data=data, headers=headers, timeout=timeout) - response_data = response.json() - if response_data.get('status_code') == 200: - img_url = response['data'].get('medium', response['data']['image'])['url'] - raw_url = response['data']['image']['url'] - web_url = response['data']['url_viewer'] + try: + data = { + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + headers = { + 'X-API-Key': config['DEFAULT']['ptscreens_api'] + } + response = requests.post(url, data=data, headers=headers, timeout=timeout) + if meta['debug']: + console.print(f"[yellow]Response status code: {response.status_code}") + console.print(f"[yellow]Response content: {response.content.decode('utf-8')}") + + response_data = response.json() + if response_data.get('status_code') != 200: + console.print("[yellow]ptscreens failed, trying next image host") + return {'status': 'failed', 'reason': 'ptscreens upload failed'} + + img_url = response_data['data']['image']['url'] + raw_url = response_data['data']['image']['url'] + web_url = response_data['data']['url_viewer'] + if meta['debug']: + console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") + + except requests.exceptions.Timeout: + console.print("[red]Request timed out. The server took too long to respond.") + return {'status': 'failed', 'reason': 'Request timed out'} + except requests.exceptions.RequestException as e: + console.print(f"[red]Request failed with error: {e}") + return {'status': 'failed', 'reason': str(e)} elif img_host == "oeimg": url = "https://imgoe.download/api/1/upload" - data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - headers = { - 'X-API-Key': config['DEFAULT']['oeimg_api'] - } - response = requests.post(url, data=data, headers=headers, timeout=timeout) - response_data = response.json() - if response_data.get('status_code') == 200: - img_url = response['data']['image']['url'] - raw_url = response['data']['image']['url'] + try: + data = { + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + headers = { + 'X-API-Key': self.config['DEFAULT']['oeimg_api'], + } + response = requests.post(url, data=data, headers=headers, timeout=timeout) + if meta['debug']: + console.print(f"[yellow]Response status code: {response.status_code}") + console.print(f"[yellow]Response content: {response.content.decode('utf-8')}") + + response_data = response.json() + if response_data.get('status_code') != 200: + console.print("[yellow]OEimg failed, trying next image host") + return {'status': 'failed', 'reason': 'OEimg upload failed'} + + img_url = response_data['data']['image']['url'] + raw_url = response_data['data']['image']['url'] web_url = response_data['data']['url_viewer'] + if meta['debug']: + console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") + + except requests.exceptions.Timeout: + console.print("[red]Request timed out. 
The server took too long to respond.") + return {'status': 'failed', 'reason': 'Request timed out'} + except requests.exceptions.RequestException as e: + console.print(f"[red]Request failed with error: {e}") + return {'status': 'failed', 'reason': str(e)} elif img_host == "pixhost": url = "https://api.pixhost.to/images" @@ -3002,6 +3054,9 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i # Define host-specific limits host_limits = { + "oeimg": 1, + "ptscreens": 1, + "lensdump": 1, "imgbb": 1, # Other hosts can use the default pool size } From a439c1d17b012bbe197372e4e5a22716cef25e0e Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 5 Dec 2024 08:34:03 +1000 Subject: [PATCH 587/741] ptscreens update Based on https://ptscreens.com/api-v1 4 out of 9 success with a 60 second timeout. Still of the opinion of host issues. https://github.com/Audionut/Upload-Assistant/issues/156 --- src/prep.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/prep.py b/src/prep.py index c140dea8a..543e938d3 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2823,7 +2823,7 @@ def create_base_from_existing_torrent(self, torrentpath, base_dir, uuid): def upload_image_task(self, args): image, img_host, config, meta = args try: - timeout = 40 # Default timeout + timeout = 60 # Default timeout img_url, raw_url, web_url = None, None, None if img_host == "imgbox": @@ -2901,13 +2901,13 @@ def upload_image_task(self, args): elif img_host == "ptscreens": url = "https://ptscreens.com/api/1/upload" try: - data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + files = { + 'source': ('file-upload[0]', open(image, 'rb')), } headers = { 'X-API-Key': config['DEFAULT']['ptscreens_api'] } - response = requests.post(url, data=data, headers=headers, timeout=timeout) + response = requests.post(url, headers=headers, files=files, timeout=timeout) if meta['debug']: console.print(f"[yellow]Response status code: {response.status_code}") console.print(f"[yellow]Response content: {response.content.decode('utf-8')}") @@ -2917,9 +2917,9 @@ def upload_image_task(self, args): console.print("[yellow]ptscreens failed, trying next image host") return {'status': 'failed', 'reason': 'ptscreens upload failed'} - img_url = response_data['data']['image']['url'] - raw_url = response_data['data']['image']['url'] - web_url = response_data['data']['url_viewer'] + img_url = response_data['image']['thumb']['url'] + raw_url = response_data['image']['url'] + web_url = response_data['image']['url_viewer'] if meta['debug']: console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") From 87522b1ddcca06046bb052d05afb9497be1cff4f Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 5 Dec 2024 09:15:32 +1000 Subject: [PATCH 588/741] fix MTV images when meta not present --- src/trackers/MTV.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 84f402b46..46613ef69 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -283,7 +283,7 @@ async def edit_desc(self, meta): elif mi_dump: desc.write("[mediainfo]" + mi_dump + "[/mediainfo]\n\n") - if meta['mtv_images_key']: + if 'mtv_images_key' in meta: images = meta['mtv_images_key'] else: images = meta['image_list'] From 3bfe288a0efdc218980b9e85ae88a7e82c6ea21c Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 5 Dec 2024 09:37:46 +1000 Subject: [PATCH 589/741] lensdump images --- src/prep.py | 4 ++-- 1 file changed, 2 insertions(+), 2 
deletions(-) diff --git a/src/prep.py b/src/prep.py index 543e938d3..465aca09f 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2989,8 +2989,8 @@ def upload_image_task(self, args): response = requests.post(url, data=data, headers=headers, timeout=timeout) response_data = response.json() if response_data.get('status_code') == 200: - img_url = response['data'].get('medium', response['data']['image'])['url'] - raw_url = response['data']['image']['url'] + img_url = response_data['data']['image']['url'] + raw_url = response_data['data']['image']['url'] web_url = response_data['data']['url_viewer'] if img_url and raw_url and web_url: From 7fc488cf9d2ea47877900d9c5129fbb3ad2b076a Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 5 Dec 2024 10:33:47 +1000 Subject: [PATCH 590/741] BHD image rehosting fixes https://github.com/Audionut/Upload-Assistant/issues/160 --- src/trackers/BHD.py | 127 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 126 insertions(+), 1 deletion(-) diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index 206913162..f8a724dc4 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -8,6 +8,8 @@ import platform import hashlib import bencodepy +import glob +import multiprocessing from src.trackers.COMMON import COMMON from src.console import console @@ -31,6 +33,36 @@ def __init__(self, config): pass async def upload(self, meta, disctype): + common = COMMON(config=self.config) + await self.upload_with_retry(meta, common) + + async def upload_with_retry(self, meta, common, img_host_index=1): + approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb'] + images_reuploaded = False + + if all(any(host in image['raw_url'] for host in approved_image_hosts) for image in meta['image_list']): + console.print("[green]Images are already hosted on an approved image host. Skipping re-upload.") + image_list = meta['image_list'] + + else: + images_reuploaded = False + while img_host_index <= len(approved_image_hosts): + image_list, retry_mode, images_reuploaded = await self.handle_image_upload(meta, img_host_index, approved_image_hosts) + + if retry_mode: + console.print(f"[yellow]Switching to the next image host. Current index: {img_host_index}") + img_host_index += 1 + continue + + new_images_key = 'bhd_images_key' + if image_list is not None: + image_list = meta[new_images_key] + break + + if image_list is None: + console.print("[red]All image hosts failed. 
Please check your configuration.") + return + common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta['category']) @@ -121,6 +153,96 @@ async def upload(self, meta, disctype): console.print("[cyan]Request Data:") console.print(data) + async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts=None, file=None): + if approved_image_hosts is None: + approved_image_hosts = ['ptpimg', 'imgbox'] + + retry_mode = False + images_reuploaded = False + new_images_key = 'bhd_images_key' + discs = meta.get('discs', []) # noqa F841 + filelist = meta.get('video', []) + filename = meta['filename'] + + if isinstance(filelist, str): + filelist = [filelist] + + multi_screens = int(self.config['DEFAULT'].get('screens', 6)) + base_dir = meta['base_dir'] + folder_id = meta['uuid'] + from src.prep import Prep + prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) + meta[new_images_key] = [] + + screenshots_dir = os.path.join(base_dir, 'tmp', folder_id) + all_screenshots = [] + + for i, file in enumerate(filelist): + filename_pattern = f"{filename}*.png" + existing_screens = glob.glob(os.path.join(screenshots_dir, filename_pattern)) + if len(existing_screens) < multi_screens: + if meta.get('debug'): + console.print("[yellow]The image host of exsting images is not supported.") + console.print(f"[yellow]Insufficient screenshots found: generating {multi_screens} screenshots.") + + if meta['type'] == "BDMV": + s = multiprocessing.Process( + target=prep.disc_screenshots, + args=(f"FILE_{img_host_index}", meta['bdinfo'], folder_id, base_dir, + meta.get('vapoursynth', False), [], meta.get('ffdebug', False), img_host_index) + ) + elif meta['type'] == "DVD": + s = multiprocessing.Process( + target=prep.dvd_screenshots, + args=(meta, img_host_index, img_host_index) + ) + else: + s = multiprocessing.Process( + target=prep.screenshots, + args=(file, f"{filename}", meta['uuid'], base_dir, + meta, multi_screens + 1, True, None) + ) + + s.start() + while s.is_alive(): + await asyncio.sleep(1) + + existing_screens = glob.glob(os.path.join(screenshots_dir, filename_pattern)) + + all_screenshots.extend(existing_screens) + + if all_screenshots: + while True: + current_img_host_key = f'img_host_{img_host_index}' + current_img_host = self.config.get('DEFAULT', {}).get(current_img_host_key) + + if not current_img_host: + console.print("[red]No more image hosts left to try.") + raise Exception("No valid image host found in the config.") + + if current_img_host not in approved_image_hosts: + console.print(f"[red]Your preferred image host '{current_img_host}' is not supported at BHD, trying next host.") + retry_mode = True + images_reuploaded = True + img_host_index += 1 + continue + else: + meta['imghost'] = current_img_host + console.print(f"[green]Uploading to approved host '{current_img_host}'.") + break + uploaded_images, _ = prep.upload_screens(meta, multi_screens, img_host_index, 0, multi_screens, all_screenshots, {new_images_key: meta[new_images_key]}, retry_mode) + + if uploaded_images: + meta[new_images_key] = uploaded_images + if meta['debug']: + for image in uploaded_images: + console.print(f"[debug] Response in upload_image_task: {image['img_url']}, {image['raw_url']}, {image['web_url']}") + if not all(any(x in image['raw_url'] for x in approved_image_hosts) for image in meta.get(new_images_key, [])): + console.print("[red]Unsupported image host detected, please use one of the approved image 
hosts") + return meta[new_images_key], True, images_reuploaded # Trigger retry_mode if switching hosts + + return meta[new_images_key], False, images_reuploaded # Return retry_mode and images_reuploaded + async def get_cat_id(self, category_name): category_id = { 'MOVIE': '1', @@ -203,7 +325,10 @@ async def edit_desc(self, meta): desc.write(f"[spoiler={os.path.basename(each['largest_evo'])}][code][{each['evo_mi']}[/code][/spoiler]\n") desc.write("\n") desc.write(base.replace("[img]", "[img width=300]")) - images = meta['image_list'] + if 'bhd_images_key' in meta: + images = meta['bhd_images_key'] + else: + images = meta['image_list'] if len(images) > 0: desc.write("[center]") for each in range(len(images[:int(meta['screens'])])): From 96d058e99e5f13dbfd715a864925f06992914443 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 5 Dec 2024 10:40:42 +1000 Subject: [PATCH 591/741] add pixhost to bhd approved list --- src/trackers/BHD.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index f8a724dc4..ff1ae42ba 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -37,7 +37,7 @@ async def upload(self, meta, disctype): await self.upload_with_retry(meta, common) async def upload_with_retry(self, meta, common, img_host_index=1): - approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb'] + approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb', 'pixhost'] images_reuploaded = False if all(any(host in image['raw_url'] for host in approved_image_hosts) for image in meta['image_list']): From d95cba474e51a57dbf052b272724f8205a0ece21 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 5 Dec 2024 17:52:14 +1000 Subject: [PATCH 592/741] MTV - remove new line after images --- src/trackers/MTV.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 46613ef69..60b2dca64 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -291,7 +291,7 @@ async def edit_desc(self, meta): for image in images: raw_url = image['raw_url'] img_url = image['img_url'] - desc.write(f"[url={raw_url}][img=250]{img_url}[/img][/url]\n") + desc.write(f"[url={raw_url}][img=250]{img_url}[/img][/url]") desc.write(f"\n\n{base}") desc.close() From af6aff2a5a93e1016d65a62b645617ec2704df68 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 5 Dec 2024 17:53:22 +1000 Subject: [PATCH 593/741] Duping - search name not uuid Fixes UHD in particular. 
--- src/trackers/COMMON.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py
index 693bca215..1fd49ef6a 100644
--- a/src/trackers/COMMON.py
+++ b/src/trackers/COMMON.py
@@ -646,25 +646,25 @@ async def filter_dupes(self, dupes, meta):
         },
         {
             "key": "remux",
-            "uuid_flag": "remux" in meta.get('uuid', '').lower(),
+            "uuid_flag": "remux" in meta.get('name', '').lower(),
             "condition": lambda each: "remux" in each.lower(),
             "exclude_msg": lambda each: f"Excluding result due to 'remux' mismatch: {each}"
         },
         {
             "key": "uhd",
-            "uuid_flag": "uhd" in meta.get('uuid', '').lower(),
+            "uuid_flag": "uhd" in meta.get('name', '').lower(),
             "condition": lambda each: "uhd" in each.lower(),
             "exclude_msg": lambda each: f"Excluding result due to 'UHD' mismatch: {each}"
         },
         {
             "key": "webdl",
-            "uuid_flag": "webdl" in meta.get('uuid', '').lower(),
-            "condition": lambda each: "webdl" in each.lower(),
+            "uuid_flag": "web-dl" in meta.get('name', '').lower(),
+            "condition": lambda each: "webdl" in each.lower() or "web-dl" in each.lower(),
            "exclude_msg": lambda each: f"Excluding result due to 'WEBDL' mismatch: {each}"
         },
         {
             "key": "hdtv",
-            "uuid_flag": "hdtv" in meta.get('uuid', '').lower(),
+            "uuid_flag": "hdtv" in meta.get('name', '').lower(),
             "condition": lambda each: "hdtv" in each.lower(),
             "exclude_msg": lambda each: f"Excluding result due to 'HDTV' mismatch: {each}"
         },

From 392c86ac35df6ccffbf9084f7e0d60cbd222db7d Mon Sep 17 00:00:00 2001
From: Hielito <36553765+Hielito2@users.noreply.github.com>
Date: Thu, 5 Dec 2024 21:10:43 -0600
Subject: [PATCH 594/741] Update LT.py
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

1.- Fix: append “[SUBS]” to the name when the release has no 'es' or 'es-419' audio track.
2.- Add "Hybrid" to the name when the file name contains it.
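In prose, the rule the diff below implements: scan the mediainfo audio tracks (the JSON track list opens with General and Video entries, hence the [2:] slice the code uses), and when no track is Spanish ('es' or 'es-419'), append “[SUBS]” — placed before the group tag when one exists. A sketch of that decision in isolation, assuming the same mediainfo layout; the helper names are illustrative only:

    def needs_subs_label(mediainfo: dict) -> bool:
        # Audio tracks follow the General/Video entries in MediaInfo JSON.
        audio = [t for t in mediainfo["media"]["track"][2:]
                 if t.get("@type") == "Audio"]
        return not any(t.get("Language") in {"es", "es-419"} for t in audio)

    def apply_subs_label(name: str, tag: str, mediainfo: dict) -> str:
        if not needs_subs_label(mediainfo):
            return name
        if not tag:
            return name + " [SUBS]"
        # Keep the group tag last: "Name [SUBS]-GRP", not "Name-GRP [SUBS]".
        return name.replace(tag, f" [SUBS]{tag}")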
--- src/trackers/LT.py | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/src/trackers/LT.py b/src/trackers/LT.py index 4cd5e7c94..30734d599 100644 --- a/src/trackers/LT.py +++ b/src/trackers/LT.py @@ -78,17 +78,28 @@ async def get_res_id(self, resolution): return resolution_id async def edit_name(self, meta): - lt_name = meta['name'].replace('Dubbed', '').replace('Dual-Audio', '').replace(' ', ' ').strip() - # Check if audio Spanish exists, if not append [SUBS] at the end - if meta['type'] != 'DISC': # DISC don't have mediainfo - audio_language_list = meta['mediainfo']['media']['track'][0].get('Audio_Language_List', '') - if 'Spanish' not in audio_language_list and '[SUBS]' not in lt_name: - if not meta['tag']: - lt_name += " [SUBS]" + lt_name = meta['name'].replace('Dual-Audio', '').replace(' ', ' ') + if meta['type'] != 'DISC': # DISC don't have mediainfo + #Check if is HYBRID (Copied from BLU.py) + if 'hybrid' in meta.get('uuid').lower(): + if "repack" in meta.get('uuid').lower(): + lt_name = lt_name.replace('REPACK', 'Hybrid REPACK') else: - lt_name = lt_name.replace(meta['tag'], f" [SUBS]{meta['tag']}") - return lt_name - + lt_name = lt_name.replace(meta['resolution'], f"Hybrid {meta['resolution']}") + # Check if audio Spanish exists, if not append [SUBS] at the end + audios = [ + audio for audio in meta['mediainfo']['media']['track'][2:] + if audio.get('@type') == 'Audio' + ] + for audio in audios: + if audio.get('Language') in {'es-419', 'es'}: + return lt_name + + if not meta.get('tag'): + return lt_name + " [SUBS]" + + return lt_name.replace(meta['tag'], f" [SUBS]{meta['tag']}") + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) @@ -103,7 +114,6 @@ async def upload(self, meta, disctype): anon = 0 else: anon = 1 - if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() From 3b6aad3cdabb64e007ee73d355392b4b9abb719f Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 6 Dec 2024 15:46:08 +1000 Subject: [PATCH 595/741] Fix image host switching --- src/prep.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/src/prep.py b/src/prep.py index 465aca09f..9785040d5 100644 --- a/src/prep.py +++ b/src/prep.py @@ -3052,13 +3052,11 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i upload_tasks = [(image, img_host, self.config, meta) for image in image_glob[:images_needed]] - # Define host-specific limits host_limits = { "oeimg": 1, "ptscreens": 1, "lensdump": 1, "imgbb": 1, - # Other hosts can use the default pool size } default_pool_size = os.cpu_count() pool_size = host_limits.get(img_host, default_pool_size) @@ -3085,9 +3083,15 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i else: console.print(f"[yellow]Failed to upload: {result.get('reason', 'Unknown error')}") - if len(successfully_uploaded) < 3 and not retry_mode and img_host == initial_img_host and not using_custom_img_list: - console.print("[red]Less than 3 images were successfully uploaded. 
Aborting upload process.") - return + if len(successfully_uploaded) < meta.get('cutoff') and not retry_mode and img_host == initial_img_host and not using_custom_img_list: + img_host_num += 1 + if f'img_host_{img_host_num}' in self.config['DEFAULT']: + meta['imghost'] = self.config['DEFAULT'][f'img_host_{img_host_num}'] + console.print(f"[cyan]Switching to the next image host: {meta['imghost']}") + return self.upload_screens(meta, screens, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=True) + else: + console.print("[red]No more image hosts available. Aborting upload process.") + return meta['image_list'], len(meta['image_list']) new_images = [] for upload in successfully_uploaded: From 1324a2b4dd877105f064fafb659eb4990fe8e11c Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 6 Dec 2024 16:45:07 +1000 Subject: [PATCH 596/741] fix for imgbb hopefully fixes https://github.com/Audionut/Upload-Assistant/issues/156 based on https://api.imgbb.com/ --- src/prep.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index 9785040d5..a335d844f 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2871,17 +2871,22 @@ def upload_image_task(self, args): elif img_host == "imgbb": url = "https://api.imgbb.com/1/upload" try: + with open(image, "rb") as img_file: + encoded_image = base64.b64encode(img_file.read()).decode('utf8') + data = { 'key': config['DEFAULT']['imgbb_api'], - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + 'image': encoded_image, } + response = requests.post(url, data=data, timeout=timeout) + if meta['debug']: console.print(f"[yellow]Response status code: {response.status_code}") console.print(f"[yellow]Response content: {response.content.decode('utf-8')}") response_data = response.json() - if response_data.get('status_code') != 200: + if response.status_code != 200 or not response_data.get('success'): console.print("[yellow]imgbb failed, trying next image host") return {'status': 'failed', 'reason': 'imgbb upload failed'} @@ -2891,9 +2896,16 @@ def upload_image_task(self, args): if meta['debug']: console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") + return {'status': 'success', 'img_url': img_url, 'raw_url': raw_url, 'web_url': web_url} + except requests.exceptions.Timeout: console.print("[red]Request timed out. 
The server took too long to respond.") return {'status': 'failed', 'reason': 'Request timed out'} + + except ValueError as e: # JSON decoding error + console.print(f"[red]Invalid JSON response: {e}") + return {'status': 'failed', 'reason': 'Invalid JSON response'} + except requests.exceptions.RequestException as e: console.print(f"[red]Request failed with error: {e}") return {'status': 'failed', 'reason': str(e)} From 946ca64f8916b42a45c3bdd2936c13626c5f7d34 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 6 Dec 2024 16:56:53 +1000 Subject: [PATCH 597/741] imgbb polish Doesn't need limiting here --- src/prep.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index a335d844f..2a444c13c 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2890,7 +2890,7 @@ def upload_image_task(self, args): console.print("[yellow]imgbb failed, trying next image host") return {'status': 'failed', 'reason': 'imgbb upload failed'} - img_url = response_data['data']['image']['url'] + img_url = response_data['data']['medium']['url'] raw_url = response_data['data']['image']['url'] web_url = response_data['data']['url_viewer'] if meta['debug']: @@ -3068,7 +3068,6 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i "oeimg": 1, "ptscreens": 1, "lensdump": 1, - "imgbb": 1, } default_pool_size = os.cpu_count() pool_size = host_limits.get(img_host, default_pool_size) From a978b1e429d5fda45628802cbc6d230d7b5118d2 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 6 Dec 2024 19:11:37 +1000 Subject: [PATCH 598/741] ULCX - allow DVD --- src/trackers/ULCX.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/trackers/ULCX.py b/src/trackers/ULCX.py index b9cdacca8..67f13912a 100644 --- a/src/trackers/ULCX.py +++ b/src/trackers/ULCX.py @@ -44,9 +44,10 @@ async def get_type_id(self, type): }.get(type, '0') return type_id - async def get_res_id(self, resolution): - if resolution not in ['8640p', '4320p', '2160p', '1440p', '1080p', '1080i', '720p']: - return None + async def get_res_id(self, resolution, type): + if type not in ['DISC']: + if resolution not in ['8640p', '4320p', '2160p', '1440p', '1080p', '1080i', '720p']: + return None resolution_id = { '8640p': '10', '4320p': '1', @@ -67,7 +68,7 @@ async def upload(self, meta, disctype): await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta['category']) type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) + resolution_id = await self.get_res_id(meta['resolution'], meta['type']) if resolution_id is None: console.print("Resolution is below 720p; skipping.") return @@ -164,7 +165,7 @@ async def search_existing(self, meta, disctype): 'tmdbId': meta['tmdb'], 'categories[]': await self.get_cat_id(meta['category']), 'types[]': await self.get_type_id(meta['type']), - 'resolutions[]': await self.get_res_id(meta['resolution']), + 'resolutions[]': await self.get_res_id(meta['resolution'], meta['type']), 'name': "" } if meta.get('edition', "") != "": From c46d34ccfb2033e84d774da18e48c55f3de9f258 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 6 Dec 2024 21:10:36 +1000 Subject: [PATCH 599/741] Dupe checking -repack fix --- src/trackers/COMMON.py | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 1fd49ef6a..06a8112ef 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -636,12 
+636,13 @@ async def filter_dupes(self, dupes, meta):
         target_season = meta.get("season")
         target_episode = meta.get("episode")
         target_resolution = meta.get("resolution")
+        tag = meta.get("tag").lower()

         attribute_checks = [
             {
                 "key": "repack",
                 "uuid_flag": has_repack_in_uuid,
-                "condition": lambda each: meta['tag'] in each and has_repack_in_uuid and "repack" not in each.lower(),
+                "condition": lambda each: meta['tag'].lower() in each and has_repack_in_uuid and "repack" not in each.lower(),
                 "exclude_msg": lambda each: f"Excluding result because it lacks 'repack' and matches tag '{meta['tag']}': {each}"
             },
             {
@@ -687,6 +688,12 @@ def process_exclusion(each):
             console.log(f"[debug] Normalized dupe: {normalized}")
             console.log(f"[debug] File HDR terms: {file_hdr}")
             console.log(f"[debug] Target HDR terms: {target_hdr}")
+            console.log(f"[debug] TAG: {tag}")
+            console.log("[debug] Evaluating repack condition:")
+            console.log(f"  has_repack_in_uuid: {has_repack_in_uuid}")
+            console.log(f"  'repack' in each.lower(): {'repack' in each.lower()}")
+            console.log(f"[debug] meta['uuid']: {meta.get('uuid', '')}")
+            console.log(f"[debug] meta['tag']: {meta.get('tag', '').lower()}")

         if has_is_disc and each.lower().endswith(".m2ts"):
             return False
@@ -700,13 +707,13 @@ def process_exclusion(each):
             return True

         for check in attribute_checks:
-            if check["key"] == "repack" and check["condition"](each):
-                if meta['debug']:
-                    console.log(f"[yellow]{check['exclude_msg'](each)}")
-                return True
+            if check["key"] == "repack":
+                if has_repack_in_uuid and "repack" not in normalized:
+                    if tag and tag in normalized:
+                        log_exclusion("missing 'repack'", each)
+                        return True
             elif check["uuid_flag"] != check["condition"](each):
-                if meta['debug']:
-                    console.log(f"[yellow]{check['exclude_msg'](each)}")
+                log_exclusion(f"{check['key']} mismatch", each)
                 return True

         if not self.has_matching_hdr(file_hdr, target_hdr, meta):
@@ -742,7 +749,7 @@ def normalize_filename(self, filename):
         Normalize a filename for easier matching.
         Retain season/episode information in the format SxxExx.
         """
-        normalized = filename.lower().replace("-", "").replace(" ", "").replace(".", "")
+        normalized = filename.lower().replace("-", " -").replace(" ", " ").replace(".", " ")

         return normalized

From 155eb0520bd3105734904584d8470b8aa7b96139 Mon Sep 17 00:00:00 2001
From: Hielito <36553765+Hielito2@users.noreply.github.com>
Date: Fri, 6 Dec 2024 11:04:23 -0600
Subject: [PATCH 600/741] Update LT.py

1.- Restore the 'Dubbed' replacement and the trailing strip().
2.- Re-indent the no-Spanish-audio [SUBS] branch as an elif.
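One caveat worth noting about the commentary filter in the hunk below: audio.get('Title').lower() raises AttributeError whenever a track has no Title field, because .get() then returns None. A defensive variant of the same filter — a sketch under that assumption, not what this commit ships:

    def spanish_non_commentary(tracks):
        return [
            a for a in tracks
            if a.get("@type") == "Audio"
            and a.get("Language") in {"es", "es-419"}
            # Default to "" so tracks without a Title cannot crash the filter.
            and "commentary" not in (a.get("Title") or "").lower()
        ]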
--- src/trackers/LT.py | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/src/trackers/LT.py b/src/trackers/LT.py index 30734d599..7ef8597e2 100644 --- a/src/trackers/LT.py +++ b/src/trackers/LT.py @@ -78,7 +78,7 @@ async def get_res_id(self, resolution): return resolution_id async def edit_name(self, meta): - lt_name = meta['name'].replace('Dual-Audio', '').replace(' ', ' ') + lt_name = meta['name'].replace('Dual-Audio', '').replace('Dubbed', '').replace(' ', ' ').strip() if meta['type'] != 'DISC': # DISC don't have mediainfo #Check if is HYBRID (Copied from BLU.py) if 'hybrid' in meta.get('uuid').lower(): @@ -86,19 +86,23 @@ async def edit_name(self, meta): lt_name = lt_name.replace('REPACK', 'Hybrid REPACK') else: lt_name = lt_name.replace(meta['resolution'], f"Hybrid {meta['resolution']}") - # Check if audio Spanish exists, if not append [SUBS] at the end + # Check if audio Spanish exists + #Get all the audios 'es-419' or 'es' audios = [ audio for audio in meta['mediainfo']['media']['track'][2:] - if audio.get('@type') == 'Audio' + if audio.get('@type') == 'Audio' + and audio.get('Language') in {'es-419', 'es'} + and "commentary" not in audio.get('Title').lower() ] - for audio in audios: - if audio.get('Language') in {'es-419', 'es'}: - return lt_name - - if not meta.get('tag'): - return lt_name + " [SUBS]" - - return lt_name.replace(meta['tag'], f" [SUBS]{meta['tag']}") + if len(audios) > 0: #If there is at least 1 audio spanish + lt_name = lt_name + #if not audio Spanish exists, add "[SUBS]" + elif not meta.get('tag'): + lt_name = lt_name + " [SUBS]" + else: + lt_name = lt_name.replace(meta['tag'], f" [SUBS]{meta['tag']}") + + return lt_name async def upload(self, meta, disctype): common = COMMON(config=self.config) From 2177b0e22cf7802a566eb2a77d69d21bf6769731 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 7 Dec 2024 05:36:36 +1000 Subject: [PATCH 601/741] Move prep import Ensures the data.config exception is caught here instead of in prep.py --- upload.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/upload.py b/upload.py index 38a69c598..4316e4fb7 100644 --- a/upload.py +++ b/upload.py @@ -3,7 +3,6 @@ import requests from src.args import Args from src.clients import Clients -from src.prep import Prep from src.trackers.COMMON import COMMON from src.trackers.HUNO import HUNO from src.trackers.BLU import BLU @@ -79,6 +78,7 @@ else: console.print(traceback.print_exc()) +from src.prep import Prep # noqa E402 client = Clients(config=config) parser = Args(config) From 99c0f4a4a1ccb0e9e7656f424a54dcc9c9321d86 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 7 Dec 2024 06:25:35 +1000 Subject: [PATCH 602/741] Dupe checking DVD fixes --- src/trackers/COMMON.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 06a8112ef..2247348b8 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -637,6 +637,7 @@ async def filter_dupes(self, dupes, meta): target_episode = meta.get("episode") target_resolution = meta.get("resolution") tag = meta.get("tag").lower() + is_dvd = meta['is_disc'] == "DVD" attribute_checks = [ { @@ -702,9 +703,10 @@ def process_exclusion(each): log_exclusion("file extension mismatch (is_disc=True)", each) return True - if target_resolution and target_resolution not in each: - log_exclusion(f"resolution '{target_resolution}' mismatch", each) - return True + if not is_dvd: + if target_resolution and 
target_resolution not in each: + log_exclusion(f"resolution '{target_resolution}' mismatch", each) + return True for check in attribute_checks: if check["key"] == "repack": @@ -716,9 +718,10 @@ def process_exclusion(each): log_exclusion(f"{check['key']} mismatch", each) return True - if not self.has_matching_hdr(file_hdr, target_hdr, meta): - log_exclusion(f"HDR mismatch: Expected {target_hdr}, got {file_hdr}", each) - return True + if not is_dvd: + if not self.has_matching_hdr(file_hdr, target_hdr, meta): + log_exclusion(f"HDR mismatch: Expected {target_hdr}, got {file_hdr}", each) + return True season_episode_match = self.is_season_episode_match(normalized, target_season, target_episode) if meta['debug']: From 37554d801dfd7238b720df856c32676835baac00 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 7 Dec 2024 06:28:48 +1000 Subject: [PATCH 603/741] Fix DVD screens retry --- src/trackers/BHD.py | 12 ++++++++---- src/trackers/MTV.py | 12 ++++++++---- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index ff1ae42ba..c34d2d4ea 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -179,22 +179,26 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts for i, file in enumerate(filelist): filename_pattern = f"{filename}*.png" - existing_screens = glob.glob(os.path.join(screenshots_dir, filename_pattern)) + dvd_screens = (glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][0]['name']}-*.png")) + if meta['is_disc'] == "DVD": + existing_screens = dvd_screens + else: + existing_screens = glob.glob(os.path.join(screenshots_dir, filename_pattern)) if len(existing_screens) < multi_screens: if meta.get('debug'): console.print("[yellow]The image host of exsting images is not supported.") console.print(f"[yellow]Insufficient screenshots found: generating {multi_screens} screenshots.") - if meta['type'] == "BDMV": + if meta['is_disc'] == "BDMV": s = multiprocessing.Process( target=prep.disc_screenshots, args=(f"FILE_{img_host_index}", meta['bdinfo'], folder_id, base_dir, meta.get('vapoursynth', False), [], meta.get('ffdebug', False), img_host_index) ) - elif meta['type'] == "DVD": + elif meta['is_disc'] == "DVD": s = multiprocessing.Process( target=prep.dvd_screenshots, - args=(meta, img_host_index, img_host_index) + args=(meta, 0, None) ) else: s = multiprocessing.Process( diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 60b2dca64..11c2d6464 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -203,22 +203,26 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts for i, file in enumerate(filelist): filename_pattern = f"{filename}*.png" - existing_screens = glob.glob(os.path.join(screenshots_dir, filename_pattern)) + dvd_screens = (glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][0]['name']}-*.png")) + if meta['is_disc'] == "DVD": + existing_screens = dvd_screens + else: + existing_screens = glob.glob(os.path.join(screenshots_dir, filename_pattern)) if len(existing_screens) < multi_screens: if meta.get('debug'): console.print("[yellow]The image host of exsting images is not supported.") console.print(f"[yellow]Insufficient screenshots found: generating {multi_screens} screenshots.") - if meta['type'] == "BDMV": + if meta['is_disc'] == "BDMV": s = multiprocessing.Process( target=prep.disc_screenshots, args=(f"FILE_{img_host_index}", meta['bdinfo'], folder_id, base_dir, meta.get('vapoursynth', False), [], meta.get('ffdebug', False), 
img_host_index) ) - elif meta['type'] == "DVD": + elif meta['is_disc'] == "DVD": s = multiprocessing.Process( target=prep.dvd_screenshots, - args=(meta, img_host_index, img_host_index) + args=(meta, 0, None) ) else: s = multiprocessing.Process( From 02ef905dd82a419501942527597f1c9f5146a66a Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 7 Dec 2024 06:42:47 +1000 Subject: [PATCH 604/741] HDT - increase debugging feedback --- src/trackers/HDT.py | 59 +++++++++++++++++++++++++++++++-------------- 1 file changed, 41 insertions(+), 18 deletions(-) diff --git a/src/trackers/HDT.py b/src/trackers/HDT.py index 01f034a05..735a7f5ff 100644 --- a/src/trackers/HDT.py +++ b/src/trackers/HDT.py @@ -171,25 +171,48 @@ async def upload(self, meta, disctype): url = "https://hd-torrents.net/upload.php" if meta['debug']: console.print(url) + console.print("Data to be sent:", style="bold blue") console.print(data) - else: - with requests.Session() as session: - cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/HDT.txt") - - session.cookies.update(await common.parseCookieFile(cookiefile)) - up = session.post(url=url, data=data, files=files) - torrentFile.close() - - # Match url to verify successful upload - search = re.search(r"download\.php\?id\=([a-z0-9]+)", up.text).group(1) - if search: - # modding existing torrent for adding to client instead of downloading torrent from site. - await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS']['HDT'].get('my_announce_url'), "") - else: - console.print(data) - console.print("\n\n") - console.print(up.text) - raise UploadException(f"Upload to HDT Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') # noqa F405 + console.print("Files being sent:", style="bold blue") + console.print(files) + with requests.Session() as session: + cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/HDT.txt") + + if meta['debug']: + console.print(f"Cookie file path: {cookiefile}") + + session.cookies.update(await common.parseCookieFile(cookiefile)) + + if meta['debug']: + console.print(f"Session cookies: {session.cookies}") + + up = session.post(url=url, data=data, files=files) + torrentFile.close() + + # Debug response + if meta['debug']: + console.print(f"Response URL: {up.url}") + console.print(f"Response Status Code: {up.status_code}") + console.print("Response Headers:", style="bold blue") + console.print(up.headers) + console.print("Response Text (truncated):", style="dim") + console.print(up.text[:500] + "...") + + # Match url to verify successful upload + search = re.search(r"download\.php\?id\=([a-z0-9]+)", up.text) + if search: + torrent_id = search.group(1) + if meta['debug']: + console.print(f"Upload Successful: Torrent ID {torrent_id}", style="bold green") + + # Modding existing torrent for adding to client instead of downloading torrent from site + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS']['HDT'].get('my_announce_url'), "") + else: + console.print(data) + console.print("Failed to find download link in response text.", style="bold red") + console.print("Response Data (full):", style="dim") + console.print(up.text) + raise UploadException(f"Upload to HDT Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') # noqa F405 return async def search_existing(self, meta, disctype): From 0eeb38b3c46c69cfb2dee895ec26e397da6019d6 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 7 Dec 2024 06:50:38 +1000 Subject: [PATCH 605/741] lint 
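The HDT verification that PATCH 604 instruments reduces to: post the form through a session carrying the site cookies, then regex the response for the download link, whose presence doubles as the upload confirmation. A minimal sketch under the same assumptions (Netscape-format cookie file, download.php?id= marker); post_and_verify is a hypothetical helper, not code from this repository:

    import re
    from http.cookiejar import MozillaCookieJar

    import requests

    def post_and_verify(url, data, files, cookiefile):
        jar = MozillaCookieJar(cookiefile)
        jar.load(ignore_discard=True, ignore_expires=True)
        with requests.Session() as session:
            session.cookies = jar
            response = session.post(url, data=data, files=files)
        match = re.search(r"download\.php\?id=([a-z0-9]+)", response.text)
        if match:
            return match.group(1)  # torrent id: upload confirmed
        # No download link in the page; surface context for debugging.
        raise RuntimeError(f"unexpected result {response.url} ({response.status_code})")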
--- src/trackers/LT.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/trackers/LT.py b/src/trackers/LT.py index 7ef8597e2..0f1cae61c 100644 --- a/src/trackers/LT.py +++ b/src/trackers/LT.py @@ -79,31 +79,31 @@ async def get_res_id(self, resolution): async def edit_name(self, meta): lt_name = meta['name'].replace('Dual-Audio', '').replace('Dubbed', '').replace(' ', ' ').strip() - if meta['type'] != 'DISC': # DISC don't have mediainfo - #Check if is HYBRID (Copied from BLU.py) + if meta['type'] != 'DISC': # DISC don't have mediainfo + # Check if is HYBRID (Copied from BLU.py) if 'hybrid' in meta.get('uuid').lower(): if "repack" in meta.get('uuid').lower(): lt_name = lt_name.replace('REPACK', 'Hybrid REPACK') else: lt_name = lt_name.replace(meta['resolution'], f"Hybrid {meta['resolution']}") - # Check if audio Spanish exists - #Get all the audios 'es-419' or 'es' + # Check if audio Spanish exists + # Get all the audios 'es-419' or 'es' audios = [ audio for audio in meta['mediainfo']['media']['track'][2:] - if audio.get('@type') == 'Audio' - and audio.get('Language') in {'es-419', 'es'} + if audio.get('@type') == 'Audio' + and audio.get('Language') in {'es-419', 'es'} and "commentary" not in audio.get('Title').lower() - ] - if len(audios) > 0: #If there is at least 1 audio spanish + ] + if len(audios) > 0: # If there is at least 1 audio spanish lt_name = lt_name - #if not audio Spanish exists, add "[SUBS]" + # if not audio Spanish exists, add "[SUBS]" elif not meta.get('tag'): lt_name = lt_name + " [SUBS]" else: lt_name = lt_name.replace(meta['tag'], f" [SUBS]{meta['tag']}") - + return lt_name - + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) From 201d30260279333ab8f84ec9caacdc70779b0fc9 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 7 Dec 2024 07:56:38 +1000 Subject: [PATCH 606/741] Allow select tvmaze id --- src/prep.py | 136 +++++++++++++++++++++++++++++++++++----------------- 1 file changed, 91 insertions(+), 45 deletions(-) diff --git a/src/prep.py b/src/prep.py index 2a444c13c..e69ae783c 100644 --- a/src/prep.py +++ b/src/prep.py @@ -662,7 +662,7 @@ async def process_tracker(tracker_name, meta): else: meta['category'] = meta['category'].upper() if meta.get('tmdb', None) is None and meta.get('imdb', None) is None: - meta['category'], meta['tmdb'], meta['imdb'] = self.get_tmdb_imdb_from_mediainfo(mi, meta['category'], meta['is_disc'], meta['tmdb'], meta['imdb']) + meta['category'], meta['tmdb'], meta['imdb'] = self.get_tmdb_imdb_from_mediainfo(mi, meta['category'], meta['is_disc'], meta['tmdb'], meta['imdb'], meta) if meta.get('tmdb', None) is None and meta.get('imdb', None) is None: meta = await self.get_tmdb_id(filename, meta['search_year'], meta, meta['category'], untouched_filename) elif meta.get('imdb', None) is not None and meta.get('tmdb_manual', None) is None: @@ -3971,54 +3971,100 @@ async def imdb_other_meta(self, meta): meta['aka'] = meta['aka'].replace(f"({meta['year']})", "").strip() return meta - async def search_tvmaze(self, filename, year, imdbID, tvdbID): - tvdbID = int(tvdbID) + async def search_tvmaze(self, filename, year, imdbID, tvdbID, meta): + if meta['debug']: + print(f"Starting search_tvmaze with filename: {filename}, year: {year}, imdbID: {imdbID}, tvdbID: {tvdbID}") + + try: + tvdbID = int(tvdbID) if tvdbID is not None else 0 + except ValueError: + print(f"Error: tvdbID is not a valid integer. 
Received: {tvdbID}") + tvdbID = 0 + tvmazeID = 0 - lookup = False - show = None + results = [] + if imdbID is None: imdbID = '0' - if tvdbID is None: - tvdbID = 0 + print(f"Processed inputs - imdbID: {imdbID}, tvdbID: {tvdbID}") + if int(tvdbID) != 0: - params = { - "thetvdb": tvdbID - } - url = "https://api.tvmaze.com/lookup/shows" - lookup = True - elif int(imdbID) != 0: - params = { - "imdb": f"tt{imdbID}" - } - url = "https://api.tvmaze.com/lookup/shows" - lookup = True + print(f"Searching TVmaze with TVDB ID: {tvdbID}") + tvdb_resp = self._make_tvmaze_request("https://api.tvmaze.com/lookup/shows", {"thetvdb": tvdbID}) + if tvdb_resp: + results.append(tvdb_resp) + if int(imdbID) != 0: + print(f"Searching TVmaze with IMDb ID: {imdbID}") + imdb_resp = self._make_tvmaze_request("https://api.tvmaze.com/lookup/shows", {"imdb": f"tt{imdbID}"}) + if imdb_resp: + results.append(imdb_resp) + print(f"Searching TVmaze with filename: {filename}") + search_resp = self._make_tvmaze_request("https://api.tvmaze.com/search/shows", {"q": filename}) + if search_resp: + if isinstance(search_resp, list): + results.extend([each['show'] for each in search_resp if 'show' in each]) + else: + results.append(search_resp) + + if year not in (None, ''): + print(f"Filtering results by year: {year}") + results = [show for show in results if show.get('premiered', '').startswith(str(year))] + + seen = set() + unique_results = [] + for show in results: + if show['id'] not in seen: + seen.add(show['id']) + unique_results.append(show) + results = unique_results + + if not results: + print("No results found.") + return tvmazeID, imdbID, tvdbID + + if meta['manual_date'] is not None: + print("Search results:") + for idx, show in enumerate(results): + console.print(f"[bold red]{idx + 1}[/bold red]. [green]{show.get('name', 'Unknown')} (TVmaze ID:[/green] [bold red]{show['id']}[/bold red])") + console.print(f"[yellow] Premiered: {show.get('premiered', 'Unknown')}[/yellow]") + console.print(f" Externals: {json.dumps(show.get('externals', {}), indent=2)}") + + while True: + try: + choice = int(input(f"Enter the number of the correct show (1-{len(results)}) or 0 to skip: ")) + if choice == 0: + print("Skipping selection.") + break + if 1 <= choice <= len(results): + selected_show = results[choice - 1] + tvmazeID = selected_show.get('id') + print(f"Selected show: {selected_show.get('name')} (TVmaze ID: {tvmazeID})") + break + else: + print(f"Invalid choice. Please choose a number between 1 and {len(results)}, or 0 to skip.") + except ValueError: + print("Invalid input. 
Please enter a number.") else: - params = { - "q": filename - } - url = "https://api.tvmaze.com/search/shows" - resp = requests.get(url=url, params=params) - if resp.ok: - resp = resp.json() - if resp is None: - return tvmazeID, imdbID, tvdbID - if lookup is True: - show = resp + if results: + selected_show = results[0] + tvmazeID = selected_show.get('id') + if meta['debug']: + print(f"Automatically selected show: {selected_show.get('name')} (TVmaze ID: {tvmazeID})") else: - if year not in (None, ''): - for each in resp: - premier_date = each['show'].get('premiered', '') - if premier_date is not None: - if premier_date.startswith(str(year)): - show = each['show'] - elif len(resp) >= 1: - show = resp[0]['show'] - if show is not None: - tvmazeID = show.get('id') - if int(imdbID) == 0: - if show.get('externals', {}).get('imdb', '0') is not None: - imdbID = str(show.get('externals', {}).get('imdb', '0')).replace('tt', '') - if int(tvdbID) == 0: - if show.get('externals', {}).get('tvdb', '0') is not None: - tvdbID = show.get('externals', {}).get('tvdb', '0') + print("No results to select from. Skipping.") + if meta['debug']: + print(f"Returning results - TVmaze ID: {tvmazeID}, IMDb ID: {imdbID}, TVDB ID: {tvdbID}") return tvmazeID, imdbID, tvdbID + + def _make_tvmaze_request(self, url, params): + print(f"Requesting TVmaze API: {url} with params: {params}") + try: + resp = requests.get(url, params=params) + if resp.ok: + return resp.json() + else: + print(f"HTTP Request failed with status code: {resp.status_code}, response: {resp.text}") + return None + except Exception as e: + print(f"Error making TVmaze request: {e}") + return None From f7243617f0946cfe8d97cd920723bf43dab6bf42 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 7 Dec 2024 09:01:51 +1000 Subject: [PATCH 607/741] Put meta in correct location fixes https://github.com/Audionut/Upload-Assistant/issues/175 --- src/prep.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index e69ae783c..98f793b9c 100644 --- a/src/prep.py +++ b/src/prep.py @@ -662,7 +662,7 @@ async def process_tracker(tracker_name, meta): else: meta['category'] = meta['category'].upper() if meta.get('tmdb', None) is None and meta.get('imdb', None) is None: - meta['category'], meta['tmdb'], meta['imdb'] = self.get_tmdb_imdb_from_mediainfo(mi, meta['category'], meta['is_disc'], meta['tmdb'], meta['imdb'], meta) + meta['category'], meta['tmdb'], meta['imdb'] = self.get_tmdb_imdb_from_mediainfo(mi, meta['category'], meta['is_disc'], meta['tmdb'], meta['imdb']) if meta.get('tmdb', None) is None and meta.get('imdb', None) is None: meta = await self.get_tmdb_id(filename, meta['search_year'], meta, meta['category'], untouched_filename) elif meta.get('imdb', None) is not None and meta.get('tmdb_manual', None) is None: @@ -677,7 +677,7 @@ async def process_tracker(tracker_name, meta): else: meta = await self.tmdb_other_meta(meta) # Search tvmaze - meta['tvmaze_id'], meta['imdb_id'], meta['tvdb_id'] = await self.search_tvmaze(filename, meta['search_year'], meta.get('imdb_id', '0'), meta.get('tvdb_id', 0)) + meta['tvmaze_id'], meta['imdb_id'], meta['tvdb_id'] = await self.search_tvmaze(filename, meta['search_year'], meta.get('imdb_id', '0'), meta.get('tvdb_id', 0), meta) # If no imdb, search for it if meta.get('imdb_id', None) is None: meta['imdb_id'] = await self.search_imdb(filename, meta['search_year']) From c2d404dc0e3c0df2c95af486ed908febdfcec8a6 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 7 Dec 2024 09:59:50 +1000 
Subject: [PATCH 608/741] OE - dvd subtitle description --- src/trackers/OE.py | 51 ++++++++++++++++++++++++++++++---------------- 1 file changed, 33 insertions(+), 18 deletions(-) diff --git a/src/trackers/OE.py b/src/trackers/OE.py index 59d3d035c..ca866b0d3 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -240,11 +240,22 @@ async def edit_desc(self, meta, tracker, signature, comparison=False, desc_heade if desc_header != "": descfile.write(desc_header) - if not meta['is_disc']: - def process_languages(tracks): - audio_languages = [] - subtitle_languages = [] + def process_languages(tracks): + audio_languages = [] + subtitle_languages = [] + + if meta['is_disc'] == "DVD": + subtitle_lang = cli_ui.ask_string( + 'DVD subtitle language cannot be found, you must enter language if subtitles are present:' + ) + if subtitle_lang: + subtitle_languages.append(subtitle_lang) + else: + console.print("[yellow]No subtitle language added for DVD.[/yellow]") + return audio_languages, subtitle_languages + + if not meta['is_disc']: for track in tracks: if track.get('@type') == 'Audio': language = track.get('Language') @@ -254,7 +265,8 @@ def process_languages(tracks): audio_languages.append(audio_lang) else: audio_languages.append("") - if track.get('@type') == 'Text': + + if track.get('@type') == 'Text' and meta['is_disc'] != "DVD": language = track.get('Language') if not language or language is None: subtitle_lang = cli_ui.ask_string('No subtitle language present, you must enter one:') @@ -265,19 +277,22 @@ def process_languages(tracks): return audio_languages, subtitle_languages - media_data = meta.get('mediainfo', {}) - if media_data: - tracks = media_data.get('media', {}).get('track', []) - if tracks: - audio_languages, subtitle_languages = process_languages(tracks) - if audio_languages: - descfile.write(f"Audio Language: {', '.join(audio_languages)}\n") - - subtitle_tracks = [track for track in tracks if track.get('@type') == 'Text'] - if subtitle_tracks and subtitle_languages: - descfile.write(f"Subtitle Language: {', '.join(subtitle_languages)}\n") - else: - console.print("[red]No media information available in meta.[/red]") + media_data = meta.get('mediainfo', {}) + if media_data: + tracks = media_data.get('media', {}).get('track', []) + if tracks: + audio_languages, subtitle_languages = process_languages(tracks) + + if meta['is_disc'] == "DVD" and subtitle_languages: + descfile.write(f"DVD Subtitle Language: {', '.join(subtitle_languages)}\n") + + if audio_languages: + descfile.write(f"Audio Language: {', '.join(audio_languages)}\n") + + if meta['is_disc'] != "DVD" and subtitle_languages: + descfile.write(f"Subtitle Language: {', '.join(subtitle_languages)}\n") + else: + console.print("[red]No media information available in meta.[/red]") # Existing disc metadata handling bbcode = BBCODE() From d7cb9395e44a8f126b501b3e404e0f23e5b1d840 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 7 Dec 2024 10:20:17 +1000 Subject: [PATCH 609/741] Revert "OE - dvd subtitle description" This reverts commit c2d404dc0e3c0df2c95af486ed908febdfcec8a6. 
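The revert restores the previous flow: only for non-disc sources, walk the
MediaInfo track list, collect audio/subtitle languages, and prompt only when a
track carries no Language tag. A minimal standalone sketch of that pattern
(the file path and prompt wording are illustrative, not the tracker code):

    import json

    def collect_languages(tracks):
        audio, subs = [], []
        for track in tracks:
            lang = track.get('Language')
            if track.get('@type') == 'Audio':
                audio.append(lang or input('No audio language present, enter one: '))
            elif track.get('@type') == 'Text':
                subs.append(lang or input('No subtitle language present, enter one: '))
        return audio, subs

    with open('MediaInfo.json', encoding='utf-8') as f:
        tracks = json.load(f).get('media', {}).get('track', [])
    audio, subs = collect_languages(tracks)
    print('Audio:', ', '.join(audio), '| Subtitles:', ', '.join(subs))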
--- src/trackers/OE.py | 51 ++++++++++++++++------------------------------ 1 file changed, 18 insertions(+), 33 deletions(-) diff --git a/src/trackers/OE.py b/src/trackers/OE.py index ca866b0d3..59d3d035c 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -240,22 +240,11 @@ async def edit_desc(self, meta, tracker, signature, comparison=False, desc_heade if desc_header != "": descfile.write(desc_header) - def process_languages(tracks): - audio_languages = [] - subtitle_languages = [] - - if meta['is_disc'] == "DVD": - subtitle_lang = cli_ui.ask_string( - 'DVD subtitle language cannot be found, you must enter language if subtitles are present:' - ) - if subtitle_lang: - subtitle_languages.append(subtitle_lang) - else: - console.print("[yellow]No subtitle language added for DVD.[/yellow]") + if not meta['is_disc']: + def process_languages(tracks): + audio_languages = [] + subtitle_languages = [] - return audio_languages, subtitle_languages - - if not meta['is_disc']: for track in tracks: if track.get('@type') == 'Audio': language = track.get('Language') @@ -265,8 +254,7 @@ def process_languages(tracks): audio_languages.append(audio_lang) else: audio_languages.append("") - - if track.get('@type') == 'Text' and meta['is_disc'] != "DVD": + if track.get('@type') == 'Text': language = track.get('Language') if not language or language is None: subtitle_lang = cli_ui.ask_string('No subtitle language present, you must enter one:') @@ -277,22 +265,19 @@ def process_languages(tracks): return audio_languages, subtitle_languages - media_data = meta.get('mediainfo', {}) - if media_data: - tracks = media_data.get('media', {}).get('track', []) - if tracks: - audio_languages, subtitle_languages = process_languages(tracks) - - if meta['is_disc'] == "DVD" and subtitle_languages: - descfile.write(f"DVD Subtitle Language: {', '.join(subtitle_languages)}\n") - - if audio_languages: - descfile.write(f"Audio Language: {', '.join(audio_languages)}\n") - - if meta['is_disc'] != "DVD" and subtitle_languages: - descfile.write(f"Subtitle Language: {', '.join(subtitle_languages)}\n") - else: - console.print("[red]No media information available in meta.[/red]") + media_data = meta.get('mediainfo', {}) + if media_data: + tracks = media_data.get('media', {}).get('track', []) + if tracks: + audio_languages, subtitle_languages = process_languages(tracks) + if audio_languages: + descfile.write(f"Audio Language: {', '.join(audio_languages)}\n") + + subtitle_tracks = [track for track in tracks if track.get('@type') == 'Text'] + if subtitle_tracks and subtitle_languages: + descfile.write(f"Subtitle Language: {', '.join(subtitle_languages)}\n") + else: + console.print("[red]No media information available in meta.[/red]") # Existing disc metadata handling bbcode = BBCODE() From e935534839020c04a867de18df9a55f11eaae644 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 7 Dec 2024 15:49:55 +1000 Subject: [PATCH 610/741] HUNO - fix type override --- src/trackers/HUNO.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index d951e6dae..a5422bb7a 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -150,7 +150,7 @@ async def get_name(self, meta): basename = self.get_basename(meta) hc = meta.get('hardcoded-subs') - type = meta.get('type', "") + type = meta.get('type', "").upper() title = meta.get('title', "") alt_title = meta.get('aka', "") # noqa F841 year = meta.get('year', "") From 32185ac25df62bafff317e0e8f4489a705dc0f0b Mon Sep 17 00:00:00 2001 From: 
Audionut Date: Sat, 7 Dec 2024 15:53:24 +1000 Subject: [PATCH 611/741] Also type fix in get type --- src/trackers/HUNO.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index a5422bb7a..1e75de732 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -235,7 +235,7 @@ async def get_cat_id(self, category_name): return category_id async def get_type_id(self, meta): - type = meta['type'] + type = meta.get('type').upper() video_encode = meta.get('video_encode') if type == 'REMUX': From 662cfbee6dacbec5c417ff633b437a265a8acc95 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 7 Dec 2024 16:00:14 +1000 Subject: [PATCH 612/741] Force manual_type to upper --- src/args.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/args.py b/src/args.py index 5c348cfcf..bef5f8e9d 100644 --- a/src/args.py +++ b/src/args.py @@ -128,8 +128,8 @@ def parse(self, args, meta): if value not in (None, []): if isinstance(value, list): value2 = self.list_to_string(value) - if key == 'type': - meta[key] = value2.upper().replace('-', '') + if key == 'manual_type': + meta['manual_type'] = value2.upper().replace('-', '') elif key == 'tag': meta[key] = f"-{value2}" elif key == 'screens': From b51b30cb7004ab5a528a28243ced86c08337097c Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 7 Dec 2024 21:59:38 +1000 Subject: [PATCH 613/741] BHD-MTV rehosting fixes --- src/prep.py | 35 ++++++++++++++------- src/trackers/BHD.py | 71 +++++++++++++++++++++++++----------------- src/trackers/COMMON.py | 2 +- src/trackers/MTV.py | 69 ++++++++++++++++++++++++---------------- src/trackers/PTP.py | 2 +- 5 files changed, 111 insertions(+), 68 deletions(-) diff --git a/src/prep.py b/src/prep.py index 98f793b9c..f3f86c230 100644 --- a/src/prep.py +++ b/src/prep.py @@ -635,7 +635,7 @@ async def process_tracker(tracker_name, meta): elif meta['is_disc'] == "DVD": if meta.get('edit', False) is False: try: - ds = multiprocessing.Process(target=self.dvd_screenshots, args=(meta, 0, None)) + ds = multiprocessing.Process(target=self.dvd_screenshots, args=(meta, 0, None, None)) ds.start() while ds.is_alive() is True: await asyncio.sleep(1) @@ -1334,12 +1334,12 @@ def capture_disc_task(self, task): console.print(f"[red]Error capturing screenshot: {e}[/red]") return None - def dvd_screenshots(self, meta, disc_num, num_screens=None): + def dvd_screenshots(self, meta, disc_num, num_screens=None, retry_cap=None): if 'image_list' not in meta: meta['image_list'] = [] existing_images = [img for img in meta['image_list'] if isinstance(img, dict) and img.get('img_url', '').startswith('http')] - if len(existing_images) >= meta.get('cutoff'): + if len(existing_images) >= meta.get('cutoff') and not retry_cap: console.print("[yellow]There are already at least {} images in the image list. 
Skipping additional screenshots.".format(meta.get('cutoff'))) return @@ -1542,7 +1542,6 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non capture_results = [] task_limit = int(meta.get('task_limit', os.cpu_count())) - capture_tasks = [] for i in range(num_screens + 1): image_path = os.path.abspath(f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png") if not os.path.exists(image_path) or meta.get('retake', False): @@ -1559,7 +1558,7 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non desc="Capturing Screenshots"): capture_results.append(result) - if capture_results and len(capture_results) > num_screens: + if capture_results and len(capture_results) > num_screens and not force_screenshots: smallest = min(capture_results, key=os.path.getsize) if meta['debug']: console.print(f"[yellow]Removing smallest image: {smallest} ({os.path.getsize(smallest)} bytes)[/yellow]") @@ -1646,21 +1645,34 @@ def valid_ss_time(self, ss_times, num_screens, length, manual_frames=None): def capture_screenshot(self, args): path, ss_time, image_path, width, height, w_sar, h_sar, loglevel = args try: - ff = ffmpeg.input(path, ss=ss_time) + # Validate inputs + if width <= 0 or height <= 0: + return "Error: Invalid width or height for scaling" + + if ss_time < 0: + return f"Error: Invalid timestamp {ss_time}" + ff = ffmpeg.input(path, ss=ss_time) if w_sar != 1 or h_sar != 1: ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) - ( + command = ( ff .output(image_path, vframes=1, pix_fmt="rgb24") .overwrite_output() .global_args('-loglevel', loglevel) - .run() ) + + command.run() + + if not os.path.exists(image_path) or os.path.getsize(image_path) == 0: + return f"Error: Screenshot not generated or is empty at {image_path}" + return image_path + except ffmpeg.Error as e: + return f"FFmpeg Error: {e.stderr.decode()}" except Exception as e: - return f"Error: {e}" + return f"Error: {str(e)}" def optimize_image_task(self, args): image, config = args @@ -2893,6 +2905,7 @@ def upload_image_task(self, args): img_url = response_data['data']['medium']['url'] raw_url = response_data['data']['image']['url'] web_url = response_data['data']['url_viewer'] + if meta['debug']: console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") @@ -2929,7 +2942,7 @@ def upload_image_task(self, args): console.print("[yellow]ptscreens failed, trying next image host") return {'status': 'failed', 'reason': 'ptscreens upload failed'} - img_url = response_data['image']['thumb']['url'] + img_url = response_data['image']['medium']['url'] raw_url = response_data['image']['url'] web_url = response_data['image']['url_viewer'] if meta['debug']: @@ -2957,7 +2970,7 @@ def upload_image_task(self, args): console.print(f"[yellow]Response content: {response.content.decode('utf-8')}") response_data = response.json() - if response_data.get('status_code') != 200: + if response.status_code != 200 or not response_data.get('success'): console.print("[yellow]OEimg failed, trying next image host") return {'status': 'failed', 'reason': 'OEimg upload failed'} diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index c34d2d4ea..4c4ef1fbb 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -155,7 +155,7 @@ async def upload_with_retry(self, meta, common, img_host_index=1): async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts=None, file=None): if approved_image_hosts is None: - approved_image_hosts = ['ptpimg', 
'imgbox'] + approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb', 'pixhost'] retry_mode = False images_reuploaded = False @@ -163,6 +163,7 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts discs = meta.get('discs', []) # noqa F841 filelist = meta.get('video', []) filename = meta['filename'] + path = meta['path'] if isinstance(filelist, str): filelist = [filelist] @@ -179,14 +180,15 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts for i, file in enumerate(filelist): filename_pattern = f"{filename}*.png" - dvd_screens = (glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][0]['name']}-*.png")) + if meta['is_disc'] == "DVD": - existing_screens = dvd_screens + existing_screens = glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][0]['name']}-*.png") else: existing_screens = glob.glob(os.path.join(screenshots_dir, filename_pattern)) + if len(existing_screens) < multi_screens: if meta.get('debug'): - console.print("[yellow]The image host of exsting images is not supported.") + console.print("[yellow]The image host of existing images is not supported.") console.print(f"[yellow]Insufficient screenshots found: generating {multi_screens} screenshots.") if meta['is_disc'] == "BDMV": @@ -198,12 +200,12 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts elif meta['is_disc'] == "DVD": s = multiprocessing.Process( target=prep.dvd_screenshots, - args=(meta, 0, None) + args=(meta, 0, None, True) ) else: s = multiprocessing.Process( target=prep.screenshots, - args=(file, f"{filename}", meta['uuid'], base_dir, + args=(path, f"{filename}", meta['uuid'], base_dir, meta, multi_screens + 1, True, None) ) @@ -211,41 +213,54 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts while s.is_alive(): await asyncio.sleep(1) - existing_screens = glob.glob(os.path.join(screenshots_dir, filename_pattern)) + if meta['is_disc'] == "DVD": + existing_screens = glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][0]['name']}-*.png") + else: + existing_screens = glob.glob(os.path.join(screenshots_dir, filename_pattern)) all_screenshots.extend(existing_screens) - if all_screenshots: - while True: - current_img_host_key = f'img_host_{img_host_index}' - current_img_host = self.config.get('DEFAULT', {}).get(current_img_host_key) + if not all_screenshots: + console.print("[red]No screenshots were generated or found. 
Please check the screenshot generation process.") + return [], True, images_reuploaded + + uploaded_images = [] + while True: + current_img_host_key = f'img_host_{img_host_index}' + current_img_host = self.config.get('DEFAULT', {}).get(current_img_host_key) + + if not current_img_host: + console.print("[red]No more image hosts left to try.") + raise Exception("No valid image host found in the config.") + + if current_img_host not in approved_image_hosts: + console.print(f"[red]Your preferred image host '{current_img_host}' is not supported at BHD, trying next host.") + retry_mode = True + images_reuploaded = True + img_host_index += 1 + continue + else: + meta['imghost'] = current_img_host + console.print(f"[green]Uploading to approved host '{current_img_host}'.") + break - if not current_img_host: - console.print("[red]No more image hosts left to try.") - raise Exception("No valid image host found in the config.") + uploaded_images, _ = prep.upload_screens( + meta, multi_screens, img_host_index, 0, multi_screens, + all_screenshots, {new_images_key: meta[new_images_key]}, retry_mode + ) - if current_img_host not in approved_image_hosts: - console.print(f"[red]Your preferred image host '{current_img_host}' is not supported at BHD, trying next host.") - retry_mode = True - images_reuploaded = True - img_host_index += 1 - continue - else: - meta['imghost'] = current_img_host - console.print(f"[green]Uploading to approved host '{current_img_host}'.") - break - uploaded_images, _ = prep.upload_screens(meta, multi_screens, img_host_index, 0, multi_screens, all_screenshots, {new_images_key: meta[new_images_key]}, retry_mode) + if uploaded_images: + meta[new_images_key] = uploaded_images - if uploaded_images: - meta[new_images_key] = uploaded_images if meta['debug']: for image in uploaded_images: console.print(f"[debug] Response in upload_image_task: {image['img_url']}, {image['raw_url']}, {image['web_url']}") + if not all(any(x in image['raw_url'] for x in approved_image_hosts) for image in meta.get(new_images_key, [])): console.print("[red]Unsupported image host detected, please use one of the approved image hosts") return meta[new_images_key], True, images_reuploaded # Trigger retry_mode if switching hosts - return meta[new_images_key], False, images_reuploaded # Return retry_mode and images_reuploaded + return meta[new_images_key], False, images_reuploaded async def get_cat_id(self, category_name): category_id = { diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 2247348b8..94c032785 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -164,7 +164,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des await asyncio.sleep(1) new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") elif each['type'] == "DVD": - s = multiprocessing.Process(target=prep.dvd_screenshots, args=(meta, i, multi_screens)) + s = multiprocessing.Process(target=prep.dvd_screenshots, args=(meta, i, multi_screens, True)) s.start() while s.is_alive() is True: await asyncio.sleep(1) diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 11c2d6464..1e569110c 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -187,6 +187,7 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts discs = meta.get('discs', []) # noqa F841 filelist = meta.get('video', []) filename = meta['filename'] + path = meta['path'] if isinstance(filelist, str): filelist = [filelist] @@ -203,14 +204,15 @@ async def 
handle_image_upload(self, meta, img_host_index=1, approved_image_hosts for i, file in enumerate(filelist): filename_pattern = f"{filename}*.png" - dvd_screens = (glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][0]['name']}-*.png")) + if meta['is_disc'] == "DVD": - existing_screens = dvd_screens + existing_screens = glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][0]['name']}-*.png") else: existing_screens = glob.glob(os.path.join(screenshots_dir, filename_pattern)) + if len(existing_screens) < multi_screens: if meta.get('debug'): - console.print("[yellow]The image host of exsting images is not supported.") + console.print("[yellow]The image host of existing images is not supported.") console.print(f"[yellow]Insufficient screenshots found: generating {multi_screens} screenshots.") if meta['is_disc'] == "BDMV": @@ -222,12 +224,12 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts elif meta['is_disc'] == "DVD": s = multiprocessing.Process( target=prep.dvd_screenshots, - args=(meta, 0, None) + args=(meta, 0, None, True) ) else: s = multiprocessing.Process( target=prep.screenshots, - args=(file, f"{filename}", meta['uuid'], base_dir, + args=(path, f"{filename}", meta['uuid'], base_dir, meta, multi_screens + 1, True, None) ) @@ -235,41 +237,54 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts while s.is_alive(): await asyncio.sleep(1) - existing_screens = glob.glob(os.path.join(screenshots_dir, filename_pattern)) + if meta['is_disc'] == "DVD": + existing_screens = glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][0]['name']}-*.png") + else: + existing_screens = glob.glob(os.path.join(screenshots_dir, filename_pattern)) all_screenshots.extend(existing_screens) - if all_screenshots: - while True: - current_img_host_key = f'img_host_{img_host_index}' - current_img_host = self.config.get('DEFAULT', {}).get(current_img_host_key) + if not all_screenshots: + console.print("[red]No screenshots were generated or found. 
Please check the screenshot generation process.") + return [], True, images_reuploaded + + uploaded_images = [] + while True: + current_img_host_key = f'img_host_{img_host_index}' + current_img_host = self.config.get('DEFAULT', {}).get(current_img_host_key) + + if not current_img_host: + console.print("[red]No more image hosts left to try.") + raise Exception("No valid image host found in the config.") + + if current_img_host not in approved_image_hosts: + console.print(f"[red]Your preferred image host '{current_img_host}' is not supported at MTV, trying next host.") + retry_mode = True + images_reuploaded = True + img_host_index += 1 + continue + else: + meta['imghost'] = current_img_host + console.print(f"[green]Uploading to approved host '{current_img_host}'.") + break - if not current_img_host: - console.print("[red]No more image hosts left to try.") - raise Exception("No valid image host found in the config.") + uploaded_images, _ = prep.upload_screens( + meta, multi_screens, img_host_index, 0, multi_screens, + all_screenshots, {new_images_key: meta[new_images_key]}, retry_mode + ) - if current_img_host not in approved_image_hosts: - console.print(f"[red]Your preferred image host '{current_img_host}' is not supported at MTV, trying next host.") - retry_mode = True - images_reuploaded = True - img_host_index += 1 - continue - else: - meta['imghost'] = current_img_host - console.print(f"[green]Uploading to approved host '{current_img_host}'.") - break - uploaded_images, _ = prep.upload_screens(meta, multi_screens, img_host_index, 0, multi_screens, all_screenshots, {new_images_key: meta[new_images_key]}, retry_mode) + if uploaded_images: + meta[new_images_key] = uploaded_images - if uploaded_images: - meta[new_images_key] = uploaded_images if meta['debug']: for image in uploaded_images: console.print(f"[debug] Response in upload_image_task: {image['img_url']}, {image['raw_url']}, {image['web_url']}") + if not all(any(x in image['raw_url'] for x in approved_image_hosts) for image in meta.get(new_images_key, [])): console.print("[red]Unsupported image host detected, please use one of the approved image hosts") return meta[new_images_key], True, images_reuploaded # Trigger retry_mode if switching hosts - return meta[new_images_key], False, images_reuploaded # Return retry_mode and images_reuploaded + return meta[new_images_key], False, images_reuploaded async def edit_desc(self, meta): base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 5d5307816..772633c1c 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -741,7 +741,7 @@ async def edit_desc(self, meta): meta[new_images_key] = [] new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['discs'][i]['name']}-*.png") if not new_screens: - ds = multiprocessing.Process(target=prep.dvd_screenshots, args=(meta, i, multi_screens)) + ds = multiprocessing.Process(target=prep.dvd_screenshots, args=(meta, i, multi_screens, True)) ds.start() while ds.is_alive() is True: await asyncio.sleep(1) From fa29e7cb169d77e18e5cdf9c8dba38c91814b216 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 7 Dec 2024 22:29:45 +1000 Subject: [PATCH 614/741] Change from cli_ui to simple print-input --- upload.py | 44 +++++++++++++++++++++++--------------------- 1 file changed, 23 insertions(+), 21 deletions(-) diff --git a/upload.py b/upload.py index 4316e4fb7..8b6762e0e 100644 --- a/upload.py +++ b/upload.py @@ -820,25 +820,25 @@ def 
get_confirmation(meta): console.print("[bold red]DEBUG: True") console.print(f"Prep material saved to {meta['base_dir']}/tmp/{meta['uuid']}") console.print() - cli_ui.info_section(cli_ui.yellow, "Database Info") - cli_ui.info(f"Title: {meta['title']} ({meta['year']})") + console.print("[bold yellow]Database Info[/bold yellow]") + console.print(f"[bold]Title:[/bold] {meta['title']} ({meta['year']})") console.print() - cli_ui.info(f"Overview: {meta['overview']}") + console.print(f"[bold]Overview:[/bold] {meta['overview']}") console.print() - cli_ui.info(f"Category: {meta['category']}") + console.print(f"[bold]Category:[/bold] {meta['category']}") if int(meta.get('tmdb', 0)) != 0: - cli_ui.info(f"TMDB: https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}") + console.print(f"[bold]TMDB:[/bold] https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}") if int(meta.get('imdb_id', '0')) != 0: - cli_ui.info(f"IMDB: https://www.imdb.com/title/tt{meta['imdb_id']}") + console.print(f"[bold]IMDB:[/bold] https://www.imdb.com/title/tt{meta['imdb_id']}") if int(meta.get('tvdb_id', '0')) != 0: - cli_ui.info(f"TVDB: https://www.thetvdb.com/?id={meta['tvdb_id']}&tab=series") + console.print(f"[bold]TVDB:[/bold] https://www.thetvdb.com/?id={meta['tvdb_id']}&tab=series") if int(meta.get('tvmaze_id', '0')) != 0: - cli_ui.info(f"TVMaze: https://www.tvmaze.com/shows/{meta['tvmaze_id']}") + console.print(f"[bold]TVMaze:[/bold] https://www.tvmaze.com/shows/{meta['tvmaze_id']}") if int(meta.get('mal_id', 0)) != 0: - cli_ui.info(f"MAL : https://myanimelist.net/anime/{meta['mal_id']}") + console.print(f"[bold]MAL:[/bold] https://myanimelist.net/anime/{meta['mal_id']}") console.print() if int(meta.get('freeleech', '0')) != 0: - cli_ui.info(f"Freeleech: {meta['freeleech']}") + console.print(f"[bold]Freeleech:[/bold] {meta['freeleech']}") if meta['tag'] == "": tag = "" else: @@ -848,14 +848,15 @@ def get_confirmation(meta): else: res = meta['resolution'] - cli_ui.info(f"{res} / {meta['type']}{tag}") + console.print(f"{res} / {meta['type']}{tag}") if meta.get('personalrelease', False) is True: - cli_ui.info("Personal Release!") + console.print("[bold green]Personal Release![/bold green]") console.print() if meta.get('unattended', False) is False: get_missing(meta) ring_the_bell = "\a" if config['DEFAULT'].get("sfx_on_prompt", True) is True else "" # \a rings the bell - cli_ui.info(ring_the_bell) + if ring_the_bell: + console.print(ring_the_bell) # Handle the 'keep_folder' logic based on 'is disc' and 'isdir' if meta.get('is disc', False) is True: @@ -863,18 +864,19 @@ def get_confirmation(meta): if meta.get('keep_folder'): if meta['isdir']: - cli_ui.info_section(cli_ui.yellow, "Uploading with --keep-folder") - kf_confirm = cli_ui.ask_yes_no("You specified --keep-folder. Uploading in folders might not be allowed. Are you sure you want to proceed?", default=False) - if not kf_confirm: - cli_ui.info('Aborting...') + console.print("[bold yellow]Uploading with --keep-folder[/bold yellow]") + kf_confirm = input("You specified --keep-folder. Uploading in folders might not be allowed. Are you sure you want to proceed? 
[y/N]: ").strip().lower() + if kf_confirm != 'y': + console.print("[bold red]Aborting...[/bold red]") exit() - cli_ui.info_section(cli_ui.yellow, "Is this correct?") - cli_ui.info(f"Name: {meta['name']}") - confirm = cli_ui.ask_yes_no("Correct?", default=False) + console.print("[bold yellow]Is this correct?[/bold yellow]") + console.print(f"[bold]Name:[/bold] {meta['name']}") + confirm_input = input("Correct? [y/N]: ").strip().lower() + confirm = confirm_input == 'y' else: - cli_ui.info(f"Name: {meta['name']}") + console.print(f"[bold]Name:[/bold] {meta['name']}") confirm = True return confirm From a516375c1e9ae1d5b3ca1a955c762bcbc3e384ad Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 8 Dec 2024 10:39:53 +1000 Subject: [PATCH 615/741] BHD - imgbb = ibb.co Need some mapping to link urls with host --- requirements.txt | 1 + src/trackers/BHD.py | 38 +++++++++++++++++++++++++++++++++----- 2 files changed, 34 insertions(+), 5 deletions(-) diff --git a/requirements.txt b/requirements.txt index 2797cdc55..d5de5ed5e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -24,3 +24,4 @@ click aiohttp Pillow tqdm +urllib3 diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index 4c4ef1fbb..c9ae3d802 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -10,6 +10,7 @@ import bencodepy import glob import multiprocessing +from urllib.parse import urlparse from src.trackers.COMMON import COMMON from src.console import console @@ -37,13 +38,30 @@ async def upload(self, meta, disctype): await self.upload_with_retry(meta, common) async def upload_with_retry(self, meta, common, img_host_index=1): + url_host_mapping = { + "i.ibb.co": "imgbb", + } + approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb', 'pixhost'] images_reuploaded = False + normalized_approved_hosts = set(approved_image_hosts + list(url_host_mapping.keys())) # noqa F841 + for image in meta['image_list']: + raw_url = image['raw_url'] + parsed_url = urlparse(raw_url) + hostname = parsed_url.netloc + mapped_host = url_host_mapping.get(hostname, hostname) + if meta['debug']: + if mapped_host in approved_image_hosts: + console.print(f"[green]URL '{raw_url}' is correctly matched to approved host '{mapped_host}'.") + else: + console.print(f"[red]URL '{raw_url}' is not recognized as part of an approved host.") - if all(any(host in image['raw_url'] for host in approved_image_hosts) for image in meta['image_list']): + if all( + url_host_mapping.get(urlparse(image['raw_url']).netloc, urlparse(image['raw_url']).netloc) in approved_image_hosts + for image in meta['image_list'] + ): console.print("[green]Images are already hosted on an approved image host. 
Skipping re-upload.") image_list = meta['image_list'] - else: images_reuploaded = False while img_host_index <= len(approved_image_hosts): @@ -157,6 +175,10 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts if approved_image_hosts is None: approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb', 'pixhost'] + url_host_mapping = { + "i.ibb.co": "imgbb", + } + retry_mode = False images_reuploaded = False new_images_key = 'bhd_images_key' @@ -256,9 +278,15 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts for image in uploaded_images: console.print(f"[debug] Response in upload_image_task: {image['img_url']}, {image['raw_url']}, {image['web_url']}") - if not all(any(x in image['raw_url'] for x in approved_image_hosts) for image in meta.get(new_images_key, [])): - console.print("[red]Unsupported image host detected, please use one of the approved image hosts") - return meta[new_images_key], True, images_reuploaded # Trigger retry_mode if switching hosts + for image in meta.get(new_images_key, []): + raw_url = image['raw_url'] + parsed_url = urlparse(raw_url) + hostname = parsed_url.netloc + mapped_host = url_host_mapping.get(hostname, hostname) + + if mapped_host not in approved_image_hosts: + console.print(f"[red]Unsupported image host detected in URL '{raw_url}'. Please use one of the approved image hosts.") + return meta[new_images_key], True, images_reuploaded # Trigger retry_mode if switching hosts return meta[new_images_key], False, images_reuploaded From acec0afeef5d54d9b1224c2445df9acd678737c1 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 8 Dec 2024 16:36:36 +1000 Subject: [PATCH 616/741] Add tvmaze and tvdb arguments See https://github.com/Audionut/Upload-Assistant/issues/179 --- src/args.py | 2 ++ src/prep.py | 49 +++++++++++++++++++++++++++++++------------------ 2 files changed, 33 insertions(+), 18 deletions(-) diff --git a/src/args.py b/src/args.py index bef5f8e9d..f51fdfd35 100644 --- a/src/args.py +++ b/src/args.py @@ -32,6 +32,8 @@ def parse(self, args, meta): parser.add_argument('-tmdb', '--tmdb', nargs='*', required=False, help="TMDb ID", type=str, dest='tmdb_manual') parser.add_argument('-imdb', '--imdb', nargs='*', required=False, help="IMDb ID", type=str) parser.add_argument('-mal', '--mal', nargs='*', required=False, help="MAL ID", type=str) + parser.add_argument('-tvmaze', '--tvmaze', nargs='*', required=False, help="TVMAZE ID", type=str, dest='tvmaze_manual') + parser.add_argument('-tvdb', '--tvdb', nargs='*', required=False, help="TVDB ID", type=str, dest='tvdb_manual') parser.add_argument('-g', '--tag', nargs='*', required=False, help="Group Tag", type=str) parser.add_argument('-serv', '--service', nargs='*', required=False, help="Streaming Service", type=str) parser.add_argument('-dist', '--distributor', nargs='*', required=False, help="Disc Distributor e.g.(Criterion, BFI, etc.)", type=str) diff --git a/src/prep.py b/src/prep.py index f3f86c230..794b0b45c 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1849,10 +1849,13 @@ async def tmdb_other_meta(self, meta): meta['imdb_id'] = str(int(imdb_id.replace('tt', ''))).zfill(7) else: meta['imdb_id'] = str(meta['imdb']).replace('tt', '').zfill(7) - if meta.get('tvdb_id', '0') in ['', ' ', None, 'None', '0']: - meta['tvdb_id'] = external.get('tvdb_id', '0') - if meta['tvdb_id'] in ["", None, " ", "None"]: - meta['tvdb_id'] = '0' + if meta.get('tvdb_manual'): + meta['tvdb_id'] = meta['tvdb_manual'] + else: + if meta.get('tvdb_id', '0') in ['', ' ', None, 
'None', '0']: + meta['tvdb_id'] = external.get('tvdb_id', '0') + if meta['tvdb_id'] in ["", None, " ", "None"]: + meta['tvdb_id'] = '0' try: videos = movie.videos() for each in videos.get('results', []): @@ -1899,10 +1902,13 @@ async def tmdb_other_meta(self, meta): meta['imdb_id'] = str(int(imdb_id.replace('tt', ''))).zfill(7) else: meta['imdb_id'] = str(int(meta['imdb'].replace('tt', ''))).zfill(7) - if meta.get('tvdb_id', '0') in ['', ' ', None, 'None', '0']: - meta['tvdb_id'] = external.get('tvdb_id', '0') - if meta['tvdb_id'] in ["", None, " ", "None"]: - meta['tvdb_id'] = '0' + if meta.get('tvdb_manual'): + meta['tvdb_id'] = meta['tvdb_manual'] + else: + if meta.get('tvdb_id', '0') in ['', ' ', None, 'None', '0']: + meta['tvdb_id'] = external.get('tvdb_id', '0') + if meta['tvdb_id'] in ["", None, " ", "None"]: + meta['tvdb_id'] = '0' try: videos = tv.videos() for each in videos.get('results', []): @@ -4021,7 +4027,7 @@ async def search_tvmaze(self, filename, year, imdbID, tvdbID, meta): if year not in (None, ''): print(f"Filtering results by year: {year}") - results = [show for show in results if show.get('premiered', '').startswith(str(year))] + results = [show for show in results if str(show.get('premiered', '')).startswith(str(year))] seen = set() unique_results = [] @@ -4035,7 +4041,16 @@ async def search_tvmaze(self, filename, year, imdbID, tvdbID, meta): print("No results found.") return tvmazeID, imdbID, tvdbID - if meta['manual_date'] is not None: + if meta.get('tvmaze_manual'): + tvmaze_manual_id = int(meta['tvmaze_manual']) + selected_show = next((show for show in results if show['id'] == tvmaze_manual_id), None) + if selected_show: + tvmazeID = selected_show['id'] + if meta['debug']: + print(f"Selected manual show: {selected_show.get('name')} (TVmaze ID: {tvmazeID})") + else: + print(f"Manual TVmaze ID {tvmaze_manual_id} not found in results.") + elif meta['manual_date'] is not None: print("Search results:") for idx, show in enumerate(results): console.print(f"[bold red]{idx + 1}[/bold red]. [green]{show.get('name', 'Unknown')} (TVmaze ID:[/green] [bold red]{show['id']}[/bold red])") @@ -4050,7 +4065,7 @@ async def search_tvmaze(self, filename, year, imdbID, tvdbID, meta): break if 1 <= choice <= len(results): selected_show = results[choice - 1] - tvmazeID = selected_show.get('id') + tvmazeID = selected_show['id'] print(f"Selected show: {selected_show.get('name')} (TVmaze ID: {tvmazeID})") break else: @@ -4058,13 +4073,11 @@ async def search_tvmaze(self, filename, year, imdbID, tvdbID, meta): except ValueError: print("Invalid input. Please enter a number.") else: - if results: - selected_show = results[0] - tvmazeID = selected_show.get('id') - if meta['debug']: - print(f"Automatically selected show: {selected_show.get('name')} (TVmaze ID: {tvmazeID})") - else: - print("No results to select from. 
Skipping.") + selected_show = results[0] + tvmazeID = selected_show['id'] + if meta['debug']: + print(f"Automatically selected show: {selected_show.get('name')} (TVmaze ID: {tvmazeID})") + if meta['debug']: print(f"Returning results - TVmaze ID: {tvmazeID}, IMDb ID: {imdbID}, TVDB ID: {tvdbID}") return tvmazeID, imdbID, tvdbID From 118d5c430a20596ae941362af332d5ba0e79e434 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 8 Dec 2024 17:03:18 +1000 Subject: [PATCH 617/741] BHD - map all image hosts --- src/trackers/BHD.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index c9ae3d802..d81505aab 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -40,6 +40,9 @@ async def upload(self, meta, disctype): async def upload_with_retry(self, meta, common, img_host_index=1): url_host_mapping = { "i.ibb.co": "imgbb", + "ptpimg.me": "ptpimg", + "pixhost.to": "pixhost", + "imgbox.com": "imgbox", } approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb', 'pixhost'] @@ -177,6 +180,9 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts url_host_mapping = { "i.ibb.co": "imgbb", + "ptpimg.me": "ptpimg", + "pixhost.to": "pixhost", + "imgbox.com": "imgbox", } retry_mode = False From 391c205cc178d08a5b3cd65bdb9ddc2c9f23eda8 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 8 Dec 2024 17:10:52 +1000 Subject: [PATCH 618/741] BHD - correct host mapping --- src/trackers/BHD.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index d81505aab..a81387191 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -41,8 +41,8 @@ async def upload_with_retry(self, meta, common, img_host_index=1): url_host_mapping = { "i.ibb.co": "imgbb", "ptpimg.me": "ptpimg", - "pixhost.to": "pixhost", - "imgbox.com": "imgbox", + "img100.pixhost.to": "pixhost", + "images2.imgbox.com": "imgbox", } approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb', 'pixhost'] @@ -181,8 +181,8 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts url_host_mapping = { "i.ibb.co": "imgbb", "ptpimg.me": "ptpimg", - "pixhost.to": "pixhost", - "imgbox.com": "imgbox", + "img100.pixhost.to": "pixhost", + "images2.imgbox.com": "imgbox", } retry_mode = False From adc9114c913c2efe3f32ab11c22a9f880968fa3e Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 8 Dec 2024 17:13:02 +1000 Subject: [PATCH 619/741] Return, don't exception --- src/trackers/BHD.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index a81387191..1e3d5a8d7 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -259,7 +259,7 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts if not current_img_host: console.print("[red]No more image hosts left to try.") - raise Exception("No valid image host found in the config.") + return if current_img_host not in approved_image_hosts: console.print(f"[red]Your preferred image host '{current_img_host}' is not supported at BHD, trying next host.") From c56e3baad26cbfc0753b07c166883efa945bb6be Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 8 Dec 2024 18:35:35 +1000 Subject: [PATCH 620/741] DVD - select longest vob for screens --- src/prep.py | 32 +++++++++++++------------------- 1 file changed, 13 insertions(+), 19 deletions(-) diff --git a/src/prep.py b/src/prep.py index 794b0b45c..d6fc429d4 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1380,7 +1380,7 @@ def 
dvd_screenshots(self, meta, disc_num, num_screens=None, retry_cap=None): def _is_vob_good(n, loops, num_screens): max_loops = 6 fallback_duration = 300 - voblength = fallback_duration + valid_tracks = [] while loops < max_loops: try: @@ -1389,36 +1389,30 @@ def _is_vob_good(n, loops, num_screens): output='JSON' ) vob_mi = json.loads(vob_mi) - if meta['debug']: - console.print("[yellow]Analyzing VOB file:[/yellow]", main_set[n]) for track in vob_mi.get('media', {}).get('track', []): - duration = track.get('Duration') + duration = float(track.get('Duration', 0)) width = track.get('Width') height = track.get('Height') - if meta['debug']: - console.print(f"Track {n}: Duration={duration}, Width={width}, Height={height}") - if duration and width and height: - if float(width) > 0 and float(height) > 0: - voblength = float(duration) - if meta['debug']: - console.print(f"[green]Valid track found: voblength={voblength}, n={n}[/green]") - return voblength, n + if duration > 1 and width and height: # Minimum 1-second track + valid_tracks.append({ + 'duration': duration, + 'track_index': n + }) + + if valid_tracks: + # Sort by duration, take longest track + longest_track = max(valid_tracks, key=lambda x: x['duration']) + return longest_track['duration'], longest_track['track_index'] except Exception as e: console.print(f"[red]Error parsing VOB {n}: {e}") n = (n + 1) % len(main_set) - if n >= num_screens: - n -= num_screens loops += 1 - if meta['debug']: - console.print(f"[yellow]Retrying: loops={loops}, current voblength={voblength}[/yellow]") - if meta['debug']: - console.print(f"[red]Fallback triggered: returning fallback_duration={fallback_duration}[/red]") - return fallback_duration, n + return fallback_duration, 0 main_set = meta['discs'][disc_num]['main_set'][1:] if len(meta['discs'][disc_num]['main_set']) > 1 else meta['discs'][disc_num]['main_set'] os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") From 8a26ca834839b39aa4e737be35d6b0d8d7996dc6 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 10 Dec 2024 13:56:00 +1000 Subject: [PATCH 621/741] Put tvmaze console behind debug --- src/prep.py | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/src/prep.py b/src/prep.py index d6fc429d4..4b7b62292 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1397,7 +1397,7 @@ def _is_vob_good(n, loops, num_screens): if duration > 1 and width and height: # Minimum 1-second track valid_tracks.append({ - 'duration': duration, + 'duration': duration, 'track_index': n }) @@ -3999,20 +3999,24 @@ async def search_tvmaze(self, filename, year, imdbID, tvdbID, meta): if imdbID is None: imdbID = '0' - print(f"Processed inputs - imdbID: {imdbID}, tvdbID: {tvdbID}") + if meta['debug']: + print(f"Processed inputs - imdbID: {imdbID}, tvdbID: {tvdbID}") if int(tvdbID) != 0: - print(f"Searching TVmaze with TVDB ID: {tvdbID}") - tvdb_resp = self._make_tvmaze_request("https://api.tvmaze.com/lookup/shows", {"thetvdb": tvdbID}) + if meta['debug']: + print(f"Searching TVmaze with TVDB ID: {tvdbID}") + tvdb_resp = self._make_tvmaze_request("https://api.tvmaze.com/lookup/shows", {"thetvdb": tvdbID}, meta) if tvdb_resp: results.append(tvdb_resp) if int(imdbID) != 0: - print(f"Searching TVmaze with IMDb ID: {imdbID}") - imdb_resp = self._make_tvmaze_request("https://api.tvmaze.com/lookup/shows", {"imdb": f"tt{imdbID}"}) + if meta['debug']: + print(f"Searching TVmaze with IMDb ID: {imdbID}") + imdb_resp = self._make_tvmaze_request("https://api.tvmaze.com/lookup/shows", {"imdb": 
f"tt{imdbID}"}, meta) if imdb_resp: results.append(imdb_resp) - print(f"Searching TVmaze with filename: {filename}") - search_resp = self._make_tvmaze_request("https://api.tvmaze.com/search/shows", {"q": filename}) + if meta['debug']: + print(f"Searching TVmaze with filename: {filename}") + search_resp = self._make_tvmaze_request("https://api.tvmaze.com/search/shows", {"q": filename}, meta) if search_resp: if isinstance(search_resp, list): results.extend([each['show'] for each in search_resp if 'show' in each]) @@ -4020,7 +4024,8 @@ async def search_tvmaze(self, filename, year, imdbID, tvdbID, meta): results.append(search_resp) if year not in (None, ''): - print(f"Filtering results by year: {year}") + if meta['debug']: + print(f"Filtering results by year: {year}") results = [show for show in results if str(show.get('premiered', '')).startswith(str(year))] seen = set() @@ -4032,7 +4037,8 @@ async def search_tvmaze(self, filename, year, imdbID, tvdbID, meta): results = unique_results if not results: - print("No results found.") + if meta['debug']: + print("No results found.") return tvmazeID, imdbID, tvdbID if meta.get('tvmaze_manual'): @@ -4040,8 +4046,7 @@ async def search_tvmaze(self, filename, year, imdbID, tvdbID, meta): selected_show = next((show for show in results if show['id'] == tvmaze_manual_id), None) if selected_show: tvmazeID = selected_show['id'] - if meta['debug']: - print(f"Selected manual show: {selected_show.get('name')} (TVmaze ID: {tvmazeID})") + print(f"Selected manual show: {selected_show.get('name')} (TVmaze ID: {tvmazeID})") else: print(f"Manual TVmaze ID {tvmaze_manual_id} not found in results.") elif meta['manual_date'] is not None: @@ -4076,8 +4081,9 @@ async def search_tvmaze(self, filename, year, imdbID, tvdbID, meta): print(f"Returning results - TVmaze ID: {tvmazeID}, IMDb ID: {imdbID}, TVDB ID: {tvdbID}") return tvmazeID, imdbID, tvdbID - def _make_tvmaze_request(self, url, params): - print(f"Requesting TVmaze API: {url} with params: {params}") + def _make_tvmaze_request(self, url, params, meta): + if meta['debug']: + print(f"Requesting TVmaze API: {url} with params: {params}") try: resp = requests.get(url, params=params) if resp.ok: From 8fb059a6150ac6b3e67de13134b86606edf885fc Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 10 Dec 2024 14:54:47 +1000 Subject: [PATCH 622/741] Try detect TTY and use minimal progress bars Only file based screens for now, waiting for feedback. 
Might fix https://github.com/Audionut/Upload-Assistant/issues/184 --- src/prep.py | 132 ++++++++++++++++++++++++++++++++-------------------- 1 file changed, 81 insertions(+), 51 deletions(-) diff --git a/src/prep.py b/src/prep.py index 4b7b62292..2842a4cf6 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1477,6 +1477,10 @@ def capture_dvd_screenshot(self, task): return None def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=None, force_screenshots=False, manual_frames=None): + def use_tqdm(): + """Check if the environment supports TTY (interactive progress bar).""" + return sys.stdout.isatty() + if meta['debug']: start_time = time.time() if 'image_list' not in meta: @@ -1493,33 +1497,33 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non if num_screens <= 0: return - with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", encoding='utf-8') as f: - mi = json.load(f) - video_track = mi['media']['track'][1] - length = video_track.get('Duration', mi['media']['track'][0]['Duration']) - width = float(video_track.get('Width')) - height = float(video_track.get('Height')) - par = float(video_track.get('PixelAspectRatio', 1)) - dar = float(video_track.get('DisplayAspectRatio')) - frame_rate = float(video_track.get('FrameRate', 24.0)) - - if par == 1: - sar = w_sar = h_sar = 1 - elif par < 1: - new_height = dar * height - sar = width / new_height - w_sar = 1 - h_sar = sar - else: - sar = w_sar = par - h_sar = 1 - length = round(float(length)) - - if meta.get('ffdebug', False): - loglevel = 'verbose' - else: - loglevel = 'quiet' + try: + with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", encoding='utf-8') as f: + mi = json.load(f) + video_track = mi['media']['track'][1] + length = video_track.get('Duration', mi['media']['track'][0]['Duration']) + width = float(video_track.get('Width')) + height = float(video_track.get('Height')) + par = float(video_track.get('PixelAspectRatio', 1)) + dar = float(video_track.get('DisplayAspectRatio')) + frame_rate = float(video_track.get('FrameRate', 24.0)) + + if par == 1: + sar = w_sar = h_sar = 1 + elif par < 1: + new_height = dar * height + sar = width / new_height + w_sar = 1 + h_sar = sar + else: + sar = w_sar = par + h_sar = 1 + length = round(float(length)) + except (FileNotFoundError, KeyError, ValueError) as e: + console.print(f"[red]Error processing MediaInfo.json: {e}") + return + loglevel = 'verbose' if meta.get('ffdebug', False) else 'quiet' os.chdir(f"{base_dir}/tmp/{folder_id}") if manual_frames: @@ -1546,11 +1550,18 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non if not capture_tasks: console.print("[yellow]All screenshots already exist. Skipping capture process.") else: - with Pool(processes=min(len(capture_tasks), task_limit)) as pool: - for result in tqdm(pool.imap_unordered(self.capture_screenshot, capture_tasks), - total=len(capture_tasks), - desc="Capturing Screenshots"): - capture_results.append(result) + if use_tqdm(): + with tqdm(total=len(capture_tasks), desc="Capturing Screenshots", ascii=True, dynamic_ncols=False) as pbar: + with Pool(processes=min(len(capture_tasks), task_limit)) as pool: + for result in pool.imap_unordered(self.capture_screenshot, capture_tasks): + capture_results.append(result) + pbar.update(1) + else: + console.print("[blue]Non-TTY environment detected. 
Progress bar disabled.") + with Pool(processes=min(len(capture_tasks), task_limit)) as pool: + for i, result in enumerate(pool.imap_unordered(self.capture_screenshot, capture_tasks), 1): + capture_results.append(result) + console.print(f"Processed {i}/{len(capture_tasks)} screenshots") if capture_results and len(capture_results) > num_screens and not force_screenshots: smallest = min(capture_results, key=os.path.getsize) @@ -1559,17 +1570,8 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non os.remove(smallest) capture_results.remove(smallest) - optimize_tasks = [(result, self.config) for result in capture_results if "Error" not in result] - optimize_results = [] - if optimize_tasks: - with Pool(processes=min(len(optimize_tasks), task_limit)) as pool: - for result in tqdm(pool.imap_unordered(self.optimize_image_task, optimize_tasks), - total=len(optimize_tasks), - desc="Optimizing Images"): - optimize_results.append(result) - valid_results = [] - for image_path in optimize_results: + for image_path in capture_results: if "Error" in image_path: console.print(f"[red]{image_path}") continue @@ -1601,7 +1603,22 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non else: valid_results.append(image_path) - for image_path in valid_results: + optimize_tasks = [(result, self.config) for result in valid_results if "Error" not in result] + optimize_results = [] + if optimize_tasks: + if use_tqdm(): + with tqdm(total=len(optimize_tasks), desc="Optimizing Images", ascii=True, dynamic_ncols=False) as pbar: + with Pool(processes=min(len(optimize_tasks), task_limit)) as pool: + for result in pool.imap_unordered(self.optimize_image_task, optimize_tasks): + optimize_results.append(result) + pbar.update(1) + else: + with Pool(processes=min(len(optimize_tasks), task_limit)) as pool: + for i, result in enumerate(pool.imap_unordered(self.optimize_image_task, optimize_tasks), 1): + optimize_results.append(result) + console.print(f"Optimized {i}/{len(optimize_tasks)} images") + + for image_path in optimize_results: img_dict = { 'img_url': image_path, 'raw_url': image_path, @@ -1609,8 +1626,7 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non } meta['image_list'].append(img_dict) - valid_results_count = len(valid_results) - console.print(f"[green]Successfully captured {valid_results_count} screenshots.") + console.print(f"[green]Successfully captured {len(optimize_results)} screenshots.") if meta['debug']: finish_time = time.time() @@ -3039,8 +3055,13 @@ def upload_image_task(self, args): } def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=False, max_retries=3): + def use_tqdm(): + """Check if the environment supports TTY (interactive progress bar).""" + return sys.stdout.isatty() + if meta['debug']: upload_start_time = time.time() + import nest_asyncio nest_asyncio.apply() os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") @@ -3078,7 +3099,7 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i upload_tasks = [(image, img_host, self.config, meta) for image in image_glob[:images_needed]] host_limits = { - "oeimg": 1, + "oeimg": 6, "ptscreens": 1, "lensdump": 1, } @@ -3087,13 +3108,22 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i try: with Pool(processes=max(1, min(len(upload_tasks), pool_size))) as pool: - results = list( - tqdm( - pool.imap_unordered(self.upload_image_task, upload_tasks), - 
total=len(upload_tasks),
-                        desc=f"Uploading Images to {img_host}"
+                if use_tqdm():
+                    results = list(
+                        tqdm(
+                            pool.imap_unordered(self.upload_image_task, upload_tasks),
+                            total=len(upload_tasks),
+                            desc=f"Uploading Images to {img_host}",
+                            ascii=True,
+                            dynamic_ncols=False
+                        )
                     )
-                )
+                else:
+                    console.print(f"[blue]Non-TTY environment detected. Progress bar disabled. Uploading images to {img_host}.")
+                    results = []
+                    for i, result in enumerate(pool.imap_unordered(self.upload_image_task, upload_tasks), 1):
+                        results.append(result)
+                        console.print(f"Uploaded {i}/{len(upload_tasks)} images to {img_host}")
         except KeyboardInterrupt:
             console.print("[red]Upload process interrupted by user. Exiting...")
             pool.terminate()

From da68274922807e49ae64a20dee59c0c7fbe0128e Mon Sep 17 00:00:00 2001
From: Audionut
Date: Tue, 10 Dec 2024 17:05:18 +1000
Subject: [PATCH 623/741] UNIT3D set ID to none if 0

The required string conversion for the IMDB ID (needed because UNIT3D has
dorky IMDB handling) means this can return a string of 0's instead of None,
which then sets the IMDB meta to a string of 0's.

fixes: https://github.com/Audionut/Upload-Assistant/issues/185
---
 src/trackers/COMMON.py | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py
index 94c032785..272a2ed55 100644
--- a/src/trackers/COMMON.py
+++ b/src/trackers/COMMON.py
@@ -479,6 +479,10 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, meta, id=N
                 mal = attributes.get('mal_id')
                 imdb = attributes.get('imdb_id')
                 infohash = attributes.get('info_hash')
+                tmdb = None if tmdb == 0 else tmdb
+                tvdb = None if tvdb == 0 else tvdb
+                mal = None if mal == 0 else mal
+                imdb = None if imdb == 0 else imdb
             else:
                 # Handle response when searching by ID
                 if id and not data:
@@ -492,7 +496,10 @@ async def unit3d_torrent_info(self, tracker, torrent_url, search_url, meta, id=N
                 mal = attributes.get('mal_id')
                 imdb = attributes.get('imdb_id')
                 infohash = attributes.get('info_hash')
-
+                tmdb = None if tmdb == 0 else tmdb
+                tvdb = None if tvdb == 0 else tvdb
+                mal = None if mal == 0 else mal
+                imdb = None if imdb == 0 else imdb
             # Handle file name extraction
             files = attributes.get('files', [])
             if files:

From 31026a691c3e7decad0fbe5027c75c5beb449c7d Mon Sep 17 00:00:00 2001
From: Audionut
Date: Tue, 10 Dec 2024 18:50:51 +1000
Subject: [PATCH 624/741] fix capture+1 and validate after optimize

---
 src/prep.py | 87 +++++++++++++++++++++++++++++------------------------
 1 file changed, 48 insertions(+), 39 deletions(-)

diff --git a/src/prep.py b/src/prep.py
index 2842a4cf6..27cf4f0e4 100644
--- a/src/prep.py
+++ b/src/prep.py
@@ -1540,38 +1540,62 @@ def use_tqdm():
         capture_results = []
         task_limit = int(meta.get('task_limit', os.cpu_count()))

-        for i in range(num_screens + 1):
+        existing_images = 0
+        for i in range(num_screens):
             image_path = os.path.abspath(f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png")
-            if not os.path.exists(image_path) or meta.get('retake', False):
-                capture_tasks.append((path, ss_times[i], image_path, width, height, w_sar, h_sar, loglevel))
-            elif meta['debug']:
-                console.print(f"[yellow]Skipping existing screenshot: {image_path}")
+            if os.path.exists(image_path) and not meta.get('retake', False):
+                existing_images += 1

-        if not capture_tasks:
-            console.print("[yellow]All screenshots already exist. Skipping capture process.")
+        if existing_images == num_screens and not meta.get('retake', False):
+            console.print("[yellow]The correct number of screenshots already exists. 
Skipping capture process.") else: - if use_tqdm(): - with tqdm(total=len(capture_tasks), desc="Capturing Screenshots", ascii=True, dynamic_ncols=False) as pbar: + for i in range(num_screens + 1): + image_path = os.path.abspath(f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png") + if not os.path.exists(image_path) or meta.get('retake', False): + capture_tasks.append((path, ss_times[i], image_path, width, height, w_sar, h_sar, loglevel)) + elif meta['debug']: + console.print(f"[yellow]Skipping existing screenshot: {image_path}") + + if not capture_tasks: + console.print("[yellow]All screenshots already exist. Skipping capture process.") + else: + if use_tqdm(): + with tqdm(total=len(capture_tasks), desc="Capturing Screenshots", ascii=True, dynamic_ncols=False) as pbar: + with Pool(processes=min(len(capture_tasks), task_limit)) as pool: + for result in pool.imap_unordered(self.capture_screenshot, capture_tasks): + capture_results.append(result) + pbar.update(1) + else: + console.print("[blue]Non-TTY environment detected. Progress bar disabled.") with Pool(processes=min(len(capture_tasks), task_limit)) as pool: - for result in pool.imap_unordered(self.capture_screenshot, capture_tasks): + for i, result in enumerate(pool.imap_unordered(self.capture_screenshot, capture_tasks), 1): capture_results.append(result) + console.print(f"Processed {i}/{len(capture_tasks)} screenshots") + + if capture_results and (len(capture_results) + existing_images) > num_screens and not force_screenshots: + smallest = min(capture_results, key=os.path.getsize) + if meta['debug']: + console.print(f"[yellow]Removing smallest image: {smallest} ({os.path.getsize(smallest)} bytes)[/yellow]") + os.remove(smallest) + capture_results.remove(smallest) + + optimize_tasks = [(result, self.config) for result in capture_results if "Error" not in result] + optimize_results = [] + if optimize_tasks: + if use_tqdm(): + with tqdm(total=len(optimize_tasks), desc="Optimizing Images", ascii=True, dynamic_ncols=False) as pbar: + with Pool(processes=min(len(optimize_tasks), task_limit)) as pool: + for result in pool.imap_unordered(self.optimize_image_task, optimize_tasks): + optimize_results.append(result) pbar.update(1) else: - console.print("[blue]Non-TTY environment detected. 
Progress bar disabled.") - with Pool(processes=min(len(capture_tasks), task_limit)) as pool: - for i, result in enumerate(pool.imap_unordered(self.capture_screenshot, capture_tasks), 1): - capture_results.append(result) - console.print(f"Processed {i}/{len(capture_tasks)} screenshots") - - if capture_results and len(capture_results) > num_screens and not force_screenshots: - smallest = min(capture_results, key=os.path.getsize) - if meta['debug']: - console.print(f"[yellow]Removing smallest image: {smallest} ({os.path.getsize(smallest)} bytes)[/yellow]") - os.remove(smallest) - capture_results.remove(smallest) + with Pool(processes=min(len(optimize_tasks), task_limit)) as pool: + for i, result in enumerate(pool.imap_unordered(self.optimize_image_task, optimize_tasks), 1): + optimize_results.append(result) + console.print(f"Optimized {i}/{len(optimize_tasks)} images") valid_results = [] - for image_path in capture_results: + for image_path in optimize_results: if "Error" in image_path: console.print(f"[red]{image_path}") continue @@ -1603,22 +1627,7 @@ def use_tqdm(): else: valid_results.append(image_path) - optimize_tasks = [(result, self.config) for result in valid_results if "Error" not in result] - optimize_results = [] - if optimize_tasks: - if use_tqdm(): - with tqdm(total=len(optimize_tasks), desc="Optimizing Images", ascii=True, dynamic_ncols=False) as pbar: - with Pool(processes=min(len(optimize_tasks), task_limit)) as pool: - for result in pool.imap_unordered(self.optimize_image_task, optimize_tasks): - optimize_results.append(result) - pbar.update(1) - else: - with Pool(processes=min(len(optimize_tasks), task_limit)) as pool: - for i, result in enumerate(pool.imap_unordered(self.optimize_image_task, optimize_tasks), 1): - optimize_results.append(result) - console.print(f"Optimized {i}/{len(optimize_tasks)} images") - - for image_path in optimize_results: + for image_path in valid_results: img_dict = { 'img_url': image_path, 'raw_url': image_path, From 628a4b99e3103b903b9e200fb3e2b83b5bfe0e68 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 10 Dec 2024 19:16:17 +1000 Subject: [PATCH 625/741] Fix exception when vobset track is missing --- src/discparse.py | 32 +++++++++++++++++++++++--------- 1 file changed, 23 insertions(+), 9 deletions(-) diff --git a/src/discparse.py b/src/discparse.py index b4e6b246e..af0826037 100644 --- a/src/discparse.py +++ b/src/discparse.py @@ -213,10 +213,8 @@ async def get_dvdinfo(self, discs): os.chdir(path) files = glob("VTS_*.VOB") files.sort() - # Switch to ordered dictionary filesdict = OrderedDict() main_set = [] - # Use ordered dictionary in place of list of lists for file in files: trimmed = file[4:] if trimmed[:2] not in filesdict: @@ -224,16 +222,32 @@ async def get_dvdinfo(self, discs): filesdict[trimmed[:2]].append(trimmed) main_set_duration = 0 for vob_set in filesdict.values(): - # Parse media info for this VOB set - vob_set_mi = MediaInfo.parse(f"VTS_{vob_set[0][:2]}_0.IFO", output='JSON') - vob_set_mi = json.loads(vob_set_mi) - vob_set_duration = vob_set_mi['media']['track'][1]['Duration'] + try: + vob_set_mi = MediaInfo.parse(f"VTS_{vob_set[0][:2]}_0.IFO", output='JSON') + vob_set_mi = json.loads(vob_set_mi) + tracks = vob_set_mi.get('media', {}).get('track', []) + if len(tracks) > 1: + vob_set_duration = tracks[1].get('Duration', "Unknown") + else: + console.print("Warning: Expected track[1] is missing.") + vob_set_duration = "Unknown" + + except Exception as e: + console.print(f"Error processing VOB set: {e}") + vob_set_duration = 
"Unknown" - # If the duration of the new vob set > main set by more than 10% then it's our new main set + if vob_set_duration == "Unknown" or not vob_set_duration.replace('.', '', 1).isdigit(): + console.print(f"Skipping VOB set due to invalid duration: {vob_set_duration}") + continue + + vob_set_duration_float = float(vob_set_duration) + + # If the duration of the new vob set > main set by more than 10%, it's the new main set # This should make it so TV shows pick the first episode - if (float(vob_set_duration) * 1.00) > (float(main_set_duration) * 1.10) or len(main_set) < 1: + if (vob_set_duration_float * 1.00) > (float(main_set_duration) * 1.10) or len(main_set) < 1: main_set = vob_set - main_set_duration = vob_set_duration + main_set_duration = vob_set_duration_float + each['main_set'] = main_set set = main_set[0][:2] each['vob'] = vob = f"{path}/VTS_{set}_1.VOB" From d37a858d35cb0642e6b974b449ca2c71058eef5f Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 11 Dec 2024 10:25:08 +1000 Subject: [PATCH 626/741] try close and join --- src/prep.py | 40 ++++++++++++++++++++++++++++------------ 1 file changed, 28 insertions(+), 12 deletions(-) diff --git a/src/prep.py b/src/prep.py index 27cf4f0e4..3e2a3cc07 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1562,15 +1562,23 @@ def use_tqdm(): if use_tqdm(): with tqdm(total=len(capture_tasks), desc="Capturing Screenshots", ascii=True, dynamic_ncols=False) as pbar: with Pool(processes=min(len(capture_tasks), task_limit)) as pool: - for result in pool.imap_unordered(self.capture_screenshot, capture_tasks): - capture_results.append(result) - pbar.update(1) + try: + for result in pool.imap_unordered(self.capture_screenshot, capture_tasks): + capture_results.append(result) + pbar.update(1) + finally: + pool.close() + pool.join() else: console.print("[blue]Non-TTY environment detected. 
Progress bar disabled.") with Pool(processes=min(len(capture_tasks), task_limit)) as pool: - for i, result in enumerate(pool.imap_unordered(self.capture_screenshot, capture_tasks), 1): - capture_results.append(result) - console.print(f"Processed {i}/{len(capture_tasks)} screenshots") + try: + for i, result in enumerate(pool.imap_unordered(self.capture_screenshot, capture_tasks), 1): + capture_results.append(result) + console.print(f"Processed {i}/{len(capture_tasks)} screenshots") + finally: + pool.close() + pool.join() if capture_results and (len(capture_results) + existing_images) > num_screens and not force_screenshots: smallest = min(capture_results, key=os.path.getsize) @@ -1585,14 +1593,22 @@ def use_tqdm(): if use_tqdm(): with tqdm(total=len(optimize_tasks), desc="Optimizing Images", ascii=True, dynamic_ncols=False) as pbar: with Pool(processes=min(len(optimize_tasks), task_limit)) as pool: - for result in pool.imap_unordered(self.optimize_image_task, optimize_tasks): - optimize_results.append(result) - pbar.update(1) + try: + for result in pool.imap_unordered(self.optimize_image_task, optimize_tasks): + optimize_results.append(result) + pbar.update(1) + finally: + pool.close() + pool.join() else: with Pool(processes=min(len(optimize_tasks), task_limit)) as pool: - for i, result in enumerate(pool.imap_unordered(self.optimize_image_task, optimize_tasks), 1): - optimize_results.append(result) - console.print(f"Optimized {i}/{len(optimize_tasks)} images") + try: + for i, result in enumerate(pool.imap_unordered(self.optimize_image_task, optimize_tasks), 1): + optimize_results.append(result) + console.print(f"Optimized {i}/{len(optimize_tasks)} images") + finally: + pool.close() + pool.join() valid_results = [] for image_path in optimize_results: From 057a99fea30ff0e6953f52fb19e0b9e551d77794 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 11 Dec 2024 10:31:47 +1000 Subject: [PATCH 627/741] add get-context --- src/prep.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/prep.py b/src/prep.py index 3e2a3cc07..63972e621 100644 --- a/src/prep.py +++ b/src/prep.py @@ -18,6 +18,7 @@ from src.discparse import DiscParse import multiprocessing from multiprocessing import Pool + from multiprocessing import get_context from tqdm import tqdm import os import re @@ -1561,7 +1562,7 @@ def use_tqdm(): else: if use_tqdm(): with tqdm(total=len(capture_tasks), desc="Capturing Screenshots", ascii=True, dynamic_ncols=False) as pbar: - with Pool(processes=min(len(capture_tasks), task_limit)) as pool: + with get_context("spawn").Pool(processes=min(len(capture_tasks), task_limit)) as pool: try: for result in pool.imap_unordered(self.capture_screenshot, capture_tasks): capture_results.append(result) @@ -1571,7 +1572,7 @@ def use_tqdm(): pool.join() else: console.print("[blue]Non-TTY environment detected. 
Progress bar disabled.") - with Pool(processes=min(len(capture_tasks), task_limit)) as pool: + with get_context("spawn").Pool(processes=min(len(capture_tasks), task_limit)) as pool: try: for i, result in enumerate(pool.imap_unordered(self.capture_screenshot, capture_tasks), 1): capture_results.append(result) @@ -1592,7 +1593,7 @@ def use_tqdm(): if optimize_tasks: if use_tqdm(): with tqdm(total=len(optimize_tasks), desc="Optimizing Images", ascii=True, dynamic_ncols=False) as pbar: - with Pool(processes=min(len(optimize_tasks), task_limit)) as pool: + with get_context("spawn").Pool(processes=min(len(optimize_tasks), task_limit)) as pool: try: for result in pool.imap_unordered(self.optimize_image_task, optimize_tasks): optimize_results.append(result) @@ -1601,7 +1602,7 @@ def use_tqdm(): pool.close() pool.join() else: - with Pool(processes=min(len(optimize_tasks), task_limit)) as pool: + with get_context("spawn").Pool(processes=min(len(optimize_tasks), task_limit)) as pool: try: for i, result in enumerate(pool.imap_unordered(self.optimize_image_task, optimize_tasks), 1): optimize_results.append(result) From c71eb231bc48bc786847ce09a86108d360ae21dc Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 11 Dec 2024 10:39:57 +1000 Subject: [PATCH 628/741] try ProcessPoolExecutor --- src/prep.py | 57 ++++++++++++++++++++++++++++++----------------------- 1 file changed, 32 insertions(+), 25 deletions(-) diff --git a/src/prep.py b/src/prep.py index 63972e621..f44a8a37b 100644 --- a/src/prep.py +++ b/src/prep.py @@ -18,7 +18,7 @@ from src.discparse import DiscParse import multiprocessing from multiprocessing import Pool - from multiprocessing import get_context + from concurrent.futures import ProcessPoolExecutor, as_completed from tqdm import tqdm import os import re @@ -1562,24 +1562,27 @@ def use_tqdm(): else: if use_tqdm(): with tqdm(total=len(capture_tasks), desc="Capturing Screenshots", ascii=True, dynamic_ncols=False) as pbar: - with get_context("spawn").Pool(processes=min(len(capture_tasks), task_limit)) as pool: - try: - for result in pool.imap_unordered(self.capture_screenshot, capture_tasks): + with ProcessPoolExecutor(max_workers=min(len(capture_tasks), task_limit)) as executor: + futures = {executor.submit(self.capture_screenshot, *task): task for task in capture_tasks} + for future in as_completed(futures): + try: + result = future.result() capture_results.append(result) + except Exception as e: + console.print(f"[red]Error during screenshot capture: {e}") + finally: pbar.update(1) - finally: - pool.close() - pool.join() else: console.print("[blue]Non-TTY environment detected. 
Progress bar disabled.") - with get_context("spawn").Pool(processes=min(len(capture_tasks), task_limit)) as pool: - try: - for i, result in enumerate(pool.imap_unordered(self.capture_screenshot, capture_tasks), 1): + with ProcessPoolExecutor(max_workers=min(len(capture_tasks), task_limit)) as executor: + futures = {executor.submit(self.capture_screenshot, *task): task for task in capture_tasks} + for i, future in enumerate(as_completed(futures), 1): + try: + result = future.result() capture_results.append(result) console.print(f"Processed {i}/{len(capture_tasks)} screenshots") - finally: - pool.close() - pool.join() + except Exception as e: + console.print(f"[red]Error during screenshot capture: {e}") if capture_results and (len(capture_results) + existing_images) > num_screens and not force_screenshots: smallest = min(capture_results, key=os.path.getsize) @@ -1588,28 +1591,32 @@ def use_tqdm(): os.remove(smallest) capture_results.remove(smallest) + # Optimization process optimize_tasks = [(result, self.config) for result in capture_results if "Error" not in result] optimize_results = [] if optimize_tasks: if use_tqdm(): with tqdm(total=len(optimize_tasks), desc="Optimizing Images", ascii=True, dynamic_ncols=False) as pbar: - with get_context("spawn").Pool(processes=min(len(optimize_tasks), task_limit)) as pool: - try: - for result in pool.imap_unordered(self.optimize_image_task, optimize_tasks): + with ProcessPoolExecutor(max_workers=min(len(optimize_tasks), task_limit)) as executor: + futures = {executor.submit(self.optimize_image_task, *task): task for task in optimize_tasks} + for future in as_completed(futures): + try: + result = future.result() optimize_results.append(result) + except Exception as e: + console.print(f"[red]Error during image optimization: {e}") + finally: pbar.update(1) - finally: - pool.close() - pool.join() else: - with get_context("spawn").Pool(processes=min(len(optimize_tasks), task_limit)) as pool: - try: - for i, result in enumerate(pool.imap_unordered(self.optimize_image_task, optimize_tasks), 1): + with ProcessPoolExecutor(max_workers=min(len(optimize_tasks), task_limit)) as executor: + futures = {executor.submit(self.optimize_image_task, *task): task for task in optimize_tasks} + for i, future in enumerate(as_completed(futures), 1): + try: + result = future.result() optimize_results.append(result) console.print(f"Optimized {i}/{len(optimize_tasks)} images") - finally: - pool.close() - pool.join() + except Exception as e: + console.print(f"[red]Error during image optimization: {e}") valid_results = [] for image_path in optimize_results: From 23ccf8cc4685a310689ebfaf8df4fb4fdc587da0 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 11 Dec 2024 10:53:41 +1000 Subject: [PATCH 629/741] Revert "try ProcessPoolExecutor" This reverts commit c71eb231bc48bc786847ce09a86108d360ae21dc. 
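For reference, the two pooling styles the last two commits toggled between, as a minimal self-contained sketch (square() and the task list are stand-ins for the real capture/optimize workers; results and error handling, not behavior, are what differ):

from concurrent.futures import ProcessPoolExecutor, as_completed
from multiprocessing import get_context

def square(x):
    return x * x

if __name__ == "__main__":
    tasks = [1, 2, 3, 4]

    # Futures style (the reverted commit): per-task error handling via future.result()
    with ProcessPoolExecutor(max_workers=2) as executor:
        futures = [executor.submit(square, t) for t in tasks]
        futures_results = [f.result() for f in as_completed(futures)]

    # Pool style (what this revert restores): imap_unordered with explicit close/join
    with get_context("spawn").Pool(processes=2) as pool:
        try:
            pool_results = list(pool.imap_unordered(square, tasks))
        finally:
            pool.close()
            pool.join()

    print(sorted(futures_results), sorted(pool_results))  # [1, 4, 9, 16] twice
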
--- src/prep.py | 57 +++++++++++++++++++++++------------------------------ 1 file changed, 25 insertions(+), 32 deletions(-) diff --git a/src/prep.py b/src/prep.py index f44a8a37b..63972e621 100644 --- a/src/prep.py +++ b/src/prep.py @@ -18,7 +18,7 @@ from src.discparse import DiscParse import multiprocessing from multiprocessing import Pool - from concurrent.futures import ProcessPoolExecutor, as_completed + from multiprocessing import get_context from tqdm import tqdm import os import re @@ -1562,27 +1562,24 @@ def use_tqdm(): else: if use_tqdm(): with tqdm(total=len(capture_tasks), desc="Capturing Screenshots", ascii=True, dynamic_ncols=False) as pbar: - with ProcessPoolExecutor(max_workers=min(len(capture_tasks), task_limit)) as executor: - futures = {executor.submit(self.capture_screenshot, *task): task for task in capture_tasks} - for future in as_completed(futures): - try: - result = future.result() + with get_context("spawn").Pool(processes=min(len(capture_tasks), task_limit)) as pool: + try: + for result in pool.imap_unordered(self.capture_screenshot, capture_tasks): capture_results.append(result) - except Exception as e: - console.print(f"[red]Error during screenshot capture: {e}") - finally: pbar.update(1) + finally: + pool.close() + pool.join() else: console.print("[blue]Non-TTY environment detected. Progress bar disabled.") - with ProcessPoolExecutor(max_workers=min(len(capture_tasks), task_limit)) as executor: - futures = {executor.submit(self.capture_screenshot, *task): task for task in capture_tasks} - for i, future in enumerate(as_completed(futures), 1): - try: - result = future.result() + with get_context("spawn").Pool(processes=min(len(capture_tasks), task_limit)) as pool: + try: + for i, result in enumerate(pool.imap_unordered(self.capture_screenshot, capture_tasks), 1): capture_results.append(result) console.print(f"Processed {i}/{len(capture_tasks)} screenshots") - except Exception as e: - console.print(f"[red]Error during screenshot capture: {e}") + finally: + pool.close() + pool.join() if capture_results and (len(capture_results) + existing_images) > num_screens and not force_screenshots: smallest = min(capture_results, key=os.path.getsize) @@ -1591,32 +1588,28 @@ def use_tqdm(): os.remove(smallest) capture_results.remove(smallest) - # Optimization process optimize_tasks = [(result, self.config) for result in capture_results if "Error" not in result] optimize_results = [] if optimize_tasks: if use_tqdm(): with tqdm(total=len(optimize_tasks), desc="Optimizing Images", ascii=True, dynamic_ncols=False) as pbar: - with ProcessPoolExecutor(max_workers=min(len(optimize_tasks), task_limit)) as executor: - futures = {executor.submit(self.optimize_image_task, *task): task for task in optimize_tasks} - for future in as_completed(futures): - try: - result = future.result() + with get_context("spawn").Pool(processes=min(len(optimize_tasks), task_limit)) as pool: + try: + for result in pool.imap_unordered(self.optimize_image_task, optimize_tasks): optimize_results.append(result) - except Exception as e: - console.print(f"[red]Error during image optimization: {e}") - finally: pbar.update(1) + finally: + pool.close() + pool.join() else: - with ProcessPoolExecutor(max_workers=min(len(optimize_tasks), task_limit)) as executor: - futures = {executor.submit(self.optimize_image_task, *task): task for task in optimize_tasks} - for i, future in enumerate(as_completed(futures), 1): - try: - result = future.result() + with get_context("spawn").Pool(processes=min(len(optimize_tasks), 
task_limit)) as pool:
+                        try:
+                            for i, result in enumerate(pool.imap_unordered(self.optimize_image_task, optimize_tasks), 1):
                                 optimize_results.append(result)
                                 console.print(f"Optimized {i}/{len(optimize_tasks)} images")
-                        except Exception as e:
-                            console.print(f"[red]Error during image optimization: {e}")
+                        finally:
+                            pool.close()
+                            pool.join()

         valid_results = []
         for image_path in optimize_results:

From 1a9425c424d7cbb3980289b15ee7326671393cfe Mon Sep 17 00:00:00 2001
From: Audionut
Date: Wed, 11 Dec 2024 13:12:59 +1000
Subject: [PATCH 630/741] Dupe checking encoder matching

Files might have the same resolution and be flagged as a potential dupe,
but then pass dupe checking because the encoder does not match, even when
the encoder simply isn't in the filename rather than being an actual
mismatch.

This change requires the encoder to be an actual match before a release is
flagged as a potential dupe. Meaning files without an encoder in their name
bypass this check (they won't get erroneously flagged as not a dupe),
getting caught by other checks only, such as resolution.

fixes https://github.com/Audionut/Upload-Assistant/issues/189
---
 src/trackers/COMMON.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py
index 272a2ed55..44f0156a8 100644
--- a/src/trackers/COMMON.py
+++ b/src/trackers/COMMON.py
@@ -737,9 +737,9 @@ def process_exclusion(each):
                 log_exclusion("season/episode mismatch", each)
                 return True

-            if normalized_encoder and normalized_encoder not in normalized:
+            if normalized_encoder and normalized_encoder in normalized:
                 log_exclusion(f"Encoder '{has_encoder_in_name}' mismatch", each)
-                return True
+                return False

         console.log(f"[debug] Passed all checks: {each}")
         return False

From ecf8c2088922e90d689572ff769762d57ecf2eec Mon Sep 17 00:00:00 2001
From: Audionut
Date: Wed, 11 Dec 2024 14:35:36 +1000
Subject: [PATCH 631/741] Cleanup BDMV capture tasks

---
 src/prep.py | 20 +++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)

diff --git a/src/prep.py b/src/prep.py
index 27cf4f0e4..6d389b5b5 100644
--- a/src/prep.py
+++ b/src/prep.py
@@ -1227,12 +1227,17 @@ def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs,
         os.chdir(f"{base_dir}/tmp/{folder_id}")
         existing_screens = glob.glob(f"{sanitized_filename}-*.png")

-        if len(existing_screens) >= num_screens:
-            console.print('[bold green]Reusing screenshots')
+        total_existing = len(existing_screens) + len(existing_images)
+        num_screens = max(0, self.screens - total_existing)
+
+        if num_screens == 0:
+            console.print('[bold green]Reusing existing screenshots. No additional screenshots needed.')
             return

-        console.print("[bold yellow]Saving Screens...")
+        if meta['debug']:
+            console.print(f"[bold yellow]Saving Screens... 
Total needed: {self.screens}, Existing: {total_existing}, To capture: {num_screens}") capture_results = [] + capture_tasks = [] task_limit = int(meta.get('task_limit', os.cpu_count())) if use_vs: @@ -1245,11 +1250,12 @@ def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs, loglevel = 'quiet' ss_times = self.valid_ss_time([], num_screens + 1, length) + existing_indices = {int(p.split('-')[-1].split('.')[0]) for p in existing_screens} capture_tasks = [ ( file, ss_times[i], - os.path.abspath(f"{base_dir}/tmp/{folder_id}/{sanitized_filename}-{i}.png"), + os.path.abspath(f"{base_dir}/tmp/{folder_id}/{sanitized_filename}-{len(existing_indices) + i}.png"), keyframe, loglevel ) @@ -1265,13 +1271,13 @@ def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs, ) ) if capture_results: - if len(capture_results) > num_screens: + if len(capture_tasks) > num_screens: smallest = min(capture_results, key=os.path.getsize) if meta['debug']: console.print(f"[yellow]Removing smallest image: {smallest} ({os.path.getsize(smallest)} bytes)[/yellow]") os.remove(smallest) capture_results.remove(smallest) - + optimized_results = [] optimize_tasks = [(result, self.config) for result in capture_results if result and os.path.exists(result)] with Pool(processes=min(len(optimize_tasks), task_limit)) as pool: optimized_results = list( @@ -1316,7 +1322,7 @@ def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs, } meta['image_list'].append(img_dict) - console.print(f"[green]Successfully captured {len(meta['image_list'])} screenshots.") + console.print(f"[green]Successfully captured {len(valid_results)} screenshots.") def capture_disc_task(self, task): file, ss_time, image_path, keyframe, loglevel = task From 12b05bd56ed8c651ed03204a1305ff1808f451e2 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 11 Dec 2024 18:54:14 +1000 Subject: [PATCH 632/741] Push changes to other screen functions --- src/prep.py | 179 ++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 138 insertions(+), 41 deletions(-) diff --git a/src/prep.py b/src/prep.py index 18b8535ae..01f52bf83 100644 --- a/src/prep.py +++ b/src/prep.py @@ -17,7 +17,6 @@ import traceback from src.discparse import DiscParse import multiprocessing - from multiprocessing import Pool from multiprocessing import get_context from tqdm import tqdm import os @@ -147,7 +146,10 @@ async def check_and_collect(image_dict): image = Image.open(BytesIO(image_content)) vertical_resolution = image.height lower_bound = expected_vertical_resolution * 0.70 # 30% below - upper_bound = expected_vertical_resolution * 1.00 + if meta['is_disc'] == "DVD": + upper_bound = expected_vertical_resolution * 1.30 + else: + upper_bound = expected_vertical_resolution * 1.00 if not (lower_bound <= vertical_resolution <= upper_bound): console.print( @@ -1263,14 +1265,21 @@ def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs, for i in range(num_screens + 1) ] - with Pool(processes=min(len(capture_tasks), task_limit)) as pool: - capture_results = list( - tqdm( - pool.imap_unordered(self.capture_disc_task, capture_tasks), - total=len(capture_tasks), - desc="Capturing Screenshots" + with get_context("spawn").Pool(processes=min(len(capture_tasks), task_limit)) as pool: + try: + capture_results = list( + tqdm( + pool.imap_unordered(self.capture_disc_task, capture_tasks), + total=len(capture_tasks), + desc="Capturing Screenshots", + ascii=True, + dynamic_ncols=False + ) ) - ) + finally: + pool.close() + 
pool.join() + if capture_results: if len(capture_tasks) > num_screens: smallest = min(capture_results, key=os.path.getsize) @@ -1280,14 +1289,20 @@ def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs, capture_results.remove(smallest) optimized_results = [] optimize_tasks = [(result, self.config) for result in capture_results if result and os.path.exists(result)] - with Pool(processes=min(len(optimize_tasks), task_limit)) as pool: - optimized_results = list( - tqdm( - pool.imap_unordered(self.optimize_image_task, optimize_tasks), - total=len(optimize_tasks), - desc="Optimizing Images" + with get_context("spawn").Pool(processes=min(len(optimize_tasks), task_limit)) as pool: + try: + optimized_results = list( + tqdm( + pool.imap_unordered(self.optimize_image_task, optimize_tasks), + total=len(optimize_tasks), + desc="Optimizing Images", + ascii=True, + dynamic_ncols=False + ) ) - ) + finally: + pool.close() + pool.join() valid_results = [] for image_path in optimized_results: @@ -1432,8 +1447,12 @@ def _is_vob_good(n, loops, num_screens): input_file = f"{meta['discs'][disc_num]['path']}/VTS_{main_set[i % len(main_set)]}" tasks.append((input_file, image, ss_times[i], meta, width, height, w_sar, h_sar)) - with Pool(processes=min(num_screens + 1, task_limit)) as pool: - results = list(tqdm(pool.imap_unordered(self.capture_dvd_screenshot, tasks), total=len(tasks), desc="Capturing Screenshots")) + with get_context("spawn").Pool(processes=min(num_screens + 1, task_limit)) as pool: + try: + results = list(tqdm(pool.imap_unordered(self.capture_dvd_screenshot, tasks), total=len(tasks), desc="Capturing Screenshots", ascii=True, dynamic_ncols=False)) + finally: + pool.close() + pool.join() if len(glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}/", f"{meta['discs'][disc_num]['name']}-*")) > num_screens: smallest = None @@ -1456,31 +1475,105 @@ def _is_vob_good(n, loops, num_screens): optimize_tasks = [(image, self.config) for image in results if image and os.path.exists(image)] - with Pool(processes=min(len(optimize_tasks), task_limit)) as pool: - optimize_results = list( # noqa F841 - tqdm( - pool.imap_unordered(self.optimize_image_task, optimize_tasks), - total=len(optimize_tasks), - desc="Optimizing Images" + with get_context("spawn").Pool(processes=min(len(optimize_tasks), task_limit)) as pool: + try: + optimize_results = list( # noqa F841 + tqdm( + pool.imap_unordered(self.optimize_image_task, optimize_tasks), + total=len(optimize_tasks), + desc="Optimizing Images", + ascii=True, + dynamic_ncols=False + ) ) - ) + finally: + pool.close() + pool.join() + + valid_results = [] + retry_attempts = 3 + + for image in optimize_results: + if "Error" in image: + console.print(f"[red]{image}") + continue + + retry_cap = False + image_size = os.path.getsize(image) + if image_size <= 120000: + console.print(f"[yellow]Image {image} is incredibly small, retaking.") + retry_cap = True + time.sleep(1) + + if retry_cap: + for attempt in range(1, retry_attempts + 1): + console.print(f"[yellow]Retaking screenshot for: {image} (Attempt {attempt}/{retry_attempts})[/yellow]") + try: + os.remove(image) + except Exception as e: + console.print(f"[red]Failed to delete {image}: {e}[/red]") + break + + image_index = int(image.rsplit('-', 1)[-1].split('.')[0]) + input_file = f"{meta['discs'][disc_num]['path']}/VTS_{main_set[image_index % len(main_set)]}" + adjusted_time = random.uniform(0, voblength) + + try: + self.capture_dvd_screenshot((input_file, image, adjusted_time, meta, width, height, 
w_sar, h_sar)) + retaken_size = os.path.getsize(image) + + if retaken_size > 75000: + console.print(f"[green]Successfully retaken screenshot for: {image} ({retaken_size} bytes)[/green]") + valid_results.append(image) + break + else: + console.print(f"[red]Retaken image {image} is still too small. Retrying...[/red]") + except Exception as e: + console.print(f"[red]Error capturing screenshot for {input_file} at {adjusted_time}: {e}[/red]") + + else: + console.print(f"[red]All retry attempts failed for {image}. Skipping.[/red]") + else: + valid_results.append(image) - valid_results_count = len([r for r in results if r]) - console.print(f"[green]Successfully captured {valid_results_count - 1} screenshots.") + for image in valid_results: + img_dict = { + 'img_url': image, + 'raw_url': image, + 'web_url': image + } + meta['image_list'].append(img_dict) + + console.print(f"[green]Successfully captured {len(optimize_results)} screenshots.") def capture_dvd_screenshot(self, task): input_file, image, seek_time, meta, width, height, w_sar, h_sar = task + if os.path.exists(image): + console.print(f"[green]Screenshot already exists: {image}[/green]") return image + try: loglevel = 'verbose' if meta.get('ffdebug', False) else 'quiet' + media_info = MediaInfo.parse(input_file) + video_duration = next((track.duration for track in media_info.tracks if track.track_type == "Video"), None) + + if video_duration and seek_time > video_duration: + seek_time = max(0, video_duration - 1) + ff = ffmpeg.input(input_file, ss=seek_time) if w_sar != 1 or h_sar != 1: ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) - ff.output(image, vframes=1, pix_fmt="rgb24").overwrite_output().global_args('-loglevel', loglevel).run() - return image if os.path.exists(image) else None - except Exception as e: - console.print(f"[red]Error capturing screenshot for {input_file}: {str(e)}") + + ff.output(image, vframes=1, pix_fmt="rgb24").overwrite_output().global_args('-loglevel', loglevel, '-accurate_seek').run() + if os.path.exists(image): + return image + else: + console.print(f"[red]Screenshot creation failed for {image}[/red]") + return None + + except ffmpeg.Error as e: + console.print(f"[red]Error capturing screenshot for {input_file} at {seek_time}s: {e.stderr.decode()}[/red]") return None def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=None, force_screenshots=False, manual_frames=None): @@ -1658,7 +1751,7 @@ def use_tqdm(): } meta['image_list'].append(img_dict) - console.print(f"[green]Successfully captured {len(optimize_results)} screenshots.") + console.print(f"[green]Successfully captured {len(valid_results)} screenshots.") if meta['debug']: finish_time = time.time() @@ -3139,17 +3232,21 @@ def use_tqdm(): pool_size = host_limits.get(img_host, default_pool_size) try: - with Pool(processes=max(1, min(len(upload_tasks), pool_size))) as pool: + with get_context("spawn").Pool(processes=max(1, min(len(upload_tasks), pool_size))) as pool: if use_tqdm(): - results = list( - tqdm( - pool.imap_unordered(self.upload_image_task, upload_tasks), - total=len(upload_tasks), - desc=f"Uploading Images to {img_host}", - ascii=True, - dynamic_ncols=False + try: + results = list( + tqdm( + pool.imap_unordered(self.upload_image_task, upload_tasks), + total=len(upload_tasks), + desc=f"Uploading Images to {img_host}", + ascii=True, + dynamic_ncols=False + ) ) - ) + finally: + pool.close() + pool.join() else: console.print(f"[blue]Non-TTY environment detected. Progress bar disabled. 
Uploading images to {img_host}.") results = [] From 8ab1ac834ecf4ec8621bab95f79dca132da76500 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 11 Dec 2024 18:58:44 +1000 Subject: [PATCH 633/741] Remove useless console --- src/prep.py | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/src/prep.py b/src/prep.py index 01f52bf83..e66125328 100644 --- a/src/prep.py +++ b/src/prep.py @@ -4144,9 +4144,6 @@ async def imdb_other_meta(self, meta): return meta async def search_tvmaze(self, filename, year, imdbID, tvdbID, meta): - if meta['debug']: - print(f"Starting search_tvmaze with filename: {filename}, year: {year}, imdbID: {imdbID}, tvdbID: {tvdbID}") - try: tvdbID = int(tvdbID) if tvdbID is not None else 0 except ValueError: @@ -4158,23 +4155,15 @@ async def search_tvmaze(self, filename, year, imdbID, tvdbID, meta): if imdbID is None: imdbID = '0' - if meta['debug']: - print(f"Processed inputs - imdbID: {imdbID}, tvdbID: {tvdbID}") if int(tvdbID) != 0: - if meta['debug']: - print(f"Searching TVmaze with TVDB ID: {tvdbID}") tvdb_resp = self._make_tvmaze_request("https://api.tvmaze.com/lookup/shows", {"thetvdb": tvdbID}, meta) if tvdb_resp: results.append(tvdb_resp) if int(imdbID) != 0: - if meta['debug']: - print(f"Searching TVmaze with IMDb ID: {imdbID}") imdb_resp = self._make_tvmaze_request("https://api.tvmaze.com/lookup/shows", {"imdb": f"tt{imdbID}"}, meta) if imdb_resp: results.append(imdb_resp) - if meta['debug']: - print(f"Searching TVmaze with filename: {filename}") search_resp = self._make_tvmaze_request("https://api.tvmaze.com/search/shows", {"q": filename}, meta) if search_resp: if isinstance(search_resp, list): @@ -4183,8 +4172,6 @@ async def search_tvmaze(self, filename, year, imdbID, tvdbID, meta): results.append(search_resp) if year not in (None, ''): - if meta['debug']: - print(f"Filtering results by year: {year}") results = [show for show in results if str(show.get('premiered', '')).startswith(str(year))] seen = set() From d0dbcdbe043278f683f0032e88a28d93e6fd89a6 Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 11 Dec 2024 19:07:28 +1000 Subject: [PATCH 634/741] Upload_screens needs task_limit also --- src/prep.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index e66125328..f1ffb9b72 100644 --- a/src/prep.py +++ b/src/prep.py @@ -3228,7 +3228,7 @@ def use_tqdm(): "ptscreens": 1, "lensdump": 1, } - default_pool_size = os.cpu_count() + default_pool_size = int(meta.get('task_limit', os.cpu_count())) pool_size = host_limits.get(img_host, default_pool_size) try: From cf3fb14545a45795b147464a416caf01f93e80fd Mon Sep 17 00:00:00 2001 From: Khoa Pham Date: Thu, 12 Dec 2024 02:19:44 +0700 Subject: [PATCH 635/741] Update BHD.py Add some spaces for screenshots in description --- src/trackers/BHD.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index 1e3d5a8d7..f91b014f0 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -383,12 +383,18 @@ async def edit_desc(self, meta): else: images = meta['image_list'] if len(images) > 0: - desc.write("[center]") + desc.write("[align=center]") for each in range(len(images[:int(meta['screens'])])): web_url = images[each]['web_url'] img_url = images[each]['img_url'] - desc.write(f"[url={web_url}][img width=350]{img_url}[/img][/url]") - desc.write("[/center]") + if (each == len(images) - 1): + desc.write(f"[url={web_url}][img width=350]{img_url}[/img][/url]") + elif (each + 1) % 2 == 0: + 
desc.write(f"[url={web_url}][img width=350]{img_url}[/img][/url]\n") + desc.write("\n") + else: + desc.write(f"[url={web_url}][img width=350]{img_url}[/img][/url] ") + desc.write("[/align]") desc.write(self.signature) desc.close() return From e7be8c7a53829b3fd1b8ed28b9438ee83039fdb7 Mon Sep 17 00:00:00 2001 From: btTeddy <91010735+btTeddy@users.noreply.github.com> Date: Wed, 11 Dec 2024 23:40:37 +0000 Subject: [PATCH 636/741] Added PTT (#195) * Update upload.py * Update upload.py * Update README.md * Update example-config.py * Update example-config.py * Create PTT.py * Update PTT.py * Update PTT.py * lint --------- Co-authored-by: Audionut --- README.md | 2 +- data/example-config.py | 9 +- src/trackers/PTT.py | 219 +++++++++++++++++++++++++++++++++++++++++ upload.py | 5 +- 4 files changed, 230 insertions(+), 5 deletions(-) create mode 100644 src/trackers/PTT.py diff --git a/README.md b/README.md index b932cc3a7..57516e115 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ A simple tool to take the work out of uploading. - Can re-use existing torrents instead of hashing new - Generates proper name for your upload using Mediainfo/BDInfo and TMDb/IMDb conforming to site rules - Checks for existing releases already on site - - Uploads to ACM/Aither/AL/ANT/BHD/BHDTV/BLU/CBR/FNP/FL/HDB/HDT/HP/HUNO/JPTV/LCD/LST/LT/MTV/NBL/OE/OTW/PSS/PTP/PTER/RF/R4E(limited)/RTF/SHRI/SN/SPD/STC/STT/TLC/THR/TL/TVC/TTG/ULCX/UTP/YOINK + - Uploads to ACM/Aither/AL/ANT/BHD/BHDTV/BLU/CBR/FNP/FL/HDB/HDT/HP/HUNO/JPTV/LCD/LST/LT/MTV/NBL/OE/OTW/PSS/PTP/PTER/PTT/RF/R4E(limited)/RTF/SHRI/SN/SPD/STC/STT/TLC/THR/TL/TVC/TTG/ULCX/UTP/YOINK - Adds to your client with fast resume, seeding instantly (rtorrent/qbittorrent/deluge/watch folder) - ALL WITH MINIMAL INPUT! - Currently works with .mkv/.mp4/Blu-ray/DVD/HD-DVDs diff --git a/data/example-config.py b/data/example-config.py index 989c2d542..32bb850bd 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -90,9 +90,9 @@ "TRACKERS": { # Which trackers do you want to upload to? 
- # Available tracker: ACM, AITHER, AL, ANT, BHD, BHDTV, BLU, CBR, FNP, HDB, HDT, HP, HUNO, LCD, LST, LT, MTV, NBL, OE, OTW, PSS, PTER, PTP, R4E, RF, RTF, SN, STC, STT, THR, TIK, TL, ULCX, UTP, YOINK + # Available tracker: ACM, AITHER, AL, ANT, BHD, BHDTV, BLU, CBR, FNP, HDB, HDT, HP, HUNO, LCD, LST, LT, MTV, NBL, OE, OTW, PSS, PTER, PTP, PTT, R4E, RF, RTF, SN, STC, STT, THR, TIK, TL, ULCX, UTP, YOINK # Remove the trackers from the default_trackers list that are not used, to save being asked everytime - "default_trackers": "ACM, AITHER, AL, ANT, BHD, BHDTV, BLU, CBR, FNP, HDB, HDT, HP, HUNO, LCD, LST, LT, MTV, NBL, OE, OTW, PSS, PTER, PTP, R4E, RF, RTF, SN, STC, STT, THR, TIK, TL, ULCX, UTP, YOINK", + "default_trackers": "ACM, AITHER, AL, ANT, BHD, BHDTV, BLU, CBR, FNP, HDB, HDT, HP, HUNO, LCD, LST, LT, MTV, NBL, OE, OTW, PSS, PTER, PTP, PTT, R4E, RF, RTF, SN, STC, STT, THR, TIK, TL, ULCX, UTP, YOINK", "ACM": { "api_key": "ACM api key", @@ -249,6 +249,11 @@ "password": "", "announce_url": "" }, + "PTT": { + "api_key": "PTT api key", + "announce_url": "https://polishtorrent.top/announce/customannounceurl", + # "anon" : False, + }, "R4E": { "api_key": "R4E api key", "announce_url": "https://racing4everyone.eu/announce/customannounceurl", diff --git a/src/trackers/PTT.py b/src/trackers/PTT.py new file mode 100644 index 000000000..c23e87021 --- /dev/null +++ b/src/trackers/PTT.py @@ -0,0 +1,219 @@ +# -*- coding: utf-8 -*- +# import discord +import asyncio +import requests +from str2bool import str2bool +import platform +import bencodepy +import os +import glob + +from src.trackers.COMMON import COMMON +from src.console import console + + +class PTT(): + """ + Edit for Tracker: + Edit BASE.torrent with announce and source + Check for duplicates + Set type/category IDs + Upload + """ + + def __init__(self, config): + self.config = config + self.tracker = 'PTT' + self.source_flag = 'PTT' + self.upload_url = 'https://polishtorrent.top/api/torrents/upload' + self.search_url = 'https://polishtorrent.top/api/torrents/filter' + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" + self.banned_groups = ['ViP', 'BiRD', 'M@RTiNU$', 'inTGrity', 'CiNEMAET', 'MusicET', 'TeamET', 'R2D2'] + pass + + async def get_cat_id(self, category_name): + category_id = { + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '0') + return category_id + + async def get_type_id(self, type): + type_id = { + 'DISC': '1', + 'REMUX': '2', + 'WEBDL': '4', + 'WEBRIP': '5', + 'HDTV': '6', + 'ENCODE': '3' + }.get(type, '0') + return type_id + + async def get_res_id(self, resolution): + resolution_id = { + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', + '1080p': '3', + '1080i': '4', + '720p': '5', + '576p': '6', + '576i': '7', + '480p': '8', + '480i': '9' + }.get(resolution, '10') + return resolution_id + + async def upload(self, meta, disctype): + common = COMMON(config=self.config) + await common.edit_torrent(meta, self.tracker, self.source_flag) + cat_id = await self.get_cat_id(meta['category']) + type_id = await self.get_type_id(meta['type']) + resolution_id = await self.get_res_id(meta['resolution']) + await common.unit3d_edit_desc(meta, self.tracker, self.signature) + region_id = await common.unit3d_region_ids(meta.get('region')) + distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + anon = 0 + 
else: + anon = 1 + + if meta['bdinfo'] is not None: + mi_dump = None + bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + else: + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + bd_dump = None + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') + files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") + data = { + 'name': meta['name'], + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, + } + # Internal + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: + if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + data['internal'] = 1 + + if region_id != 0: + data['region_id'] = region_id + if distributor_id != 0: + data['distributor_id'] = distributor_id + if meta.get('category') == "TV": + data['season_number'] = meta.get('season_int', '0') + data['episode_number'] = meta.get('episode_int', '0') + headers = { + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' + } + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + } + + if meta['debug'] is False: + response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + try: + console.print(response.json()) + except Exception: + console.print("It may have uploaded, go check") + return + else: + console.print("[cyan]Request Data:") + console.print(data) + open_torrent.close() + + async def search_existing(self, meta, disctype): + dupes = [] + console.print("[yellow]Searching for existing torrents on site...") + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" + } + if meta.get('edition', "") != "": + params['name'] = params['name'] + f" {meta['edition']}" + try: + response = requests.get(url=self.search_url, params=params) + response = response.json() + for each in response['data']: + result = [each][0]['attributes']['name'] + # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() + # if difference >= 0.05: + dupes.append(result) + except Exception: + console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') + await asyncio.sleep(5) + + return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/upload.py b/upload.py index 8b6762e0e..5cba67e79 100644 --- a/upload.py +++ b/upload.py @@ -45,6 +45,7 @@ from src.trackers.ULCX import ULCX from src.trackers.SPD import SPD from src.trackers.YOINK import YOINK +from src.trackers.PTT import PTT import json from pathlib import Path import asyncio @@ -530,7 +531,7 @@ async def do_the_thing(base_dir): common = COMMON(config=config) api_trackers = [ 'ACM', 'AITHER', 'AL', 'BHD', 'BLU', 'CBR', 'FNP', 'HUNO', 'JPTV', 'LCD', 'LST', 'LT', - 'OE', 'OTW', 'PSS', 'RF', 'R4E', 'SHRI', 'STC', 'STT', 'TIK', 'ULCX', 'UTP', 'YOINK' + 'OE', 'OTW', 'PSS', 'RF', 'R4E', 'SHRI', 'STC', 'STT', 'TIK', 'ULCX', 'UTP', 'YOINK', 'PTT' ] other_api_trackers = [ 'ANT', 'BHDTV', 'NBL', 'RTF', 'SN', 'SPD', 'TL', 'TVC' @@ -543,7 +544,7 @@ async def do_the_thing(base_dir): 'FNP': FNP, 'FL': FL, 'HDB': HDB, 'HDT': HDT, 'HP': HP, 'HUNO': HUNO, 'JPTV': JPTV, 'LCD': LCD, 'LST': LST, 'LT': LT, 'MTV': MTV, 'NBL': NBL, 'OE': OE, 'OTW': OTW, 'PSS': PSS, 'PTP': PTP, 'PTER': PTER, 'R4E': R4E, 'RF': RF, 'RTF': RTF, 'SHRI': SHRI, 'SN': SN, 'SPD': SPD, 'STC': STC, 'STT': STT, 'THR': THR, - 'TIK': TIK, 'TL': TL, 'TVC': TVC, 'TTG': TTG, 'ULCX': ULCX, 'UTP': UTP, 'YOINK': YOINK, + 'TIK': TIK, 'TL': TL, 'TVC': TVC, 'TTG': TTG, 'ULCX': ULCX, 'UTP': UTP, 'YOINK': YOINK, 'PTT': PTT, } tracker_capabilities = { From 40f325773b300942c3f14575c79992e0a04b7f58 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 12 Dec 2024 21:02:21 +1000 Subject: [PATCH 637/741] executor --- src/prep.py | 53 +++++++++++++++++++++-------------------------------- 1 file changed, 21 insertions(+), 32 deletions(-) diff --git a/src/prep.py b/src/prep.py index f1ffb9b72..08b775a9d 100644 --- a/src/prep.py +++ b/src/prep.py @@ -19,6 +19,7 @@ import multiprocessing from multiprocessing import get_context from tqdm import tqdm + from concurrent.futures import ProcessPoolExecutor, as_completed import os import re import math @@ -1661,24 +1662,18 @@ def use_tqdm(): else: if use_tqdm(): with tqdm(total=len(capture_tasks), desc="Capturing Screenshots", ascii=True, dynamic_ncols=False) as pbar: - with get_context("spawn").Pool(processes=min(len(capture_tasks), task_limit)) as pool: - try: - for result in pool.imap_unordered(self.capture_screenshot, capture_tasks): 
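# Illustrative sketch of the in-band error convention these worker pools rely
# on: a task returns either an output path or a string starting with "Error",
# and callers filter results with checks like `"Error" not in result`.
# fake_capture() is a hypothetical stand-in for the real
# capture_screenshot/optimize_image_task workers.
def fake_capture(task):
    path, should_fail = task
    if should_fail:
        return f"Error: capture failed for {path}"
    return path

results = [fake_capture(t) for t in [("shot-0.png", False), ("shot-1.png", True)]]
good = [r for r in results if "Error" not in r]  # mirrors prep.py's filter
print(good)  # ['shot-0.png']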
- capture_results.append(result) - pbar.update(1) - finally: - pool.close() - pool.join() + with ProcessPoolExecutor(max_workers=min(len(capture_tasks), task_limit)) as executor: + futures = [executor.submit(self.capture_screenshot, task) for task in capture_tasks] + for future in as_completed(futures): + capture_results.append(future.result()) + pbar.update(1) else: console.print("[blue]Non-TTY environment detected. Progress bar disabled.") - with get_context("spawn").Pool(processes=min(len(capture_tasks), task_limit)) as pool: - try: - for i, result in enumerate(pool.imap_unordered(self.capture_screenshot, capture_tasks), 1): - capture_results.append(result) - console.print(f"Processed {i}/{len(capture_tasks)} screenshots") - finally: - pool.close() - pool.join() + with ProcessPoolExecutor(max_workers=min(len(capture_tasks), task_limit)) as executor: + futures = [executor.submit(self.capture_screenshot, task) for task in capture_tasks] + for i, future in enumerate(as_completed(futures), 1): + capture_results.append(future.result()) + console.print(f"Processed {i}/{len(capture_tasks)} screenshots") if capture_results and (len(capture_results) + existing_images) > num_screens and not force_screenshots: smallest = min(capture_results, key=os.path.getsize) @@ -1692,23 +1687,17 @@ def use_tqdm(): if optimize_tasks: if use_tqdm(): with tqdm(total=len(optimize_tasks), desc="Optimizing Images", ascii=True, dynamic_ncols=False) as pbar: - with get_context("spawn").Pool(processes=min(len(optimize_tasks), task_limit)) as pool: - try: - for result in pool.imap_unordered(self.optimize_image_task, optimize_tasks): - optimize_results.append(result) - pbar.update(1) - finally: - pool.close() - pool.join() + with ProcessPoolExecutor(max_workers=min(len(optimize_tasks), task_limit)) as executor: + futures = [executor.submit(self.optimize_image_task, task) for task in optimize_tasks] + for future in as_completed(futures): + optimize_results.append(future.result()) + pbar.update(1) else: - with get_context("spawn").Pool(processes=min(len(optimize_tasks), task_limit)) as pool: - try: - for i, result in enumerate(pool.imap_unordered(self.optimize_image_task, optimize_tasks), 1): - optimize_results.append(result) - console.print(f"Optimized {i}/{len(optimize_tasks)} images") - finally: - pool.close() - pool.join() + with ProcessPoolExecutor(max_workers=min(len(optimize_tasks), task_limit)) as executor: + futures = [executor.submit(self.optimize_image_task, task) for task in optimize_tasks] + for i, future in enumerate(as_completed(futures), 1): + optimize_results.append(future.result()) + console.print(f"Optimized {i}/{len(optimize_tasks)} images") valid_results = [] for image_path in optimize_results: From cdfef59fe4c9cc4584585492879cd07ed19f8ebd Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 12 Dec 2024 21:26:30 +1000 Subject: [PATCH 638/741] IDK, something --- src/prep.py | 41 +++++++++++++++++++++++++++++------------ 1 file changed, 29 insertions(+), 12 deletions(-) diff --git a/src/prep.py b/src/prep.py index 08b775a9d..ac4d8e449 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1661,18 +1661,27 @@ def use_tqdm(): console.print("[yellow]All screenshots already exist. 
Skipping capture process.") else: if use_tqdm(): - with tqdm(total=len(capture_tasks), desc="Capturing Screenshots", ascii=True, dynamic_ncols=False) as pbar: + with tqdm(total=len(capture_tasks), desc="Capturing Screenshots", ascii=True, dynamic_ncols=False) as result: with ProcessPoolExecutor(max_workers=min(len(capture_tasks), task_limit)) as executor: futures = [executor.submit(self.capture_screenshot, task) for task in capture_tasks] + for future in as_completed(futures): - capture_results.append(future.result()) - pbar.update(1) + result = future.result() + if isinstance(result, str) and result.startswith("Error"): + console.print(f"[red]Error during screenshot capture: {result}") + else: + capture_results.append(result) else: console.print("[blue]Non-TTY environment detected. Progress bar disabled.") with ProcessPoolExecutor(max_workers=min(len(capture_tasks), task_limit)) as executor: futures = [executor.submit(self.capture_screenshot, task) for task in capture_tasks] - for i, future in enumerate(as_completed(futures), 1): - capture_results.append(future.result()) + + for future in as_completed(futures): + result = future.result() + if isinstance(result, str) and result.startswith("Error"): + console.print(f"[red]Error during screenshot capture: {result}") + else: + capture_results.append(result) console.print(f"Processed {i}/{len(capture_tasks)} screenshots") if capture_results and (len(capture_results) + existing_images) > num_screens and not force_screenshots: @@ -1682,22 +1691,30 @@ def use_tqdm(): os.remove(smallest) capture_results.remove(smallest) - optimize_tasks = [(result, self.config) for result in capture_results if "Error" not in result] + optimize_tasks = [(result, self.config) for result in capture_results if isinstance(result, str)] optimize_results = [] if optimize_tasks: if use_tqdm(): - with tqdm(total=len(optimize_tasks), desc="Optimizing Images", ascii=True, dynamic_ncols=False) as pbar: + with tqdm(total=len(optimize_tasks), desc="Optimizing Images", ascii=True, dynamic_ncols=False) as result: with ProcessPoolExecutor(max_workers=min(len(optimize_tasks), task_limit)) as executor: futures = [executor.submit(self.optimize_image_task, task) for task in optimize_tasks] + for future in as_completed(futures): - optimize_results.append(future.result()) - pbar.update(1) + result = future.result() + if isinstance(result, str) and result.startswith("Error"): + console.print(f"[red]Error during image optimization: {result}") + else: + optimize_results.append(result) else: with ProcessPoolExecutor(max_workers=min(len(optimize_tasks), task_limit)) as executor: futures = [executor.submit(self.optimize_image_task, task) for task in optimize_tasks] - for i, future in enumerate(as_completed(futures), 1): - optimize_results.append(future.result()) - console.print(f"Optimized {i}/{len(optimize_tasks)} images") + + for future in as_completed(futures): + result = future.result() + if isinstance(result, str) and result.startswith("Error"): + console.print(f"[red]Error during image optimization: {result}") + else: + optimize_results.append(result) valid_results = [] for image_path in optimize_results: From 83bceec39fdf61b16508af0246535febddf2c541 Mon Sep 17 00:00:00 2001 From: Khoa Pham Date: Thu, 12 Dec 2024 22:21:18 +0700 Subject: [PATCH 639/741] Update prep.py --- src/prep.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index f1ffb9b72..a9eabce44 100644 --- a/src/prep.py +++ b/src/prep.py @@ -693,7 +693,14 @@ async def 
process_tracker(tracker_name, meta):
         meta['tag'] = f"-{meta['tag']}"
     meta = await self.get_season_episode(video, meta)
     meta = await self.tag_override(meta)
-
+    if meta.get('tag') == "-SubsPlease":  # SubsPlease-specific
+        tracks = meta.get('mediainfo').get('media', {}).get('track', [])  # Get all tracks
+        bitrate = tracks[1].get('BitRate', '')  # Get video bitrate
+        bitrate_oldMediaInfo = tracks[0].get('OverallBitRate', '')  # For old MediaInfo (< 24.x where video bitrate is empty, use 'OverallBitRate' instead)
+        if bitrate == "8000000" or bitrate_oldMediaInfo >= "8000000":
+            meta['service'] = "CR"
+        else:
+            meta['service'] = "HIDI"
     meta['video'] = video
     meta['audio'], meta['channels'], meta['has_commentary'] = self.get_audio_v2(mi, meta, bdinfo)
     if meta['tag'][1:].startswith(meta['channels']):

From 830cbe6ea020a18eae1f42b6e1841ad6fa789ac0 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Fri, 13 Dec 2024 14:50:29 +1000
Subject: [PATCH 640/741] HUNO - prompt if language not present

---
 src/trackers/HUNO.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py
index 1e75de732..72bdaba97 100644
--- a/src/trackers/HUNO.py
+++ b/src/trackers/HUNO.py
@@ -7,6 +7,7 @@
 import re
 import platform
 import bencodepy
+import cli_ui

 from src.trackers.COMMON import COMMON
 from src.console import console
@@ -136,7 +137,9 @@ def get_audio(self, meta):
         if language == "zxx":
             language = "Silent"
         elif not language:
-            language = "Unknown"  # Default if no language is found
+            language = cli_ui.ask_string('No audio language present, you must enter one:')
+            if not language:
+                language = "Unknown"

     return f'{codec} {channels} {language}'

From db85cf356a66949c201ea7bde16ea3fa2324a23c Mon Sep 17 00:00:00 2001
From: Khoa Pham
Date: Fri, 13 Dec 2024 13:05:46 +0700
Subject: [PATCH 641/741] Added more checks to validate streaming service

---
 src/prep.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/prep.py b/src/prep.py
index a9eabce44..ba69741ec 100644
--- a/src/prep.py
+++ b/src/prep.py
@@ -695,11 +695,11 @@ async def process_tracker(tracker_name, meta):
         meta = await self.tag_override(meta)
         if meta.get('tag') == "-SubsPlease":  # SubsPlease-specific
             tracks = meta.get('mediainfo').get('media', {}).get('track', [])  # Get all tracks
-            bitrate = tracks[1].get('BitRate', '')  # Get video bitrate
-            bitrate_oldMediaInfo = tracks[0].get('OverallBitRate', '')  # For old MediaInfo (< 24.x where video bitrate is empty, use 'OverallBitRate' instead)
-            if bitrate == "8000000" or bitrate_oldMediaInfo >= "8000000":
+            bitrate = tracks[1].get('BitRate', '') if len(tracks) > 1 else ''  # Get video bitrate if available
+            bitrate_oldMediaInfo = tracks[0].get('OverallBitRate', '') if len(tracks) > 0 else ''  # For old MediaInfo (< 24.x where video bitrate is empty, use 'OverallBitRate' instead)
+            if (bitrate.isdigit() and int(bitrate) >= 8000000) or (bitrate_oldMediaInfo.isdigit() and int(bitrate_oldMediaInfo) >= 8000000):
                 meta['service'] = "CR"
-            else:
+            elif (bitrate.isdigit() or bitrate_oldMediaInfo.isdigit()):  # Only assign if at least one bitrate is present, otherwise leave it to user
                 meta['service'] = "HIDI"
     meta['video'] = video
     meta['audio'], meta['channels'], meta['has_commentary'] = self.get_audio_v2(mi, meta, bdinfo)
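The two SubsPlease changes above settle on a bitrate-only heuristic for the streaming service: Crunchyroll encodes sit at roughly 8 Mb/s and above, HIDIVE below that, and when no bitrate can be parsed the service field is left for the user. A minimal standalone sketch of the final form of the check, assuming a parsed MediaInfo dict shaped like meta['mediainfo'] (the helper name is illustrative, not part of the codebase):

    def guess_subsplease_service(mediainfo: dict) -> str | None:
        """Guess CR vs HIDI for a SubsPlease release from bitrate alone."""
        tracks = mediainfo.get('media', {}).get('track', [])
        # Newer MediaInfo reports the video track's BitRate; older builds
        # (< 24.x) leave it empty, so fall back to the general OverallBitRate.
        video_rate = tracks[1].get('BitRate', '') if len(tracks) > 1 else ''
        overall_rate = tracks[0].get('OverallBitRate', '') if tracks else ''
        rates = [int(r) for r in (video_rate, overall_rate) if str(r).isdigit()]
        if any(r >= 8000000 for r in rates):
            return "CR"
        if rates:
            return "HIDI"
        return None  # no usable bitrate; leave the service to the user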
From ab44e3394fc1c76e2572a0a08671664cdafd212e Mon Sep 17 00:00:00 2001
From: Audionut
Date: Fri, 13 Dec 2024 16:09:24 +1000
Subject: [PATCH 642/741] lint

---
 src/prep.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/prep.py b/src/prep.py
index ba69741ec..b62be21db 100644
--- a/src/prep.py
+++ b/src/prep.py
@@ -693,11 +693,11 @@ async def process_tracker(tracker_name, meta):
         meta['tag'] = f"-{meta['tag']}"
     meta = await self.get_season_episode(video, meta)
     meta = await self.tag_override(meta)
-    if meta.get('tag') == "-SubsPlease": # SubsPlease-specific
-        tracks = meta.get('mediainfo').get('media', {}).get('track', []) # Get all tracks
+    if meta.get('tag') == "-SubsPlease":  # SubsPlease-specific
+        tracks = meta.get('mediainfo').get('media', {}).get('track', [])  # Get all tracks
         bitrate = tracks[1].get('BitRate', '') if len(tracks) > 1 else ''  # Get video bitrate if available
         bitrate_oldMediaInfo = tracks[0].get('OverallBitRate', '') if len(tracks) > 0 else ''  # For old MediaInfo (< 24.x where video bitrate is empty, use 'OverallBitRate' instead)
-        if (bitrate.isdigit() and int(bitrate) >= 8000000) or (bitrate_oldMediaInfo.isdigit() and int(bitrate_oldMediaInfo) >= 8000000): 
+        if (bitrate.isdigit() and int(bitrate) >= 8000000) or (bitrate_oldMediaInfo.isdigit() and int(bitrate_oldMediaInfo) >= 8000000):
             meta['service'] = "CR"
         elif (bitrate.isdigit() or bitrate_oldMediaInfo.isdigit()):  # Only assign if at least one bitrate is present, otherwise leave it to user
             meta['service'] = "HIDI"

From 85f1a64b841386f98fc55e9aac394c50ed4f6a85 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Fri, 13 Dec 2024 16:10:43 +1000
Subject: [PATCH 643/741] Indentation

---
 src/trackers/BHD.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py
index f91b014f0..97fcb6ba1 100644
--- a/src/trackers/BHD.py
+++ b/src/trackers/BHD.py
@@ -388,12 +388,12 @@ async def edit_desc(self, meta):
                 web_url = images[each]['web_url']
                 img_url = images[each]['img_url']
                 if (each == len(images) - 1):
-                        desc.write(f"[url={web_url}][img width=350]{img_url}[/img][/url]")
+                    desc.write(f"[url={web_url}][img width=350]{img_url}[/img][/url]")
                 elif (each + 1) % 2 == 0:
-                        desc.write(f"[url={web_url}][img width=350]{img_url}[/img][/url]\n")
-                        desc.write("\n")
+                    desc.write(f"[url={web_url}][img width=350]{img_url}[/img][/url]\n")
+                    desc.write("\n")
                 else:
-                        desc.write(f"[url={web_url}][img width=350]{img_url}[/img][/url] ")
+                    desc.write(f"[url={web_url}][img width=350]{img_url}[/img][/url] ")
             desc.write("[/align]")
         desc.write(self.signature)
         desc.close()

From 00a057028f05ca36e5fda39d4f819c5f00c77605 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Fri, 13 Dec 2024 16:14:06 +1000
Subject: [PATCH 644/741] revert screenshot fix attempts

---
 src/prep.py | 77 +++++++++++++++++++++++++++--------------------------
 1 file changed, 39 insertions(+), 38 deletions(-)

diff --git a/src/prep.py b/src/prep.py
index ac4d8e449..b62be21db 100644
--- a/src/prep.py
+++ b/src/prep.py
@@ -19,7 +19,6 @@
 import multiprocessing
 from multiprocessing import get_context
 from tqdm import tqdm
-from concurrent.futures import ProcessPoolExecutor, as_completed
 import os
 import re
 import math
@@ -694,7 +693,14 @@ async def process_tracker(tracker_name, meta):
         meta['tag'] = f"-{meta['tag']}"
     meta = await self.get_season_episode(video, meta)
     meta = await self.tag_override(meta)
-
+    if meta.get('tag') == "-SubsPlease":  # SubsPlease-specific
+        tracks = meta.get('mediainfo').get('media', {}).get('track', [])  # Get all tracks
+        bitrate = tracks[1].get('BitRate', '') if len(tracks) > 1 else ''  # Get video bitrate if available
+        bitrate_oldMediaInfo = tracks[0].get('OverallBitRate', '') if len(tracks) > 0 else ''  # For old MediaInfo (< 
24.x where video bitrate is empty, use 'OverallBitRate' instead) + if (bitrate.isdigit() and int(bitrate) >= 8000000) or (bitrate_oldMediaInfo.isdigit() and int(bitrate_oldMediaInfo) >= 8000000): + meta['service'] = "CR" + elif (bitrate.isdigit() or bitrate_oldMediaInfo.isdigit()): # Only assign if at least one bitrate is present, otherwise leave it to user + meta['service'] = "HIDI" meta['video'] = video meta['audio'], meta['channels'], meta['has_commentary'] = self.get_audio_v2(mi, meta, bdinfo) if meta['tag'][1:].startswith(meta['channels']): @@ -1661,28 +1667,25 @@ def use_tqdm(): console.print("[yellow]All screenshots already exist. Skipping capture process.") else: if use_tqdm(): - with tqdm(total=len(capture_tasks), desc="Capturing Screenshots", ascii=True, dynamic_ncols=False) as result: - with ProcessPoolExecutor(max_workers=min(len(capture_tasks), task_limit)) as executor: - futures = [executor.submit(self.capture_screenshot, task) for task in capture_tasks] - - for future in as_completed(futures): - result = future.result() - if isinstance(result, str) and result.startswith("Error"): - console.print(f"[red]Error during screenshot capture: {result}") - else: + with tqdm(total=len(capture_tasks), desc="Capturing Screenshots", ascii=True, dynamic_ncols=False) as pbar: + with get_context("spawn").Pool(processes=min(len(capture_tasks), task_limit)) as pool: + try: + for result in pool.imap_unordered(self.capture_screenshot, capture_tasks): capture_results.append(result) + pbar.update(1) + finally: + pool.close() + pool.join() else: console.print("[blue]Non-TTY environment detected. Progress bar disabled.") - with ProcessPoolExecutor(max_workers=min(len(capture_tasks), task_limit)) as executor: - futures = [executor.submit(self.capture_screenshot, task) for task in capture_tasks] - - for future in as_completed(futures): - result = future.result() - if isinstance(result, str) and result.startswith("Error"): - console.print(f"[red]Error during screenshot capture: {result}") - else: + with get_context("spawn").Pool(processes=min(len(capture_tasks), task_limit)) as pool: + try: + for i, result in enumerate(pool.imap_unordered(self.capture_screenshot, capture_tasks), 1): capture_results.append(result) - console.print(f"Processed {i}/{len(capture_tasks)} screenshots") + console.print(f"Processed {i}/{len(capture_tasks)} screenshots") + finally: + pool.close() + pool.join() if capture_results and (len(capture_results) + existing_images) > num_screens and not force_screenshots: smallest = min(capture_results, key=os.path.getsize) @@ -1691,30 +1694,28 @@ def use_tqdm(): os.remove(smallest) capture_results.remove(smallest) - optimize_tasks = [(result, self.config) for result in capture_results if isinstance(result, str)] + optimize_tasks = [(result, self.config) for result in capture_results if "Error" not in result] optimize_results = [] if optimize_tasks: if use_tqdm(): - with tqdm(total=len(optimize_tasks), desc="Optimizing Images", ascii=True, dynamic_ncols=False) as result: - with ProcessPoolExecutor(max_workers=min(len(optimize_tasks), task_limit)) as executor: - futures = [executor.submit(self.optimize_image_task, task) for task in optimize_tasks] - - for future in as_completed(futures): - result = future.result() - if isinstance(result, str) and result.startswith("Error"): - console.print(f"[red]Error during image optimization: {result}") - else: + with tqdm(total=len(optimize_tasks), desc="Optimizing Images", ascii=True, dynamic_ncols=False) as pbar: + with 
get_context("spawn").Pool(processes=min(len(optimize_tasks), task_limit)) as pool: + try: + for result in pool.imap_unordered(self.optimize_image_task, optimize_tasks): optimize_results.append(result) + pbar.update(1) + finally: + pool.close() + pool.join() else: - with ProcessPoolExecutor(max_workers=min(len(optimize_tasks), task_limit)) as executor: - futures = [executor.submit(self.optimize_image_task, task) for task in optimize_tasks] - - for future in as_completed(futures): - result = future.result() - if isinstance(result, str) and result.startswith("Error"): - console.print(f"[red]Error during image optimization: {result}") - else: + with get_context("spawn").Pool(processes=min(len(optimize_tasks), task_limit)) as pool: + try: + for i, result in enumerate(pool.imap_unordered(self.optimize_image_task, optimize_tasks), 1): optimize_results.append(result) + console.print(f"Optimized {i}/{len(optimize_tasks)} images") + finally: + pool.close() + pool.join() valid_results = [] for image_path in optimize_results: From 93ffe05e6cba5a66d033d1734112c76dd159bca8 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 13 Dec 2024 18:11:08 +1000 Subject: [PATCH 645/741] Robust language checking Proper fix for https://github.com/Audionut/Upload-Assistant/issues/143 --- src/trackers/HUNO.py | 35 ++++++++++++++++++++++++----------- 1 file changed, 24 insertions(+), 11 deletions(-) diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index 72bdaba97..e4146a381 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -115,25 +115,38 @@ def get_audio(self, meta): if dual: language = "DUAL" else: - if not meta['is_disc']: - # Read the MEDIAINFO.txt file + if meta['is_disc'] == "BDMV": + summary_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt" + with open(summary_path, 'r', encoding='utf-8') as f: + summary_text = f.read() + + audio_tracks = re.findall(r'Audio:\s*(.+)', summary_text) + if audio_tracks: + first_audio = audio_tracks[0] + language_match = re.search(r'([A-Za-z]+)\s*/', first_audio) + if language_match: + language = language_match.group(1).strip() + else: + print("DEBUG: No language found in the first audio track.") + + else: media_info_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt" with open(media_info_path, 'r', encoding='utf-8') as f: media_info_text = f.read() - # Extract the first audio section - first_audio_section = re.search(r'Audio\s+ID\s+:\s+2(.*?)\n\n', media_info_text, re.DOTALL) - if not first_audio_section: # Fallback in case of a different structure - first_audio_section = re.search(r'Audio(.*?)Text', media_info_text, re.DOTALL) + audio_sections = re.findall(r'Audio\s+.*?(?=\n\n|Text|Menu|$)', media_info_text, re.DOTALL) + if audio_sections: + first_audio_section = audio_sections[0] + language_match = re.search(r'Language\s*:\s*(\w+.*)', first_audio_section) - if first_audio_section: - # Extract language information from the first audio track - language_match = re.search(r'Language\s*:\s*(.+)', first_audio_section.group(1)) if language_match: language = language_match.group(1).strip() - language = re.sub(r'\(.+\)', '', language) # Remove text in parentheses + language = re.sub(r'\(.+\)', '', language) + else: + print("DEBUG: No Language match found in the first audio section.") + else: + print("DEBUG: No Audio sections found in MEDIAINFO.txt.") - # Handle special cases if language == "zxx": language = "Silent" elif not language: From 120cf74977cab71c32cb9f993d8e080fb53fd254 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 13 Dec 2024 
From 120cf74977cab71c32cb9f993d8e080fb53fd254 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Fri, 13 Dec 2024 18:26:43 +1000
Subject: [PATCH 646/741] Missed a tvmaze console

---
 src/prep.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/prep.py b/src/prep.py
index b62be21db..edfc2ffe2 100644
--- a/src/prep.py
+++ b/src/prep.py
@@ -4242,7 +4242,8 @@ def _make_tvmaze_request(self, url, params, meta):
             if resp.ok:
                 return resp.json()
             else:
-                print(f"HTTP Request failed with status code: {resp.status_code}, response: {resp.text}")
+                if meta['debug']:
+                    print(f"HTTP Request failed with status code: {resp.status_code}, response: {resp.text}")
                 return None
         except Exception as e:
             print(f"Error making TVmaze request: {e}")

From bbf9c7b46098a32eaac39c9fcb27573e65e8eef5 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Fri, 13 Dec 2024 19:03:24 +1000
Subject: [PATCH 647/741] Don't +1 +1

Since +1 is fixed in prep.screenshots, there's no longer a need to increment the call to prep.
---
 src/trackers/COMMON.py | 2 +-
 src/trackers/PTP.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py
index 44f0156a8..27d492805 100644
--- a/src/trackers/COMMON.py
+++ b/src/trackers/COMMON.py
@@ -239,7 +239,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des
                     if not new_screens:
                         if meta['debug']:
                             console.print(f"[yellow]No existing screenshots for {new_images_key}; generating new ones.")
-                        s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, multi_screens + 1, True, None))
+                        s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, multi_screens, True, None))
                         s.start()
                         while s.is_alive():
                             await asyncio.sleep(1)

diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py
index 772633c1c..b1e9a6909 100644
--- a/src/trackers/PTP.py
+++ b/src/trackers/PTP.py
@@ -804,7 +804,8 @@ async def edit_desc(self, meta):
                     meta[new_images_key] = []
                     new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png")
                     if not new_screens:
-                        s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, multi_screens + 1, True, None))
+                        s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, multi_screens, True, None))
                         s.start()
                         while s.is_alive() is True:
                             await asyncio.sleep(3)

From a277b99781a8fbc4c10d178d53e819d175ad0539 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Fri, 13 Dec 2024 19:15:05 +1000
Subject: [PATCH 648/741] HUNO - don't get caught at language in ua

---
 src/trackers/HUNO.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py
index e4146a381..a5b9702ef 100644
--- a/src/trackers/HUNO.py
+++ b/src/trackers/HUNO.py
@@ -150,8 +150,11 @@ def get_audio(self, meta):
         if language == "zxx":
             language = "Silent"
         elif not language:
-            language = cli_ui.ask_string('No audio language present, you must enter one:')
-            if not language:
+            if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)):
+                language = cli_ui.ask_string('No audio language present, you must enter one:')
+                if not language:
+                    language = "Unknown"
+            else:
                 language = "Unknown"

     return f'{codec} {channels} {language}'

From 6a982df3522515b6a92bdc75c3592294d59ec11d Mon Sep 17 00:00:00 2001
From: Audionut
Date: Fri, 13 Dec 2024 19:57:13 +1000
Subject: [PATCH 649/741] fix file based image retaking

---
 src/prep.py | 42 
+++++++++++++++++++++++++++++++++++++----- 1 file changed, 37 insertions(+), 5 deletions(-) diff --git a/src/prep.py b/src/prep.py index b62be21db..47a37d212 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1718,6 +1718,7 @@ def use_tqdm(): pool.join() valid_results = [] + remaining_retakes = [] for image_path in optimize_results: if "Error" in image_path: console.print(f"[red]{image_path}") @@ -1736,20 +1737,51 @@ def use_tqdm(): pass elif self.img_host in ["ptpimg", "lensdump", "ptscreens", "oeimg"] and not retake: pass - elif self.img_host == "freeimage.host": - console.print("[bold red]Support for freeimage.host has been removed. Please remove it from your config.") - exit() elif not retake: console.print("[red]Image too large for your image host, retaking.") retake = True time.sleep(1) if retake: - console.print(f"[yellow]Retaking screenshot for: {image_path}[/yellow]") - capture_tasks.append(image_path) + retry_attempts = 3 + for attempt in range(1, retry_attempts + 1): + console.print(f"[yellow]Retaking screenshot for: {image_path} (Attempt {attempt}/{retry_attempts})[/yellow]") + try: + os.remove(image_path) + random_time = random.uniform(0, length) + self.capture_screenshot((path, random_time, image_path, width, height, w_sar, h_sar, loglevel)) + self.optimize_image_task((image_path, config)) + new_size = os.path.getsize(image_path) + valid_image = False + + if new_size > 75000 and new_size <= 31000000 and self.img_host == "imgbb": + console.print(f"[green]Successfully retaken screenshot for: {image_path} ({new_size} bytes)[/green]") + valid_image = True + elif new_size > 75000 and new_size <= 10000000 and self.img_host in ["imgbox", "pixhost"]: + console.print(f"[green]Successfully retaken screenshot for: {image_path} ({new_size} bytes)[/green]") + valid_image = True + elif new_size > 75000 and self.img_host in ["ptpimg", "lensdump", "ptscreens", "oeimg"]: + console.print(f"[green]Successfully retaken screenshot for: {image_path} ({new_size} bytes)[/green]") + valid_image = True + + if valid_image: + valid_results.append(image_path) + break + else: + console.print(f"[red]Retaken image {image_path} does not meet the size requirements for {self.img_host}. Retrying...[/red]") + except Exception as e: + console.print(f"[red]Error retaking screenshot for {image_path}: {e}[/red]") + else: + console.print(f"[red]All retry attempts failed for {image_path}. 
Skipping.[/red]") + remaining_retakes.append(image_path) else: valid_results.append(image_path) + if remaining_retakes: + console.print(f"[red]The following images could not be retaken successfully: {remaining_retakes}[/red]") + + console.print(f"[green]Successfully processed {len(valid_results)} screenshots in total.") + for image_path in valid_results: img_dict = { 'img_url': image_path, From 4b70b0fa5ad9ed7b2a0ab98d5f79753d780aedc6 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 13 Dec 2024 20:18:53 +1000 Subject: [PATCH 650/741] remove console --- src/prep.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index 47a37d212..c227bc07a 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1780,8 +1780,6 @@ def use_tqdm(): if remaining_retakes: console.print(f"[red]The following images could not be retaken successfully: {remaining_retakes}[/red]") - console.print(f"[green]Successfully processed {len(valid_results)} screenshots in total.") - for image_path in valid_results: img_dict = { 'img_url': image_path, From 02534d37b45dc5b1f7262fee8d6a6cce6a4b27bc Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 14 Dec 2024 00:11:06 +1000 Subject: [PATCH 651/741] Fix bdmv screens retry --- src/prep.py | 52 ++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 44 insertions(+), 8 deletions(-) diff --git a/src/prep.py b/src/prep.py index c227bc07a..da5b578a4 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1312,31 +1312,67 @@ def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs, pool.join() valid_results = [] + remaining_retakes = [] for image_path in optimized_results: - retake = False - if not os.path.exists(image_path): + if "Error" in image_path: + console.print(f"[red]{image_path}") continue + retake = False image_size = os.path.getsize(image_path) if image_size <= 75000: console.print(f"[yellow]Image {image_path} is incredibly small, retaking.") retake = True - elif image_size <= 31000000 and self.img_host == "imgbb": + time.sleep(1) + elif image_size <= 31000000 and self.img_host == "imgbb" and not retake: pass - elif image_size <= 10000000 and self.img_host in ["imgbox", "pixhost"]: + elif image_size <= 10000000 and self.img_host in ["imgbox", "pixhost"] and not retake: pass - elif self.img_host in ["ptpimg", "lensdump", "ptscreens", "oeimg"]: + elif self.img_host in ["ptpimg", "lensdump", "ptscreens", "oeimg"] and not retake: pass - else: + elif not retake: console.print("[red]Image too large for your image host, retaking.") retake = True + time.sleep(1) if retake: - console.print(f"[yellow]Retaking screenshot for: {image_path}[/yellow]") - capture_tasks.append((file, None, image_path, keyframe, loglevel)) + retry_attempts = 3 + for attempt in range(1, retry_attempts + 1): + console.print(f"[yellow]Retaking screenshot for: {image_path} (Attempt {attempt}/{retry_attempts})[/yellow]") + try: + os.remove(image_path) + random_time = random.uniform(0, length) + self.capture_disc_task((file, random_time, image_path, keyframe, loglevel)) + self.optimize_image_task((image_path, config)) + new_size = os.path.getsize(image_path) + valid_image = False + + if new_size > 75000 and new_size <= 31000000 and self.img_host == "imgbb": + console.print(f"[green]Successfully retaken screenshot for: {image_path} ({new_size} bytes)[/green]") + valid_image = True + elif new_size > 75000 and new_size <= 10000000 and self.img_host in ["imgbox", "pixhost"]: + console.print(f"[green]Successfully retaken screenshot for: {image_path} ({new_size} 
bytes)[/green]") + valid_image = True + elif new_size > 75000 and self.img_host in ["ptpimg", "lensdump", "ptscreens", "oeimg"]: + console.print(f"[green]Successfully retaken screenshot for: {image_path} ({new_size} bytes)[/green]") + valid_image = True + + if valid_image: + valid_results.append(image_path) + break + else: + console.print(f"[red]Retaken image {image_path} does not meet the size requirements for {self.img_host}. Retrying...[/red]") + except Exception as e: + console.print(f"[red]Error retaking screenshot for {image_path}: {e}[/red]") + else: + console.print(f"[red]All retry attempts failed for {image_path}. Skipping.[/red]") + remaining_retakes.append(image_path) else: valid_results.append(image_path) + if remaining_retakes: + console.print(f"[red]The following images could not be retaken successfully: {remaining_retakes}[/red]") + for image_path in valid_results: img_dict = { 'img_url': image_path, From 5ee0bce2b56691afe345fdd23626ddf8f622b41f Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 14 Dec 2024 00:34:37 +1000 Subject: [PATCH 652/741] Add framerate as a dvd system fallback method Should fix or help mitigate https://github.com/Audionut/Upload-Assistant/issues/193 --- src/prep.py | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/src/prep.py b/src/prep.py index da5b578a4..83d165fa4 100644 --- a/src/prep.py +++ b/src/prep.py @@ -710,9 +710,9 @@ async def process_tracker(tracker_name, meta): meta['3D'] = self.is_3d(mi, bdinfo) if meta.get('manual_source', None): meta['source'] = meta['manual_source'] - _, meta['type'] = self.get_source(meta['type'], video, meta['path'], meta['is_disc'], meta) + _, meta['type'] = self.get_source(meta['type'], video, meta['path'], meta['is_disc'], meta, folder_id, base_dir) else: - meta['source'], meta['type'] = self.get_source(meta['type'], video, meta['path'], meta['is_disc'], meta) + meta['source'], meta['type'] = self.get_source(meta['type'], video, meta['path'], meta['is_disc'], meta, folder_id, base_dir) if meta.get('service', None) in (None, ''): meta['service'], meta['service_longname'] = self.get_service(video, meta.get('tag', ''), meta['audio'], meta['filename']) elif meta.get('service'): @@ -2511,7 +2511,9 @@ def get_tag(self, video, meta): tag = "" return tag - def get_source(self, type, video, path, is_disc, meta): + def get_source(self, type, video, path, is_disc, meta, folder_id, base_dir): + with open(f'{base_dir}/tmp/{folder_id}/MediaInfo.json', 'r', encoding='utf-8') as f: + mi = json.load(f) resolution = meta['resolution'] try: try: @@ -2546,6 +2548,17 @@ def get_source(self, type, video, path, is_disc, meta): system = "NTSC" except Exception: system = "" + if system == "": + try: + framerate = mi['media']['track'][1].get('FrameRate', '') + if framerate == "25": + system = "PAL" + elif framerate: + system = "NTSC" + else: + system = "" + except Exception: + system = "" finally: if system is None: system = "" From 6619c66b73b9211ce44d36e6a11ca85c7cda9d14 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 14 Dec 2024 09:32:23 +1000 Subject: [PATCH 653/741] Handle lack of mi.json --- src/prep.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index 83d165fa4..c7ad44b38 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2512,8 +2512,12 @@ def get_tag(self, video, meta): return tag def get_source(self, type, video, path, is_disc, meta, folder_id, base_dir): - with open(f'{base_dir}/tmp/{folder_id}/MediaInfo.json', 'r', encoding='utf-8') as 
From 6619c66b73b9211ce44d36e6a11ca85c7cda9d14 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Sat, 14 Dec 2024 09:32:23 +1000
Subject: [PATCH 653/741] Handle lack of mi.json

---
 src/prep.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/src/prep.py b/src/prep.py
index 83d165fa4..c7ad44b38 100644
--- a/src/prep.py
+++ b/src/prep.py
@@ -2512,8 +2512,12 @@ def get_tag(self, video, meta):
         return tag

     def get_source(self, type, video, path, is_disc, meta, folder_id, base_dir):
-        with open(f'{base_dir}/tmp/{folder_id}/MediaInfo.json', 'r', encoding='utf-8') as f:
-            mi = json.load(f)
+        try:
+            with open(f'{base_dir}/tmp/{folder_id}/MediaInfo.json', 'r', encoding='utf-8') as f:
+                mi = json.load(f)
+        except Exception:
+            if meta['debug']:
+                console.print("No mediainfo.json")
         resolution = meta['resolution']

From b3a9578cf62cf7f28e277cd6eca884d9ff40f224 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Sat, 14 Dec 2024 10:35:54 +1000
Subject: [PATCH 654/741] Always set folder_id

---
 src/prep.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/prep.py b/src/prep.py
index c7ad44b38..d8c29bd15 100644
--- a/src/prep.py
+++ b/src/prep.py
@@ -429,8 +429,8 @@ async def gather_prep(self, meta, mode):
         base_dir = meta['base_dir']
         meta['saved_description'] = False

+        folder_id = os.path.basename(meta['path'])
         if meta.get('uuid', None) is None:
-            folder_id = os.path.basename(meta['path'])
             meta['uuid'] = folder_id
         if not os.path.exists(f"{base_dir}/tmp/{meta['uuid']}"):
             Path(f"{base_dir}/tmp/{meta['uuid']}").mkdir(parents=True, exist_ok=True)

From b40b48de563b70e94907b268abf6b89839066305 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Sat, 14 Dec 2024 14:05:22 +1000
Subject: [PATCH 655/741] Require - for tag with bdmv

Will stop matching against video codec, region etc.
---
 src/prep.py | 20 ++++++++++++++++----
 1 file changed, 16 insertions(+), 4 deletions(-)

diff --git a/src/prep.py b/src/prep.py
index d8c29bd15..cf0bed8e9 100644
--- a/src/prep.py
+++ b/src/prep.py
@@ -2501,14 +2501,26 @@ def is_3d(self, mi, bdinfo):

     def get_tag(self, video, meta):
         try:
-            tag = guessit(video)['release_group']
-            tag = f"-{tag}"
-        except Exception:
+            parsed = guessit(video)
+            release_group = parsed.get('release_group')
+
+            if meta['is_disc'] == "BDMV":
+                if release_group:
+                    if f"-{release_group}" not in video:
+                        if meta['debug']:
+                            console.print(f"[warning] Invalid release group format: {release_group}")
+                        release_group = None
+
+            tag = f"-{release_group}" if release_group else ""
+        except Exception as e:
+            console.print(f"Error while parsing: {e}")
             tag = ""
+
         if tag == "-":
             tag = ""
-        if tag[1:].lower() in ["nogroup", 'nogrp']:
+        if tag[1:].lower() in ["nogroup", "nogrp"]:
             tag = ""
+
         return tag

     def get_source(self, type, video, path, is_disc, meta, folder_id, base_dir):
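For BDMVs, guessit is handed a disc folder name rather than a scene-style filename, so its release_group guess frequently latches onto the video codec or region code instead. The patch above therefore only trusts the guess when the literal '-GROUP' string actually occurs in the input. The validation in isolation, as a sketch (guessit is the library the codebase already uses; the function name is illustrative):

    from guessit import guessit

    def bdmv_release_group(name: str) -> str:
        """Return '-GROUP' only when the guess is backed by a real '-GROUP' suffix."""
        group = guessit(name).get('release_group')
        # Requiring the hyphenated form to appear verbatim rejects false
        # positives such as codecs or disc regions that guessit misreads.
        if group and f"-{group}" in name:
            return f"-{group}"
        return ""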
From 15934de095e9ed5f9ad78c548b4f35aea3f7d4bb Mon Sep 17 00:00:00 2001
From: Audionut
Date: Sat, 14 Dec 2024 17:54:24 +1000
Subject: [PATCH 656/741] dvd screens - fix existing skip and exception

---
 src/prep.py | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/src/prep.py b/src/prep.py
index cf0bed8e9..93ab6fdcc 100644
--- a/src/prep.py
+++ b/src/prep.py
@@ -1409,8 +1409,8 @@ def dvd_screenshots(self, meta, disc_num, num_screens=None, retry_cap=None):
             return

         if num_screens is None:
-            num_screens = self.screens
-        if num_screens == 0 or (len(meta.get('image_list', [])) >= num_screens and disc_num == 0):
+            num_screens = self.screens - len(existing_images)
+        if num_screens == 0 or (len(meta.get('image_list', [])) >= self.screens and disc_num == 0):
             return

         if len(glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][disc_num]['name']}-*.png")) >= num_screens:
@@ -1608,7 +1608,11 @@ def capture_dvd_screenshot(self, task):
         if w_sar != 1 or h_sar != 1:
             ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar)))

-        ff.output(image, vframes=1, pix_fmt="rgb24").overwrite_output().global_args('-loglevel', loglevel, '-accurate_seek').run()
+        try:
+            ff.output(image, vframes=1, pix_fmt="rgb24").overwrite_output().global_args('-loglevel', loglevel, '-accurate_seek').run()
+        except ffmpeg._run.Error as e:
+            stderr_output = e.stderr.decode() if e.stderr else "No stderr output available"
+            console.print(f"[red]Error capturing screenshot for {input_file} at {seek_time}s: {stderr_output}[/red]")
         if os.path.exists(image):
             return image
         else:

From 032825860b444dcbf73589220001cceebf5d029d Mon Sep 17 00:00:00 2001
From: Audionut
Date: Sat, 14 Dec 2024 19:42:51 +1000
Subject: [PATCH 657/741] BHD prohibit dvdrip

---
 src/trackers/BHD.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py
index 97fcb6ba1..3691c4921 100644
--- a/src/trackers/BHD.py
+++ b/src/trackers/BHD.py
@@ -405,6 +405,10 @@ async def search_existing(self, meta, disctype):
             console.print("[bold red]This is an internal BHD release, skipping upload[/bold red]")
             meta['skipping'] = "BHD"
             return
+        if meta['type'] == "DVDRIP":
+            console.print("[bold red]No DVDRIP at BHD, skipping upload[/bold red]")
+            meta['skipping'] = "BHD"
+            return
         dupes = []
         console.print("[yellow]Searching for existing torrents on site...")
         category = meta['category']

From fbaa41e17fc9fc0cfaac17e95567884ecf1be50c Mon Sep 17 00:00:00 2001
From: Audionut
Date: Sun, 15 Dec 2024 09:56:18 +1000
Subject: [PATCH 658/741] Remove ffmpeg exception catch

---
 src/prep.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/src/prep.py b/src/prep.py
index edfc2ffe2..5235f0d5d 100644
--- a/src/prep.py
+++ b/src/prep.py
@@ -1579,8 +1579,8 @@ def capture_dvd_screenshot(self, task):
                 console.print(f"[red]Screenshot creation failed for {image}[/red]")
                 return None

-        except ffmpeg.Error as e:
-            console.print(f"[red]Error capturing screenshot for {input_file} at {seek_time}s: {e.stderr.decode()}[/red]")
+        except Exception as e:
+            console.print(f"[red]Error capturing screenshot for {input_file} at {seek_time}s: {e}[/red]")
             return None

     def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=None, force_screenshots=False, manual_frames=None):
@@ -1811,8 +1811,6 @@ def capture_screenshot(self, args):
                 return f"Error: Screenshot not generated or is empty at {image_path}"

             return image_path
-        except ffmpeg.Error as e:
-            return f"FFmpeg Error: {e.stderr.decode()}"
        except Exception as e:
            return f"Error: {str(e)}"

From 591ae5a388b3c2a2296a620c91afae8405bab9b3 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Sun, 15 Dec 2024 11:21:16 +1000
Subject: [PATCH 659/741] Add better detection for MP2 audio

---
 src/prep.py | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/src/prep.py b/src/prep.py
index 5235f0d5d..d539526dd 100644
--- a/src/prep.py
+++ b/src/prep.py
@@ -947,6 +947,9 @@ def filter_mediainfo(data):
                     "ID": track.get("ID", {}),
                     "UniqueID": track.get("UniqueID", {}),
                     "Format": track.get("Format", {}),
+                    "Format_Version": track.get("Format_Version", {}),
+                    "Format_Profile": track.get("Format_Profile", {}),
+                    "Format_Settings": track.get("Format_Settings", {}),
                    "Format_Commercial_IfAny": track.get("Format_Commercial_IfAny", {}),
                    "Format_Settings_Endianness": track.get("Format_Settings_Endianness", {}),
                    "Format_AdditionalFeatures": 
track.get("Format_AdditionalFeatures", {}), @@ -2402,6 +2405,9 @@ def get_audio_v2(self, mi, meta, bdinfo): codec = audio.get(format, "") + audio_extra.get(additional, "") extra = format_extra.get(additional, "") + format_settings = track.get('Format_Settings', '') + if not isinstance(format_settings, str): + format_settings = "" format_settings = format_settings_extra.get(format_settings, "") if format_settings == "EX" and chan == "5.1": format_settings = "EX" @@ -2415,8 +2421,12 @@ def get_audio_v2(self, mi, meta, bdinfo): if additional and additional.endswith("X"): codec = "DTS:X" chan = f"{int(channels) - 1}.1" + format_profile = track.get('Format_Profile', '') if format == "MPEG Audio": - codec = track.get('CodecID_Hint', '') + if format_profile == "Layer 2": + codec = "MP2" + else: + codec = track.get('CodecID_Hint', '') audio = f"{dual} {codec or ''} {format_settings or ''} {chan or ''}{extra or ''}" audio = ' '.join(audio.split()) From e0c6e892985c76cd61c5c2512d80219df64b4ad2 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 15 Dec 2024 21:21:41 +1000 Subject: [PATCH 660/741] Fix borked mp2 code --- src/prep.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/prep.py b/src/prep.py index d539526dd..b348e3f39 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2279,8 +2279,11 @@ def get_audio_v2(self, mi, meta, bdinfo): additional = track.get('Format_AdditionalFeatures', '') format_settings = track.get('Format_Settings', '') + if not isinstance(format_settings, str): + format_settings = "" if format_settings in ['Explicit']: format_settings = "" + format_profile = track.get('Format_Profile', '') # Channels channels = track.get('Channels_Original', track.get('Channels')) if not str(channels).isnumeric(): @@ -2405,9 +2408,6 @@ def get_audio_v2(self, mi, meta, bdinfo): codec = audio.get(format, "") + audio_extra.get(additional, "") extra = format_extra.get(additional, "") - format_settings = track.get('Format_Settings', '') - if not isinstance(format_settings, str): - format_settings = "" format_settings = format_settings_extra.get(format_settings, "") if format_settings == "EX" and chan == "5.1": format_settings = "EX" @@ -2421,7 +2421,7 @@ def get_audio_v2(self, mi, meta, bdinfo): if additional and additional.endswith("X"): codec = "DTS:X" chan = f"{int(channels) - 1}.1" - format_profile = track.get('Format_Profile', '') + if format == "MPEG Audio": if format_profile == "Layer 2": codec = "MP2" From 9564638808f9ed6c573438f36afec00e025ca8c0 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 15 Dec 2024 21:35:04 +1000 Subject: [PATCH 661/741] Fix BDMV unit3d packed screenshots --- src/prep.py | 61 ++++++++++++++++++++++++++++++++++-------- src/trackers/COMMON.py | 4 +-- 2 files changed, 51 insertions(+), 14 deletions(-) diff --git a/src/prep.py b/src/prep.py index b348e3f39..d9c9e2d19 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1241,13 +1241,16 @@ def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs, os.chdir(f"{base_dir}/tmp/{folder_id}") existing_screens = glob.glob(f"{sanitized_filename}-*.png") total_existing = len(existing_screens) + len(existing_images) - num_screens = max(0, self.screens - total_existing) + if not force_screenshots: + num_screens = max(0, self.screens - total_existing) + else: + num_screens = num_screens - if num_screens == 0: + if num_screens == 0 and not force_screenshots: console.print('[bold green]Reusing existing screenshots. 
No additional screenshots needed.') return - if meta['debug']: + if meta['debug'] and not force_screenshots: console.print(f"[bold yellow]Saving Screens... Total needed: {self.screens}, Existing: {total_existing}, To capture: {num_screens}") capture_results = [] capture_tasks = [] @@ -1315,31 +1318,67 @@ def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs, pool.join() valid_results = [] + remaining_retakes = [] for image_path in optimized_results: - retake = False - if not os.path.exists(image_path): + if "Error" in image_path: + console.print(f"[red]{image_path}") continue + retake = False image_size = os.path.getsize(image_path) if image_size <= 75000: console.print(f"[yellow]Image {image_path} is incredibly small, retaking.") retake = True - elif image_size <= 31000000 and self.img_host == "imgbb": + time.sleep(1) + elif image_size <= 31000000 and self.img_host == "imgbb" and not retake: pass - elif image_size <= 10000000 and self.img_host in ["imgbox", "pixhost"]: + elif image_size <= 10000000 and self.img_host in ["imgbox", "pixhost"] and not retake: pass - elif self.img_host in ["ptpimg", "lensdump", "ptscreens", "oeimg"]: + elif self.img_host in ["ptpimg", "lensdump", "ptscreens", "oeimg"] and not retake: pass - else: + elif not retake: console.print("[red]Image too large for your image host, retaking.") retake = True + time.sleep(1) if retake: - console.print(f"[yellow]Retaking screenshot for: {image_path}[/yellow]") - capture_tasks.append((file, None, image_path, keyframe, loglevel)) + retry_attempts = 3 + for attempt in range(1, retry_attempts + 1): + console.print(f"[yellow]Retaking screenshot for: {image_path} (Attempt {attempt}/{retry_attempts})[/yellow]") + try: + os.remove(image_path) + random_time = random.uniform(0, length) + self.capture_disc_task((file, random_time, image_path, keyframe, loglevel)) + self.optimize_image_task((image_path, config)) + new_size = os.path.getsize(image_path) + valid_image = False + + if new_size > 75000 and new_size <= 31000000 and self.img_host == "imgbb": + console.print(f"[green]Successfully retaken screenshot for: {image_path} ({new_size} bytes)[/green]") + valid_image = True + elif new_size > 75000 and new_size <= 10000000 and self.img_host in ["imgbox", "pixhost"]: + console.print(f"[green]Successfully retaken screenshot for: {image_path} ({new_size} bytes)[/green]") + valid_image = True + elif new_size > 75000 and self.img_host in ["ptpimg", "lensdump", "ptscreens", "oeimg"]: + console.print(f"[green]Successfully retaken screenshot for: {image_path} ({new_size} bytes)[/green]") + valid_image = True + + if valid_image: + valid_results.append(image_path) + break + else: + console.print(f"[red]Retaken image {image_path} does not meet the size requirements for {self.img_host}. Retrying...[/red]") + except Exception as e: + console.print(f"[red]Error retaking screenshot for {image_path}: {e}[/red]") + else: + console.print(f"[red]All retry attempts failed for {image_path}. 
Skipping.[/red]") + remaining_retakes.append(image_path) else: valid_results.append(image_path) + if remaining_retakes: + console.print(f"[red]The following images could not be retaken successfully: {remaining_retakes}[/red]") + for image_path in valid_results: img_dict = { 'img_url': image_path, diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 27d492805..963fd7f53 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -86,8 +86,6 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des # Handle multiple discs case elif len(discs) > 1: - if multi_screens == 0: - multi_screens = 2 # Initialize retry_count if not already set if 'retry_count' not in meta: meta['retry_count'] = 0 @@ -108,7 +106,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des if meta['debug']: console.print("[yellow]Using original uploaded images for first disc") images = meta['image_list'] - for img_index in range(min(multi_screens, len(images))): + for img_index in range(len(images[:int(meta['screens'])])): web_url = images[img_index]['web_url'] raw_url = images[img_index]['raw_url'] image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" From 6ac3b2bf1efcfbf3449b450af2e36507cbce85ad Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 16 Dec 2024 21:14:39 +1000 Subject: [PATCH 662/741] Fix torrent creation PTP-HDB When piece size > 16 MiB --- src/trackers/HDB.py | 1 + src/trackers/PTP.py | 10 +++++++--- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py index e7f0b21fd..5d703c7b5 100644 --- a/src/trackers/HDB.py +++ b/src/trackers/HDB.py @@ -236,6 +236,7 @@ async def upload(self, meta, disctype): # Create a new torrent with piece size explicitly set to 16 MiB new_torrent = prep.CustomTorrent( + meta=meta, path=Path(meta['path']), trackers=["https://fake.tracker"], source="L4G", diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index b1e9a6909..41865687a 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -619,9 +619,6 @@ async def edit_desc(self, meta): prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding="utf-8").read() multi_screens = int(self.config['DEFAULT'].get('multiScreens', 2)) - if multi_screens < 2: - multi_screens = 2 - console.print("[yellow]PTP requires at least 2 screenshots for multi disc/file content, overriding config") with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding="utf-8") as desc: images = meta['image_list'] @@ -659,6 +656,9 @@ async def edit_desc(self, meta): elif len(discs) > 1: if 'retry_count' not in meta: meta['retry_count'] = 0 + if multi_screens < 2: + multi_screens = 2 + console.print("[yellow]PTP requires at least 2 screenshots for multi disc content, overriding config") for i, each in enumerate(discs): new_images_key = f'new_images_disc_{i}' if each['type'] == "BDMV": @@ -776,6 +776,9 @@ async def edit_desc(self, meta): # Handle multiple files case elif len(filelist) > 1: + if multi_screens < 2: + multi_screens = 2 + console.print("[yellow]PTP requires at least 2 screenshots for multi disc/file content, overriding config") for i in range(len(filelist)): file = filelist[i] if i == 0: @@ -986,6 +989,7 @@ async def upload(self, meta, url, data, disctype): from src.prep import Prep prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) 
new_torrent = prep.CustomTorrent( + meta=meta, path=Path(meta['path']), trackers=[self.announce_url], source="L4G", From 004306e4744c757640b276b78325f16641687733 Mon Sep 17 00:00:00 2001 From: Khoa Pham Date: Tue, 17 Dec 2024 02:13:35 +0700 Subject: [PATCH 663/741] Set episode title to empty for SP releases SP releases never have episode title and titles are mistaken as episode title sometimes. --- src/prep.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/prep.py b/src/prep.py index d9c9e2d19..d09dba3fd 100644 --- a/src/prep.py +++ b/src/prep.py @@ -697,6 +697,7 @@ async def process_tracker(tracker_name, meta): tracks = meta.get('mediainfo').get('media', {}).get('track', []) # Get all tracks bitrate = tracks[1].get('BitRate', '') if len(tracks) > 1 else '' # Get video bitrate if available bitrate_oldMediaInfo = tracks[0].get('OverallBitRate', '') if len(tracks) > 0 else '' # For old MediaInfo (< 24.x where video bitrate is empty, use 'OverallBitRate' instead) + meta['episode_title'] = "" if (bitrate.isdigit() and int(bitrate) >= 8000000) or (bitrate_oldMediaInfo.isdigit() and int(bitrate_oldMediaInfo) >= 8000000): meta['service'] = "CR" elif (bitrate.isdigit() or bitrate_oldMediaInfo.isdigit()): # Only assign if at least one bitrate is present, otherwise leave it to user From acc8c0314d9fc1dcbafcbb09e8f332bb515cf6a2 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 17 Dec 2024 07:30:40 +1000 Subject: [PATCH 664/741] manual episode title handling --- src/args.py | 3 +++ src/prep.py | 5 ++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/args.py b/src/args.py index f51fdfd35..c18a8e683 100644 --- a/src/args.py +++ b/src/args.py @@ -40,6 +40,7 @@ def parse(self, args, meta): parser.add_argument('-edition', '--edition', '--repack', nargs='*', required=False, help="Edition/Repack String e.g.(Director's Cut, Uncut, Hybrid, REPACK, REPACK3)", type=str, dest='manual_edition', default=None) parser.add_argument('-season', '--season', nargs='*', required=False, help="Season (number)", type=str) parser.add_argument('-episode', '--episode', nargs='*', required=False, help="Episode (number)", type=str) + parser.add_argument('-met', '--manual-episode-title', nargs=1, required=False, help="Set episode title, empty = empty", type=datetime.date.fromisoformat, dest="manual_episode_title") parser.add_argument('-daily', '--daily', nargs=1, required=False, help="Air date of this episode (YYYY-MM-DD)", type=datetime.date.fromisoformat, dest="manual_date") parser.add_argument('--no-season', dest='no_season', action='store_true', required=False, help="Remove Season from title") parser.add_argument('--no-year', dest='no_year', action='store_true', required=False, help="Remove Year from title") @@ -242,6 +243,8 @@ def parse(self, args, meta): meta[key] = 100 elif key in ("tag") and value == []: meta[key] = "" + elif key in ["manual_episode_title"]: + meta[key] = value if value else "" else: meta[key] = meta.get(key, None) if key in ('trackers'): diff --git a/src/prep.py b/src/prep.py index d09dba3fd..bcc3d75b9 100644 --- a/src/prep.py +++ b/src/prep.py @@ -3414,7 +3414,10 @@ async def get_name(self, meta): source = meta.get('source', "") uhd = meta.get('uhd', "") hdr = meta.get('hdr', "") - episode_title = meta.get('episode_title', '') + if meta.get('manual_episode_title'): + episode_title = meta.get('manual_episode_title') + else: + episode_title = meta.get('episode_title', '') if meta.get('is_disc', "") == "BDMV": # Disk video_codec = meta.get('video_codec', "") region = 
meta.get('region', "") From 8cfdab99857b25befc75742993504b8642a48f08 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 17 Dec 2024 09:04:36 +1000 Subject: [PATCH 665/741] MTV - add imgbb as approved host --- src/trackers/MTV.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 1e569110c..92b8011b8 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -45,7 +45,7 @@ async def upload(self, meta, disctype): await self.upload_with_retry(meta, cookiefile, common) async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): - approved_image_hosts = ['ptpimg', 'imgbox'] + approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb'] images_reuploaded = False if all(any(host in image['raw_url'] for host in approved_image_hosts) for image in meta['image_list']): @@ -179,7 +179,7 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts=None, file=None): if approved_image_hosts is None: - approved_image_hosts = ['ptpimg', 'imgbox'] + approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb'] retry_mode = False images_reuploaded = False From 63b94ccfd4cc553b9fd8af59fbb16167e6b85c60 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 17 Dec 2024 15:43:04 +1000 Subject: [PATCH 666/741] Initial dupe checking refactor --- src/prep.py | 120 +++++++++++++++++++++++++++++--------------- src/trackersetup.py | 118 +++++++++++++++++++++++++++++++++++++++++++ src/uphelper.py | 119 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 317 insertions(+), 40 deletions(-) create mode 100644 src/trackersetup.py create mode 100644 src/uphelper.py diff --git a/src/prep.py b/src/prep.py index 94455b15c..7724a7e4f 100644 --- a/src/prep.py +++ b/src/prep.py @@ -12,6 +12,8 @@ from src.trackers.COMMON import COMMON from src.clients import Clients from data.config import config +from src.uphelper import UploadHelper +from src.trackersetup import TRACKER_SETUP, tracker_class_map try: import traceback @@ -618,46 +620,6 @@ async def process_tracker(tracker_name, meta): else: console.print("Skipping existing search as meta already populated") - if 'manual_frames' not in meta: - meta['manual_frames'] = {} - manual_frames = meta['manual_frames'] - # Take Screenshots - if meta['is_disc'] == "BDMV": - if meta.get('edit', False) is False: - if meta.get('vapoursynth', False) is True: - use_vs = True - else: - use_vs = False - try: - ds = multiprocessing.Process(target=self.disc_screenshots, args=(meta, filename, bdinfo, meta['uuid'], base_dir, use_vs, meta.get('image_list', []), meta.get('ffdebug', False), None)) - ds.start() - while ds.is_alive() is True: - await asyncio.sleep(1) - except KeyboardInterrupt: - ds.terminate() - elif meta['is_disc'] == "DVD": - if meta.get('edit', False) is False: - try: - ds = multiprocessing.Process(target=self.dvd_screenshots, args=(meta, 0, None, None)) - ds.start() - while ds.is_alive() is True: - await asyncio.sleep(1) - except KeyboardInterrupt: - ds.terminate() - else: - if meta.get('edit', False) is False: - try: - s = multiprocessing.Process( - target=self.screenshots, - args=(videopath, filename, meta['uuid'], base_dir, meta), # Positional arguments - kwargs={'manual_frames': manual_frames} # Keyword argument - ) - s.start() - while s.is_alive() is True: - await asyncio.sleep(3) - except KeyboardInterrupt: - s.terminate() - meta['tmdb'] = meta.get('tmdb_manual', None) meta['type'] = self.get_type(video, 
meta['scene'], meta['is_disc'], meta) if meta.get('category', None) is None: @@ -734,6 +696,84 @@ async def process_tracker(tracker_name, meta): else: meta['edition'] = "" + meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await self.get_name(meta) + parser = Args(config) + helper = UploadHelper() + confirm = helper.get_confirmation(meta) + while confirm is False: + editargs = cli_ui.ask_string("Input args that need correction e.g. (--tag NTb --category tv --tmdb 12345)") + editargs = (meta['path'],) + tuple(editargs.split()) + if meta.get('debug', False): + editargs += ("--debug",) + meta, help, before_args = parser.parse(editargs, meta) + meta['edit'] = True + meta = await self.gather_prep(meta=meta, mode='cli') + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: + json.dump(meta, f, indent=4) + meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await self.get_name(meta) + confirm = helper.get_confirmation(meta) + + common = COMMON(config=config) + tracker_setup = TRACKER_SETUP(config=config) + enabled_trackers = tracker_setup.trackers_enabled(meta) + + for tracker_name in enabled_trackers: + disctype = meta.get('disctype', None) + tracker_name = tracker_name.replace(" ", "").upper().strip() + + if meta['name'].endswith('DUPE?'): + meta['name'] = meta['name'].replace(' DUPE?', '') + + if tracker_name in tracker_class_map: + tracker_class = tracker_class_map[tracker_name](config=config) + if tracker_setup.check_banned_group(tracker_class.tracker, tracker_class.banned_groups, meta): + console.print("we're banned") + dupes = await tracker_class.search_existing(meta, disctype) + if 'skipping' not in meta or meta['skipping'] is None: + dupes = await common.filter_dupes(dupes, meta) + meta = helper.dupe_check(dupes, meta) + meta['skipping'] = None + + if 'manual_frames' not in meta: + meta['manual_frames'] = {} + manual_frames = meta['manual_frames'] + # Take Screenshots + if meta['is_disc'] == "BDMV": + if meta.get('edit', False) is False: + if meta.get('vapoursynth', False) is True: + use_vs = True + else: + use_vs = False + try: + ds = multiprocessing.Process(target=self.disc_screenshots, args=(meta, filename, bdinfo, meta['uuid'], base_dir, use_vs, meta.get('image_list', []), meta.get('ffdebug', False), None)) + ds.start() + while ds.is_alive() is True: + await asyncio.sleep(1) + except KeyboardInterrupt: + ds.terminate() + elif meta['is_disc'] == "DVD": + if meta.get('edit', False) is False: + try: + ds = multiprocessing.Process(target=self.dvd_screenshots, args=(meta, 0, None, None)) + ds.start() + while ds.is_alive() is True: + await asyncio.sleep(1) + except KeyboardInterrupt: + ds.terminate() + else: + if meta.get('edit', False) is False: + try: + s = multiprocessing.Process( + target=self.screenshots, + args=(videopath, filename, meta['uuid'], base_dir, meta), # Positional arguments + kwargs={'manual_frames': manual_frames} # Keyword argument + ) + s.start() + while s.is_alive() is True: + await asyncio.sleep(3) + except KeyboardInterrupt: + s.terminate() + # WORK ON THIS meta.get('stream', False) meta['stream'] = self.stream_optimized(meta['stream']) diff --git a/src/trackersetup.py b/src/trackersetup.py new file mode 100644 index 000000000..8357a0a65 --- /dev/null +++ b/src/trackersetup.py @@ -0,0 +1,118 @@ +from src.trackers.HUNO import HUNO +from src.trackers.BLU import BLU +from src.trackers.BHD import BHD +from src.trackers.AITHER import AITHER +from src.trackers.STC import STC +from 
src.trackers.R4E import R4E +from src.trackers.THR import THR +from src.trackers.STT import STT +from src.trackers.HP import HP +from src.trackers.PTP import PTP +from src.trackers.SN import SN +from src.trackers.ACM import ACM +from src.trackers.HDB import HDB +from src.trackers.LCD import LCD +from src.trackers.TTG import TTG +from src.trackers.LST import LST +from src.trackers.FL import FL +from src.trackers.LT import LT +from src.trackers.NBL import NBL +from src.trackers.ANT import ANT +from src.trackers.PTER import PTER +from src.trackers.MTV import MTV +from src.trackers.JPTV import JPTV +from src.trackers.TL import TL +from src.trackers.HDT import HDT +from src.trackers.RF import RF +from src.trackers.OE import OE +from src.trackers.BHDTV import BHDTV +from src.trackers.RTF import RTF +from src.trackers.OTW import OTW +from src.trackers.FNP import FNP +from src.trackers.CBR import CBR +from src.trackers.UTP import UTP +from src.trackers.AL import AL +from src.trackers.SHRI import SHRI +from src.trackers.TIK import TIK +from src.trackers.TVC import TVC +from src.trackers.PSS import PSS +from src.trackers.ULCX import ULCX +from src.trackers.SPD import SPD +from src.trackers.YOINK import YOINK +import cli_ui +from src.console import console + + +class TRACKER_SETUP: + def __init__(self, config): + self.config = config + # Add initialization details here + pass + + def trackers_enabled(self, meta): + from data.config import config + if meta.get('trackers', None) is not None: + trackers = meta['trackers'] + else: + trackers = config['TRACKERS']['default_trackers'] + if "," in trackers: + trackers = trackers.split(',') + + if isinstance(trackers, str): + trackers = trackers.split(',') + trackers = [s.strip().upper() for s in trackers] + if meta.get('manual', False): + trackers.insert(0, "MANUAL") + return trackers + + def check_banned_group(self, tracker, banned_group_list, meta): + if meta['tag'] == "": + return False + else: + q = False + for tag in banned_group_list: + if isinstance(tag, list): + if meta['tag'][1:].lower() == tag[0].lower(): + console.print(f"[bold yellow]{meta['tag'][1:]}[/bold yellow][bold red] was found on [bold yellow]{tracker}'s[/bold yellow] list of banned groups.") + console.print(f"[bold red]NOTE: [bold yellow]{tag[1]}") + q = True + else: + if meta['tag'][1:].lower() == tag.lower(): + console.print(f"[bold yellow]{meta['tag'][1:]}[/bold yellow][bold red] was found on [bold yellow]{tracker}'s[/bold yellow] list of banned groups.") + q = True + if q: + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): + if not cli_ui.ask_yes_no(cli_ui.red, "Upload Anyways?", default=False): + return True + else: + return True + return False + + +tracker_class_map = { + 'ACM': ACM, 'AITHER': AITHER, 'AL': AL, 'ANT': ANT, 'BHD': BHD, 'BHDTV': BHDTV, 'BLU': BLU, 'CBR': CBR, + 'FNP': FNP, 'FL': FL, 'HDB': HDB, 'HDT': HDT, 'HP': HP, 'HUNO': HUNO, 'JPTV': JPTV, 'LCD': LCD, + 'LST': LST, 'LT': LT, 'MTV': MTV, 'NBL': NBL, 'OE': OE, 'OTW': OTW, 'PSS': PSS, 'PTP': PTP, 'PTER': PTER, + 'R4E': R4E, 'RF': RF, 'RTF': RTF, 'SHRI': SHRI, 'SN': SN, 'SPD': SPD, 'STC': STC, 'STT': STT, 'THR': THR, + 'TIK': TIK, 'TL': TL, 'TVC': TVC, 'TTG': TTG, 'ULCX': ULCX, 'UTP': UTP, 'YOINK': YOINK, +} + +tracker_capabilities = { + 'AITHER': {'mod_q': True, 'draft': False}, + 'BHD': {'draft_live': True}, + 'BLU': {'mod_q': True, 'draft': False}, + 'LST': {'mod_q': True, 'draft': True} +} + +api_trackers = { + 'ACM', 'AITHER', 'AL', 'BHD', 'BLU', 'CBR', 'FNP', 'HUNO', 
'JPTV', 'LCD', 'LST', 'LT', + 'OE', 'OTW', 'PSS', 'RF', 'R4E', 'SHRI', 'STC', 'STT', 'TIK', 'ULCX', 'UTP', 'YOINK' +} + +other_api_trackers = { + 'ANT', 'BHDTV', 'NBL', 'RTF', 'SN', 'SPD', 'TL', 'TVC' +} + +http_trackers = { + 'FL', 'HDB', 'HDT', 'MTV', 'PTER', 'TTG' +} diff --git a/src/uphelper.py b/src/uphelper.py new file mode 100644 index 000000000..56e482fff --- /dev/null +++ b/src/uphelper.py @@ -0,0 +1,119 @@ +import cli_ui +from rich.console import Console +from data.config import config + +console = Console() + + +class UploadHelper: + def dupe_check(self, dupes, meta): + if not dupes: + console.print("[green]No dupes found") + meta['upload'] = True + return meta + else: + console.print() + dupe_text = "\n".join([d['name'] if isinstance(d, dict) else d for d in dupes]) + console.print() + cli_ui.info_section(cli_ui.bold, "Check if these are actually dupes!") + cli_ui.info(dupe_text) + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): + if meta.get('dupe', False) is False: + upload = cli_ui.ask_yes_no("Upload Anyways?", default=False) + else: + upload = True + else: + if meta.get('dupe', False) is False: + console.print("[red]Found potential dupes. Aborting. If this is not a dupe, or you would like to upload anyways, pass --skip-dupe-check") + upload = False + else: + console.print("[yellow]Found potential dupes. --skip-dupe-check was passed. Uploading anyways") + upload = True + console.print() + if upload is False: + meta['upload'] = False + else: + meta['upload'] = True + for each in dupes: + each_name = each['name'] if isinstance(each, dict) else each + if each_name == meta['name']: + meta['name'] = f"{meta['name']} DUPE?" + + return meta + + def get_confirmation(self, meta): + if meta['debug'] is True: + console.print("[bold red]DEBUG: True") + console.print(f"Prep material saved to {meta['base_dir']}/tmp/{meta['uuid']}") + console.print() + console.print("[bold yellow]Database Info[/bold yellow]") + console.print(f"[bold]Title:[/bold] {meta['title']} ({meta['year']})") + console.print() + console.print(f"[bold]Overview:[/bold] {meta['overview']}") + console.print() + console.print(f"[bold]Category:[/bold] {meta['category']}") + if int(meta.get('tmdb', 0)) != 0: + console.print(f"[bold]TMDB:[/bold] https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}") + if int(meta.get('imdb_id', '0')) != 0: + console.print(f"[bold]IMDB:[/bold] https://www.imdb.com/title/tt{meta['imdb_id']}") + if int(meta.get('tvdb_id', '0')) != 0: + console.print(f"[bold]TVDB:[/bold] https://www.thetvdb.com/?id={meta['tvdb_id']}&tab=series") + if int(meta.get('tvmaze_id', '0')) != 0: + console.print(f"[bold]TVMaze:[/bold] https://www.tvmaze.com/shows/{meta['tvmaze_id']}") + if int(meta.get('mal_id', 0)) != 0: + console.print(f"[bold]MAL:[/bold] https://myanimelist.net/anime/{meta['mal_id']}") + console.print() + if int(meta.get('freeleech', '0')) != 0: + console.print(f"[bold]Freeleech:[/bold] {meta['freeleech']}") + tag = "" if meta['tag'] == "" else f" / {meta['tag'][1:]}" + res = meta['source'] if meta['is_disc'] == "DVD" else meta['resolution'] + console.print(f"{res} / {meta['type']}{tag}") + if meta.get('personalrelease', False) is True: + console.print("[bold green]Personal Release![/bold green]") + console.print() + if meta.get('unattended', False) is False: + self.get_missing(meta) + ring_the_bell = "\a" if config['DEFAULT'].get("sfx_on_prompt", True) is True else "" + if ring_the_bell: + console.print(ring_the_bell) + + if meta.get('is 
disc', False) is True: + meta['keep_folder'] = False + + if meta.get('keep_folder') and meta['isdir']: + console.print("[bold yellow]Uploading with --keep-folder[/bold yellow]") + kf_confirm = input("You specified --keep-folder. Uploading in folders might not be allowed. Proceed? [y/N]: ").strip().lower() + if kf_confirm != 'y': + console.print("[bold red]Aborting...[/bold red]") + exit() + + console.print("[bold yellow]Is this correct?[/bold yellow]") + console.print(f"[bold]Name:[/bold] {meta['name']}") + confirm = input("Correct? [y/N]: ").strip().lower() == 'y' + else: + console.print(f"[bold]Name:[/bold] {meta['name']}") + confirm = True + + return confirm + + def get_missing(self, meta): + info_notes = { + 'edition': 'Special Edition/Release', + 'description': "Please include Remux/Encode Notes if possible", + 'service': "WEB Service e.g.(AMZN, NF)", + 'region': "Disc Region", + 'imdb': 'IMDb ID (tt1234567)', + 'distributor': "Disc Distributor e.g.(BFI, Criterion)" + } + missing = [] + if meta.get('imdb_id', '0') == '0': + meta['imdb_id'] = '0' + meta['potential_missing'].append('imdb_id') + for each in meta['potential_missing']: + if str(meta.get(each, '')).strip() in ["", "None", "0"]: + missing.append(f"--{each} | {info_notes.get(each, '')}") + if missing: + cli_ui.info_section(cli_ui.yellow, "Potentially missing information:") + for each in missing: + cli_ui.info(each) + console.print() From 316c262b1f5b3d7640bedb773b5bb9279270ef47 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 17 Dec 2024 16:21:02 +1000 Subject: [PATCH 667/741] track tracker status --- src/prep.py | 31 +++++++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index 7724a7e4f..5f20fcd81 100644 --- a/src/prep.py +++ b/src/prep.py @@ -717,6 +717,9 @@ async def process_tracker(tracker_name, meta): tracker_setup = TRACKER_SETUP(config=config) enabled_trackers = tracker_setup.trackers_enabled(meta) + tracker_status = {} + successful_trackers = 0 + for tracker_name in enabled_trackers: disctype = meta.get('disctype', None) tracker_name = tracker_name.replace(" ", "").upper().strip() @@ -726,14 +729,38 @@ async def process_tracker(tracker_name, meta): if tracker_name in tracker_class_map: tracker_class = tracker_class_map[tracker_name](config=config) + tracker_status[tracker_name] = {'banned': False, 'skipped': False, 'dupe': False} + if tracker_setup.check_banned_group(tracker_class.tracker, tracker_class.banned_groups, meta): - console.print("we're banned") + console.print(f"[red]Tracker '{tracker_name}' is banned. 
Skipping.[/red]")
+                    tracker_status[tracker_name]['banned'] = True
+                    continue
+
+                dupes = await tracker_class.search_existing(meta, disctype)
                 if 'skipping' not in meta or meta['skipping'] is None:
                     dupes = await common.filter_dupes(dupes, meta)
-                    meta = helper.dupe_check(dupes, meta)
+                    meta, is_dupe = helper.dupe_check(dupes, meta)
+                    if is_dupe:
+                        console.print(f"[yellow]Tracker '{tracker_name}' has confirmed dupes.[/yellow]")
+                        tracker_status[tracker_name]['dupe'] = True
+                elif meta['skipping']:
+                    tracker_status[tracker_name]['skipped'] = True
                 meta['skipping'] = None

+                if not tracker_status[tracker_name]['banned'] and not tracker_status[tracker_name]['skipped'] and not tracker_status[tracker_name]['dupe']:
+                    console.print(f"[green]Tracker '{tracker_name}' passed both checks.[/green]")
+                    successful_trackers += 1
+        if meta['debug']:
+            console.print("\n[bold]Tracker Processing Summary:[/bold]")
+            for t_name, status in tracker_status.items():
+                banned_status = 'Yes' if status['banned'] else 'No'
+                skipped_status = 'Yes' if status['skipped'] else 'No'
+                dupe_status = 'Yes' if status['dupe'] else 'No'
+                if meta['debug']:
+                    console.print(f"Tracker: {t_name} | Banned: {banned_status} | Skipped: {skipped_status} | Dupe: {dupe_status}")
+        if meta['debug']:
+            console.print(f"\n[bold]Trackers Passed all Checks:[/bold] {successful_trackers}")
+
         if 'manual_frames' not in meta:
             meta['manual_frames'] = {}
         manual_frames = meta['manual_frames']

From 4ce14cc1ccd652c05240e8a649034c176f8352cc Mon Sep 17 00:00:00 2001
From: Audionut
Date: Tue, 17 Dec 2024 16:34:13 +1000
Subject: [PATCH 668/741] Config option

---
 data/example-config.py | 5 +++++
 src/prep.py | 7 +++++++
 2 files changed, 12 insertions(+)

diff --git a/data/example-config.py b/data/example-config.py
index 32bb850bd..e7183b8d3 100644
--- a/data/example-config.py
+++ b/data/example-config.py
@@ -86,6 +86,11 @@
         # Needs a 5 second wait to ensure the API is updated
         "get_permalink": False,

+        # How many trackers need to pass successful checking to continue with the upload process
+        # Default = 1. If 1 (or more) tracker/s pass banned_group and dupe checking, uploading will continue
+        # If fewer than this many trackers pass the checks, exit immediately.
+        "tracker_pass_checks": "1",
+
     },

     "TRACKERS": {

diff --git a/src/prep.py b/src/prep.py
index 5f20fcd81..add97c503 100644
--- a/src/prep.py
+++ b/src/prep.py
@@ -761,6 +761,13 @@ async def process_tracker(tracker_name, meta):
         if meta['debug']:
             console.print(f"\n[bold]Trackers Passed all Checks:[/bold] {successful_trackers}")

+        meta['skip_uploading'] = int(self.config['DEFAULT'].get('tracker_pass_checks', 1))
+        if successful_trackers <= meta['skip_uploading']:
+            console.print(
+                f"[red]Not enough successful trackers ({successful_trackers}/{meta['skip_uploading']}). EXITING........[/red]"
+            )
+            return
+
         if 'manual_frames' not in meta:
             meta['manual_frames'] = {}
         manual_frames = meta['manual_frames']
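The option above reduces to a simple threshold gate on the count of trackers that cleared checking. A minimal standalone sketch of the behaviour the config comment describes (continue when at least "tracker_pass_checks" trackers pass); the helper name is illustrative and not part of the patch:

    # Sketch only: threshold gate for continuing with an upload.
    config = {'DEFAULT': {'tracker_pass_checks': '1'}}

    def enough_trackers_passed(successful_trackers: int) -> bool:
        # At least this many trackers must clear banned-group and dupe checking.
        required = int(config['DEFAULT'].get('tracker_pass_checks', 1))
        return successful_trackers >= required

    print(enough_trackers_passed(1))  # True: one pass meets the default threshold
    print(enough_trackers_passed(0))  # False: nothing passed, so exit before uploading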
From 31d3fc87eccda741dd075bcacac06751cca5f5b0 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Tue, 17 Dec 2024 16:38:19 +1000
Subject: [PATCH 669/741] save meta state

---
 src/prep.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/prep.py b/src/prep.py
index add97c503..eaae7ae4e 100644
--- a/src/prep.py
+++ b/src/prep.py
@@ -768,6 +768,9 @@ async def process_tracker(tracker_name, meta):
             )
             return

+        with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f:
+            json.dump(meta, f, indent=4)
+
         if 'manual_frames' not in meta:
             meta['manual_frames'] = {}
         manual_frames = meta['manual_frames']

From 682eac745866d32e13533443d2f8e2e599c45045 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Tue, 17 Dec 2024 16:47:08 +1000
Subject: [PATCH 670/741] Put status into meta with uploading

---
 src/prep.py | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/src/prep.py b/src/prep.py
index eaae7ae4e..e1c9491ba 100644
--- a/src/prep.py
+++ b/src/prep.py
@@ -729,7 +729,7 @@ async def process_tracker(tracker_name, meta):

         if tracker_name in tracker_class_map:
             tracker_class = tracker_class_map[tracker_name](config=config)
-            tracker_status[tracker_name] = {'banned': False, 'skipped': False, 'dupe': False}
+            tracker_status[tracker_name] = {'banned': False, 'skipped': False, 'dupe': False, 'upload': False}

             if tracker_setup.check_banned_group(tracker_class.tracker, tracker_class.banned_groups, meta):
                 console.print(f"[red]Tracker '{tracker_name}' is banned. Skipping.[/red]")
@@ -749,15 +749,20 @@ async def process_tracker(tracker_name, meta):

             if not tracker_status[tracker_name]['banned'] and not tracker_status[tracker_name]['skipped'] and not tracker_status[tracker_name]['dupe']:
                 console.print(f"[green]Tracker '{tracker_name}' passed both checks.[/green]")
+                tracker_status[tracker_name]['upload'] = True
                 successful_trackers += 1
+
+        meta['tracker_status'] = tracker_status
+
         if meta['debug']:
             console.print("\n[bold]Tracker Processing Summary:[/bold]")
             for t_name, status in tracker_status.items():
                 banned_status = 'Yes' if status['banned'] else 'No'
                 skipped_status = 'Yes' if status['skipped'] else 'No'
                 dupe_status = 'Yes' if status['dupe'] else 'No'
+                upload_status = 'Yes' if status['upload'] else 'No'
                 if meta['debug']:
-                    console.print(f"Tracker: {t_name} | Banned: {banned_status} | Skipped: {skipped_status} | Dupe: {dupe_status}")
+                    console.print(f"Tracker: {t_name} | Banned: {banned_status} | Skipped: {skipped_status} | Dupe: {dupe_status} | [yellow]Upload:[/yellow] {upload_status}")

From 2853d2282f0e21050f66107edc160a5123255bb3 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Tue, 17 Dec 2024 17:19:26 +1000
Subject: [PATCH 671/741] cleanup upload.py

todo: fix ptp and probably thr and probably.......
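The general shape after this cleanup, as a rough sketch (illustrative values, not the literal code): prep.py records one verdict per tracker, and upload.py only consumes the stored verdicts.

    # Sketch: shape of the status map stored by prep.py (see PATCH 670).
    meta = {}
    meta['tracker_status'] = {
        'BLU': {'banned': False, 'skipped': False, 'dupe': False, 'upload': True},
        'MTV': {'banned': False, 'skipped': False, 'dupe': True, 'upload': False},
    }

    # upload.py then gates each tracker on the stored 'upload' flag alone,
    # instead of re-running banned-group and dupe checks itself.
    for tracker, status in meta['tracker_status'].items():
        if status.get('upload', False):
            print(f"Uploading to {tracker}")  # real flow: await tracker_class.upload(meta, disctype)
        else:
            print(f"Skipping {tracker}")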
--- upload.py | 557 ++++++++++++------------------------------------------ 1 file changed, 125 insertions(+), 432 deletions(-) diff --git a/upload.py b/upload.py index 5cba67e79..8a65b7531 100644 --- a/upload.py +++ b/upload.py @@ -4,48 +4,8 @@ from src.args import Args from src.clients import Clients from src.trackers.COMMON import COMMON -from src.trackers.HUNO import HUNO -from src.trackers.BLU import BLU -from src.trackers.BHD import BHD -from src.trackers.AITHER import AITHER -from src.trackers.STC import STC -from src.trackers.R4E import R4E from src.trackers.THR import THR -from src.trackers.STT import STT -from src.trackers.HP import HP from src.trackers.PTP import PTP -from src.trackers.SN import SN -from src.trackers.ACM import ACM -from src.trackers.HDB import HDB -from src.trackers.LCD import LCD -from src.trackers.TTG import TTG -from src.trackers.LST import LST -from src.trackers.FL import FL -from src.trackers.LT import LT -from src.trackers.NBL import NBL -from src.trackers.ANT import ANT -from src.trackers.PTER import PTER -from src.trackers.MTV import MTV -from src.trackers.JPTV import JPTV -from src.trackers.TL import TL -from src.trackers.HDT import HDT -from src.trackers.RF import RF -from src.trackers.OE import OE -from src.trackers.BHDTV import BHDTV -from src.trackers.RTF import RTF -from src.trackers.OTW import OTW -from src.trackers.FNP import FNP -from src.trackers.CBR import CBR -from src.trackers.UTP import UTP -from src.trackers.AL import AL -from src.trackers.SHRI import SHRI -from src.trackers.TIK import TIK -from src.trackers.TVC import TVC -from src.trackers.PSS import PSS -from src.trackers.ULCX import ULCX -from src.trackers.SPD import SPD -from src.trackers.YOINK import YOINK -from src.trackers.PTT import PTT import json from pathlib import Path import asyncio @@ -58,6 +18,7 @@ import traceback import click import re +from src.trackersetup import TRACKER_SETUP, tracker_class_map, api_trackers, other_api_trackers, http_trackers, tracker_capabilities from src.console import console from rich.markdown import Markdown @@ -500,59 +461,13 @@ async def do_the_thing(base_dir): console.print(f"[green]Gathering info for {os.path.basename(path)}") await process_meta(meta, base_dir) prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config) - if meta.get('trackers', None) is not None: - trackers = meta['trackers'] - else: - trackers = config['TRACKERS']['default_trackers'] - if "," in trackers: - trackers = trackers.split(',') - confirm = get_confirmation(meta) - while confirm is False: - editargs = cli_ui.ask_string("Input args that need correction e.g. 
(--tag NTb --category tv --tmdb 12345)") - editargs = (meta['path'],) + tuple(editargs.split()) - if meta.get('debug', False): - editargs += ("--debug",) - meta, help, before_args = parser.parse(editargs, meta) - meta['edit'] = True - meta = await prep.gather_prep(meta=meta, mode='cli') - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: - json.dump(meta, f, indent=4) - meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await prep.get_name(meta) - confirm = get_confirmation(meta) - - if isinstance(trackers, str): - trackers = trackers.split(',') - trackers = [s.strip().upper() for s in trackers] - if meta.get('manual', False): - trackers.insert(0, "MANUAL") + #################################### ####### Upload to Trackers ####### # noqa #F266 #################################### common = COMMON(config=config) - api_trackers = [ - 'ACM', 'AITHER', 'AL', 'BHD', 'BLU', 'CBR', 'FNP', 'HUNO', 'JPTV', 'LCD', 'LST', 'LT', - 'OE', 'OTW', 'PSS', 'RF', 'R4E', 'SHRI', 'STC', 'STT', 'TIK', 'ULCX', 'UTP', 'YOINK', 'PTT' - ] - other_api_trackers = [ - 'ANT', 'BHDTV', 'NBL', 'RTF', 'SN', 'SPD', 'TL', 'TVC' - ] - http_trackers = [ - 'FL', 'HDB', 'HDT', 'MTV', 'PTER', 'TTG' - ] - tracker_class_map = { - 'ACM': ACM, 'AITHER': AITHER, 'AL': AL, 'ANT': ANT, 'BHD': BHD, 'BHDTV': BHDTV, 'BLU': BLU, 'CBR': CBR, - 'FNP': FNP, 'FL': FL, 'HDB': HDB, 'HDT': HDT, 'HP': HP, 'HUNO': HUNO, 'JPTV': JPTV, 'LCD': LCD, - 'LST': LST, 'LT': LT, 'MTV': MTV, 'NBL': NBL, 'OE': OE, 'OTW': OTW, 'PSS': PSS, 'PTP': PTP, 'PTER': PTER, - 'R4E': R4E, 'RF': RF, 'RTF': RTF, 'SHRI': SHRI, 'SN': SN, 'SPD': SPD, 'STC': STC, 'STT': STT, 'THR': THR, - 'TIK': TIK, 'TL': TL, 'TVC': TVC, 'TTG': TTG, 'ULCX': ULCX, 'UTP': UTP, 'YOINK': YOINK, 'PTT': PTT, - } - - tracker_capabilities = { - 'AITHER': {'mod_q': True, 'draft': False}, - 'BHD': {'draft_live': True}, - 'BLU': {'mod_q': True, 'draft': False}, - 'LST': {'mod_q': True, 'draft': True} - } + tracker_setup = TRACKER_SETUP(config=config) + enabled_trackers = tracker_setup.trackers_enabled(meta) async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): modq, draft = None, None @@ -575,7 +490,7 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): return modq, draft - for tracker in trackers: + for tracker in enabled_trackers: disctype = meta.get('disctype', None) tracker = tracker.replace(" ", "").upper().strip() if meta['name'].endswith('DUPE?'): @@ -588,124 +503,77 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): if tracker in api_trackers: tracker_class = tracker_class_map[tracker](config=config) - - if meta['unattended']: - upload_to_tracker = True - else: - try: - upload_to_tracker = cli_ui.ask_yes_no( - f"Upload to {tracker_class.tracker}? 
{debug}", - default=meta['unattended'] - ) - except (KeyboardInterrupt, EOFError): - sys.exit(1) # Exit immediately - - if upload_to_tracker: - # Get mod_q, draft, or draft/live depending on the tracker - modq, draft = await check_mod_q_and_draft(tracker_class, meta, debug, disctype) - - # Print mod_q and draft info if relevant - if modq is not None: - console.print(f"(modq: {modq})") - if draft is not None: - console.print(f"(draft: {draft})") - - console.print(f"Uploading to {tracker_class.tracker}") - - # Check if the group is banned for the tracker - if check_banned_group(tracker_class.tracker, tracker_class.banned_groups, meta): - continue - - dupes = await tracker_class.search_existing(meta, disctype) - if 'skipping' not in meta or meta['skipping'] is None: - dupes = await common.filter_dupes(dupes, meta) - meta = dupe_check(dupes, meta) - - # Proceed with upload if the meta is set to upload - if meta.get('upload', False): - await tracker_class.upload(meta, disctype) - perm = config['DEFAULT'].get('get_permalink', False) - if perm: - # need a wait so we don't race the api - await asyncio.sleep(5) - await tracker_class.search_torrent_page(meta, disctype) - await asyncio.sleep(0.5) - await client.add_to_client(meta, tracker_class.tracker) - meta['skipping'] = None + tracker_status = meta.get('tracker_status', {}) + for tracker, status in tracker_status.items(): + upload_status = status.get('upload', False) + print(f"Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}") + + if upload_status: + # Get mod_q, draft, or draft/live depending on the tracker + modq, draft = await check_mod_q_and_draft(tracker_class, meta, debug, disctype) + + # Print mod_q and draft info if relevant + if modq is not None: + console.print(f"(modq: {modq})") + if draft is not None: + console.print(f"(draft: {draft})") + + console.print(f"Uploading to {tracker_class.tracker}") + + await tracker_class.upload(meta, disctype) + await asyncio.sleep(0.5) + perm = config['DEFAULT'].get('get_permalink', False) + if perm: + # need a wait so we don't race the api + await asyncio.sleep(5) + await tracker_class.search_torrent_page(meta, disctype) + await asyncio.sleep(0.5) + await client.add_to_client(meta, tracker_class.tracker) if tracker in other_api_trackers: tracker_class = tracker_class_map[tracker](config=config) - - if meta['unattended']: - upload_to_tracker = True - else: - try: - upload_to_tracker = cli_ui.ask_yes_no( - f"Upload to {tracker_class.tracker}? 
{debug}", - default=meta['unattended'] - ) - except (KeyboardInterrupt, EOFError): - sys.exit(1) # Exit immediately - - if upload_to_tracker: - # Get mod_q, draft, or draft/live depending on the tracker - modq, draft = await check_mod_q_and_draft(tracker_class, meta, debug, disctype) - - # Print mod_q and draft info if relevant - if modq is not None: - console.print(f"(modq: {modq})") - if draft is not None: - console.print(f"(draft: {draft})") - - console.print(f"Uploading to {tracker_class.tracker}") - - # Check if the group is banned for the tracker - if check_banned_group(tracker_class.tracker, tracker_class.banned_groups, meta): - continue - - # Perform the existing checks for dupes except TL - if tracker != "TL": - if tracker == "RTF": - await tracker_class.api_test(meta) - - dupes = await tracker_class.search_existing(meta, disctype) - if 'skipping' not in meta or meta['skipping'] is None: - dupes = await common.filter_dupes(dupes, meta) - meta = dupe_check(dupes, meta) - - if 'skipping' not in meta or meta['skipping'] is None: - # Proceed with upload if the meta is set to upload - if tracker == "TL" or meta.get('upload', False): - await tracker_class.upload(meta, disctype) - if tracker == 'SN': - await asyncio.sleep(16) - await client.add_to_client(meta, tracker_class.tracker) - meta['skipping'] = None + tracker_status = meta.get('tracker_status', {}) + for tracker, status in tracker_status.items(): + upload_status = status.get('upload', False) + print(f"Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}") + + if upload_status: + # Get mod_q, draft, or draft/live depending on the tracker + modq, draft = await check_mod_q_and_draft(tracker_class, meta, debug, disctype) + + # Print mod_q and draft info if relevant + if modq is not None: + console.print(f"(modq: {modq})") + if draft is not None: + console.print(f"(draft: {draft})") + + console.print(f"Uploading to {tracker_class.tracker}") + + # Perform the existing checks for dupes except TL + if tracker != "TL": + if tracker == "RTF": + await tracker_class.api_test(meta) + # Proceed with upload if the meta is set to upload + if tracker == "TL" or upload_status: + await tracker_class.upload(meta, disctype) + if tracker == 'SN': + await asyncio.sleep(16) + await asyncio.sleep(0.5) + await client.add_to_client(meta, tracker_class.tracker) if tracker in http_trackers: tracker_class = tracker_class_map[tracker](config=config) + tracker_status = meta.get('tracker_status', {}) + for tracker, status in tracker_status.items(): + upload_status = status.get('upload', False) + print(f"Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}") - if meta['unattended']: - upload_to_tracker = True - else: - try: - upload_to_tracker = cli_ui.ask_yes_no( - f"Upload to {tracker_class.tracker}? 
{debug}", - default=meta['unattended'] - ) - except (KeyboardInterrupt, EOFError): - sys.exit(1) # Exit immediately - - if upload_to_tracker: - console.print(f"Uploading to {tracker}") - if check_banned_group(tracker_class.tracker, tracker_class.banned_groups, meta): - continue - if await tracker_class.validate_credentials(meta) is True: - dupes = await tracker_class.search_existing(meta, disctype) - dupes = await common.filter_dupes(dupes, meta) - meta = dupe_check(dupes, meta) - if meta['upload'] is True: + if upload_status: + console.print(f"Uploading to {tracker}") + + if await tracker_class.validate_credentials(meta) is True: await tracker_class.upload(meta, disctype) + await asyncio.sleep(0.5) await client.add_to_client(meta, tracker_class.tracker) if tracker == "MANUAL": @@ -714,7 +582,7 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): else: do_manual = cli_ui.ask_yes_no("Get files for manual upload?", default=True) if do_manual: - for manual_tracker in trackers: + for manual_tracker in enabled_trackers: if manual_tracker != 'MANUAL': manual_tracker = manual_tracker.replace(" ", "").upper().strip() tracker_class = tracker_class_map[manual_tracker](config=config) @@ -730,83 +598,66 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): console.print(f"[green]Files can be found at: [yellow]{url}[/yellow]") if tracker == "THR": - if meta['unattended']: - upload_to_thr = True - else: - try: - upload_to_ptp = cli_ui.ask_yes_no( - f"Upload to THR? {debug}", - default=meta['unattended'] - ) - except (KeyboardInterrupt, EOFError): - sys.exit(1) # Exit immediately - if upload_to_thr: - console.print("Uploading to THR") - # nable to get IMDB id/Youtube Link - if meta.get('imdb_id', '0') == '0': - imdb_id = cli_ui.ask_string("Unable to find IMDB id, please enter e.g.(tt1234567)") - meta['imdb_id'] = imdb_id.replace('tt', '').zfill(7) - if meta.get('youtube', None) is None: - youtube = cli_ui.ask_string("Unable to find youtube trailer, please link one e.g.(https://www.youtube.com/watch?v=dQw4w9WgXcQ)") - meta['youtube'] = youtube - thr = THR(config=config) - try: - with requests.Session() as session: - console.print("[yellow]Logging in to THR") - session = thr.login(session) - console.print("[yellow]Searching for Dupes") - dupes = thr.search_existing(session, disctype, meta.get('imdb_id')) - dupes = await common.filter_dupes(dupes, meta) - meta = dupe_check(dupes, meta) - if meta['upload'] is True: + tracker_status = meta.get('tracker_status', {}) + for tracker, status in tracker_status.items(): + upload_status = status.get('upload', False) + print(f"Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}") + + if upload_status: + console.print("Uploading to THR") + # nable to get IMDB id/Youtube Link + if meta.get('imdb_id', '0') == '0': + imdb_id = cli_ui.ask_string("Unable to find IMDB id, please enter e.g.(tt1234567)") + meta['imdb_id'] = imdb_id.replace('tt', '').zfill(7) + if meta.get('youtube', None) is None: + youtube = cli_ui.ask_string("Unable to find youtube trailer, please link one e.g.(https://www.youtube.com/watch?v=dQw4w9WgXcQ)") + meta['youtube'] = youtube + thr = THR(config=config) + try: + with requests.Session() as session: + console.print("[yellow]Logging in to THR") + session = thr.login(session) await thr.upload(session, meta, disctype) + await asyncio.sleep(0.5) await client.add_to_client(meta, "THR") - except Exception: - console.print(traceback.format_exc()) + except Exception: + console.print(traceback.format_exc()) if 
tracker == "PTP": - if meta['unattended']: - upload_to_ptp = True - else: - try: - upload_to_ptp = cli_ui.ask_yes_no( - f"Upload to {tracker}? {debug}", - default=meta['unattended'] - ) - except (KeyboardInterrupt, EOFError): - sys.exit(1) # Exit immediately - - if upload_to_ptp: # Ensure the variable is defined before this check - console.print(f"Uploading to {tracker}") - if meta.get('imdb_id', '0') == '0': - imdb_id = cli_ui.ask_string("Unable to find IMDB id, please enter e.g.(tt1234567)") - meta['imdb_id'] = imdb_id.replace('tt', '').zfill(7) - ptp = PTP(config=config) - if check_banned_group("PTP", ptp.banned_groups, meta): - continue - try: - console.print("[yellow]Searching for Group ID") - groupID = await ptp.get_group_by_imdb(meta['imdb_id']) - if groupID is None: - console.print("[yellow]No Existing Group found") - if meta.get('youtube', None) is None or "youtube" not in str(meta.get('youtube', '')): - youtube = cli_ui.ask_string("Unable to find youtube trailer, please link one e.g.(https://www.youtube.com/watch?v=dQw4w9WgXcQ)", default="") - meta['youtube'] = youtube - meta['upload'] = True - else: - console.print("[yellow]Searching for Existing Releases") - dupes = await ptp.search_existing(groupID, meta, disctype) - dupes = await common.filter_dupes(dupes, meta) - meta = dupe_check(dupes, meta) - if meta.get('imdb_info', {}) == {}: - meta['imdb_info'] = await prep.get_imdb_info(meta['imdb_id'], meta) - if meta['upload'] is True: - ptpUrl, ptpData = await ptp.fill_upload_form(groupID, meta) - await ptp.upload(meta, ptpUrl, ptpData, disctype) - await asyncio.sleep(5) - await client.add_to_client(meta, "PTP") - except Exception: - console.print(traceback.format_exc()) + tracker_status = meta.get('tracker_status', {}) + for tracker, status in tracker_status.items(): + upload_status = status.get('upload', False) + print(f"Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}") + + if upload_status: + console.print(f"Uploading to {tracker}") + if meta.get('imdb_id', '0') == '0': + imdb_id = cli_ui.ask_string("Unable to find IMDB id, please enter e.g.(tt1234567)") + meta['imdb_id'] = imdb_id.replace('tt', '').zfill(7) + ptp = PTP(config=config) + try: + console.print("[yellow]Searching for Group ID") + groupID = await ptp.get_group_by_imdb(meta['imdb_id']) + if groupID is None: + console.print("[yellow]No Existing Group found") + if meta.get('youtube', None) is None or "youtube" not in str(meta.get('youtube', '')): + youtube = cli_ui.ask_string("Unable to find youtube trailer, please link one e.g.(https://www.youtube.com/watch?v=dQw4w9WgXcQ)", default="") + meta['youtube'] = youtube + meta['upload'] = True + else: + console.print("[yellow]Searching for Existing Releases") + dupes = await ptp.search_existing(groupID, meta, disctype) + dupes = await common.filter_dupes(dupes, meta) + + if meta.get('imdb_info', {}) == {}: + meta['imdb_info'] = await prep.get_imdb_info(meta['imdb_id'], meta) + if meta['upload'] is True: + ptpUrl, ptpData = await ptp.fill_upload_form(groupID, meta) + await ptp.upload(meta, ptpUrl, ptpData, disctype) + await asyncio.sleep(5) + await client.add_to_client(meta, "PTP") + except Exception: + console.print(traceback.format_exc()) if meta.get('queue') is not None: processed_files_count += 1 @@ -816,164 +667,6 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): save_processed_file(log_file, path) -def get_confirmation(meta): - if meta['debug'] is True: - console.print("[bold red]DEBUG: True") - console.print(f"Prep material saved 
to {meta['base_dir']}/tmp/{meta['uuid']}") - console.print() - console.print("[bold yellow]Database Info[/bold yellow]") - console.print(f"[bold]Title:[/bold] {meta['title']} ({meta['year']})") - console.print() - console.print(f"[bold]Overview:[/bold] {meta['overview']}") - console.print() - console.print(f"[bold]Category:[/bold] {meta['category']}") - if int(meta.get('tmdb', 0)) != 0: - console.print(f"[bold]TMDB:[/bold] https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}") - if int(meta.get('imdb_id', '0')) != 0: - console.print(f"[bold]IMDB:[/bold] https://www.imdb.com/title/tt{meta['imdb_id']}") - if int(meta.get('tvdb_id', '0')) != 0: - console.print(f"[bold]TVDB:[/bold] https://www.thetvdb.com/?id={meta['tvdb_id']}&tab=series") - if int(meta.get('tvmaze_id', '0')) != 0: - console.print(f"[bold]TVMaze:[/bold] https://www.tvmaze.com/shows/{meta['tvmaze_id']}") - if int(meta.get('mal_id', 0)) != 0: - console.print(f"[bold]MAL:[/bold] https://myanimelist.net/anime/{meta['mal_id']}") - console.print() - if int(meta.get('freeleech', '0')) != 0: - console.print(f"[bold]Freeleech:[/bold] {meta['freeleech']}") - if meta['tag'] == "": - tag = "" - else: - tag = f" / {meta['tag'][1:]}" - if meta['is_disc'] == "DVD": - res = meta['source'] - else: - res = meta['resolution'] - - console.print(f"{res} / {meta['type']}{tag}") - if meta.get('personalrelease', False) is True: - console.print("[bold green]Personal Release![/bold green]") - console.print() - if meta.get('unattended', False) is False: - get_missing(meta) - ring_the_bell = "\a" if config['DEFAULT'].get("sfx_on_prompt", True) is True else "" # \a rings the bell - if ring_the_bell: - console.print(ring_the_bell) - - # Handle the 'keep_folder' logic based on 'is disc' and 'isdir' - if meta.get('is disc', False) is True: - meta['keep_folder'] = False # Ensure 'keep_folder' is False if 'is disc' is True - - if meta.get('keep_folder'): - if meta['isdir']: - console.print("[bold yellow]Uploading with --keep-folder[/bold yellow]") - kf_confirm = input("You specified --keep-folder. Uploading in folders might not be allowed. Are you sure you want to proceed? [y/N]: ").strip().lower() - if kf_confirm != 'y': - console.print("[bold red]Aborting...[/bold red]") - exit() - - console.print("[bold yellow]Is this correct?[/bold yellow]") - console.print(f"[bold]Name:[/bold] {meta['name']}") - confirm_input = input("Correct? [y/N]: ").strip().lower() - confirm = confirm_input == 'y' - - else: - console.print(f"[bold]Name:[/bold] {meta['name']}") - confirm = True - - return confirm - - -def dupe_check(dupes, meta): - if not dupes: - console.print("[green]No dupes found") - meta['upload'] = True - return meta - else: - console.print() - dupe_text = "\n".join(dupes) - console.print() - cli_ui.info_section(cli_ui.bold, "Check if these are actually dupes!") - cli_ui.info(dupe_text) - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): - if meta.get('dupe', False) is False: - upload = cli_ui.ask_yes_no("Upload Anyways?", default=False) - else: - upload = True - else: - if meta.get('dupe', False) is False: - console.print("[red]Found potential dupes. Aborting. If this is not a dupe, or you would like to upload anyways, pass --skip-dupe-check") - upload = False - else: - console.print("[yellow]Found potential dupes. --skip-dupe-check was passed. 
Uploading anyways") - upload = True - console.print() - if upload is False: - meta['upload'] = False - else: - meta['upload'] = True - for each in dupes: - if each == meta['name']: - meta['name'] = f"{meta['name']} DUPE?" - - return meta - - -# Return True if banned group -def check_banned_group(tracker, banned_group_list, meta): - if meta['tag'] == "": - return False - else: - q = False - for tag in banned_group_list: - if isinstance(tag, list): - if meta['tag'][1:].lower() == tag[0].lower(): - console.print(f"[bold yellow]{meta['tag'][1:]}[/bold yellow][bold red] was found on [bold yellow]{tracker}'s[/bold yellow] list of banned groups.") - console.print(f"[bold red]NOTE: [bold yellow]{tag[1]}") - q = True - else: - if meta['tag'][1:].lower() == tag.lower(): - console.print(f"[bold yellow]{meta['tag'][1:]}[/bold yellow][bold red] was found on [bold yellow]{tracker}'s[/bold yellow] list of banned groups.") - q = True - if q: - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): - if not cli_ui.ask_yes_no(cli_ui.red, "Upload Anyways?", default=False): - return True - else: - return True - return False - - -def get_missing(meta): - info_notes = { - 'edition': 'Special Edition/Release', - 'description': "Please include Remux/Encode Notes if possible (either here or edit your upload)", - 'service': "WEB Service e.g.(AMZN, NF)", - 'region': "Disc Region", - 'imdb': 'IMDb ID (tt1234567)', - 'distributor': "Disc Distributor e.g.(BFI, Criterion, etc)" - } - missing = [] - if meta.get('imdb_id', '0') == '0': - meta['imdb_id'] = '0' - meta['potential_missing'].append('imdb_id') - if len(meta['potential_missing']) > 0: - for each in meta['potential_missing']: - if str(meta.get(each, '')).replace(' ', '') in ["", "None", "0"]: - if each == "imdb_id": - each = 'imdb' - missing.append(f"--{each} | {info_notes.get(each)}") - if missing != []: - cli_ui.info_section(cli_ui.yellow, "Potentially missing information:") - for each in missing: - if each.split('|')[0].replace('--', '').strip() in ["imdb"]: - cli_ui.info(cli_ui.red, each) - else: - cli_ui.info(each) - - console.print() - return - - if __name__ == '__main__': pyver = platform.python_version_tuple() if int(pyver[0]) != 3 or int(pyver[1]) < 12: From 2449e952a84c72de02d1fa3d501e0e01b505e501 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 17 Dec 2024 18:01:26 +1000 Subject: [PATCH 672/741] add nano to docker fixes https://github.com/Audionut/Upload-Assistant/issues/211 --- Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile b/Dockerfile index 3c4ada561..037c52bfa 100644 --- a/Dockerfile +++ b/Dockerfile @@ -11,6 +11,7 @@ RUN apt-get update && \ mktorrent \ rustc \ mono-complete && \ + nano && \ rm -rf /var/lib/apt/lists/* # Set up a virtual environment to isolate our Python dependencies From 091f8bf4c26cad0ff451750e95e2d16655851e92 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 17 Dec 2024 18:08:01 +1000 Subject: [PATCH 673/741] fix dockerfile --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 037c52bfa..0dfb7af08 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,7 +10,7 @@ RUN apt-get update && \ cargo \ mktorrent \ rustc \ - mono-complete && \ + mono-complete \ nano && \ rm -rf /var/lib/apt/lists/* From 73ae9031b08e0b27e187d1ded1ccba8cb4e78f7c Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 17 Dec 2024 19:27:48 +1000 Subject: [PATCH 674/741] clean return --- src/prep.py | 7 +++---- upload.py | 57 
++++++++++++++++++++++++++++------------------------- 2 files changed, 33 insertions(+), 31 deletions(-) diff --git a/src/prep.py b/src/prep.py index e1c9491ba..d61f2e906 100644 --- a/src/prep.py +++ b/src/prep.py @@ -620,6 +620,7 @@ async def process_tracker(tracker_name, meta): else: console.print("Skipping existing search as meta already populated") + console.print("[yellow]Building meta data.....") meta['tmdb'] = meta.get('tmdb_manual', None) meta['type'] = self.get_type(video, meta['scene'], meta['is_disc'], meta) if meta.get('category', None) is None: @@ -767,10 +768,8 @@ async def process_tracker(tracker_name, meta): console.print(f"\n[bold]Trackers Passed all Checks:[/bold] {successful_trackers}") meta['skip_uploading'] = int(self.config['DEFAULT'].get('tracker_pass_checks', 1)) - if successful_trackers <= meta['skip_uploading']: - console.print( - f"[red]Not enough successful trackers ({successful_trackers}/{meta['skip_uploading']}). EXITING........[/red]" - ) + if successful_trackers < meta['skip_uploading']: + console.print(f"[red]Not enough successful trackers ({successful_trackers}/{meta['skip_uploading']}). EXITING........[/red]") return with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: diff --git a/upload.py b/upload.py index 8a65b7531..8046ac699 100644 --- a/upload.py +++ b/upload.py @@ -213,40 +213,43 @@ async def process_meta(meta, base_dir): prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config) meta = await prep.gather_prep(meta=meta, mode='cli') - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: - json.dump(meta, f, indent=4) - meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await prep.get_name(meta) - meta['cutoff'] = int(config['DEFAULT'].get('cutoff_screens', 3)) - if len(meta.get('image_list', [])) < meta.get('cutoff') and meta.get('skip_imghost_upload', False) is False: - if 'image_list' not in meta: - meta['image_list'] = [] - return_dict = {} - new_images, dummy_var = prep.upload_screens(meta, meta['screens'], 1, 0, meta['screens'], [], return_dict=return_dict) - + if not meta: + return + else: with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: json.dump(meta, f, indent=4) + meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await prep.get_name(meta) + meta['cutoff'] = int(config['DEFAULT'].get('cutoff_screens', 3)) + if len(meta.get('image_list', [])) < meta.get('cutoff') and meta.get('skip_imghost_upload', False) is False: + if 'image_list' not in meta: + meta['image_list'] = [] + return_dict = {} + new_images, dummy_var = prep.upload_screens(meta, meta['screens'], 1, 0, meta['screens'], [], return_dict=return_dict) + + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: + json.dump(meta, f, indent=4) + + elif meta.get('skip_imghost_upload', False) is True and meta.get('image_list', False) is False: + meta['image_list'] = [] - elif meta.get('skip_imghost_upload', False) is True and meta.get('image_list', False) is False: - meta['image_list'] = [] + torrent_path = os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") + if not os.path.exists(torrent_path): + reuse_torrent = None + if meta.get('rehash', False) is False: + reuse_torrent = await client.find_existing_torrent(meta) + if reuse_torrent is not None: + prep.create_base_from_existing_torrent(reuse_torrent, meta['base_dir'], meta['uuid']) - torrent_path = 
os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") - if not os.path.exists(torrent_path): - reuse_torrent = None - if meta.get('rehash', False) is False: - reuse_torrent = await client.find_existing_torrent(meta) - if reuse_torrent is not None: - prep.create_base_from_existing_torrent(reuse_torrent, meta['base_dir'], meta['uuid']) + if meta['nohash'] is False and reuse_torrent is None: + prep.create_torrent(meta, Path(meta['path']), "BASE") + if meta['nohash']: + meta['client'] = "none" - if meta['nohash'] is False and reuse_torrent is None: + elif os.path.exists(torrent_path) and meta.get('rehash', False) is True and meta['nohash'] is False: prep.create_torrent(meta, Path(meta['path']), "BASE") - if meta['nohash']: - meta['client'] = "none" - - elif os.path.exists(torrent_path) and meta.get('rehash', False) is True and meta['nohash'] is False: - prep.create_torrent(meta, Path(meta['path']), "BASE") - if int(meta.get('randomized', 0)) >= 1: - prep.create_random_torrents(meta['base_dir'], meta['uuid'], meta['randomized'], meta['path']) + if int(meta.get('randomized', 0)) >= 1: + prep.create_random_torrents(meta['base_dir'], meta['uuid'], meta['randomized'], meta['path']) async def do_the_thing(base_dir): From ef6aaf6c10c4e59e00a7251c3164f63aea139f6c Mon Sep 17 00:00:00 2001 From: Khoa Pham Date: Tue, 17 Dec 2024 17:54:30 +0700 Subject: [PATCH 675/741] Add -met/--manual-episode-title arg --- src/args.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/args.py b/src/args.py index c18a8e683..cf6b03aa6 100644 --- a/src/args.py +++ b/src/args.py @@ -40,7 +40,7 @@ def parse(self, args, meta): parser.add_argument('-edition', '--edition', '--repack', nargs='*', required=False, help="Edition/Repack String e.g.(Director's Cut, Uncut, Hybrid, REPACK, REPACK3)", type=str, dest='manual_edition', default=None) parser.add_argument('-season', '--season', nargs='*', required=False, help="Season (number)", type=str) parser.add_argument('-episode', '--episode', nargs='*', required=False, help="Episode (number)", type=str) - parser.add_argument('-met', '--manual-episode-title', nargs=1, required=False, help="Set episode title, empty = empty", type=datetime.date.fromisoformat, dest="manual_episode_title") + parser.add_argument('-met', '--manual-episode-title', nargs=1, required=False, help="Set episode title, empty = empty", type=str, dest="manual_episode_title") parser.add_argument('-daily', '--daily', nargs=1, required=False, help="Air date of this episode (YYYY-MM-DD)", type=datetime.date.fromisoformat, dest="manual_date") parser.add_argument('--no-season', dest='no_season', action='store_true', required=False, help="Remove Season from title") parser.add_argument('--no-year', dest='no_year', action='store_true', required=False, help="Remove Year from title") @@ -243,8 +243,10 @@ def parse(self, args, meta): meta[key] = 100 elif key in ("tag") and value == []: meta[key] = "" + elif key in ["manual_episode_title"] and value == []: + meta[key] = "" elif key in ["manual_episode_title"]: - meta[key] = value if value else "" + meta[key] = value else: meta[key] = meta.get(key, None) if key in ('trackers'): From 992666242b172f1cd91997525f85f098c5eea20c Mon Sep 17 00:00:00 2001 From: Khoa Pham Date: Tue, 17 Dec 2024 18:02:03 +0700 Subject: [PATCH 676/741] Update prep.py to set manual episode title --- src/prep.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index bcc3d75b9..d9e33b58c 100644 --- a/src/prep.py +++ 
b/src/prep.py @@ -3725,8 +3725,13 @@ async def get_season_episode(self, video, meta): # meta['season'] = "COMPLETE" meta['season_int'] = season_int meta['episode_int'] = episode_int - - meta['episode_title_storage'] = guessit(video, {"excludes": "part"}).get('episode_title', '') + + # Manual episode title + if meta['manual_episode_title'] == "": + meta['episode_title_storage'] = meta.get('manual_episode_title') + else: + meta['episode_title_storage'] = guessit(video, {"excludes": "part"}).get('episode_title', '') + if meta['season'] == "S00" or meta['episode'] == "E00": meta['episode_title'] = meta['episode_title_storage'] From 46ecd9b067073f3d22a38f438556fb85fb2a6127 Mon Sep 17 00:00:00 2001 From: Khoa Pham Date: Tue, 17 Dec 2024 18:03:47 +0700 Subject: [PATCH 677/741] fix whitespace --- src/prep.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index d9e33b58c..437deb0f1 100644 --- a/src/prep.py +++ b/src/prep.py @@ -3725,13 +3725,13 @@ async def get_season_episode(self, video, meta): # meta['season'] = "COMPLETE" meta['season_int'] = season_int meta['episode_int'] = episode_int - + # Manual episode title if meta['manual_episode_title'] == "": meta['episode_title_storage'] = meta.get('manual_episode_title') else: meta['episode_title_storage'] = guessit(video, {"excludes": "part"}).get('episode_title', '') - + if meta['season'] == "S00" or meta['episode'] == "E00": meta['episode_title'] = meta['episode_title_storage'] From 2f54e011fdb3b647de2ccb217721c63955bdb849 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 17 Dec 2024 21:23:43 +1000 Subject: [PATCH 678/741] better return handling --- src/prep.py | 2 + upload.py | 416 ++++++++++++++++++++++++++++------------------------ 2 files changed, 224 insertions(+), 194 deletions(-) diff --git a/src/prep.py b/src/prep.py index 7268b0649..f3a17c1ed 100644 --- a/src/prep.py +++ b/src/prep.py @@ -773,6 +773,8 @@ async def process_tracker(tracker_name, meta): console.print(f"[red]Not enough successful trackers ({successful_trackers}/{meta['skip_uploading']}). 
EXITING........[/red]") return + meta['we_are_uploading'] = True + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: json.dump(meta, f, indent=4) diff --git a/upload.py b/upload.py index 8046ac699..1d81f5441 100644 --- a/upload.py +++ b/upload.py @@ -463,211 +463,239 @@ async def do_the_thing(base_dir): console.print(f"[green]Gathering info for {os.path.basename(path)}") await process_meta(meta, base_dir) - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config) + if 'we_are_uploading' not in meta: + console.print("we are not uploading.......") + if meta.get('queue') is not None: + processed_files_count += 1 + console.print(f"[cyan]Processed {processed_files_count}/{total_files} files.") + if not meta['debug']: + if log_file: + save_processed_file(log_file, path) - #################################### - ####### Upload to Trackers ####### # noqa #F266 - #################################### - common = COMMON(config=config) - tracker_setup = TRACKER_SETUP(config=config) - enabled_trackers = tracker_setup.trackers_enabled(meta) + else: + prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config) - async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): - modq, draft = None, None + #################################### + ####### Upload to Trackers ####### # noqa #F266 + #################################### - tracker_caps = tracker_capabilities.get(tracker_class.tracker, {}) + common = COMMON(config=config) + tracker_setup = TRACKER_SETUP(config=config) + enabled_trackers = tracker_setup.trackers_enabled(meta) + print("Enabled Trackers:", enabled_trackers) + print("API Trackers:", api_trackers) - # Handle BHD specific draft/live logic - if tracker_class.tracker == 'BHD' and tracker_caps.get('draft_live'): - draft_int = await tracker_class.get_live(meta) - draft = "Draft" if draft_int == 0 else "Live" + async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): + modq, draft = None, None - # Handle mod_q and draft for other trackers - else: - if tracker_caps.get('mod_q'): - modq = await tracker_class.get_flag(meta, 'modq') - modq = 'Yes' if modq else 'No' - if tracker_caps.get('draft'): - draft = await tracker_class.get_flag(meta, 'draft') - draft = 'Yes' if draft else 'No' - - return modq, draft - - for tracker in enabled_trackers: - disctype = meta.get('disctype', None) - tracker = tracker.replace(" ", "").upper().strip() - if meta['name'].endswith('DUPE?'): - meta['name'] = meta['name'].replace(' DUPE?', '') - - if meta['debug']: - debug = "(DEBUG)" - else: - debug = "" - - if tracker in api_trackers: - tracker_class = tracker_class_map[tracker](config=config) - tracker_status = meta.get('tracker_status', {}) - for tracker, status in tracker_status.items(): - upload_status = status.get('upload', False) - print(f"Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}") - - if upload_status: - # Get mod_q, draft, or draft/live depending on the tracker - modq, draft = await check_mod_q_and_draft(tracker_class, meta, debug, disctype) - - # Print mod_q and draft info if relevant - if modq is not None: - console.print(f"(modq: {modq})") - if draft is not None: - console.print(f"(draft: {draft})") - - console.print(f"Uploading to {tracker_class.tracker}") - - await tracker_class.upload(meta, disctype) - await asyncio.sleep(0.5) - perm = config['DEFAULT'].get('get_permalink', False) - if perm: - # need a wait so we don't race the api - await asyncio.sleep(5) - await tracker_class.search_torrent_page(meta, 
disctype) - await asyncio.sleep(0.5) - await client.add_to_client(meta, tracker_class.tracker) - - if tracker in other_api_trackers: - tracker_class = tracker_class_map[tracker](config=config) - tracker_status = meta.get('tracker_status', {}) - for tracker, status in tracker_status.items(): - upload_status = status.get('upload', False) - print(f"Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}") - - if upload_status: - # Get mod_q, draft, or draft/live depending on the tracker - modq, draft = await check_mod_q_and_draft(tracker_class, meta, debug, disctype) - - # Print mod_q and draft info if relevant - if modq is not None: - console.print(f"(modq: {modq})") - if draft is not None: - console.print(f"(draft: {draft})") - - console.print(f"Uploading to {tracker_class.tracker}") - - # Perform the existing checks for dupes except TL - if tracker != "TL": - if tracker == "RTF": - await tracker_class.api_test(meta) - # Proceed with upload if the meta is set to upload - if tracker == "TL" or upload_status: - await tracker_class.upload(meta, disctype) - if tracker == 'SN': - await asyncio.sleep(16) - await asyncio.sleep(0.5) - await client.add_to_client(meta, tracker_class.tracker) + tracker_caps = tracker_capabilities.get(tracker_class.tracker, {}) - if tracker in http_trackers: - tracker_class = tracker_class_map[tracker](config=config) - tracker_status = meta.get('tracker_status', {}) - for tracker, status in tracker_status.items(): - upload_status = status.get('upload', False) - print(f"Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}") + # Handle BHD specific draft/live logic + if tracker_class.tracker == 'BHD' and tracker_caps.get('draft_live'): + draft_int = await tracker_class.get_live(meta) + draft = "Draft" if draft_int == 0 else "Live" + + # Handle mod_q and draft for other trackers + else: + if tracker_caps.get('mod_q'): + modq = await tracker_class.get_flag(meta, 'modq') + modq = 'Yes' if modq else 'No' + if tracker_caps.get('draft'): + draft = await tracker_class.get_flag(meta, 'draft') + draft = 'Yes' if draft else 'No' - if upload_status: - console.print(f"Uploading to {tracker}") + return modq, draft - if await tracker_class.validate_credentials(meta) is True: - await tracker_class.upload(meta, disctype) - await asyncio.sleep(0.5) - await client.add_to_client(meta, tracker_class.tracker) + for tracker in enabled_trackers: + disctype = meta.get('disctype', None) + tracker = tracker.replace(" ", "").upper().strip() + if meta['name'].endswith('DUPE?'): + meta['name'] = meta['name'].replace(' DUPE?', '') - if tracker == "MANUAL": - if meta['unattended']: - do_manual = True + if meta['debug']: + debug = "(DEBUG)" else: - do_manual = cli_ui.ask_yes_no("Get files for manual upload?", default=True) - if do_manual: - for manual_tracker in enabled_trackers: - if manual_tracker != 'MANUAL': - manual_tracker = manual_tracker.replace(" ", "").upper().strip() - tracker_class = tracker_class_map[manual_tracker](config=config) - if manual_tracker in api_trackers: - await common.unit3d_edit_desc(meta, tracker_class.tracker, tracker_class.signature) - else: - await tracker_class.edit_desc(meta) - url = await prep.package(meta) - if url is False: - console.print(f"[yellow]Unable to upload prep files, they can be found at `tmp/{meta['uuid']}") - else: - console.print(f"[green]{meta['name']}") - console.print(f"[green]Files can be found at: [yellow]{url}[/yellow]") - - if tracker == "THR": - tracker_status = meta.get('tracker_status', {}) - for tracker, status in 
tracker_status.items(): - upload_status = status.get('upload', False) - print(f"Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}") - - if upload_status: - console.print("Uploading to THR") - # nable to get IMDB id/Youtube Link - if meta.get('imdb_id', '0') == '0': - imdb_id = cli_ui.ask_string("Unable to find IMDB id, please enter e.g.(tt1234567)") - meta['imdb_id'] = imdb_id.replace('tt', '').zfill(7) - if meta.get('youtube', None) is None: - youtube = cli_ui.ask_string("Unable to find youtube trailer, please link one e.g.(https://www.youtube.com/watch?v=dQw4w9WgXcQ)") - meta['youtube'] = youtube - thr = THR(config=config) - try: - with requests.Session() as session: - console.print("[yellow]Logging in to THR") - session = thr.login(session) - await thr.upload(session, meta, disctype) + debug = "" + + if tracker in api_trackers: + tracker_class = tracker_class_map[tracker](config=config) + tracker_status = meta.get('tracker_status', {}) + upload_to_tracker = cli_ui.ask_yes_no( + f"Upload to {tracker_class.tracker}? {debug}", + default=meta['unattended'] + ) + if upload_to_tracker: + for tracker, status in tracker_status.items(): + upload_status = status.get('upload', False) + console.print(f"[red]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[/red]") + + if upload_status: + # Get mod_q, draft, or draft/live depending on the tracker + modq, draft = await check_mod_q_and_draft(tracker_class, meta, debug, disctype) + + # Print mod_q and draft info if relevant + if modq is not None: + console.print(f"(modq: {modq})") + if draft is not None: + console.print(f"(draft: {draft})") + + console.print(f"Uploading to {tracker_class.tracker}") + + await tracker_class.upload(meta, disctype) await asyncio.sleep(0.5) - await client.add_to_client(meta, "THR") - except Exception: - console.print(traceback.format_exc()) - - if tracker == "PTP": - tracker_status = meta.get('tracker_status', {}) - for tracker, status in tracker_status.items(): - upload_status = status.get('upload', False) - print(f"Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}") - - if upload_status: - console.print(f"Uploading to {tracker}") - if meta.get('imdb_id', '0') == '0': - imdb_id = cli_ui.ask_string("Unable to find IMDB id, please enter e.g.(tt1234567)") - meta['imdb_id'] = imdb_id.replace('tt', '').zfill(7) - ptp = PTP(config=config) - try: - console.print("[yellow]Searching for Group ID") - groupID = await ptp.get_group_by_imdb(meta['imdb_id']) - if groupID is None: - console.print("[yellow]No Existing Group found") - if meta.get('youtube', None) is None or "youtube" not in str(meta.get('youtube', '')): - youtube = cli_ui.ask_string("Unable to find youtube trailer, please link one e.g.(https://www.youtube.com/watch?v=dQw4w9WgXcQ)", default="") - meta['youtube'] = youtube - meta['upload'] = True - else: - console.print("[yellow]Searching for Existing Releases") - dupes = await ptp.search_existing(groupID, meta, disctype) - dupes = await common.filter_dupes(dupes, meta) - - if meta.get('imdb_info', {}) == {}: - meta['imdb_info'] = await prep.get_imdb_info(meta['imdb_id'], meta) - if meta['upload'] is True: - ptpUrl, ptpData = await ptp.fill_upload_form(groupID, meta) - await ptp.upload(meta, ptpUrl, ptpData, disctype) - await asyncio.sleep(5) - await client.add_to_client(meta, "PTP") - except Exception: - console.print(traceback.format_exc()) - - if meta.get('queue') is not None: - processed_files_count += 1 - console.print(f"[cyan]Processed {processed_files_count}/{total_files} 
files.") - if not meta['debug']: - if log_file: - save_processed_file(log_file, path) + perm = config['DEFAULT'].get('get_permalink', False) + if perm: + # need a wait so we don't race the api + await asyncio.sleep(5) + await tracker_class.search_torrent_page(meta, disctype) + await asyncio.sleep(0.5) + await client.add_to_client(meta, tracker_class.tracker) + + if tracker in other_api_trackers: + tracker_class = tracker_class_map[tracker](config=config) + tracker_status = meta.get('tracker_status', {}) + upload_to_tracker = cli_ui.ask_yes_no( + f"Upload to {tracker_class.tracker}? {debug}", + default=meta['unattended'] + ) + if upload_to_tracker: + for tracker, status in tracker_status.items(): + upload_status = status.get('upload', False) + console.print(f"[yellow]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[/yellow]") + + if upload_status: + # Get mod_q, draft, or draft/live depending on the tracker + modq, draft = await check_mod_q_and_draft(tracker_class, meta, debug, disctype) + + # Print mod_q and draft info if relevant + if modq is not None: + console.print(f"(modq: {modq})") + if draft is not None: + console.print(f"(draft: {draft})") + + console.print(f"Uploading to {tracker_class.tracker}") + + # Perform the existing checks for dupes except TL + if tracker != "TL": + if tracker == "RTF": + await tracker_class.api_test(meta) + # Proceed with upload if the meta is set to upload + if tracker == "TL" or upload_status: + await tracker_class.upload(meta, disctype) + if tracker == 'SN': + await asyncio.sleep(16) + await asyncio.sleep(0.5) + await client.add_to_client(meta, tracker_class.tracker) + + if tracker in http_trackers: + tracker_class = tracker_class_map[tracker](config=config) + tracker_status = meta.get('tracker_status', {}) + upload_to_tracker = cli_ui.ask_yes_no( + f"Upload to {tracker_class.tracker}? 
{debug}", + default=meta['unattended'] + ) + if upload_to_tracker: + for tracker, status in tracker_status.items(): + upload_status = status.get('upload', False) + console.print(f"[blue]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[/blue]") + + if upload_status: + console.print(f"Uploading to {tracker}") + + if await tracker_class.validate_credentials(meta) is True: + await tracker_class.upload(meta, disctype) + await asyncio.sleep(0.5) + await client.add_to_client(meta, tracker_class.tracker) + + if tracker == "MANUAL": + if meta['unattended']: + do_manual = True + else: + do_manual = cli_ui.ask_yes_no("Get files for manual upload?", default=True) + if do_manual: + for manual_tracker in enabled_trackers: + if manual_tracker != 'MANUAL': + manual_tracker = manual_tracker.replace(" ", "").upper().strip() + tracker_class = tracker_class_map[manual_tracker](config=config) + if manual_tracker in api_trackers: + await common.unit3d_edit_desc(meta, tracker_class.tracker, tracker_class.signature) + else: + await tracker_class.edit_desc(meta) + url = await prep.package(meta) + if url is False: + console.print(f"[yellow]Unable to upload prep files, they can be found at `tmp/{meta['uuid']}") + else: + console.print(f"[green]{meta['name']}") + console.print(f"[green]Files can be found at: [yellow]{url}[/yellow]") + + if tracker == "THR": + tracker_status = meta.get('tracker_status', {}) + for tracker, status in tracker_status.items(): + upload_status = status.get('upload', False) + print(f"Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}") + + if upload_status: + console.print("Uploading to THR") + # nable to get IMDB id/Youtube Link + if meta.get('imdb_id', '0') == '0': + imdb_id = cli_ui.ask_string("Unable to find IMDB id, please enter e.g.(tt1234567)") + meta['imdb_id'] = imdb_id.replace('tt', '').zfill(7) + if meta.get('youtube', None) is None: + youtube = cli_ui.ask_string("Unable to find youtube trailer, please link one e.g.(https://www.youtube.com/watch?v=dQw4w9WgXcQ)") + meta['youtube'] = youtube + thr = THR(config=config) + try: + with requests.Session() as session: + console.print("[yellow]Logging in to THR") + session = thr.login(session) + await thr.upload(session, meta, disctype) + await asyncio.sleep(0.5) + await client.add_to_client(meta, "THR") + except Exception: + console.print(traceback.format_exc()) + + if tracker == "PTP": + tracker_status = meta.get('tracker_status', {}) + for tracker, status in tracker_status.items(): + upload_status = status.get('upload', False) + print(f"Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}") + + if upload_status: + console.print(f"Uploading to {tracker}") + if meta.get('imdb_id', '0') == '0': + imdb_id = cli_ui.ask_string("Unable to find IMDB id, please enter e.g.(tt1234567)") + meta['imdb_id'] = imdb_id.replace('tt', '').zfill(7) + ptp = PTP(config=config) + try: + console.print("[yellow]Searching for Group ID") + groupID = await ptp.get_group_by_imdb(meta['imdb_id']) + if groupID is None: + console.print("[yellow]No Existing Group found") + if meta.get('youtube', None) is None or "youtube" not in str(meta.get('youtube', '')): + youtube = cli_ui.ask_string("Unable to find youtube trailer, please link one e.g.(https://www.youtube.com/watch?v=dQw4w9WgXcQ)", default="") + meta['youtube'] = youtube + meta['upload'] = True + else: + console.print("[yellow]Searching for Existing Releases") + dupes = await ptp.search_existing(groupID, meta, disctype) + dupes = await common.filter_dupes(dupes, meta) + + if 
meta.get('imdb_info', {}) == {}: + meta['imdb_info'] = await prep.get_imdb_info(meta['imdb_id'], meta) + if meta['upload'] is True: + ptpUrl, ptpData = await ptp.fill_upload_form(groupID, meta) + await ptp.upload(meta, ptpUrl, ptpData, disctype) + await asyncio.sleep(5) + await client.add_to_client(meta, "PTP") + except Exception: + console.print(traceback.format_exc()) + + if meta.get('queue') is not None: + processed_files_count += 1 + console.print(f"[cyan]Processed {processed_files_count}/{total_files} files.") + if not meta['debug']: + if log_file: + save_processed_file(log_file, path) if __name__ == '__main__': From ca10a3f8471ed3ea8021f395b3e7458692db1cc1 Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 17 Dec 2024 21:35:30 +1000 Subject: [PATCH 679/741] stop repeated loops over trackers --- upload.py | 123 ++++++++++++++++++++---------------------------------- 1 file changed, 46 insertions(+), 77 deletions(-) diff --git a/upload.py b/upload.py index 1d81f5441..91ca4fd9e 100644 --- a/upload.py +++ b/upload.py @@ -520,92 +520,61 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): if tracker in api_trackers: tracker_class = tracker_class_map[tracker](config=config) tracker_status = meta.get('tracker_status', {}) - upload_to_tracker = cli_ui.ask_yes_no( - f"Upload to {tracker_class.tracker}? {debug}", - default=meta['unattended'] - ) - if upload_to_tracker: - for tracker, status in tracker_status.items(): - upload_status = status.get('upload', False) - console.print(f"[red]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[/red]") - - if upload_status: - # Get mod_q, draft, or draft/live depending on the tracker - modq, draft = await check_mod_q_and_draft(tracker_class, meta, debug, disctype) - - # Print mod_q and draft info if relevant - if modq is not None: - console.print(f"(modq: {modq})") - if draft is not None: - console.print(f"(draft: {draft})") - - console.print(f"Uploading to {tracker_class.tracker}") + upload_status = tracker_status.get(tracker, {}).get('upload', False) + console.print(f"[red]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[/red]") - await tracker_class.upload(meta, disctype) - await asyncio.sleep(0.5) - perm = config['DEFAULT'].get('get_permalink', False) - if perm: - # need a wait so we don't race the api - await asyncio.sleep(5) - await tracker_class.search_torrent_page(meta, disctype) - await asyncio.sleep(0.5) - await client.add_to_client(meta, tracker_class.tracker) + if upload_status: + modq, draft = await check_mod_q_and_draft(tracker_class, meta, debug, disctype) + + if modq is not None: + console.print(f"(modq: {modq})") + if draft is not None: + console.print(f"(draft: {draft})") + + console.print(f"Uploading to {tracker_class.tracker}") + + await tracker_class.upload(meta, disctype) + await asyncio.sleep(0.5) + perm = config['DEFAULT'].get('get_permalink', False) + if perm: + # need a wait so we don't race the api + await asyncio.sleep(5) + await tracker_class.search_torrent_page(meta, disctype) + await asyncio.sleep(0.5) + await client.add_to_client(meta, tracker_class.tracker) if tracker in other_api_trackers: tracker_class = tracker_class_map[tracker](config=config) tracker_status = meta.get('tracker_status', {}) - upload_to_tracker = cli_ui.ask_yes_no( - f"Upload to {tracker_class.tracker}? 
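The point of patch 679 is visible right here: the old code re-walked every entry of `tracker_status` inside each tracker's branch, so each pass printed and checked every other tracker's status again. The rewrite relies on one keyed lookup over the dict built during the pre-upload checks; a sketch of the shape it assumes:

    # tracker_status as assembled earlier in prep; values are per-tracker flags.
    tracker_status = {
        'BLU': {'banned': False, 'skipped': False, 'dupe': False, 'upload': True},
        'OE':  {'banned': False, 'skipped': False, 'dupe': True,  'upload': False},
    }

    tracker = 'BLU'
    # One O(1) lookup instead of looping the whole dict per tracker:
    upload_status = tracker_status.get(tracker, {}).get('upload', False)
    print(upload_status)  # True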
{debug}", - default=meta['unattended'] - ) - if upload_to_tracker: - for tracker, status in tracker_status.items(): - upload_status = status.get('upload', False) - console.print(f"[yellow]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[/yellow]") - - if upload_status: - # Get mod_q, draft, or draft/live depending on the tracker - modq, draft = await check_mod_q_and_draft(tracker_class, meta, debug, disctype) - - # Print mod_q and draft info if relevant - if modq is not None: - console.print(f"(modq: {modq})") - if draft is not None: - console.print(f"(draft: {draft})") - - console.print(f"Uploading to {tracker_class.tracker}") - - # Perform the existing checks for dupes except TL - if tracker != "TL": - if tracker == "RTF": - await tracker_class.api_test(meta) - # Proceed with upload if the meta is set to upload - if tracker == "TL" or upload_status: - await tracker_class.upload(meta, disctype) - if tracker == 'SN': - await asyncio.sleep(16) - await asyncio.sleep(0.5) - await client.add_to_client(meta, tracker_class.tracker) + upload_status = tracker_status.get(tracker, {}).get('upload', False) + console.print(f"[yellow]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[/yellow]") + + if upload_status: + console.print(f"Uploading to {tracker_class.tracker}") + + if tracker != "TL": + if tracker == "RTF": + await tracker_class.api_test(meta) + if tracker == "TL" or upload_status: + await tracker_class.upload(meta, disctype) + if tracker == 'SN': + await asyncio.sleep(16) + await asyncio.sleep(0.5) + await client.add_to_client(meta, tracker_class.tracker) if tracker in http_trackers: tracker_class = tracker_class_map[tracker](config=config) tracker_status = meta.get('tracker_status', {}) - upload_to_tracker = cli_ui.ask_yes_no( - f"Upload to {tracker_class.tracker}? 
{debug}", - default=meta['unattended'] - ) - if upload_to_tracker: - for tracker, status in tracker_status.items(): - upload_status = status.get('upload', False) - console.print(f"[blue]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[/blue]") - - if upload_status: - console.print(f"Uploading to {tracker}") - - if await tracker_class.validate_credentials(meta) is True: - await tracker_class.upload(meta, disctype) - await asyncio.sleep(0.5) - await client.add_to_client(meta, tracker_class.tracker) + upload_status = tracker_status.get(tracker, {}).get('upload', False) + console.print(f"[blue]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[/blue]") + + if upload_status: + console.print(f"Uploading to {tracker}") + + if await tracker_class.validate_credentials(meta) is True: + await tracker_class.upload(meta, disctype) + await asyncio.sleep(0.5) + await client.add_to_client(meta, tracker_class.tracker) if tracker == "MANUAL": if meta['unattended']: From 0783e7b459be4576e6165f9f229c9aac3fbccb2d Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 17 Dec 2024 21:45:11 +1000 Subject: [PATCH 680/741] Clean MTV host handling --- src/prep.py | 2 +- src/trackers/MTV.py | 41 +++++++++++++++++++++++++++++++++++++---- 2 files changed, 38 insertions(+), 5 deletions(-) diff --git a/src/prep.py b/src/prep.py index 437deb0f1..a13d6e3b3 100644 --- a/src/prep.py +++ b/src/prep.py @@ -88,7 +88,7 @@ async def prompt_user_for_confirmation(self, message: str) -> bool: sys.exit(1) async def check_images_concurrently(self, imagelist, meta): - approved_image_hosts = ['ptpimg', 'imgbox'] + approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb'] invalid_host_found = False # Track if any image is on a non-approved host # Ensure meta['image_sizes'] exists diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 92b8011b8..9bfa86ebe 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -14,6 +14,7 @@ from datetime import datetime import glob import multiprocessing +from urllib.parse import urlparse class MTV(): @@ -46,9 +47,29 @@ async def upload(self, meta, disctype): async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb'] + url_host_mapping = { + "i.ibb.co": "imgbb", + "ptpimg.me": "ptpimg", + "images2.imgbox.com": "imgbox", + } images_reuploaded = False - if all(any(host in image['raw_url'] for host in approved_image_hosts) for image in meta['image_list']): + normalized_approved_hosts = set(approved_image_hosts + list(url_host_mapping.keys())) # noqa F841 + for image in meta['image_list']: + raw_url = image['raw_url'] + parsed_url = urlparse(raw_url) + hostname = parsed_url.netloc + mapped_host = url_host_mapping.get(hostname, hostname) + if meta['debug']: + if mapped_host in approved_image_hosts: + console.print(f"[green]URL '{raw_url}' is correctly matched to approved host '{mapped_host}'.") + else: + console.print(f"[red]URL '{raw_url}' is not recognized as part of an approved host.") + + if all( + url_host_mapping.get(urlparse(image['raw_url']).netloc, urlparse(image['raw_url']).netloc) in approved_image_hosts + for image in meta['image_list'] + ): console.print("[green]Images are already hosted on an approved image host. 
Skipping re-upload.") image_list = meta['image_list'] @@ -181,6 +202,12 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts if approved_image_hosts is None: approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb'] + url_host_mapping = { + "i.ibb.co": "imgbb", + "ptpimg.me": "ptpimg", + "images2.imgbox.com": "imgbox", + } + retry_mode = False images_reuploaded = False new_images_key = 'mtv_images_key' @@ -280,9 +307,15 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts for image in uploaded_images: console.print(f"[debug] Response in upload_image_task: {image['img_url']}, {image['raw_url']}, {image['web_url']}") - if not all(any(x in image['raw_url'] for x in approved_image_hosts) for image in meta.get(new_images_key, [])): - console.print("[red]Unsupported image host detected, please use one of the approved image hosts") - return meta[new_images_key], True, images_reuploaded # Trigger retry_mode if switching hosts + for image in meta.get(new_images_key, []): + raw_url = image['raw_url'] + parsed_url = urlparse(raw_url) + hostname = parsed_url.netloc + mapped_host = url_host_mapping.get(hostname, hostname) + + if mapped_host not in approved_image_hosts: + console.print(f"[red]Unsupported image host detected in URL '{raw_url}'. Please use one of the approved image hosts.") + return meta[new_images_key], True, images_reuploaded # Trigger retry_mode if switching hosts return meta[new_images_key], False, images_reuploaded From b1cf8a429fc259ca0f2826e0d7f663104019bbdb Mon Sep 17 00:00:00 2001 From: Audionut Date: Tue, 17 Dec 2024 22:16:08 +1000 Subject: [PATCH 681/741] bhd - status not satus --- src/trackers/BHD.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index 3691c4921..3cdb80939 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -164,7 +164,7 @@ async def upload_with_retry(self, meta, common, img_host_index=1): data['imdb_id'] = 1 response = requests.post(url=url, files=files, data=data, headers=headers) response = response.json() - elif response['satus_message'].startswith('Invalid name value'): + elif response['status_message'].startswith('Invalid name value'): console.print(f"[bold yellow]Submitted Name: {bhd_name}") console.print(response) except Exception: From f6148353fff6f5375729e8e2f436e1cc668c936f Mon Sep 17 00:00:00 2001 From: Audionut Date: Wed, 18 Dec 2024 16:11:06 +1000 Subject: [PATCH 682/741] better dupe console --- src/prep.py | 2 +- src/trackers/ACM.py | 2 +- src/trackers/AITHER.py | 2 +- src/trackers/AL.py | 2 +- src/trackers/ANT.py | 2 +- src/trackers/BHD.py | 2 +- src/trackers/BLU.py | 2 +- src/trackers/FNP.py | 2 +- src/trackers/HDB.py | 2 +- src/trackers/HP.py | 2 +- src/trackers/HUNO.py | 2 +- src/trackers/JPTV.py | 2 +- src/trackers/LST.py | 2 +- src/trackers/MTV.py | 2 +- src/trackers/NBL.py | 2 +- src/trackers/OE.py | 3 +-- src/trackers/OTW.py | 2 +- src/trackers/PSS.py | 2 +- src/trackers/R4E.py | 2 +- src/trackers/RF.py | 2 +- src/trackers/RTF.py | 2 +- src/trackers/SHRI.py | 2 +- src/trackers/SN.py | 2 +- src/trackers/SPD.py | 2 +- src/trackers/STC.py | 2 +- src/trackers/STT.py | 2 +- src/trackers/TIK.py | 2 +- src/trackers/TVC.py | 2 +- src/trackers/ULCX.py | 2 +- src/trackers/UTP.py | 2 +- src/trackers/YOINK.py | 2 +- 31 files changed, 31 insertions(+), 32 deletions(-) diff --git a/src/prep.py b/src/prep.py index f3a17c1ed..56aed332f 100644 --- a/src/prep.py +++ b/src/prep.py @@ -750,7 +750,7 @@ async def 
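Patch 680 replaces naive substring matching on `raw_url` with proper hostname parsing plus a hostname-to-slug map, so `i.ibb.co` counts as `imgbb` and unknown hosts fail cleanly. Distilled into a standalone helper (hosts and mapping copied from the patch; any other hostnames would need their own entries):

    from urllib.parse import urlparse

    APPROVED_IMAGE_HOSTS = ['ptpimg', 'imgbox', 'imgbb']
    URL_HOST_MAPPING = {
        "i.ibb.co": "imgbb",
        "ptpimg.me": "ptpimg",
        "images2.imgbox.com": "imgbox",
    }

    def all_images_approved(image_list):
        """True only if every raw_url resolves to an approved host slug."""
        for image in image_list:
            hostname = urlparse(image['raw_url']).netloc
            mapped = URL_HOST_MAPPING.get(hostname, hostname)
            if mapped not in APPROVED_IMAGE_HOSTS:
                return False
        return True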
process_tracker(tracker_name, meta): meta['skipping'] = None if not tracker_status[tracker_name]['banned'] and not tracker_status[tracker_name]['skipped'] and not tracker_status[tracker_name]['dupe']: - console.print(f"[green]Tracker '{tracker_name}' passed both checks.[/green]") + console.print(f"[green]Tracker '{tracker_name}' passed all checks.[/green]") tracker_status[tracker_name]['upload'] = True successful_trackers += 1 diff --git a/src/trackers/ACM.py b/src/trackers/ACM.py index 520ee3db6..76fd3b9f4 100644 --- a/src/trackers/ACM.py +++ b/src/trackers/ACM.py @@ -269,7 +269,7 @@ async def upload(self, meta, disctype): async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on ACM...") params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'tmdb': meta['tmdb'], diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 1a3f145cc..942738eec 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -227,7 +227,7 @@ async def get_res_id(self, resolution): async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on Aither...") params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'tmdbId': meta['tmdb'], diff --git a/src/trackers/AL.py b/src/trackers/AL.py index 66e3949fc..eea9eeded 100644 --- a/src/trackers/AL.py +++ b/src/trackers/AL.py @@ -162,7 +162,7 @@ async def upload(self, meta, disctype): async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on AL...") params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'tmdbId': meta['tmdb'], diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index 8a5cba97c..f88ae3796 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -147,7 +147,7 @@ async def search_existing(self, meta, disctype): meta['skipping'] = "ANT" return dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on ANT...") params = { 'apikey': self.config['TRACKERS'][self.tracker]['api_key'].strip(), 't': 'search', diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index 3691c4921..997b483af 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -410,7 +410,7 @@ async def search_existing(self, meta, disctype): meta['skipping'] = "BHD" return dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on BHD...") category = meta['category'] if category == 'MOVIE': tmdbID = "movie" diff --git a/src/trackers/BLU.py b/src/trackers/BLU.py index ec4a130ad..c658adf13 100644 --- a/src/trackers/BLU.py +++ b/src/trackers/BLU.py @@ -208,7 +208,7 @@ async def derived_dv_layer(self, meta): async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on BLU...") params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'tmdbId': meta['tmdb'], diff --git a/src/trackers/FNP.py b/src/trackers/FNP.py index 498b8ea39..371e7e147 100644 --- a/src/trackers/FNP.py +++ b/src/trackers/FNP.py @@ 
-154,7 +154,7 @@ async def upload(self, meta, disctype): async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on FNP...") params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'tmdbId': meta['tmdb'], diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py index 5d703c7b5..2279048f5 100644 --- a/src/trackers/HDB.py +++ b/src/trackers/HDB.py @@ -320,7 +320,7 @@ async def upload(self, meta, disctype): async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on HDB...") url = "https://hdbits.org/api/torrents" data = { 'username': self.username, diff --git a/src/trackers/HP.py b/src/trackers/HP.py index 771949a94..d458c572d 100644 --- a/src/trackers/HP.py +++ b/src/trackers/HP.py @@ -143,7 +143,7 @@ async def upload(self, meta, disctype): async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on HP...") params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'tmdbId': meta['tmdb'], diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index a5b9702ef..3d215259a 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -297,7 +297,7 @@ async def search_existing(self, meta, disctype): meta['skipping'] = "HUNO" return dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on HUNO...") params = { 'api_token': self.config['TRACKERS']['HUNO']['api_key'].strip(), diff --git a/src/trackers/JPTV.py b/src/trackers/JPTV.py index 1b079e2ba..4b1aae56a 100644 --- a/src/trackers/JPTV.py +++ b/src/trackers/JPTV.py @@ -150,7 +150,7 @@ async def upload(self, meta, disctype): async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on JPTV...") params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'tmdb': meta['tmdb'], diff --git a/src/trackers/LST.py b/src/trackers/LST.py index aace023d6..4144343b1 100644 --- a/src/trackers/LST.py +++ b/src/trackers/LST.py @@ -199,7 +199,7 @@ async def get_flag(self, meta, flag_name): async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on LST...") params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'tmdbId': meta['tmdb'], diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 92b8011b8..0a424cb60 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -619,7 +619,7 @@ async def login(self, cookiefile): async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on MTV...") params = { 't': 'search', 'apikey': self.config['TRACKERS'][self.tracker]['api_key'].strip(), diff --git a/src/trackers/NBL.py b/src/trackers/NBL.py index 4ee317b50..5eb39ea09 100644 --- a/src/trackers/NBL.py +++ b/src/trackers/NBL.py @@ -85,7 +85,7 @@ async def search_existing(self, meta, disctype): meta['skipping'] = "NBL" 
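Patch 682 applies the same one-line message change to thirty-odd tracker modules. Since every one of these classes already stores its short name in `self.tracker`, a single shared line would give the same output and spare the next sweep like this; a sketch, assuming the modules share (or could share) a common base class:

    from src.console import console

    class TrackerBase:
        tracker = 'BASE'  # each subclass overrides this, e.g. 'BLU', 'OE'

        def announce_search(self):
            # One definition instead of an identical edit in every module.
            console.print(f"[yellow]Searching for existing torrents on {self.tracker}...")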
return dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on NBL...") if int(meta.get('tvmaze_id', 0)) != 0: search_term = {'tvmaze': int(meta['tvmaze_id'])} elif int(meta.get('imdb_id', '0').replace('tt', '')) == 0: diff --git a/src/trackers/OE.py b/src/trackers/OE.py index 59d3d035c..63f1f55e2 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -279,7 +279,6 @@ def process_languages(tracks): else: console.print("[red]No media information available in meta.[/red]") - # Existing disc metadata handling bbcode = BBCODE() if meta.get('discs', []) != []: discs = meta['discs'] @@ -322,7 +321,7 @@ async def search_existing(self, meta, disctype): meta['skipping'] = "OE" return dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on OE...") params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'tmdbId': meta['tmdb'], diff --git a/src/trackers/OTW.py b/src/trackers/OTW.py index 3e9259359..d1cc69156 100644 --- a/src/trackers/OTW.py +++ b/src/trackers/OTW.py @@ -154,7 +154,7 @@ async def upload(self, meta, disctype): async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on OTW...") params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'tmdbId': meta['tmdb'], diff --git a/src/trackers/PSS.py b/src/trackers/PSS.py index 97f377bfb..c4abe60f4 100644 --- a/src/trackers/PSS.py +++ b/src/trackers/PSS.py @@ -156,7 +156,7 @@ async def upload(self, meta, disctype): async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on PSS...") params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'tmdbId': meta['tmdb'], diff --git a/src/trackers/R4E.py b/src/trackers/R4E.py index deaf0b095..c340c17dc 100644 --- a/src/trackers/R4E.py +++ b/src/trackers/R4E.py @@ -150,7 +150,7 @@ async def is_docu(self, genres): async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on R4E...") url = "https://racing4everyone.eu/api/torrents/filter" params = { 'api_token': self.config['TRACKERS']['R4E']['api_key'].strip(), diff --git a/src/trackers/RF.py b/src/trackers/RF.py index 9e01187c1..fe1e5a4f6 100644 --- a/src/trackers/RF.py +++ b/src/trackers/RF.py @@ -172,7 +172,7 @@ async def search_existing(self, meta, disctype): meta['skipping'] = "RF" return dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on RF...") params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'tmdbId': meta['tmdb'], diff --git a/src/trackers/RTF.py b/src/trackers/RTF.py index cb146e58c..4f1576c6a 100644 --- a/src/trackers/RTF.py +++ b/src/trackers/RTF.py @@ -100,7 +100,7 @@ async def search_existing(self, meta, disctype): meta['skipping'] = "RTF" return dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on RTF...") headers = { 'accept': 'application/json', 'Authorization': 
self.config['TRACKERS'][self.tracker]['api_key'].strip(), diff --git a/src/trackers/SHRI.py b/src/trackers/SHRI.py index 7d79fc731..a41120210 100644 --- a/src/trackers/SHRI.py +++ b/src/trackers/SHRI.py @@ -154,7 +154,7 @@ async def upload(self, meta, disctype): async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on SHRI...") params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'tmdbId': meta['tmdb'], diff --git a/src/trackers/SN.py b/src/trackers/SN.py index 199ff68e0..5f10c7eb4 100644 --- a/src/trackers/SN.py +++ b/src/trackers/SN.py @@ -123,7 +123,7 @@ async def edit_desc(self, meta): async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on SN...") params = { 'api_key': self.config['TRACKERS'][self.tracker]['api_key'].strip() diff --git a/src/trackers/SPD.py b/src/trackers/SPD.py index b5893da1f..6dfa2956b 100644 --- a/src/trackers/SPD.py +++ b/src/trackers/SPD.py @@ -125,7 +125,7 @@ async def get_cat_id(self, category_name): async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on SPD...") headers = { 'accept': 'application/json', 'Authorization': self.config['TRACKERS'][self.tracker]['api_key'].strip(), diff --git a/src/trackers/STC.py b/src/trackers/STC.py index f5e2b7ee8..ff72fc63a 100644 --- a/src/trackers/STC.py +++ b/src/trackers/STC.py @@ -170,7 +170,7 @@ async def get_res_id(self, resolution): async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on STC...") params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'tmdbId': meta['tmdb'], diff --git a/src/trackers/STT.py b/src/trackers/STT.py index 7da801754..076a0f505 100644 --- a/src/trackers/STT.py +++ b/src/trackers/STT.py @@ -148,7 +148,7 @@ async def get_res_id(self, resolution): async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on STT...") params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'tmdbId': meta['tmdb'], diff --git a/src/trackers/TIK.py b/src/trackers/TIK.py index 2fc467d04..9446d05fe 100644 --- a/src/trackers/TIK.py +++ b/src/trackers/TIK.py @@ -569,7 +569,7 @@ def country_code_to_name(self, code): async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on TIK...") disctype = meta.get('disctype', None) params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), diff --git a/src/trackers/TVC.py b/src/trackers/TVC.py index 3e184ae3b..7de6e800f 100644 --- a/src/trackers/TVC.py +++ b/src/trackers/TVC.py @@ -292,7 +292,7 @@ async def search_existing(self, meta, disctype): # https://tvchaosuk.com/api/torrents/filter?api_token=&tmdb=138108 dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on TVC...") params = { 'api_token': 
self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'tmdb': meta['tmdb'], diff --git a/src/trackers/ULCX.py b/src/trackers/ULCX.py index 67f13912a..099fac36f 100644 --- a/src/trackers/ULCX.py +++ b/src/trackers/ULCX.py @@ -159,7 +159,7 @@ async def search_existing(self, meta, disctype): meta['skipping'] = "ULCX" return dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on ULCX...") params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'tmdbId': meta['tmdb'], diff --git a/src/trackers/UTP.py b/src/trackers/UTP.py index 86f645d78..4ac259d81 100644 --- a/src/trackers/UTP.py +++ b/src/trackers/UTP.py @@ -151,7 +151,7 @@ async def get_res_id(self, resolution): async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on UTP...") params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'tmdbId': meta['tmdb'], diff --git a/src/trackers/YOINK.py b/src/trackers/YOINK.py index 90003c132..7fca97365 100644 --- a/src/trackers/YOINK.py +++ b/src/trackers/YOINK.py @@ -154,7 +154,7 @@ async def upload(self, meta, disctype): async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on YOINK...") params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'tmdbId': meta['tmdb'], From 06a60bc3b97a3650d9bc9b5d390a178c95543c1c Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 19 Dec 2024 09:09:42 +1000 Subject: [PATCH 683/741] Don't tag BD with region guessit was tagging some bd incorrectly --- src/prep.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/prep.py b/src/prep.py index ba7057b38..9d71b1a55 100644 --- a/src/prep.py +++ b/src/prep.py @@ -688,6 +688,8 @@ async def process_tracker(tracker_name, meta): if meta.get('is_disc', None) == "BDMV": # Blu-ray Specific meta['region'] = self.get_region(bdinfo, meta.get('region', None)) meta['video_codec'] = self.get_video_codec(bdinfo) + if meta['tag'][1:].startswith(meta['region']): + meta['tag'] = meta['tag'].replace(f"-{meta['region']}", '') else: meta['video_encode'], meta['video_codec'], meta['has_encode_settings'], meta['bit_depth'] = self.get_video_encode(mi, meta['type'], bdinfo) if meta.get('no_edition') is False: From e475c0f19b319bbbd0ed90a338a302bebf63f9fe Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 19 Dec 2024 09:10:43 +1000 Subject: [PATCH 684/741] Don't try process TV meta if not TV --- src/prep.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index 9d71b1a55..6c56fb51f 100644 --- a/src/prep.py +++ b/src/prep.py @@ -643,7 +643,8 @@ async def process_tracker(tracker_name, meta): else: meta = await self.tmdb_other_meta(meta) # Search tvmaze - meta['tvmaze_id'], meta['imdb_id'], meta['tvdb_id'] = await self.search_tvmaze(filename, meta['search_year'], meta.get('imdb_id', '0'), meta.get('tvdb_id', 0), meta) + if meta['category'] == "TV": + meta['tvmaze_id'], meta['imdb_id'], meta['tvdb_id'] = await self.search_tvmaze(filename, meta['search_year'], meta.get('imdb_id', '0'), meta.get('tvdb_id', 0), meta) # If no imdb, search for it if meta.get('imdb_id', None) is None: meta['imdb_id'] = await self.search_imdb(filename, meta['search_year']) @@ -654,7 +655,8 @@ async def 
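Patch 683's region strip assumes `meta['tag']` is a non-empty "-TAG" string and `meta['region']` a string; if either can be empty or None at that point, `tag[1:]` or `startswith` misbehaves. A slightly more defensive variant (the guards are an editorial suggestion, not in the patch):

    def strip_region_from_tag(tag, region):
        """Drop a group 'tag' that is really the disc region guessit misread."""
        if not tag or not region:
            return tag
        if tag[1:].startswith(region):
            return tag.replace(f"-{region}", '')
        return tag

    # e.g. strip_region_from_tag('-USA', 'USA') -> ''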
process_tracker(tracker_name, meta): else: if not meta['tag'].startswith('-') and meta['tag'] != "": meta['tag'] = f"-{meta['tag']}" - meta = await self.get_season_episode(video, meta) + if meta['category'] == "TV": + meta = await self.get_season_episode(video, meta) meta = await self.tag_override(meta) if meta.get('tag') == "-SubsPlease": # SubsPlease-specific tracks = meta.get('mediainfo').get('media', {}).get('track', []) # Get all tracks From 481e4bbfd133fea43298edf5ee3f2741dbdf5255 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 19 Dec 2024 11:26:17 +1000 Subject: [PATCH 685/741] refactor IMDB handling Messy code with all console prints still present. MOVIE - before/after: Metadata processed in 15.62 seconds Metadata processed in 6.23 seconds TV - before/after: Metadata processed in 19.07 seconds Metadata processed in 8.41 seconds --- src/prep.py | 245 ++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 238 insertions(+), 7 deletions(-) diff --git a/src/prep.py b/src/prep.py index 6c56fb51f..83f9d85d4 100644 --- a/src/prep.py +++ b/src/prep.py @@ -57,6 +57,7 @@ import io from io import BytesIO import sys + import httpx except ModuleNotFoundError: console.print(traceback.print_exc()) console.print('[bold red]Missing Module Found. Please reinstall required dependancies.') @@ -621,19 +622,29 @@ async def process_tracker(tracker_name, meta): console.print("Skipping existing search as meta already populated") console.print("[yellow]Building meta data.....") + if meta['debug']: + meta_start_time = time.time() meta['tmdb'] = meta.get('tmdb_manual', None) meta['type'] = self.get_type(video, meta['scene'], meta['is_disc'], meta) if meta.get('category', None) is None: meta['category'] = self.get_cat(video) else: meta['category'] = meta['category'].upper() + meta_middle33_time = time.time() + console.print(f"before tmdb processing {meta_middle33_time - meta_start_time:.2f} seconds") if meta.get('tmdb', None) is None and meta.get('imdb', None) is None: meta['category'], meta['tmdb'], meta['imdb'] = self.get_tmdb_imdb_from_mediainfo(mi, meta['category'], meta['is_disc'], meta['tmdb'], meta['imdb']) + meta_middle34_time = time.time() + console.print(f"get_tmdb_imdb_from_mediainfo processing {meta_middle34_time - meta_start_time:.2f} seconds") if meta.get('tmdb', None) is None and meta.get('imdb', None) is None: meta = await self.get_tmdb_id(filename, meta['search_year'], meta, meta['category'], untouched_filename) + meta_middle36_time = time.time() + console.print(f"get_tmdb_id processing {meta_middle36_time - meta_start_time:.2f} seconds") elif meta.get('imdb', None) is not None and meta.get('tmdb_manual', None) is None: meta['imdb_id'] = str(meta['imdb']).replace('tt', '') meta = await self.get_tmdb_from_imdb(meta, filename) + meta_middle35_time = time.time() + console.print(f"get_tmdb_from_imdb processing {meta_middle35_time - meta_start_time:.2f} seconds") else: meta['tmdb_manual'] = meta.get('tmdb', None) @@ -646,10 +657,16 @@ async def process_tracker(tracker_name, meta): if meta['category'] == "TV": meta['tvmaze_id'], meta['imdb_id'], meta['tvdb_id'] = await self.search_tvmaze(filename, meta['search_year'], meta.get('imdb_id', '0'), meta.get('tvdb_id', 0), meta) # If no imdb, search for it + meta_middle_time = time.time() + console.print(f"Metadata middle processed in {meta_middle_time - meta_start_time:.2f} seconds") if meta.get('imdb_id', None) is None: meta['imdb_id'] = await self.search_imdb(filename, meta['search_year']) + meta_middle1_time = time.time() + 
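The timing probes sprinkled through this refactor (and swept out again in patch 686) are the manual version of a small reusable timer; something like the context manager below keeps the prints gated on the debug flag without littering the function body. A sketch only, not part of any patch:

    import time
    from contextlib import contextmanager

    @contextmanager
    def timed(label, debug=True):
        # perf_counter is steadier than time.time() for measuring intervals
        start = time.perf_counter()
        try:
            yield
        finally:
            if debug:
                print(f"{label} processed in {time.perf_counter() - start:.2f} seconds")

    # usage:
    # with timed("Metadata", meta['debug']):
    #     ...build meta...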
console.print(f"Metadata middle1 processed in {meta_middle1_time - meta_middle_time:.2f} seconds") if meta.get('imdb_info', None) is None and int(meta['imdb_id']) != 0: - meta['imdb_info'] = await self.get_imdb_info(meta['imdb_id'], meta) + meta['imdb_info'] = await self.get_imdb_info_api(meta['imdb_id'], meta) + meta_middle2_time = time.time() + console.print(f"Metadata middle2 processed in {meta_middle2_time - meta_middle_time:.2f} seconds") if meta.get('tag', None) is None: meta['tag'] = self.get_tag(video, meta) else: @@ -668,6 +685,7 @@ async def process_tracker(tracker_name, meta): elif (bitrate.isdigit() or bitrate_oldMediaInfo.isdigit()): # Only assign if at least one bitrate is present, otherwise leave it to user meta['service'] = "HIDI" meta['video'] = video + console.print("Original Lnguage:", meta['original_language']) meta['audio'], meta['channels'], meta['has_commentary'] = self.get_audio_v2(mi, meta, bdinfo) if meta['tag'][1:].startswith(meta['channels']): meta['tag'] = meta['tag'].replace(f"-{meta['channels']}", '') @@ -703,6 +721,9 @@ async def process_tracker(tracker_name, meta): meta['edition'] = "" meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await self.get_name(meta) + if meta['debug']: + meta_finish_time = time.time() + console.print(f"Metadata processed in {meta_finish_time - meta_start_time:.2f} seconds") parser = Args(config) helper = UploadHelper() confirm = helper.get_confirmation(meta) @@ -2055,11 +2076,13 @@ async def get_tmdb_from_imdb(self, meta, filename): if len(info['movie_results']) >= 1: meta['category'] = "MOVIE" meta['tmdb'] = info['movie_results'][0]['id'] + meta['original_language'] = info['movie_results'][0].get('original_language') elif len(info['tv_results']) >= 1: meta['category'] = "TV" meta['tmdb'] = info['tv_results'][0]['id'] + meta['original_language'] = info['tv_results'][0].get('original_language') else: - imdb_info = await self.get_imdb_info(imdb_id.replace('tt', ''), meta) + imdb_info = await self.get_imdb_info_api(imdb_id.replace('tt', ''), meta) title = imdb_info.get("title") if title is None: title = filename @@ -2169,7 +2192,7 @@ async def tmdb_other_meta(self, meta): except Exception: console.print('[yellow]Unable to grab videos from TMDb.') - meta['aka'], original_language = await self.get_imdb_aka(meta['imdb_id']) + meta['aka'], original_language = await self.get_imdb_aka_api(meta['imdb_id']) if original_language is not None: meta['original_language'] = original_language else: @@ -2223,7 +2246,7 @@ async def tmdb_other_meta(self, meta): console.print('[yellow]Unable to grab videos from TMDb.') # meta['aka'] = f" AKA {response['original_name']}" - meta['aka'], original_language = await self.get_imdb_aka(meta['imdb_id']) + meta['aka'], original_language = await self.get_imdb_aka_api(meta['imdb_id']) if original_language is not None: meta['original_language'] = original_language else: @@ -4218,21 +4241,97 @@ async def package(self, meta): return False return + async def get_imdb_aka_api(self, imdb_id): + start_time = time.time() + if imdb_id == "0": + return "", None + if not imdb_id.startswith("tt"): + imdb_id = f"tt{imdb_id}" + url = "https://api.graphql.imdb.com/" + query = { + "query": f""" + query {{ + title(id: "{imdb_id}") {{ + id + titleText {{ + text + isOriginalTitle + }} + originalTitleText {{ + text + }} + countriesOfOrigin {{ + countries {{ + id + }} + }} + }} + }} + """ + } + + headers = { + "Content-Type": "application/json", + } + + async with httpx.AsyncClient() as client: + 
response = await client.post(url, headers=headers, json=query) + + if response.status_code != 200: + console.print(f"Failed to fetch data: {response.status_code}, {response.text}") + return "", None + + try: + data = response.json() + console.print("json data:", data) + except json.JSONDecodeError: + console.print("Error parsing JSON response") + return "", None + + # Check if `data` and `title` exist + title_data = data.get("data", {}).get("title") + if title_data is None: + console.print("Title data is missing from response") + return "", None + + # Extract relevant fields from the response + aka = title_data.get("originalTitleText", {}).get("text", "") + is_original = title_data.get("titleText", {}).get("isOriginalTitle", False) + original_language = None + + console.print("title_data:", title_data) + console.print("original title:", aka) + console.print("is_original:", is_original) + + if not is_original and aka: + aka = f" AKA {aka}" + console.print("aka", aka) + finish_time = time.time() + console.print(f"IMDB AKA with API processed in {finish_time - start_time:.4f} seconds") + return aka, original_language + async def get_imdb_aka(self, imdb_id): + start_time = time.time() if imdb_id == "0": return "", None + if not imdb_id.startswith("tt"): + imdb_id = f"tt{imdb_id}" ia = Cinemagoer() result = ia.get_movie(imdb_id.replace('tt', '')) - + console.print("result:", result) original_language = result.get('language codes') + console.print("original language list:", original_language) if isinstance(original_language, list): if len(original_language) > 1: original_language = None elif len(original_language) == 1: original_language = original_language[0] aka = result.get('original title', result.get('localized title', "")).replace(' - IMDb', '').replace('\u00ae', '') + console.print("AKA:", aka) if aka != "": aka = f" AKA {aka}" + finish_time = time.time() + console.print(f"IMDB AKA with Cinemagoer processed in {finish_time - start_time:.4f} seconds") return aka, original_language async def get_dvd_size(self, discs, manual_dvds): @@ -4288,6 +4387,138 @@ def daily_to_tmdb_season_episode(self, tmdbid, date): console.print(f"[yellow]Unable to map the date ([bold yellow]{str(date)}[/bold yellow]) to a Season/Episode number") return season, episode + async def get_imdb_info_api(self, imdbID, meta): + imdb_info = {} + + if imdbID == "0": + return "", None + else: + if not imdbID.startswith("tt"): + imdbIDtt = f"tt{imdbID}" + query = { + "query": f""" + query GetTitleInfo {{ + title(id: "{imdbIDtt}") {{ + id + titleText {{ + text + isOriginalTitle + }} + originalTitleText {{ + text + }} + releaseYear {{ + year + }} + titleType {{ + id + }} + plot {{ + plotText {{ + plainText + }} + }} + ratingsSummary {{ + aggregateRating + voteCount + }} + primaryImage {{ + url + }} + runtime {{ + displayableProperty {{ + value {{ + plainText + }} + }} + seconds + }} + titleGenres {{ + genres {{ + genre {{ + text + }} + }} + }} + principalCredits {{ + category {{ + text + id + }} + credits {{ + name {{ + id + nameText {{ + text + }} + }} + }} + }} + }} + }} + """ + } + + url = "https://api.graphql.imdb.com/" + headers = {"Content-Type": "application/json"} + + response = requests.post(url, json=query, headers=headers) + data = response.json() + + title_data = data.get("data", {}).get("title", {}) + if not title_data: + return meta + + imdb_info['imdbID'] = imdbID + imdb_info['title'] = title_data.get('titleText', {}).get('text', '') + imdb_info['year'] = title_data.get('releaseYear', {}).get('year', '') + 
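The field mapping around this point walks the GraphQL payload with long chains of `.get()` calls (year above, runtime and directors just below). A compact helper for that pattern, with the runtime conversion as the worked example; illustrative only, not in the patch:

    def dig(obj, *keys, default=None):
        """Walk nested dicts, returning default on the first missing key."""
        for key in keys:
            if not isinstance(obj, dict):
                return default
            obj = obj.get(key)
        return obj if obj is not None else default

    title_data = {'runtime': {'seconds': 5520}}  # sample payload shape
    # runtime arrives in seconds; store whole minutes, as the patch does
    runtime_minutes = dig(title_data, 'runtime', 'seconds', default=0) // 60
    print(runtime_minutes)  # 92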
original_title = title_data.get('originalTitleText', {}).get('text', '') + if not original_title or original_title == imdb_info['title']: + original_title = imdb_info['title'] + imdb_info['aka'] = original_title + imdb_info['type'] = title_data.get('titleType', {}).get('id') + runtime_data = title_data.get('runtime', {}) + runtime_seconds = runtime_data.get('seconds', 0) + if runtime_seconds: + runtime_minutes = runtime_seconds // 60 + else: + runtime_minutes = 0 + + imdb_info['runtime'] = str(runtime_minutes) + imdb_info['cover'] = title_data.get('primaryImage', {}).get('url', '') + if not imdb_info['cover']: + imdb_info['cover'] = meta.get('poster', '') + imdb_info['plot'] = title_data.get('plot', {}).get('plotText', {}).get('plainText', '') + genres = title_data.get('titleGenres', {}).get('genres', []) + genre_list = [g.get('genre', {}).get('text', '') for g in genres if g.get('genre', {}).get('text')] + imdb_info['genres'] = ', '.join(genre_list) + imdb_info['rating'] = title_data.get('ratingsSummary', {}).get('aggregateRating', 'N/A') + imdb_info['directors'] = [] + principal_credits = title_data.get('principalCredits', []) + for pc in principal_credits: + category_text = pc.get('category', {}).get('text', '') + if 'Direct' in category_text: + credits = pc.get('credits', []) + for c in credits: + name_id = c.get('name', {}).get('id', '') + if name_id.startswith('nm'): + imdb_info['directors'].append(name_id) + break + + if not title_data: + imdb_info = { + 'title': meta['title'], + 'year': meta['year'], + 'aka': '', + 'type': None, + 'runtime': meta.get('runtime', '60'), + 'cover': meta.get('poster'), + } + if len(meta.get('tmdb_directors', [])) >= 1: + imdb_info['directors'] = meta['tmdb_directors'] + console.print("imdb_info:", imdb_info) + return imdb_info + async def get_imdb_info(self, imdbID, meta): imdb_info = {} if int(str(imdbID).replace('tt', '')) != 0: @@ -4326,7 +4557,7 @@ async def get_imdb_info(self, imdbID, meta): } if len(meta.get('tmdb_directors', [])) >= 1: imdb_info['directors'] = meta['tmdb_directors'] - + console.print("imdb_info:", imdb_info) return imdb_info async def search_imdb(self, filename, search_year): @@ -4340,7 +4571,7 @@ async def search_imdb(self, filename, search_year): return imdbID async def imdb_other_meta(self, meta): - imdb_info = meta['imdb_info'] = await self.get_imdb_info(meta['imdb_id'], meta) + imdb_info = meta['imdb_info'] = await self.get_imdb_info_api(meta['imdb_id'], meta) meta['title'] = imdb_info['title'] meta['year'] = imdb_info['year'] meta['aka'] = imdb_info['aka'] From 4ff9ef5c1568b66b7afcd7db005fd457192f7620 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 19 Dec 2024 13:37:34 +1000 Subject: [PATCH 686/741] cleanup --- src/prep.py | 48 +++--------------------------------------------- 1 file changed, 3 insertions(+), 45 deletions(-) diff --git a/src/prep.py b/src/prep.py index 83f9d85d4..3209a16be 100644 --- a/src/prep.py +++ b/src/prep.py @@ -57,7 +57,6 @@ import io from io import BytesIO import sys - import httpx except ModuleNotFoundError: console.print(traceback.print_exc()) console.print('[bold red]Missing Module Found. 
Please reinstall required dependancies.') @@ -630,21 +629,13 @@ async def process_tracker(tracker_name, meta): meta['category'] = self.get_cat(video) else: meta['category'] = meta['category'].upper() - meta_middle33_time = time.time() - console.print(f"before tmdb processing {meta_middle33_time - meta_start_time:.2f} seconds") if meta.get('tmdb', None) is None and meta.get('imdb', None) is None: meta['category'], meta['tmdb'], meta['imdb'] = self.get_tmdb_imdb_from_mediainfo(mi, meta['category'], meta['is_disc'], meta['tmdb'], meta['imdb']) - meta_middle34_time = time.time() - console.print(f"get_tmdb_imdb_from_mediainfo processing {meta_middle34_time - meta_start_time:.2f} seconds") if meta.get('tmdb', None) is None and meta.get('imdb', None) is None: meta = await self.get_tmdb_id(filename, meta['search_year'], meta, meta['category'], untouched_filename) - meta_middle36_time = time.time() - console.print(f"get_tmdb_id processing {meta_middle36_time - meta_start_time:.2f} seconds") elif meta.get('imdb', None) is not None and meta.get('tmdb_manual', None) is None: meta['imdb_id'] = str(meta['imdb']).replace('tt', '') meta = await self.get_tmdb_from_imdb(meta, filename) - meta_middle35_time = time.time() - console.print(f"get_tmdb_from_imdb processing {meta_middle35_time - meta_start_time:.2f} seconds") else: meta['tmdb_manual'] = meta.get('tmdb', None) @@ -657,16 +648,10 @@ async def process_tracker(tracker_name, meta): if meta['category'] == "TV": meta['tvmaze_id'], meta['imdb_id'], meta['tvdb_id'] = await self.search_tvmaze(filename, meta['search_year'], meta.get('imdb_id', '0'), meta.get('tvdb_id', 0), meta) # If no imdb, search for it - meta_middle_time = time.time() - console.print(f"Metadata middle processed in {meta_middle_time - meta_start_time:.2f} seconds") if meta.get('imdb_id', None) is None: meta['imdb_id'] = await self.search_imdb(filename, meta['search_year']) - meta_middle1_time = time.time() - console.print(f"Metadata middle1 processed in {meta_middle1_time - meta_middle_time:.2f} seconds") if meta.get('imdb_info', None) is None and int(meta['imdb_id']) != 0: meta['imdb_info'] = await self.get_imdb_info_api(meta['imdb_id'], meta) - meta_middle2_time = time.time() - console.print(f"Metadata middle2 processed in {meta_middle2_time - meta_middle_time:.2f} seconds") if meta.get('tag', None) is None: meta['tag'] = self.get_tag(video, meta) else: @@ -685,7 +670,6 @@ async def process_tracker(tracker_name, meta): elif (bitrate.isdigit() or bitrate_oldMediaInfo.isdigit()): # Only assign if at least one bitrate is present, otherwise leave it to user meta['service'] = "HIDI" meta['video'] = video - console.print("Original Lnguage:", meta['original_language']) meta['audio'], meta['channels'], meta['has_commentary'] = self.get_audio_v2(mi, meta, bdinfo) if meta['tag'][1:].startswith(meta['channels']): meta['tag'] = meta['tag'].replace(f"-{meta['channels']}", '') @@ -4242,7 +4226,6 @@ async def package(self, meta): return async def get_imdb_aka_api(self, imdb_id): - start_time = time.time() if imdb_id == "0": return "", None if not imdb_id.startswith("tt"): @@ -4274,19 +4257,8 @@ async def get_imdb_aka_api(self, imdb_id): "Content-Type": "application/json", } - async with httpx.AsyncClient() as client: - response = await client.post(url, headers=headers, json=query) - - if response.status_code != 200: - console.print(f"Failed to fetch data: {response.status_code}, {response.text}") - return "", None - - try: - data = response.json() - console.print("json data:", data) - except 
json.JSONDecodeError: - console.print("Error parsing JSON response") - return "", None + response = requests.post(url, headers=headers, json=query) + data = response.json() # Check if `data` and `title` exist title_data = data.get("data", {}).get("title") @@ -4299,39 +4271,27 @@ async def get_imdb_aka_api(self, imdb_id): is_original = title_data.get("titleText", {}).get("isOriginalTitle", False) original_language = None - console.print("title_data:", title_data) - console.print("original title:", aka) - console.print("is_original:", is_original) - if not is_original and aka: aka = f" AKA {aka}" - console.print("aka", aka) - finish_time = time.time() - console.print(f"IMDB AKA with API processed in {finish_time - start_time:.4f} seconds") + return aka, original_language async def get_imdb_aka(self, imdb_id): - start_time = time.time() if imdb_id == "0": return "", None if not imdb_id.startswith("tt"): imdb_id = f"tt{imdb_id}" ia = Cinemagoer() result = ia.get_movie(imdb_id.replace('tt', '')) - console.print("result:", result) original_language = result.get('language codes') - console.print("original language list:", original_language) if isinstance(original_language, list): if len(original_language) > 1: original_language = None elif len(original_language) == 1: original_language = original_language[0] aka = result.get('original title', result.get('localized title', "")).replace(' - IMDb', '').replace('\u00ae', '') - console.print("AKA:", aka) if aka != "": aka = f" AKA {aka}" - finish_time = time.time() - console.print(f"IMDB AKA with Cinemagoer processed in {finish_time - start_time:.4f} seconds") return aka, original_language async def get_dvd_size(self, discs, manual_dvds): @@ -4516,7 +4476,6 @@ async def get_imdb_info_api(self, imdbID, meta): } if len(meta.get('tmdb_directors', [])) >= 1: imdb_info['directors'] = meta['tmdb_directors'] - console.print("imdb_info:", imdb_info) return imdb_info async def get_imdb_info(self, imdbID, meta): @@ -4557,7 +4516,6 @@ async def get_imdb_info(self, imdbID, meta): } if len(meta.get('tmdb_directors', [])) >= 1: imdb_info['directors'] = meta['tmdb_directors'] - console.print("imdb_info:", imdb_info) return imdb_info async def search_imdb(self, filename, search_year): From 74402bdd6553b1964bcc01e3d2c5861e010bacca Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 19 Dec 2024 14:23:12 +1000 Subject: [PATCH 687/741] ptp-thr fixes --- src/prep.py | 27 +++++++++++++++++- upload.py | 80 ++++++++++++++++------------------------------------- 2 files changed, 50 insertions(+), 57 deletions(-) diff --git a/src/prep.py b/src/prep.py index 6c56fb51f..f923877b4 100644 --- a/src/prep.py +++ b/src/prep.py @@ -737,12 +737,34 @@ async def process_tracker(tracker_name, meta): tracker_class = tracker_class_map[tracker_name](config=config) tracker_status[tracker_name] = {'banned': False, 'skipped': False, 'dupe': False, 'upload': False} + if tracker_name in {"THR", "PTP"}: + if meta.get('imdb_id', '0') == '0': + imdb_id = cli_ui.ask_string("Unable to find IMDB id, please enter e.g.(tt1234567)") + meta['imdb_id'] = imdb_id.replace('tt', '').zfill(7) + if tracker_name == "PTP": + console.print("[yellow]Searching for Group ID") + ptp = PTP(config=config) + groupID = await ptp.get_group_by_imdb(meta['imdb_id']) + if groupID is None: + console.print("[yellow]No Existing Group found") + if meta.get('youtube', None) is None or "youtube" not in str(meta.get('youtube', '')): + youtube = cli_ui.ask_string("Unable to find youtube trailer, please link one 
e.g.(https://www.youtube.com/watch?v=dQw4w9WgXcQ)", default="") + meta['youtube'] = youtube + meta['ptp_groupID'] = groupID + + if tracker_name == "THR": + youtube = cli_ui.ask_string("Unable to find youtube trailer, please link one e.g.(https://www.youtube.com/watch?v=dQw4w9WgXcQ)") + meta['youtube'] = youtube + if tracker_setup.check_banned_group(tracker_class.tracker, tracker_class.banned_groups, meta): console.print(f"[red]Tracker '{tracker_name}' is banned. Skipping.[/red]") tracker_status[tracker_name]['banned'] = True continue - dupes = await tracker_class.search_existing(meta, disctype) + if tracker_name not in {"THR", "PTP", "MANUAL"}: + dupes = await tracker_class.search_existing(meta, disctype) + elif tracker_name == "PTP": + dupes = await ptp.search_existing(groupID, meta, disctype) if 'skipping' not in meta or meta['skipping'] is None: dupes = await common.filter_dupes(dupes, meta) meta, is_dupe = helper.dupe_check(dupes, meta) @@ -751,6 +773,9 @@ async def process_tracker(tracker_name, meta): tracker_status[tracker_name]['dupe'] = True elif meta['skipping']: tracker_status[tracker_name]['skipped'] = True + if meta.get('skipping') is None and not is_dupe and tracker_name == "PTP": + if meta.get('imdb_info', {}) == {}: + meta['imdb_info'] = self.get_imdb_info_api(meta['imdb_id'], meta) meta['skipping'] = None if not tracker_status[tracker_name]['banned'] and not tracker_status[tracker_name]['skipped'] and not tracker_status[tracker_name]['dupe']: diff --git a/upload.py b/upload.py index 91ca4fd9e..6c00ec47a 100644 --- a/upload.py +++ b/upload.py @@ -599,65 +599,33 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): if tracker == "THR": tracker_status = meta.get('tracker_status', {}) - for tracker, status in tracker_status.items(): - upload_status = status.get('upload', False) - print(f"Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}") - - if upload_status: - console.print("Uploading to THR") - # nable to get IMDB id/Youtube Link - if meta.get('imdb_id', '0') == '0': - imdb_id = cli_ui.ask_string("Unable to find IMDB id, please enter e.g.(tt1234567)") - meta['imdb_id'] = imdb_id.replace('tt', '').zfill(7) - if meta.get('youtube', None) is None: - youtube = cli_ui.ask_string("Unable to find youtube trailer, please link one e.g.(https://www.youtube.com/watch?v=dQw4w9WgXcQ)") - meta['youtube'] = youtube - thr = THR(config=config) - try: - with requests.Session() as session: - console.print("[yellow]Logging in to THR") - session = thr.login(session) - await thr.upload(session, meta, disctype) - await asyncio.sleep(0.5) - await client.add_to_client(meta, "THR") - except Exception: - console.print(traceback.format_exc()) + upload_status = tracker_status.get(tracker, {}).get('upload', False) + print(f"Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}") + + if upload_status: + thr = THR(config=config) + try: + with requests.Session() as session: + console.print("[yellow]Logging in to THR") + session = thr.login(session) + await thr.upload(session, meta, disctype) + await asyncio.sleep(0.5) + await client.add_to_client(meta, "THR") + except Exception: + console.print(traceback.format_exc()) if tracker == "PTP": tracker_status = meta.get('tracker_status', {}) - for tracker, status in tracker_status.items(): - upload_status = status.get('upload', False) - print(f"Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}") - - if upload_status: - console.print(f"Uploading to {tracker}") - if meta.get('imdb_id', '0') == '0': - imdb_id = 
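One detail worth flagging in the prep.py hunk above: `get_imdb_info_api` is an async method, but the new PTP pre-check assigns its result without `await`, so as committed `meta['imdb_info']` would hold a coroutine object rather than the info dict. The intended call presumably looks like this (sketch of the same guard with the missing `await`):

    # inside the async pre-check, PTP branch
    if meta.get('skipping') is None and not is_dupe and tracker_name == "PTP":
        if meta.get('imdb_info', {}) == {}:
            meta['imdb_info'] = await self.get_imdb_info_api(meta['imdb_id'], meta)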
cli_ui.ask_string("Unable to find IMDB id, please enter e.g.(tt1234567)") - meta['imdb_id'] = imdb_id.replace('tt', '').zfill(7) - ptp = PTP(config=config) - try: - console.print("[yellow]Searching for Group ID") - groupID = await ptp.get_group_by_imdb(meta['imdb_id']) - if groupID is None: - console.print("[yellow]No Existing Group found") - if meta.get('youtube', None) is None or "youtube" not in str(meta.get('youtube', '')): - youtube = cli_ui.ask_string("Unable to find youtube trailer, please link one e.g.(https://www.youtube.com/watch?v=dQw4w9WgXcQ)", default="") - meta['youtube'] = youtube - meta['upload'] = True - else: - console.print("[yellow]Searching for Existing Releases") - dupes = await ptp.search_existing(groupID, meta, disctype) - dupes = await common.filter_dupes(dupes, meta) - - if meta.get('imdb_info', {}) == {}: - meta['imdb_info'] = await prep.get_imdb_info(meta['imdb_id'], meta) - if meta['upload'] is True: - ptpUrl, ptpData = await ptp.fill_upload_form(groupID, meta) - await ptp.upload(meta, ptpUrl, ptpData, disctype) - await asyncio.sleep(5) - await client.add_to_client(meta, "PTP") - except Exception: - console.print(traceback.format_exc()) + upload_status = tracker_status.get(tracker, {}).get('upload', False) + print(f"Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}") + + if upload_status: + ptp = PTP(config=config) + groupID = meta['ptp_groupID'] + ptpUrl, ptpData = await ptp.fill_upload_form(groupID, meta) + await ptp.upload(meta, ptpUrl, ptpData, disctype) + await asyncio.sleep(5) + await client.add_to_client(meta, "PTP") if meta.get('queue') is not None: processed_files_count += 1 From 8f5406a9fc4d32edfc324bf2d9cee274584dccad Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 19 Dec 2024 14:37:24 +1000 Subject: [PATCH 688/741] manual mod fix --- src/prep.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index f923877b4..5696e3931 100644 --- a/src/prep.py +++ b/src/prep.py @@ -761,7 +761,7 @@ async def process_tracker(tracker_name, meta): tracker_status[tracker_name]['banned'] = True continue - if tracker_name not in {"THR", "PTP", "MANUAL"}: + if tracker_name not in {"THR", "PTP"}: dupes = await tracker_class.search_existing(meta, disctype) elif tracker_name == "PTP": dupes = await ptp.search_existing(groupID, meta, disctype) @@ -782,6 +782,9 @@ async def process_tracker(tracker_name, meta): console.print(f"[green]Tracker '{tracker_name}' passed all checks.[/green]") tracker_status[tracker_name]['upload'] = True successful_trackers += 1 + else: + if tracker_name == "MANUAL": + successful_trackers += 1 meta['tracker_status'] = tracker_status @@ -4183,7 +4186,7 @@ async def package(self, meta): generic.write(f"IMDb: https://www.imdb.com/title/tt{meta['imdb_id']}\n") if meta['tvdb_id'] != "0": generic.write(f"TVDB: https://www.thetvdb.com/?id={meta['tvdb_id']}&tab=series\n") - if meta['tvmaze_id'] != "0": + if "tvmaze_id" in meta and meta['tvmaze_id'] != "0": generic.write(f"TVMaze: https://www.tvmaze.com/shows/{meta['tvmaze_id']}\n") poster_img = f"{meta['base_dir']}/tmp/{meta['uuid']}/POSTER.png" if meta.get('poster', None) not in ['', None] and not os.path.exists(poster_img): From 4c5c3de1bd39d61a2c722e37e4dceb7bf63d1bfe Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 19 Dec 2024 17:24:57 +1000 Subject: [PATCH 689/741] add original language argument --- src/args.py | 1 + src/prep.py | 17 ++++++++++++----- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/src/args.py 
b/src/args.py index cf6b03aa6..c18e75885 100644 --- a/src/args.py +++ b/src/args.py @@ -49,6 +49,7 @@ def parse(self, args, meta): parser.add_argument('--no-tag', dest='no_tag', action='store_true', required=False, help="Remove Group Tag from title") parser.add_argument('--no-edition', dest='no_edition', action='store_true', required=False, help="Remove Edition from title") parser.add_argument('--dual-audio', dest='dual_audio', action='store_true', required=False, help="Add Dual-Audio to the title") + parser.add_argument('-ol', '--original-language', dest='manual_language', nargs='*', required=False, help="Set original audio language") parser.add_argument('-ns', '--no-seed', action='store_true', required=False, help="Do not add torrent to the client") parser.add_argument('-year', '--year', dest='manual_year', nargs='?', required=False, help="Override the year found", type=int, default=0) parser.add_argument('-ptp', '--ptp', nargs='*', required=False, help="PTP torrent id/permalink", type=str) diff --git a/src/prep.py b/src/prep.py index 3209a16be..b44696d4b 100644 --- a/src/prep.py +++ b/src/prep.py @@ -623,6 +623,8 @@ async def process_tracker(tracker_name, meta): console.print("[yellow]Building meta data.....") if meta['debug']: meta_start_time = time.time() + if meta.get('manual_language'): + meta['original_language'] = meta.get('manual_language').lower() meta['tmdb'] = meta.get('tmdb_manual', None) meta['type'] = self.get_type(video, meta['scene'], meta['is_disc'], meta) if meta.get('category', None) is None: @@ -2176,7 +2178,7 @@ async def tmdb_other_meta(self, meta): except Exception: console.print('[yellow]Unable to grab videos from TMDb.') - meta['aka'], original_language = await self.get_imdb_aka_api(meta['imdb_id']) + meta['aka'], original_language = await self.get_imdb_aka_api(meta['imdb_id'], meta) if original_language is not None: meta['original_language'] = original_language else: @@ -2230,7 +2232,7 @@ async def tmdb_other_meta(self, meta): console.print('[yellow]Unable to grab videos from TMDb.') # meta['aka'] = f" AKA {response['original_name']}" - meta['aka'], original_language = await self.get_imdb_aka_api(meta['imdb_id']) + meta['aka'], original_language = await self.get_imdb_aka_api(meta['imdb_id'], meta) if original_language is not None: meta['original_language'] = original_language else: @@ -2474,7 +2476,7 @@ def get_audio_v2(self, mi, meta, bdinfo): if meta.get('dual_audio', False): dual = "Dual-Audio" else: - if meta.get('original_language', '') != 'en': + if not meta.get('original_language', '').startswith('en'): eng, orig = False, False try: for t in tracks: @@ -4225,7 +4227,7 @@ async def package(self, meta): return False return - async def get_imdb_aka_api(self, imdb_id): + async def get_imdb_aka_api(self, imdb_id, meta): if imdb_id == "0": return "", None if not imdb_id.startswith("tt"): @@ -4269,7 +4271,10 @@ async def get_imdb_aka_api(self, imdb_id): # Extract relevant fields from the response aka = title_data.get("originalTitleText", {}).get("text", "") is_original = title_data.get("titleText", {}).get("isOriginalTitle", False) - original_language = None + if meta.get('manual_language'): + original_language = meta.get('manual_language') + else: + original_language = None if not is_original and aka: aka = f" AKA {aka}" @@ -4464,6 +4469,8 @@ async def get_imdb_info_api(self, imdbID, meta): if name_id.startswith('nm'): imdb_info['directors'].append(name_id) break + if meta.get('manual_language'): + imdb_info['original_language'] = meta.get('manual_language') if 
not title_data: imdb_info = { From 709380e6f1c5b5c3ad40ee79d2ae9aad182625a1 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 19 Dec 2024 17:57:33 +1000 Subject: [PATCH 690/741] Handle none returns --- src/prep.py | 67 ++++++++++++++++++++++++++--------------------------- 1 file changed, 33 insertions(+), 34 deletions(-) diff --git a/src/prep.py b/src/prep.py index b44696d4b..3a50a92d6 100644 --- a/src/prep.py +++ b/src/prep.py @@ -4430,44 +4430,43 @@ async def get_imdb_info_api(self, imdbID, meta): response = requests.post(url, json=query, headers=headers) data = response.json() - title_data = data.get("data", {}).get("title", {}) - if not title_data: - return meta - - imdb_info['imdbID'] = imdbID - imdb_info['title'] = title_data.get('titleText', {}).get('text', '') - imdb_info['year'] = title_data.get('releaseYear', {}).get('year', '') - original_title = title_data.get('originalTitleText', {}).get('text', '') - if not original_title or original_title == imdb_info['title']: - original_title = imdb_info['title'] - imdb_info['aka'] = original_title - imdb_info['type'] = title_data.get('titleType', {}).get('id') - runtime_data = title_data.get('runtime', {}) - runtime_seconds = runtime_data.get('seconds', 0) - if runtime_seconds: - runtime_minutes = runtime_seconds // 60 - else: - runtime_minutes = 0 - - imdb_info['runtime'] = str(runtime_minutes) - imdb_info['cover'] = title_data.get('primaryImage', {}).get('url', '') - if not imdb_info['cover']: - imdb_info['cover'] = meta.get('poster', '') - imdb_info['plot'] = title_data.get('plot', {}).get('plotText', {}).get('plainText', '') - genres = title_data.get('titleGenres', {}).get('genres', []) - genre_list = [g.get('genre', {}).get('text', '') for g in genres if g.get('genre', {}).get('text')] - imdb_info['genres'] = ', '.join(genre_list) - imdb_info['rating'] = title_data.get('ratingsSummary', {}).get('aggregateRating', 'N/A') - imdb_info['directors'] = [] - principal_credits = title_data.get('principalCredits', []) + title_data = data.get("data", {}).get("title", {}) + if not title_data: + return meta + imdb_info['imdbID'] = imdbID + imdb_info['title'] = title_data.get('titleText', {}).get('text', '') or '' + imdb_info['year'] = title_data.get('releaseYear', {}).get('year', '') or '' + original_title = title_data.get('originalTitleText', {}).get('text', '') + if not original_title or original_title == imdb_info['title']: + original_title = imdb_info['title'] + imdb_info['aka'] = original_title + imdb_info['type'] = title_data.get('titleType', {}).get('id', '') + runtime_data = title_data.get('runtime', {}) + runtime_seconds = runtime_data.get('seconds', 0) + runtime_minutes = runtime_seconds // 60 if runtime_seconds else 0 + imdb_info['runtime'] = str(runtime_minutes) + imdb_info['cover'] = title_data.get('primaryImage', {}).get('url', '') or meta.get('poster', '') + imdb_info['plot'] = title_data.get('plot', {}).get('plotText', {}).get('plainText', '') or 'No plot available' + title_genres = title_data.get('titleGenres') + if title_genres and isinstance(title_genres, dict): + genres = title_genres.get('genres', []) + else: + genres = [] + genre_list = [g.get('genre', {}).get('text', '') for g in genres if g.get('genre', {}).get('text')] + imdb_info['genres'] = ', '.join(genre_list) or '' + imdb_info['rating'] = title_data.get('ratingsSummary', {}).get('aggregateRating', 'N/A') + imdb_info['directors'] = [] + principal_credits = title_data.get('principalCredits', []) + if principal_credits and isinstance(principal_credits, list): for pc 
in principal_credits: category_text = pc.get('category', {}).get('text', '') if 'Direct' in category_text: credits = pc.get('credits', []) - for c in credits: - name_id = c.get('name', {}).get('id', '') - if name_id.startswith('nm'): - imdb_info['directors'].append(name_id) + if credits and isinstance(credits, list): + for c in credits: + name_id = c.get('name', {}).get('id', '') + if name_id.startswith('nm'): + imdb_info['directors'].append(name_id) break if meta.get('manual_language'): imdb_info['original_language'] = meta.get('manual_language') From ba631439fd40b070120a202a26867c30fe9ccd04 Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 19 Dec 2024 18:04:41 +1000 Subject: [PATCH 691/741] Set default tvmaze so rest of code doesn't bitch and moan --- src/prep.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/prep.py b/src/prep.py index 5696e3931..631ebdfdf 100644 --- a/src/prep.py +++ b/src/prep.py @@ -645,6 +645,8 @@ async def process_tracker(tracker_name, meta): # Search tvmaze if meta['category'] == "TV": meta['tvmaze_id'], meta['imdb_id'], meta['tvdb_id'] = await self.search_tvmaze(filename, meta['search_year'], meta.get('imdb_id', '0'), meta.get('tvdb_id', 0), meta) + else: + meta.setdefault('tvmaze_id', '0') # If no imdb, search for it if meta.get('imdb_id', None) is None: meta['imdb_id'] = await self.search_imdb(filename, meta['search_year']) From 5850deb1e5f153321789b56018742dfacc385def Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 19 Dec 2024 18:12:04 +1000 Subject: [PATCH 692/741] cleanup --- upload.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/upload.py b/upload.py index 6c00ec47a..face85fb9 100644 --- a/upload.py +++ b/upload.py @@ -19,6 +19,7 @@ import click import re from src.trackersetup import TRACKER_SETUP, tracker_class_map, api_trackers, other_api_trackers, http_trackers, tracker_capabilities +import time from src.console import console from rich.markdown import Markdown @@ -216,9 +217,6 @@ async def process_meta(meta, base_dir): if not meta: return else: - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: - json.dump(meta, f, indent=4) - meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await prep.get_name(meta) meta['cutoff'] = int(config['DEFAULT'].get('cutoff_screens', 3)) if len(meta.get('image_list', [])) < meta.get('cutoff') and meta.get('skip_imghost_upload', False) is False: if 'image_list' not in meta: @@ -460,7 +458,8 @@ async def do_the_thing(base_dir): except Exception as e: console.print(f"[red]Failed to load metadata for path '{path}': {e}") - + if meta['debug']: + upload_start_time = time.time() console.print(f"[green]Gathering info for {os.path.basename(path)}") await process_meta(meta, base_dir) if 'we_are_uploading' not in meta: @@ -482,8 +481,6 @@ async def do_the_thing(base_dir): common = COMMON(config=config) tracker_setup = TRACKER_SETUP(config=config) enabled_trackers = tracker_setup.trackers_enabled(meta) - print("Enabled Trackers:", enabled_trackers) - print("API Trackers:", api_trackers) async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): modq, draft = None, None @@ -532,7 +529,9 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): console.print(f"(draft: {draft})") console.print(f"Uploading to {tracker_class.tracker}") - + if meta['debug']: + upload_finish_time = time.time() + console.print(f"Upload from Audionut UA processed in {upload_finish_time - upload_start_time:.2f} seconds") await 
tracker_class.upload(meta, disctype) await asyncio.sleep(0.5) perm = config['DEFAULT'].get('get_permalink', False) From 063c7d5afe6a706fa6b5f3e6ffbe9c8c3ca85e7e Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 19 Dec 2024 18:17:58 +1000 Subject: [PATCH 693/741] Change docker to release version instead --- .github/workflows/docker-image.yml | 30 +++++++++--------------------- 1 file changed, 9 insertions(+), 21 deletions(-) diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index aa49b2e40..d6f403411 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -1,10 +1,9 @@ name: Create and publish Docker images on: - push: - branches: - - master - - development + release: + types: + - published workflow_dispatch: env: @@ -43,30 +42,19 @@ jobs: run: | REPO_NAME=${{ env.IMAGE_NAME }} echo "LOWER_CASE_REPO_NAME=${REPO_NAME,,}" >> $GITHUB_ENV - - - name: Get short commit id - id: get_short_commit_id - run: | - echo "SHA_SHORT=$(git rev-parse --short HEAD)" >> $GITHUB_ENV - - name: Set image name based on branch - id: set_image_name + - name: Get release version + id: get_release_version run: | - if [ "${{ github.ref_name }}" == "master" ]; then - IMAGE_TAG="master" - elif [ "${{ github.ref_name }}" == "develop" ]; then - IMAGE_TAG="develop" - else - IMAGE_TAG="${{ github.ref_name }}" - fi - echo "IMAGE_TAG=${IMAGE_TAG}" >> $GITHUB_ENV - + RELEASE_VERSION=${{ github.event.release.tag_name }} + echo "RELEASE_VERSION=${RELEASE_VERSION}" >> $GITHUB_ENV + - name: Build and push Docker image uses: docker/build-push-action@v3 with: context: . push: true - tags: ${{ env.REGISTRY }}/${{ env.LOWER_CASE_REPO_NAME }}:${{ env.IMAGE_TAG }}, ${{ steps.meta.outputs.tags }} + tags: ${{ env.REGISTRY }}/${{ env.LOWER_CASE_REPO_NAME }}:${{ env.RELEASE_VERSION }}, ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} cache-from: type=gha cache-to: type=gha,mode=max \ No newline at end of file From 2765c0fe57a4fbda29f26589e9879fc370d5a99f Mon Sep 17 00:00:00 2001 From: Audionut Date: Thu, 19 Dec 2024 20:38:29 +1000 Subject: [PATCH 694/741] Correct returns in uphelper --- src/uphelper.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/uphelper.py b/src/uphelper.py index 56e482fff..0334dfde4 100644 --- a/src/uphelper.py +++ b/src/uphelper.py @@ -10,13 +10,14 @@ def dupe_check(self, dupes, meta): if not dupes: console.print("[green]No dupes found") meta['upload'] = True - return meta + return meta, False else: console.print() dupe_text = "\n".join([d['name'] if isinstance(d, dict) else d for d in dupes]) console.print() cli_ui.info_section(cli_ui.bold, "Check if these are actually dupes!") cli_ui.info(dupe_text) + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): if meta.get('dupe', False) is False: upload = cli_ui.ask_yes_no("Upload Anyways?", default=False) @@ -29,9 +30,11 @@ def dupe_check(self, dupes, meta): else: console.print("[yellow]Found potential dupes. --skip-dupe-check was passed. Uploading anyways") upload = True + console.print() if upload is False: meta['upload'] = False + return meta, True else: meta['upload'] = True for each in dupes: @@ -39,7 +42,7 @@ def dupe_check(self, dupes, meta): if each_name == meta['name']: meta['name'] = f"{meta['name']} DUPE?" 
- return meta + return meta, False def get_confirmation(self, meta): if meta['debug'] is True: From 2cae7749016397f91a8098bd36aa3609f80ea48a Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 20 Dec 2024 08:54:52 +1000 Subject: [PATCH 695/741] Missed runtime catch when nonetype --- src/prep.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/prep.py b/src/prep.py index 85373bd2d..7d4464874 100644 --- a/src/prep.py +++ b/src/prep.py @@ -4472,8 +4472,12 @@ async def get_imdb_info_api(self, imdbID, meta): imdb_info['aka'] = original_title imdb_info['type'] = title_data.get('titleType', {}).get('id', '') runtime_data = title_data.get('runtime', {}) - runtime_seconds = runtime_data.get('seconds', 0) - runtime_minutes = runtime_seconds // 60 if runtime_seconds else 0 + if runtime_data and isinstance(runtime_data, dict): + runtime_seconds = runtime_data.get('seconds', 0) + runtime_minutes = runtime_seconds // 60 if runtime_seconds else 0 + else: + runtime_seconds = 0 + runtime_minutes = 0 imdb_info['runtime'] = str(runtime_minutes) imdb_info['cover'] = title_data.get('primaryImage', {}).get('url', '') or meta.get('poster', '') imdb_info['plot'] = title_data.get('plot', {}).get('plotText', {}).get('plainText', '') or 'No plot available' @@ -4495,7 +4499,7 @@ async def get_imdb_info_api(self, imdbID, meta): if credits and isinstance(credits, list): for c in credits: name_id = c.get('name', {}).get('id', '') - if name_id.startswith('nm'): + if name_id and name_id.startswith('nm'): imdb_info['directors'].append(name_id) break if meta.get('manual_language'): imdb_info['original_language'] = meta.get('manual_language') From 6abe428e440f5172ca084a5dd94bd0622dde150b Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 20 Dec 2024 10:43:33 +1000 Subject: [PATCH 696/741] Revert "Don't tag BD with region" This reverts commit 06a60bc3b97a3650d9bc9b5d390a178c95543c1c. 
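Note on PATCH 695 above: its NoneType guards all reduce to one rule — never chain .get() or a string method onto a value the IMDb GraphQL response may return as null. A minimal, self-contained sketch of that rule, assuming a hypothetical safe_get helper and a stubbed payload (neither is part of this repository):

    # Hypothetical helper, not Upload-Assistant code: walk nested dicts
    # and fall back to a default when any level is missing or None.
    def safe_get(data, path, default=None):
        for key in path:
            if not isinstance(data, dict) or data.get(key) is None:
                return default
            data = data[key]
        return data

    # Stub shaped like the title_data handled in get_imdb_info_api,
    # with "runtime" explicitly null as the API sometimes returns it.
    title_data = {"runtime": None, "titleText": {"text": "Example"}}

    runtime_seconds = safe_get(title_data, ["runtime", "seconds"], 0)
    runtime_minutes = runtime_seconds // 60 if runtime_seconds else 0
    title = safe_get(title_data, ["titleText", "text"], "")
    assert (runtime_minutes, title) == (0, "Example")

The inline isinstance checks in the patch achieve the same effect without a helper; the sketch only isolates the access pattern being enforced.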
--- src/prep.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index 7d4464874..bbdb65181 100644 --- a/src/prep.py +++ b/src/prep.py @@ -696,8 +696,6 @@ async def process_tracker(tracker_name, meta): if meta.get('is_disc', None) == "BDMV": # Blu-ray Specific meta['region'] = self.get_region(bdinfo, meta.get('region', None)) meta['video_codec'] = self.get_video_codec(bdinfo) - if meta['tag'][1:].startswith(meta['region']): - meta['tag'] = meta['tag'].replace(f"-{meta['region']}", '') else: meta['video_encode'], meta['video_codec'], meta['has_encode_settings'], meta['bit_depth'] = self.get_video_encode(mi, meta['type'], bdinfo) if meta.get('no_edition') is False: From 086d0c769658385c892e7504d77e827b60546d08 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 20 Dec 2024 10:51:00 +1000 Subject: [PATCH 697/741] cherry picked dvd dupe matching improvements https://github.com/Audionut/Upload-Assistant/pull/200/commits/d66c45cdc0d485fb440d73408a0b33b3012a8dd7 --- src/trackers/COMMON.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 963fd7f53..b37bc67b5 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -713,6 +713,11 @@ def process_exclusion(each): log_exclusion(f"resolution '{target_resolution}' mismatch", each) return True + if is_dvd: + if any(str(res) in each for res in [1080, 720, 2160]): + log_exclusion(f"resolution '{target_resolution}' mismatch", each) + return True + for check in attribute_checks: if check["key"] == "repack": if has_repack_in_uuid and "repack" not in normalized: @@ -735,9 +740,10 @@ def process_exclusion(each): log_exclusion("season/episode mismatch", each) return True - if normalized_encoder and normalized_encoder in normalized: - log_exclusion(f"Encoder '{has_encoder_in_name}' mismatch", each) - return False + if not is_dvd: + if normalized_encoder and normalized_encoder in normalized: + log_exclusion(f"Encoder '{has_encoder_in_name}' mismatch", each) + return False console.log(f"[debug] Passed all checks: {each}") return False From 48e51ba4285fd59f26d7eccc02b4a611e5449637 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 20 Dec 2024 11:00:59 +1000 Subject: [PATCH 698/741] maybe fix docker manual dispatch --- .github/workflows/docker-image.yml | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index d6f403411..8312fea9d 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -43,18 +43,30 @@ jobs: REPO_NAME=${{ env.IMAGE_NAME }} echo "LOWER_CASE_REPO_NAME=${REPO_NAME,,}" >> $GITHUB_ENV - - name: Get release version - id: get_release_version + - name: Get release version or branch name + id: get_version_or_branch run: | - RELEASE_VERSION=${{ github.event.release.tag_name }} - echo "RELEASE_VERSION=${RELEASE_VERSION}" >> $GITHUB_ENV + if [ "${{ github.event_name }}" == "release" ]; then + RELEASE_VERSION=${{ github.event.release.tag_name }} + if [ -z "$RELEASE_VERSION" ]; then + echo "RELEASE_VERSION is empty. Please ensure a release tag is provided." 
+ exit 1 + fi + echo "VERSION=${RELEASE_VERSION}" >> $GITHUB_ENV + elif [ "${{ github.event_name }}" == "workflow_dispatch" ]; then + BRANCH_NAME=${{ github.ref_name }} + echo "VERSION=${BRANCH_NAME}" >> $GITHUB_ENV + else + echo "Unsupported event: ${{ github.event_name }}" + exit 1 + fi - name: Build and push Docker image uses: docker/build-push-action@v3 with: context: . push: true - tags: ${{ env.REGISTRY }}/${{ env.LOWER_CASE_REPO_NAME }}:${{ env.RELEASE_VERSION }}, ${{ steps.meta.outputs.tags }} + tags: ${{ env.REGISTRY }}/${{ env.LOWER_CASE_REPO_NAME }}:${{ env.VERSION }}, ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} cache-from: type=gha cache-to: type=gha,mode=max \ No newline at end of file From 1d40730efa224acccbba31527d7ebf431171151d Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 20 Dec 2024 12:14:47 +1000 Subject: [PATCH 699/741] Handle manual episode title not in meta --- src/prep.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index bbdb65181..a443f05ba 100644 --- a/src/prep.py +++ b/src/prep.py @@ -3914,7 +3914,7 @@ async def get_season_episode(self, video, meta): meta['episode_int'] = episode_int # Manual episode title - if meta['manual_episode_title'] == "": + if 'manual_episode_title' in meta and meta['manual_episode_title'] == "": meta['episode_title_storage'] = meta.get('manual_episode_title') else: meta['episode_title_storage'] = guessit(video, {"excludes": "part"}).get('episode_title', '') From 458cc1ed16e913d9d358fd5167926f81f8938f6c Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 20 Dec 2024 12:51:39 +1000 Subject: [PATCH 700/741] Add config option to skip MTV if require rehash --- data/example-config.py | 1 + src/prep.py | 16 ++++++++++++++++ 2 files changed, 17 insertions(+) diff --git a/data/example-config.py b/data/example-config.py index e7183b8d3..f30da8e12 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -216,6 +216,7 @@ 'announce_url': "get from https://www.morethantv.me/upload.php", # 'anon': False, # 'otp_uri' : 'OTP URI, read the following for more information https://github.com/google/google-authenticator/wiki/Key-Uri-Format' + 'skip_if_rehash': False # Skip uploading to MTV if it would require a torrent rehash because existing piece size > 8 MiB }, "NBL": { "api_key": "NBL api key", diff --git a/src/prep.py b/src/prep.py index a443f05ba..d71db8238 100644 --- a/src/prep.py +++ b/src/prep.py @@ -780,6 +780,22 @@ async def process_tracker(tracker_name, meta): tracker_status[tracker_name]['dupe'] = True elif meta['skipping']: tracker_status[tracker_name]['skipped'] = True + if tracker_name == "MTV": + tracker_config = self.config['TRACKERS'].get(tracker_name, {}) + if str(tracker_config.get('skip_if_rehash', 'false')).lower() == "true": + torrent_path = os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") + if not os.path.exists(torrent_path): + check_torrent = await client.find_existing_torrent(meta) + if check_torrent: + console.print(f"[yellow]Existing torrent found on {check_torrent}[/yellow]") + self.create_base_from_existing_torrent(check_torrent, meta['base_dir'], meta['uuid']) + torrent = Torrent.read(torrent_path) + if torrent.piece_size > 8388608: + tracker_status[tracker_name]['skipped'] = True + elif os.path.exists(torrent_path): + torrent = Torrent.read(torrent_path) + if torrent.piece_size > 8388608: + tracker_status[tracker_name]['skipped'] = True if meta.get('skipping') is None and not is_dupe and tracker_name == "PTP": if 
meta.get('imdb_info', {}) == {}: meta['imdb_info'] = self.get_imdb_info_api(meta['imdb_id'], meta) From d00ee913cddff66f11a0eef3fe78064a41a9677a Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 20 Dec 2024 13:31:51 +1000 Subject: [PATCH 701/741] TL don't search existing fixes https://github.com/Audionut/Upload-Assistant/issues/213 --- src/prep.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index d71db8238..006957622 100644 --- a/src/prep.py +++ b/src/prep.py @@ -768,7 +768,7 @@ async def process_tracker(tracker_name, meta): tracker_status[tracker_name]['banned'] = True continue - if tracker_name not in {"THR", "PTP"}: + if tracker_name not in {"THR", "PTP", "TL"}: dupes = await tracker_class.search_existing(meta, disctype) elif tracker_name == "PTP": dupes = await ptp.search_existing(groupID, meta, disctype) From f273abdfeb22503226225d75120424c4d53c09ce Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 20 Dec 2024 18:00:09 +1000 Subject: [PATCH 702/741] Add config option to prefer MTV accepted torrent files Iterate over returned torrents and prefer torrents whose piece_size =< 8 MiB, --- src/clients.py | 141 ++++++++++++++++++++++++++++++++++--------------- src/prep.py | 6 ++- 2 files changed, 103 insertions(+), 44 deletions(-) diff --git a/src/clients.py b/src/clients.py index c6e1521a7..cbd9e3b06 100644 --- a/src/clients.py +++ b/src/clients.py @@ -67,6 +67,7 @@ async def find_existing_torrent(self, meta): default_torrent_client = meta['client'] if meta.get('client', None) == 'none' or default_torrent_client == 'none': return None + client = self.config['TORRENT_CLIENTS'][default_torrent_client] torrent_storage_dir = client.get('torrent_storage_dir') torrent_client = client.get('torrent_client', '').lower() @@ -77,33 +78,66 @@ async def find_existing_torrent(self, meta): if not os.path.exists(str(torrent_storage_dir)) and torrent_client != "watch": console.print(f"[bold red]Invalid torrent_storage_dir path: [bold yellow]{torrent_storage_dir}") - torrenthash = None + prefer_small_pieces = meta.get('prefer_small_pieces', False) + best_match = None # Track the best match for fallback if prefer_small_pieces is enabled + + # Iterate through pre-specified hashes for hash_key in ['torrenthash', 'ext_torrenthash']: hash_value = meta.get(hash_key) if hash_value: valid, torrent_path = await self.is_valid_torrent( meta, f"{torrent_storage_dir}/{hash_value}.torrent", - hash_value, torrent_client, print_err=True + hash_value, torrent_client, client, print_err=True ) if valid: - torrenthash = hash_value - break - - if torrent_client == 'qbit' and not torrenthash and client.get('enable_search'): - torrenthash = await self.search_qbit_for_torrent(meta, client) - - if torrenthash: - torrent_path = f"{torrent_storage_dir}/{torrenthash}.torrent" - valid2, torrent_path = await self.is_valid_torrent( - meta, torrent_path, torrenthash, torrent_client, print_err=False - ) - if valid2: - return torrent_path + if not prefer_small_pieces: + console.print(f"[green]Found a valid torrent: [bold yellow]{hash_value}") + return torrent_path + + # Get piece size and update the best match + torrent = Torrent.read(torrent_path) + piece_size = torrent.piece_size + if piece_size <= 8388608: + console.print(f"[green]Found a valid torrent with preferred piece size: [bold yellow]{hash_value}") + return torrent_path + + if best_match is None or piece_size < best_match['piece_size']: + best_match = {'torrenthash': hash_value, 'torrent_path': torrent_path, 'piece_size': 
piece_size} + console.print(f"[yellow]Storing valid torrent as best match: [bold yellow]{hash_value}") + + # Search the client if no pre-specified hash matches + if torrent_client == 'qbit' and client.get('enable_search'): + found_hash = await self.search_qbit_for_torrent(meta, client) + if found_hash: + valid, torrent_path = await self.is_valid_torrent( + meta, f"{torrent_storage_dir}/{found_hash}.torrent", found_hash, torrent_client, client, print_err=False + ) + if valid: + # Continue checking other torrents if `prefer_small_pieces` is enabled + if not prefer_small_pieces: + console.print(f"[green]Found a valid torrent from client search: [bold yellow]{found_hash}") + return torrent_path + + # Get piece size and update the best match + torrent = Torrent.read(torrent_path) + piece_size = torrent.piece_size + if piece_size <= 8388608: + console.print(f"[green]Found a valid torrent with preferred piece size from client search: [bold yellow]{found_hash}") + return torrent_path + + if best_match is None or piece_size < best_match['piece_size']: + best_match = {'torrenthash': found_hash, 'torrent_path': torrent_path, 'piece_size': piece_size} + console.print(f"[yellow]Storing valid torrent from client search as best match: [bold yellow]{found_hash}") + + # Use best match if no preferred torrent found + if prefer_small_pieces and best_match: + console.print(f"[yellow]Using best match torrent with hash: [bold yellow]{best_match['torrenthash']}") + return best_match['torrent_path'] console.print("[bold yellow]No Valid .torrent found") return None - async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client, print_err=False): + async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client, client, print_err=False): valid = False wrong_file = False @@ -174,16 +208,28 @@ async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client if os.path.exists(torrent_path): try: reuse_torrent = Torrent.read(torrent_path) + torrent_storage_dir_valid = torrent_path + torrent_file_size_kib = os.path.getsize(torrent_storage_dir_valid) / 1024 if meta['debug']: - console.log(f"Checking piece size and count: pieces={reuse_torrent.pieces}, piece_size={reuse_torrent.piece_size}") + console.log(f"Checking piece size, count and size: pieces={reuse_torrent.pieces}, piece_size={reuse_torrent.piece_size}, size={torrent_file_size_kib}") # Piece size and count validations - if (reuse_torrent.pieces >= 7000 and reuse_torrent.piece_size < 8388608) or (reuse_torrent.pieces >= 4000 and reuse_torrent.piece_size < 4194304): - console.print("[bold yellow]Too many pieces exist in current hash. 
REHASHING") + if not meta.get('prefer_small_pieces', False): + if reuse_torrent.pieces >= 8000 and reuse_torrent.piece_size < 8388608: + console.print("[bold yellow]Too many pieces detected") + valid = False + elif reuse_torrent.pieces >= 5000 and reuse_torrent.piece_size < 4194304: + console.print("[bold yellow]Too many pieces detected") + valid = False + elif reuse_torrent.pieces >= 12000: + console.print("[bold yellow]Too many pieces detected") valid = False elif reuse_torrent.piece_size < 32768: console.print("[bold yellow]Piece size too small to reuse") valid = False + elif torrent_file_size_kib > 250: + console.print("[bold yellow]Torrent file size exceeds 250 KiB") + valid = False elif wrong_file: console.print("[bold red] Provided .torrent has files that were not expected") valid = False @@ -203,17 +249,25 @@ async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client async def search_qbit_for_torrent(self, meta, client): console.print("[green]Searching qbittorrent for an existing .torrent") torrent_storage_dir = client.get('torrent_storage_dir', None) + if meta['debug']: if torrent_storage_dir: console.print(f"Torrent storage directory found: {torrent_storage_dir}") else: console.print("No torrent storage directory found.") + if torrent_storage_dir is None and client.get("torrent_client", None) != "watch": console.print(f"[bold red]Missing torrent_storage_dir for {self.config['DEFAULT']['default_torrent_client']}") return None try: - qbt_client = qbittorrentapi.Client(host=client['qbit_url'], port=client['qbit_port'], username=client['qbit_user'], password=client['qbit_pass'], VERIFY_WEBUI_CERTIFICATE=client.get('VERIFY_WEBUI_CERTIFICATE', True)) + qbt_client = qbittorrentapi.Client( + host=client['qbit_url'], + port=client['qbit_port'], + username=client['qbit_user'], + password=client['qbit_pass'], + VERIFY_WEBUI_CERTIFICATE=client.get('VERIFY_WEBUI_CERTIFICATE', True) + ) qbt_client.auth_log_in() if meta['debug']: console.print("We logged into qbittorrent") @@ -224,7 +278,7 @@ async def search_qbit_for_torrent(self, meta, client): console.print("[bold red]APIConnectionError: INCORRECT HOST/PORT") return None - # Remote path map if needed + # Remote path mapping if needed remote_path_map = False local_path, remote_path = await self.remote_path_map(meta) if local_path.lower() in meta['path'].lower() and local_path.lower() != remote_path.lower(): @@ -234,46 +288,49 @@ async def search_qbit_for_torrent(self, meta, client): console.print(f"Local path: {local_path}") console.print(f"Remote path: {remote_path}") + # Iterate through torrents and evaluate + best_match = None torrents = qbt_client.torrents.info() for torrent in torrents: try: torrent_path = torrent.get('content_path', f"{torrent.save_path}{torrent.name}") - # console.print("Trying torrent_paths") except AttributeError: if meta['debug']: console.print(torrent) console.print_exception() continue + # Apply remote-to-local path mapping if remote_path_map: - # Replace remote path with local path only if not already mapped if not torrent_path.startswith(local_path): torrent_path = torrent_path.replace(remote_path, local_path) - if meta['debug']: - console.print("Replaced paths round 2:", torrent_path) - - # Check if the local path was accidentally duplicated and correct it if torrent_path.startswith(f"{local_path}/{local_path.split('/')[-1]}"): torrent_path = torrent_path.replace(f"{local_path}/{local_path.split('/')[-1]}", local_path) - if meta['debug']: - console.print("Corrected duplicate in torrent 
path round 2:", torrent_path) - - # Standardize path separators for the local OS torrent_path = torrent_path.replace(os.sep, '/').replace('/', os.sep) - if meta['debug']: - console.print("Final torrent path after remote mapping round 2:", torrent_path) if meta['is_disc'] in ("", None) and len(meta['filelist']) == 1: - if torrent_path.lower() == meta['filelist'][0].lower() and len(torrent.files) == len(meta['filelist']): - valid, torrent_path = await self.is_valid_torrent(meta, f"{torrent_storage_dir}/{torrent.hash}.torrent", torrent.hash, 'qbit', print_err=False) + if torrent_path.lower() != meta['filelist'][0].lower() or len(torrent.files) != len(meta['filelist']): + continue + + elif os.path.normpath(meta['path']).lower() != os.path.normpath(torrent_path).lower(): + continue + + # Check piece size if prefer_small_pieces is enabled + torrent_file_path = os.path.join(torrent_storage_dir, f"{torrent.hash}.torrent") + torrent_data = Torrent.read(torrent_file_path) + piece_size = torrent_data.piece_size + if meta.get('prefer_small_pieces', False): + if best_match is None or piece_size < best_match['piece_size']: + valid, torrent_path = await self.is_valid_torrent(meta, f"{torrent_storage_dir}/{torrent.hash}.torrent", torrent.hash, 'qbit', client, print_err=False) if valid: - console.print(f"[green]Found a matching .torrent with hash: [bold yellow]{torrent.hash}") - return torrent.hash + best_match = {'hash': torrent.hash, 'torrent_path': torrent_path, 'piece_size': piece_size} + else: + return torrent.hash + + # Return the best match if prefer_small_pieces is enabled and no direct match was found + if best_match: + console.print(f"[green]Using best match torrent with hash: {best_match['hash']}") + return best_match['hash'] - elif os.path.normpath(meta['path']).lower() == os.path.normpath(torrent_path).lower(): - valid, torrent_path = await self.is_valid_torrent(meta, f"{torrent_storage_dir}/{torrent.hash}.torrent", torrent.hash, 'qbit', print_err=False) - if valid: - console.print(f"[green]Found a matching .torrent with hash: [bold yellow]{torrent.hash}") - return torrent.hash return None def rtorrent(self, path, torrent_path, torrent, meta, local_path, remote_path, client): diff --git a/src/prep.py b/src/prep.py index 006957622..808ea730f 100644 --- a/src/prep.py +++ b/src/prep.py @@ -705,7 +705,6 @@ async def process_tracker(tracker_name, meta): meta['edition'] = re.sub(r"REPACK[\d]?", "", meta['edition']).strip().replace(' ', ' ') else: meta['edition'] = "" - meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await self.get_name(meta) if meta['debug']: meta_finish_time = time.time() @@ -782,6 +781,10 @@ async def process_tracker(tracker_name, meta): tracker_status[tracker_name]['skipped'] = True if tracker_name == "MTV": tracker_config = self.config['TRACKERS'].get(tracker_name, {}) + if str(tracker_config.get('prefer_mtv_torrent', 'false')).lower() == "true": + meta['prefer_small_pieces'] = True + else: + meta['prefer_small_pieces'] = False if str(tracker_config.get('skip_if_rehash', 'false')).lower() == "true": torrent_path = os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") if not os.path.exists(torrent_path): @@ -2659,7 +2662,6 @@ def get_tag(self, video, meta): try: parsed = guessit(video) release_group = parsed.get('release_group') - if meta['is_disc'] == "BDMV": if release_group: if f"-{release_group}" not in video: From a9d5d3508382821c84a0317e13c10b84c3e0472e Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 20 Dec 2024 18:19:10 
+1000 Subject: [PATCH 703/741] update signatures --- data/example-config.py | 3 ++- src/trackers/AITHER.py | 2 +- src/trackers/BHD.py | 2 +- src/trackers/BLU.py | 2 +- src/trackers/CBR.py | 2 +- src/trackers/FNP.py | 2 +- src/trackers/HUNO.py | 2 +- src/trackers/LCD.py | 2 +- src/trackers/LST.py | 2 +- src/trackers/OE.py | 2 +- src/trackers/OTW.py | 2 +- src/trackers/PTT.py | 2 +- src/trackers/RF.py | 2 +- src/trackers/SHRI.py | 2 +- src/trackers/TIK.py | 2 +- src/trackers/ULCX.py | 2 +- src/trackers/UNIT3D_TEMPLATE.py | 2 +- src/trackers/UTP.py | 2 +- src/trackers/YOINK.py | 2 +- 19 files changed, 20 insertions(+), 19 deletions(-) diff --git a/data/example-config.py b/data/example-config.py index f30da8e12..3934a9f90 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -216,7 +216,8 @@ 'announce_url': "get from https://www.morethantv.me/upload.php", # 'anon': False, # 'otp_uri' : 'OTP URI, read the following for more information https://github.com/google/google-authenticator/wiki/Key-Uri-Format' - 'skip_if_rehash': False # Skip uploading to MTV if it would require a torrent rehash because existing piece size > 8 MiB + 'skip_if_rehash': False, # Skip uploading to MTV if it would require a torrent rehash because existing piece size > 8 MiB + 'prefer_mtv_torrent': False, # Iterate over found torrents and prefer MTV suitable torrents if found. }, "NBL": { "api_key": "NBL api key", diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 942738eec..2ab28bbee 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -28,7 +28,7 @@ def __init__(self, config): self.search_url = 'https://aither.cc/api/torrents/filter' self.upload_url = 'https://aither.cc/api/torrents/upload' self.torrent_url = 'https://aither.cc/api/torrents/' - self.signature = "\n[center][url=https://aither.cc/forums/topics/1349/posts/24958]Created by L4G's Upload Assistant[/url][/center]" + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" self.banned_groups = [ '4K4U', 'afm72', 'AROMA', 'Bandi', 'BiTOR', 'Bluespots', 'Chivaman', 'd3g', 'edge2020', 'EMBER', 'EVO', 'FGT', 'FreetheFish', 'Garshasp', 'Ghost', 'Grym', 'Hi10', 'HiQVE', 'ImE', 'ION10', 'iVy', 'Judas', 'LAMA', 'Langbard', 'LION', 'MeGusta', 'MONOLITH', 'Natty', 'nikt0', 'noxxus', 'OEPlus', 'OFT', 'OsC', 'Panda', 'PYC', 'QxR', 'r00t', 'Ralphy', 'RARBG', 'RCVR', 'RetroPeeps', diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index 037f948f9..5d2f670bb 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -29,7 +29,7 @@ def __init__(self, config): self.tracker = 'BHD' self.source_flag = 'BHD' self.upload_url = 'https://beyond-hd.me/api/upload/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" self.banned_groups = ['Sicario', 'TOMMY', 'x0r', 'nikt0', 'FGT', 'd3g', 'MeGusta', 'YIFY', 'tigole', 'TEKNO3D', 'C4K', 'RARBG', '4K4U', 'EASports', 'ReaLHD', 'Telly', 'AOC', 'WKS', 'SasukeducK'] pass diff --git a/src/trackers/BLU.py b/src/trackers/BLU.py index c658adf13..23908ab7e 100644 --- a/src/trackers/BLU.py +++ b/src/trackers/BLU.py @@ -27,7 +27,7 @@ def __init__(self, config): self.search_url = 'https://blutopia.cc/api/torrents/filter' self.torrent_url = 'https://blutopia.cc/api/torrents/' self.upload_url = 
'https://blutopia.cc/api/torrents/upload' - self.signature = "\n[center][url=https://blutopia.cc/forums/topics/3087/posts/42941]Created by L4G's Upload Assistant[/url][/center]" + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" self.banned_groups = [ '[Oj]', '3LTON', '4yEo', 'ADE', 'AFG', 'AniHLS', 'AnimeRG', 'AniURL', 'AROMA', 'aXXo', 'Brrip', 'CHD', 'CM8', 'CrEwSaDe', 'd3g', 'DeadFish', 'DNL', 'ELiTE', 'eSc', 'FaNGDiNG0', 'FGT', 'Flights', 'FRDS', 'FUM', 'HAiKU', 'HD2DVD', 'HDS', 'HDTime', 'Hi10', 'ION10', 'iPlanet', 'JIVE', 'KiNGDOM', 'Leffe', 'LEGi0N', 'LOAD', 'MeGusta', 'mHD', 'mSD', 'NhaNc3', 'nHD', 'nikt0', 'NOIVTC', 'OFT', diff --git a/src/trackers/CBR.py b/src/trackers/CBR.py index b4e54e66d..f6813c057 100644 --- a/src/trackers/CBR.py +++ b/src/trackers/CBR.py @@ -27,7 +27,7 @@ def __init__(self, config): self.search_url = 'https://capybarabr.com/api/torrents/filter' self.torrent_url = 'https://capybarabr.com/api/torrents/' self.upload_url = 'https://capybarabr.com/api/torrents/upload' - self.signature = "\n[center][img]https://i.ibb.co/tYNzwgd/thanks-cbr.png[/img][/center]" + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" self.banned_groups = [""] pass diff --git a/src/trackers/FNP.py b/src/trackers/FNP.py index 371e7e147..eac00ec0b 100644 --- a/src/trackers/FNP.py +++ b/src/trackers/FNP.py @@ -27,7 +27,7 @@ def __init__(self, config): self.source_flag = 'FnP' self.upload_url = 'https://fearnopeer.com/api/torrents/upload' self.search_url = 'https://fearnopeer.com/api/torrents/filter' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" self.banned_groups = [""] pass diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index 3d215259a..35c596821 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -27,7 +27,7 @@ def __init__(self, config): self.source_flag = 'HUNO' self.search_url = 'https://hawke.uno/api/torrents/filter' self.upload_url = 'https://hawke.uno/api/torrents/upload' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" self.banned_groups = ["4K4U, Bearfish, BiTOR, BONE, D3FiL3R, d3g, DTR, ELiTE, EVO, eztv, EzzRips, FGT, HashMiner, HETeam, HEVCBay, HiQVE, HR-DR, iFT, ION265, iVy, JATT, Joy, LAMA, m3th, MeGusta, MRN, Musafirboy, OEPlus, Pahe.in, PHOCiS, PSA, RARBG, RMTeam, ShieldBearer, SiQ, TBD, Telly, TSP, VXT, WKS, YAWNiX, YIFY, YTS"] pass diff --git a/src/trackers/LCD.py b/src/trackers/LCD.py index 2bcaaee3f..463fe2ea6 100644 --- a/src/trackers/LCD.py +++ b/src/trackers/LCD.py @@ -27,7 +27,7 @@ def __init__(self, config): self.search_url = 'https://locadora.cc/api/torrents/filter' self.torrent_url = 'https://locadora.cc/api/torrents/' self.upload_url = 'https://locadora.cc/api/torrents/upload' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" 
self.banned_groups = [""] pass diff --git a/src/trackers/LST.py b/src/trackers/LST.py index 4144343b1..503ded332 100644 --- a/src/trackers/LST.py +++ b/src/trackers/LST.py @@ -28,7 +28,7 @@ def __init__(self, config): self.upload_url = 'https://lst.gg/api/torrents/upload' self.search_url = 'https://lst.gg/api/torrents/filter' self.torrent_url = 'https://lst.gg/api/torrents/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" self.banned_groups = ['aXXo', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'DNL', 'FaNGDiNG0', 'HD2DVD', 'HDTime', 'ION10', 'iPlanet', 'KiNGDOM', 'mHD', 'mSD', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'OFT', 'PRODJi', 'SANTi', 'STUTTERSHIT', 'ViSION', 'VXT', 'WAF', 'x0r', 'YIFY', 'Sicario', 'RARBG', 'MeGusta', 'TSP', 'TSPxL', 'GalaxyTV', 'TGALAXY', 'TORRENTGALAXY', 'NaNi', diff --git a/src/trackers/OE.py b/src/trackers/OE.py index 63f1f55e2..91ac0b6f2 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -28,7 +28,7 @@ def __init__(self, config): self.search_url = 'https://onlyencodes.cc/api/torrents/filter' self.upload_url = 'https://onlyencodes.cc/api/torrents/upload' self.torrent_url = 'https://onlyencodes.cc/api/torrents/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" self.banned_groups = [ '0neshot', '3LT0N', '4K4U', '4yEo', '$andra', '[Oj]', 'AFG', 'AkihitoSubs', 'AniHLS', 'Anime Time', 'AnimeRG', 'AniURL', 'AOC', 'AR', 'AROMA', 'ASW', 'aXXo', 'BakedFish', 'BiTOR', 'BRrip', 'bonkai', diff --git a/src/trackers/OTW.py b/src/trackers/OTW.py index d1cc69156..db4457614 100644 --- a/src/trackers/OTW.py +++ b/src/trackers/OTW.py @@ -27,7 +27,7 @@ def __init__(self, config): self.source_flag = 'OLD' self.upload_url = 'https://oldtoons.world/api/torrents/upload' self.search_url = 'https://oldtoons.world/api/torrents/filter' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" self.banned_groups = [""] pass diff --git a/src/trackers/PTT.py b/src/trackers/PTT.py index c23e87021..b3dacc160 100644 --- a/src/trackers/PTT.py +++ b/src/trackers/PTT.py @@ -27,7 +27,7 @@ def __init__(self, config): self.source_flag = 'PTT' self.upload_url = 'https://polishtorrent.top/api/torrents/upload' self.search_url = 'https://polishtorrent.top/api/torrents/filter' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" self.banned_groups = ['ViP', 'BiRD', 'M@RTiNU$', 'inTGrity', 'CiNEMAET', 'MusicET', 'TeamET', 'R2D2'] pass diff --git a/src/trackers/RF.py b/src/trackers/RF.py index fe1e5a4f6..306432cac 100644 --- a/src/trackers/RF.py +++ b/src/trackers/RF.py @@ -28,7 +28,7 @@ def __init__(self, config): self.source_flag = 'ReelFliX' self.upload_url = 'https://reelflix.xyz/api/torrents/upload' self.search_url = 'https://reelflix.xyz/api/torrents/filter' - 
self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" self.banned_groups = [""] pass diff --git a/src/trackers/SHRI.py b/src/trackers/SHRI.py index a41120210..ebf7f298f 100644 --- a/src/trackers/SHRI.py +++ b/src/trackers/SHRI.py @@ -27,7 +27,7 @@ def __init__(self, config): self.source_flag = 'Shareisland' self.upload_url = 'https://shareisland.org/api/torrents/upload' self.search_url = 'https://shareisland.org/api/torrents/filter' - self.signature = "\n[center][url=https://shareisland.org]Created by SHRI Upload Assistant[/url][/center]" + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" self.banned_groups = [""] pass diff --git a/src/trackers/TIK.py b/src/trackers/TIK.py index 9446d05fe..788a2164c 100644 --- a/src/trackers/TIK.py +++ b/src/trackers/TIK.py @@ -32,7 +32,7 @@ def __init__(self, config): self.search_url = 'https://cinematik.net/api/torrents/filter' self.upload_url = 'https://cinematik.net/api/torrents/upload' self.torrent_url = 'https://cinematik.net/api/torrents/' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by testing 123, Audionuts Upload Assistant[/url][/center]" + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" self.banned_groups = [""] pass diff --git a/src/trackers/ULCX.py b/src/trackers/ULCX.py index 099fac36f..440132904 100644 --- a/src/trackers/ULCX.py +++ b/src/trackers/ULCX.py @@ -20,7 +20,7 @@ def __init__(self, config): self.source_flag = 'ULCX' self.upload_url = 'https://upload.cx/api/torrents/upload' self.search_url = 'https://upload.cx/api/torrents/filter' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" self.banned_groups = ['Tigole', 'x0r', 'Judas', 'SPDVD', 'MeGusta', 'YIFY', 'SWTYBLZ', 'TAoE', 'TSP', 'TSPxL', 'LAMA', '4K4U', 'ION10', 'Will1869', 'TGx', 'Sicario', 'QxR', 'Hi10', 'EMBER', 'FGT', 'AROMA', 'd3g', 'nikt0', 'Grym', 'RARBG', 'iVy', 'FnP', 'EDGE2020', 'NuBz', 'NAHOM', 'Ralphy'] diff --git a/src/trackers/UNIT3D_TEMPLATE.py b/src/trackers/UNIT3D_TEMPLATE.py index 60a7b6297..d4a530e2a 100644 --- a/src/trackers/UNIT3D_TEMPLATE.py +++ b/src/trackers/UNIT3D_TEMPLATE.py @@ -31,7 +31,7 @@ def __init__(self, config): self.source_flag = 'Source flag for .torrent' self.upload_url = 'https://domain.tld/api/torrents/upload' self.search_url = 'https://domain.tld/api/torrents/filter' - self.signature = None + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" self.banned_groups = [""] pass diff --git a/src/trackers/UTP.py b/src/trackers/UTP.py index 4ac259d81..375fc2170 100644 --- a/src/trackers/UTP.py +++ b/src/trackers/UTP.py @@ -27,7 +27,7 @@ def __init__(self, config): self.search_url = 'https://utp.to/api/torrents/filter' self.torrent_url = 'https://utp.to/api/torrents/' self.upload_url = 'https://utp.to/api/torrents/upload' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload 
Assistant[/url][/center]" + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" self.banned_groups = [] pass diff --git a/src/trackers/YOINK.py b/src/trackers/YOINK.py index 7fca97365..a6d2cc1d3 100644 --- a/src/trackers/YOINK.py +++ b/src/trackers/YOINK.py @@ -27,7 +27,7 @@ def __init__(self, config): self.source_flag = 'YOINK' self.upload_url = 'https://yoinked.org/api/torrents/upload' self.search_url = 'https://yoinked.org/api/torrents/filter' - self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by L4G's Upload Assistant[/url][/center]" + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" self.banned_groups = ["YTS,YiFY,LAMA,MeGUSTA,NAHOM,GalaxyRG,RARBG"] pass From b4d79ffadb7e1fe18e0a683174e293f73b60282b Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 20 Dec 2024 19:22:18 +1000 Subject: [PATCH 704/741] Only check MTV piece size if other checks pass Also skip if needed rehash --- src/prep.py | 35 +++++++++--------- src/trackers/MTV.py | 87 ++++++++++++++++++++++++--------------------- 2 files changed, 65 insertions(+), 57 deletions(-) diff --git a/src/prep.py b/src/prep.py index 808ea730f..0dfcd7a0b 100644 --- a/src/prep.py +++ b/src/prep.py @@ -780,25 +780,28 @@ async def process_tracker(tracker_name, meta): elif meta['skipping']: tracker_status[tracker_name]['skipped'] = True if tracker_name == "MTV": - tracker_config = self.config['TRACKERS'].get(tracker_name, {}) - if str(tracker_config.get('prefer_mtv_torrent', 'false')).lower() == "true": - meta['prefer_small_pieces'] = True - else: - meta['prefer_small_pieces'] = False - if str(tracker_config.get('skip_if_rehash', 'false')).lower() == "true": - torrent_path = os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") - if not os.path.exists(torrent_path): - check_torrent = await client.find_existing_torrent(meta) - if check_torrent: - console.print(f"[yellow]Existing torrent found on {check_torrent}[/yellow]") - self.create_base_from_existing_torrent(check_torrent, meta['base_dir'], meta['uuid']) + if not tracker_status[tracker_name]['banned'] and not tracker_status[tracker_name]['skipped'] and not tracker_status[tracker_name]['dupe']: + tracker_config = self.config['TRACKERS'].get(tracker_name, {}) + if str(tracker_config.get('prefer_mtv_torrent', 'false')).lower() == "true": + meta['prefer_small_pieces'] = True + else: + meta['prefer_small_pieces'] = False + if str(tracker_config.get('skip_if_rehash', 'false')).lower() == "true": + torrent_path = os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") + if not os.path.exists(torrent_path): + check_torrent = await client.find_existing_torrent(meta) + if check_torrent: + console.print(f"[yellow]Existing torrent found on {check_torrent}[/yellow]") + self.create_base_from_existing_torrent(check_torrent, meta['base_dir'], meta['uuid']) + torrent = Torrent.read(torrent_path) + if torrent.piece_size > 8388608: + console.print("[yellow]No existing torrent found with piece size lesser than 8MB[/yellow]") + tracker_status[tracker_name]['skipped'] = True + elif os.path.exists(torrent_path): torrent = Torrent.read(torrent_path) if torrent.piece_size > 8388608: + console.print("[yellow]Existing torrent found with piece size greater than 8MB[/yellow]") tracker_status[tracker_name]['skipped'] = True - elif os.path.exists(torrent_path): - torrent = 
Torrent.read(torrent_path) - if torrent.piece_size > 8388608: - tracker_status[tracker_name]['skipped'] = True if meta.get('skipping') is None and not is_dupe and tracker_name == "PTP": if meta.get('imdb_info', {}) == {}: meta['imdb_info'] = self.get_imdb_info_api(meta['imdb_id'], meta) diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 5c64aa544..18fb57b3a 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -46,6 +46,52 @@ async def upload(self, meta, disctype): await self.upload_with_retry(meta, cookiefile, common) async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + if not os.path.exists(torrent_file_path): + torrent_filename = "BASE" + torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent" + torrent = Torrent.read(torrent_path) + + if torrent.piece_size > 8388608: + tracker_config = self.config['TRACKERS'].get(MTV, {}) + if str(tracker_config.get('skip_if_rehash', 'false')).lower() == "true": + console.print("[red]Piece size is OVER 8M and does not work on MTV. Generating a new .torrent") + + meta['max_piece_size'] = '8' + if meta['is_disc']: + include = [] + exclude = [] + else: + include = ["*.mkv", "*.mp4", "*.ts"] + exclude = ["*.*", "*sample.mkv", "!sample*.*"] + + from src.prep import Prep + prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) + new_torrent = prep.CustomTorrent( + meta=meta, + path=Path(meta['path']), + trackers=["https://fake.tracker"], + source="L4G", + private=True, + exclude_globs=exclude, # Ensure this is always a list + include_globs=include, # Ensure this is always a list + creation_date=datetime.now(), + comment="Created by L4G's Upload Assistant", + created_by="L4G's Upload Assistant" + ) + + new_torrent.piece_size = 8 * 1024 * 1024 + new_torrent.validate_piece_size() + new_torrent.generate(callback=prep.torf_cb, interval=5) + new_torrent.write(f"{meta['base_dir']}/tmp/{meta['uuid']}/MTV.torrent", overwrite=True) + + torrent_filename = "MTV" + + await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename) + else: + console.print("[red]Piece size is OVER 8M and skip_if_rehash enabled. Skipping upload.") + return + approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb'] url_host_mapping = { "i.ibb.co": "imgbb", @@ -92,47 +138,6 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): console.print("[red]All image hosts failed. Please check your configuration.") return - torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - if not os.path.exists(torrent_file_path): - torrent_filename = "BASE" - torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent" - torrent = Torrent.read(torrent_path) - - if torrent.piece_size > 8388608: - console.print("[red]Piece size is OVER 8M and does not work on MTV. 
Generating a new .torrent") - - meta['max_piece_size'] = '8' - if meta['is_disc']: - include = [] - exclude = [] - else: - include = ["*.mkv", "*.mp4", "*.ts"] - exclude = ["*.*", "*sample.mkv", "!sample*.*"] - - from src.prep import Prep - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) - new_torrent = prep.CustomTorrent( - meta=meta, - path=Path(meta['path']), - trackers=["https://fake.tracker"], - source="L4G", - private=True, - exclude_globs=exclude, # Ensure this is always a list - include_globs=include, # Ensure this is always a list - creation_date=datetime.now(), - comment="Created by L4G's Upload Assistant", - created_by="L4G's Upload Assistant" - ) - - new_torrent.piece_size = 8 * 1024 * 1024 - new_torrent.validate_piece_size() - new_torrent.generate(callback=prep.torf_cb, interval=5) - new_torrent.write(f"{meta['base_dir']}/tmp/{meta['uuid']}/MTV.torrent", overwrite=True) - - torrent_filename = "MTV" - - await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename) - cat_id = await self.get_cat_id(meta) resolution_id = await self.get_res_id(meta['resolution']) source_id = await self.get_source_id(meta) From 95f201851190810a7cb1b9dbda46a827e3b426af Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 20 Dec 2024 20:50:24 +1000 Subject: [PATCH 705/741] fix MTV torrent creation --- src/trackers/MTV.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 18fb57b3a..017611f3e 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -87,10 +87,10 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): torrent_filename = "MTV" - await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename) else: console.print("[red]Piece size is OVER 8M and skip_if_rehash enabled. 
Skipping upload.") return + await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename) approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb'] url_host_mapping = { From b404d752901ba2468dcffecde93f5514e48fa211 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 20 Dec 2024 20:59:08 +1000 Subject: [PATCH 706/741] Ask to upload even if pass all checks satisfies https://github.com/Audionut/Upload-Assistant/issues/215 --- src/prep.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index 0dfcd7a0b..95594f25e 100644 --- a/src/prep.py +++ b/src/prep.py @@ -809,8 +809,12 @@ async def process_tracker(tracker_name, meta): if not tracker_status[tracker_name]['banned'] and not tracker_status[tracker_name]['skipped'] and not tracker_status[tracker_name]['dupe']: console.print(f"[green]Tracker '{tracker_name}' passed all checks.[/green]") - tracker_status[tracker_name]['upload'] = True - successful_trackers += 1 + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): + cli_ui.ask_yes_no(f"Passed all tracker checks, upload to {tracker_name}?", default=False) + tracker_status[tracker_name]['upload'] = False + else: + tracker_status[tracker_name]['upload'] = True + successful_trackers += 1 else: if tracker_name == "MANUAL": successful_trackers += 1 From 6dea06fc0f1f260ed453dc26ae31daca41ddb2e5 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 20 Dec 2024 21:06:33 +1000 Subject: [PATCH 707/741] Fix more NoneType in imdb --- src/prep.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/prep.py b/src/prep.py index 95594f25e..6109f6b02 100644 --- a/src/prep.py +++ b/src/prep.py @@ -4413,6 +4413,8 @@ async def get_imdb_info_api(self, imdbID, meta): else: if not imdbID.startswith("tt"): imdbIDtt = f"tt{imdbID}" + else: + imdbIDtt = imdbID query = { "query": f""" query GetTitleInfo {{ @@ -4493,7 +4495,7 @@ async def get_imdb_info_api(self, imdbID, meta): if not original_title or original_title == imdb_info['title']: original_title = imdb_info['title'] imdb_info['aka'] = original_title - imdb_info['type'] = title_data.get('titleType', {}).get('id', '') + imdb_info['type'] = title_data.get('titleType', {}).get('id', '') or '' runtime_data = title_data.get('runtime', {}) if runtime_data and isinstance(runtime_data, dict): runtime_seconds = runtime_data.get('seconds', 0) @@ -4502,7 +4504,7 @@ async def get_imdb_info_api(self, imdbID, meta): runtime_seconds = 0 runtime_minutes = 0 imdb_info['runtime'] = str(runtime_minutes) - imdb_info['cover'] = title_data.get('primaryImage', {}).get('url', '') or meta.get('poster', '') + imdb_info['cover'] = title_data.get('primaryImage', {}).get('url', '') or meta.get('poster', '') or '' imdb_info['plot'] = title_data.get('plot', {}).get('plotText', {}).get('plainText', '') or 'No plot available' title_genres = title_data.get('titleGenres') if title_genres and isinstance(title_genres, dict): @@ -4511,7 +4513,7 @@ async def get_imdb_info_api(self, imdbID, meta): genres = [] genre_list = [g.get('genre', {}).get('text', '') for g in genres if g.get('genre', {}).get('text')] imdb_info['genres'] = ', '.join(genre_list) or '' - imdb_info['rating'] = title_data.get('ratingsSummary', {}).get('aggregateRating', 'N/A') + imdb_info['rating'] = title_data.get('ratingsSummary', {}).get('aggregateRating', 'N/A') or '' imdb_info['directors'] = [] principal_credits = title_data.get('principalCredits', []) if principal_credits and 
isinstance(principal_credits, list): From 4f44539ec7df5f4e989a5860b700ac4d09f20380 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 20 Dec 2024 21:29:00 +1000 Subject: [PATCH 708/741] add proper user selection check --- src/prep.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index 6109f6b02..2de17e554 100644 --- a/src/prep.py +++ b/src/prep.py @@ -810,8 +810,8 @@ async def process_tracker(tracker_name, meta): if not tracker_status[tracker_name]['banned'] and not tracker_status[tracker_name]['skipped'] and not tracker_status[tracker_name]['dupe']: console.print(f"[green]Tracker '{tracker_name}' passed all checks.[/green]") if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): - cli_ui.ask_yes_no(f"Passed all tracker checks, upload to {tracker_name}?", default=False) - tracker_status[tracker_name]['upload'] = False + user_confirmed = cli_ui.ask_yes_no(f"Passed all tracker checks, upload to {tracker_name}?", default=False) + tracker_status[tracker_name]['upload'] = user_confirmed else: tracker_status[tracker_name]['upload'] = True successful_trackers += 1 From 4805f1908ca756aa78f090bc96210f150cdb5274 Mon Sep 17 00:00:00 2001 From: Audionut Date: Fri, 20 Dec 2024 22:48:32 +1000 Subject: [PATCH 709/741] really fix user prompt for upload like really really --- src/prep.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/prep.py b/src/prep.py index 2de17e554..c3b818fc2 100644 --- a/src/prep.py +++ b/src/prep.py @@ -805,16 +805,20 @@ async def process_tracker(tracker_name, meta): if meta.get('skipping') is None and not is_dupe and tracker_name == "PTP": if meta.get('imdb_info', {}) == {}: meta['imdb_info'] = self.get_imdb_info_api(meta['imdb_id'], meta) - meta['skipping'] = None if not tracker_status[tracker_name]['banned'] and not tracker_status[tracker_name]['skipped'] and not tracker_status[tracker_name]['dupe']: console.print(f"[green]Tracker '{tracker_name}' passed all checks.[/green]") if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): - user_confirmed = cli_ui.ask_yes_no(f"Passed all tracker checks, upload to {tracker_name}?", default=False) - tracker_status[tracker_name]['upload'] = user_confirmed + edit_choice = input("Enter 'y' to upload, or press Enter to skip uploading:") + if edit_choice.lower() == 'y': + tracker_status[tracker_name]['upload'] = True + successful_trackers += 1 + else: + tracker_status[tracker_name]['upload'] = False else: tracker_status[tracker_name]['upload'] = True successful_trackers += 1 + meta['skipping'] = None else: if tracker_name == "MANUAL": successful_trackers += 1 From eed597412d0932555853e6534a7f602b3ae0c7de Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 21 Dec 2024 08:45:13 +1000 Subject: [PATCH 710/741] fix unable to get when imdbID is not valid --- src/prep.py | 108 ++++++++++++++++++++++++++-------------------------- 1 file changed, 54 insertions(+), 54 deletions(-) diff --git a/src/prep.py b/src/prep.py index c3b818fc2..ea3fc0b1c 100644 --- a/src/prep.py +++ b/src/prep.py @@ -4409,16 +4409,36 @@ def daily_to_tmdb_season_episode(self, tmdbid, date): console.print(f"[yellow]Unable to map the date ([bold yellow]{str(date)}[/bold yellow]) to a Season/Episode number") return season, episode + def safe_get(self, data, path, default=None): + for key in path: + if isinstance(data, dict): + data = data.get(key, default) + else: + return default + return data + async def 
get_imdb_info_api(self, imdbID, meta): - imdb_info = {} + imdb_info = { + 'title': meta['title'], + 'year': meta['year'], + 'aka': '', + 'type': None, + 'runtime': meta.get('runtime', '60'), + 'cover': meta.get('poster'), + } + if len(meta.get('tmdb_directors', [])) >= 1: + imdb_info['directors'] = meta['tmdb_directors'] if imdbID == "0": - return "", None + return imdb_info else: - if not imdbID.startswith("tt"): - imdbIDtt = f"tt{imdbID}" - else: - imdbIDtt = imdbID + try: + if not imdbID.startswith("tt"): + imdbIDtt = f"tt{imdbID}" + else: + imdbIDtt = imdbID + except Exception: + return imdb_info query = { "query": f""" query GetTitleInfo {{ @@ -4489,62 +4509,42 @@ async def get_imdb_info_api(self, imdbID, meta): response = requests.post(url, json=query, headers=headers) data = response.json() - title_data = data.get("data", {}).get("title", {}) - if not title_data: - return meta + if response.status_code != 200: + return imdb_info + + title_data = self.safe_get(data, ["data", "title"], {}) + if not data or "data" not in data or "title" not in data["data"]: + return imdb_info + imdb_info['imdbID'] = imdbID - imdb_info['title'] = title_data.get('titleText', {}).get('text', '') or '' - imdb_info['year'] = title_data.get('releaseYear', {}).get('year', '') or '' - original_title = title_data.get('originalTitleText', {}).get('text', '') - if not original_title or original_title == imdb_info['title']: - original_title = imdb_info['title'] - imdb_info['aka'] = original_title - imdb_info['type'] = title_data.get('titleType', {}).get('id', '') or '' - runtime_data = title_data.get('runtime', {}) - if runtime_data and isinstance(runtime_data, dict): - runtime_seconds = runtime_data.get('seconds', 0) - runtime_minutes = runtime_seconds // 60 if runtime_seconds else 0 - else: - runtime_seconds = 0 - runtime_minutes = 0 - imdb_info['runtime'] = str(runtime_minutes) - imdb_info['cover'] = title_data.get('primaryImage', {}).get('url', '') or meta.get('poster', '') or '' - imdb_info['plot'] = title_data.get('plot', {}).get('plotText', {}).get('plainText', '') or 'No plot available' - title_genres = title_data.get('titleGenres') - if title_genres and isinstance(title_genres, dict): - genres = title_genres.get('genres', []) - else: - genres = [] - genre_list = [g.get('genre', {}).get('text', '') for g in genres if g.get('genre', {}).get('text')] - imdb_info['genres'] = ', '.join(genre_list) or '' - imdb_info['rating'] = title_data.get('ratingsSummary', {}).get('aggregateRating', 'N/A') or '' + imdb_info['title'] = self.safe_get(title_data, ['titleText', 'text'], meta['title']) + imdb_info['year'] = self.safe_get(title_data, ['releaseYear', 'year'], meta['year']) + original_title = self.safe_get(title_data, ['originalTitleText', 'text'], '') + imdb_info['aka'] = original_title if original_title and original_title != imdb_info['title'] else imdb_info['title'] + imdb_info['type'] = self.safe_get(title_data, ['titleType', 'id'], None) + runtime_seconds = self.safe_get(title_data, ['runtime', 'seconds'], 0) + imdb_info['runtime'] = str(runtime_seconds // 60 if runtime_seconds else 60) + imdb_info['cover'] = self.safe_get(title_data, ['primaryImage', 'url'], meta.get('poster', '')) + imdb_info['plot'] = self.safe_get(title_data, ['plot', 'plotText', 'plainText'], 'No plot available') + genres = self.safe_get(title_data, ['titleGenres', 'genres'], []) + genre_list = [self.safe_get(g, ['genre', 'text'], '') for g in genres] + imdb_info['genres'] = ', '.join(filter(None, genre_list)) + imdb_info['rating'] = 
self.safe_get(title_data, ['ratingsSummary', 'aggregateRating'], 'N/A') imdb_info['directors'] = [] - principal_credits = title_data.get('principalCredits', []) - if principal_credits and isinstance(principal_credits, list): + principal_credits = self.safe_get(title_data, ['principalCredits'], []) + if isinstance(principal_credits, list): for pc in principal_credits: - category_text = pc.get('category', {}).get('text', '') + category_text = self.safe_get(pc, ['category', 'text'], '') if 'Direct' in category_text: - credits = pc.get('credits', []) - if credits and isinstance(credits, list): - for c in credits: - name_id = c.get('name', {}).get('id', '') - if name_id and name_id.startswith('nm'): - imdb_info['directors'].append(name_id) + credits = self.safe_get(pc, ['credits'], []) + for c in credits: + name_id = self.safe_get(c, ['name', 'id'], '') + if name_id.startswith('nm'): + imdb_info['directors'].append(name_id) break if meta.get('manual_language'): imdb_info['original_langauge'] = meta.get('manual_language') - if not title_data: - imdb_info = { - 'title': meta['title'], - 'year': meta['year'], - 'aka': '', - 'type': None, - 'runtime': meta.get('runtime', '60'), - 'cover': meta.get('poster'), - } - if len(meta.get('tmdb_directors', [])) >= 1: - imdb_info['directors'] = meta['tmdb_directors'] return imdb_info async def get_imdb_info(self, imdbID, meta): From 4583b0474153347748d47a8da984b4b4c98f9c8c Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 21 Dec 2024 09:47:26 +1000 Subject: [PATCH 711/741] HDB - allow anime dual audio --- src/trackers/HDB.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py index 2279048f5..decfb328d 100644 --- a/src/trackers/HDB.py +++ b/src/trackers/HDB.py @@ -210,8 +210,9 @@ async def upload(self, meta, disctype): if each == "EXIT": console.print("[bold red]Something didn't map correctly, or this content is not allowed on HDB") return - if "Dual-Audio" in meta['audio'] and meta['is_disc'] not in ("BDMV", "HDDVD", "DVD"): - console.print("[bold red]Dual-Audio Encodes are not allowed") + if "Dual-Audio" in meta['audio']: + if not (meta['anime'] or meta['is_disc']): + console.print("[bold red]Dual-Audio Encodes are not allowed for non-anime and non-disc content") return # Download new .torrent from site From 8fc40f4c5bc14ab7759e1577872cc6806ccf5eab Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 21 Dec 2024 09:48:08 +1000 Subject: [PATCH 712/741] Ignore upload checks in debug mode And consistent color upload status messages --- src/prep.py | 25 ++++++++++++++----------- upload.py | 8 ++++---- 2 files changed, 18 insertions(+), 15 deletions(-) diff --git a/src/prep.py b/src/prep.py index ea3fc0b1c..3bb5bdcf6 100644 --- a/src/prep.py +++ b/src/prep.py @@ -805,19 +805,22 @@ async def process_tracker(tracker_name, meta): if meta.get('skipping') is None and not is_dupe and tracker_name == "PTP": if meta.get('imdb_info', {}) == {}: meta['imdb_info'] = self.get_imdb_info_api(meta['imdb_id'], meta) - - if not tracker_status[tracker_name]['banned'] and not tracker_status[tracker_name]['skipped'] and not tracker_status[tracker_name]['dupe']: - console.print(f"[green]Tracker '{tracker_name}' passed all checks.[/green]") - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): - edit_choice = input("Enter 'y' to upload, or press Enter to skip uploading:") - if edit_choice.lower() == 'y': + if not meta['debug']: + if not tracker_status[tracker_name]['banned'] and not 
tracker_status[tracker_name]['skipped'] and not tracker_status[tracker_name]['dupe']: + console.print(f"[bold yellow]Tracker '{tracker_name}' passed all checks.") + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): + edit_choice = input("Enter 'y' to upload, or press enter to skip uploading:") + if edit_choice.lower() == 'y': + tracker_status[tracker_name]['upload'] = True + successful_trackers += 1 + else: + tracker_status[tracker_name]['upload'] = False + else: tracker_status[tracker_name]['upload'] = True successful_trackers += 1 - else: - tracker_status[tracker_name]['upload'] = False - else: - tracker_status[tracker_name]['upload'] = True - successful_trackers += 1 + else: + tracker_status[tracker_name]['upload'] = True + successful_trackers += 1 meta['skipping'] = None else: if tracker_name == "MANUAL": diff --git a/upload.py b/upload.py index face85fb9..3d7e15bbe 100644 --- a/upload.py +++ b/upload.py @@ -518,7 +518,7 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): tracker_class = tracker_class_map[tracker](config=config) tracker_status = meta.get('tracker_status', {}) upload_status = tracker_status.get(tracker, {}).get('upload', False) - console.print(f"[red]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[/red]") + console.print(f"[yellow]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[/yellow]") if upload_status: modq, draft = await check_mod_q_and_draft(tracker_class, meta, debug, disctype) @@ -565,7 +565,7 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): tracker_class = tracker_class_map[tracker](config=config) tracker_status = meta.get('tracker_status', {}) upload_status = tracker_status.get(tracker, {}).get('upload', False) - console.print(f"[blue]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[/blue]") + console.print(f"[yellow]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[/yellow]") if upload_status: console.print(f"Uploading to {tracker}") @@ -599,7 +599,7 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): if tracker == "THR": tracker_status = meta.get('tracker_status', {}) upload_status = tracker_status.get(tracker, {}).get('upload', False) - print(f"Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}") + print(f"[yellow]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[/yellow]") if upload_status: thr = THR(config=config) @@ -616,7 +616,7 @@ async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): if tracker == "PTP": tracker_status = meta.get('tracker_status', {}) upload_status = tracker_status.get(tracker, {}).get('upload', False) - print(f"Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}") + print(f"[yellow]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[yellow]") if upload_status: ptp = PTP(config=config) From 8e882b9c0bfce6570a845dc84611982e0f39d320 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 21 Dec 2024 09:49:38 +1000 Subject: [PATCH 713/741] HUNO - force user to input undetected language --- src/trackers/HUNO.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index 35c596821..fdca6959f 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -150,12 +150,7 @@ def get_audio(self, meta): if language == "zxx": language = "Silent" elif not language: - if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): - 
language = cli_ui.ask_string('No audio language present, you must enter one:') - if not language: - language = "Unknown" - else: - language = "Unknown" + language = cli_ui.ask_string('No audio language present, you must enter one:') return f'{codec} {channels} {language}' From 772bd7a4a431ca432d6dc02452145b90ddc8704c Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 21 Dec 2024 10:26:51 +1000 Subject: [PATCH 714/741] HUNO - prohibit dvdrips --- src/trackers/HUNO.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index fdca6959f..fe0bc44b6 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -291,6 +291,10 @@ async def search_existing(self, meta, disctype): console.print('[bold red]Only x265/HEVC encodes are allowed') meta['skipping'] = "HUNO" return + if meta['type'] == "DVDRIP": + console.print('[bold red]DVDRIPs are not allowed') + meta['skipping'] = "HUNO" + return dupes = [] console.print("[yellow]Searching for existing torrents on HUNO...") From dc36cc1807bdce8f8b7711611a3cc1a83e80d5c3 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 21 Dec 2024 10:27:44 +1000 Subject: [PATCH 715/741] Ignore tracker pass check in debug --- src/prep.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/prep.py b/src/prep.py index 3bb5bdcf6..4b28abead 100644 --- a/src/prep.py +++ b/src/prep.py @@ -841,9 +841,10 @@ async def process_tracker(tracker_name, meta): console.print(f"\n[bold]Trackers Passed all Checks:[/bold] {successful_trackers}") meta['skip_uploading'] = int(self.config['DEFAULT'].get('tracker_pass_checks', 1)) - if successful_trackers < meta['skip_uploading']: - console.print(f"[red]Not enough successful trackers ({successful_trackers}/{meta['skip_uploading']}). EXITING........[/red]") - return + if not meta['debug']: + if successful_trackers < meta['skip_uploading']: + console.print(f"[red]Not enough successful trackers ({successful_trackers}/{meta['skip_uploading']}). 
EXITING........[/red]") + return meta['we_are_uploading'] = True From 7e05b99999e3c20b688b10e346bf7f67fefc1a77 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 21 Dec 2024 10:40:39 +1000 Subject: [PATCH 716/741] Fix dvd/dvdrip source fallback detection --- src/prep.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/src/prep.py b/src/prep.py index 4b28abead..6e11ea8f6 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2703,7 +2703,6 @@ def get_source(self, type, video, path, is_disc, meta, folder_id, base_dir): except Exception: if meta['debug']: console.print("No mediainfo.json") - resolution = meta['resolution'] try: try: source = guessit(video)['source'] @@ -2737,10 +2736,10 @@ def get_source(self, type, video, path, is_disc, meta, folder_id, base_dir): system = "NTSC" except Exception: system = "" - if system == "": + if system == "" or system is None: try: framerate = mi['media']['track'][1].get('FrameRate', '') - if framerate == "25": + if '25' in framerate or '50' in framerate: system = "PAL" elif framerate: system = "NTSC" @@ -2766,11 +2765,6 @@ def get_source(self, type, video, path, is_disc, meta, folder_id, base_dir): source = "Web" if source == "Ultra HDTV": source = "UHDTV" - if type == "DVDRIP": - if resolution in [540, 576]: - source = "PAL" - else: - source = "NTSC" except Exception: console.print(traceback.format_exc()) source = "BluRay" From 01853aa17e6551f9b2fb523e6a1016401f9d6875 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 21 Dec 2024 11:48:06 +1000 Subject: [PATCH 717/741] HDR only is dupe against DV+HDR --- src/trackers/COMMON.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index b37bc67b5..3a44fc581 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -820,6 +820,9 @@ def simplify_hdr(hdr_set): file_hdr_simple = simplify_hdr(file_hdr) target_hdr_simple = simplify_hdr(target_hdr) + if file_hdr_simple == {"DV", "HDR"} or file_hdr_simple == {"HDR", "DV"}: + file_hdr_simple = {"HDR"} + return file_hdr_simple == target_hdr_simple class MediaInfoParser: From 3cfdbce59db1242aebb5cc96ac37eba24287aa8d Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 21 Dec 2024 11:51:27 +1000 Subject: [PATCH 718/741] If existing DV+HDR is compressed, also compress target --- src/trackers/COMMON.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 3a44fc581..4f14c3fa7 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -822,6 +822,8 @@ def simplify_hdr(hdr_set): if file_hdr_simple == {"DV", "HDR"} or file_hdr_simple == {"HDR", "DV"}: file_hdr_simple = {"HDR"} + if target_hdr_simple == {"DV", "HDR"} or target_hdr_simple == {"HDR", "DV"}: + target_hdr_simple = {"HDR"} return file_hdr_simple == target_hdr_simple From c7eed2c839716ce8a2a44cf75fa83ae083007a1a Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 21 Dec 2024 12:01:31 +1000 Subject: [PATCH 719/741] fix encoder dupe matching --- src/trackers/COMMON.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 4f14c3fa7..828b8ac04 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -700,6 +700,7 @@ def process_exclusion(each): console.log(f" 'repack' in each.lower(): {'repack' in each.lower()}") console.log(f"[debug] meta['uuid']: {meta.get('uuid', '')}") console.log(f"[debug] meta['tag']: {meta.get('tag', '').lower()}") + console.log(f"[debug] normalized encoder: {normalized_encoder}") 
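# --- Illustrative aside, not part of the patches above ---
# PATCHes 717/718 make the HDR comparison collapse {DV, HDR} to {HDR} on both
# sides, so a DV+HDR release and a plain HDR release now compare equal (and are
# therefore weighed as dupes of each other). A minimal sketch of that collapse;
# note that Python sets are unordered, so the second equality test in the
# patches ({"HDR", "DV"}) is redundant and one comparison would suffice:
def collapse_dv_hdr(hdr_set):
    return {"HDR"} if hdr_set == {"DV", "HDR"} else hdr_set

assert collapse_dv_hdr({"DV", "HDR"}) == collapse_dv_hdr({"HDR"})  # dupe match
assert collapse_dv_hdr({"DV"}) != collapse_dv_hdr({"HDR"})         # still distinct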
        if has_is_disc and each.lower().endswith(".m2ts"):
            return False
@@ -741,7 +742,7 @@ def process_exclusion(each):
                 return True
 
         if not is_dvd:
-            if normalized_encoder and normalized_encoder in normalized:
+            if normalized_encoder and normalized_encoder in each:
                 log_exclusion(f"Encoder '{has_encoder_in_name}' mismatch", each)
                 return False

From c81a218c74d16786836bad40e564aea6e381ff01 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Sat, 21 Dec 2024 13:43:59 +1000
Subject: [PATCH 720/741] Exclude old-style comparisons from PTP descriptions

Prevents bad images being used in the image list

---
 src/bbcode.py | 28 +++++++++++++++++++++++-----
 1 file changed, 23 insertions(+), 5 deletions(-)

diff --git a/src/bbcode.py b/src/bbcode.py
index fa013f6cb..e013694f8 100644
--- a/src/bbcode.py
+++ b/src/bbcode.py
@@ -154,10 +154,28 @@ def clean_ptp_description(self, desc, is_disc):
 
         # Catch Stray Images and Prepare Image List
         imagelist = []
-        comps = re.findall(r"\[comparison=[\s\S]*?\[\/comparison\]", desc)
-        hides = re.findall(r"\[hide[\s\S]*?\[\/hide\]", desc)
+        excluded_urls = set()
+
+        source_encode_comps = re.findall(r"\[comparison=Source, Encode\][\s\S]*", desc, flags=re.IGNORECASE)
+        source_vs_encode_sections = re.findall(r"Source Vs Encode:[\s\S]*", desc, flags=re.IGNORECASE)
+        specific_cases = source_encode_comps + source_vs_encode_sections
+
+        # Extract URLs and update excluded_urls
+        for block in specific_cases:
+            urls = re.findall(r"(https?:\/\/[^\s\[\]]+\.(?:png|jpg))", block, flags=re.IGNORECASE)
+            excluded_urls.update(urls)
+            desc = desc.replace(block, '')
+
+        # General [comparison=...] handling
+        comps = re.findall(r"\[comparison=[\s\S]*?\[\/comparison\]", desc, flags=re.IGNORECASE)
+        hides = re.findall(r"\[hide[\s\S]*?\[\/hide\]", desc, flags=re.IGNORECASE)
+
         comps.extend(hides)
         nocomp = desc
+
+        # Exclude URLs in the excluded set from `nocomp`
+        for url in excluded_urls:
+            nocomp = nocomp.replace(url, '')
+
         comp_placeholders = []
 
         # Replace comparison/hide tags with placeholder because sometimes uploaders use comp images as loose images
@@ -172,12 +190,12 @@ def clean_ptp_description(self, desc, is_disc):
 
         # Extract loose images and add to imagelist as dictionaries
         loose_images = re.findall(r"(https?:\/\/[^\s\[\]]+\.(?:png|jpg))", nocomp, flags=re.IGNORECASE)
-        if loose_images:
-            for img_url in loose_images:
+        for img_url in loose_images:
+            if img_url not in excluded_urls:  # Only include URLs not part of excluded sections
                 image_dict = {
                     'img_url': img_url,
                     'raw_url': img_url,
-                    'web_url': img_url  # Since there is no distinction here, use the same URL for all
+                    'web_url': img_url
                 }
                 imagelist.append(image_dict)
                 desc = desc.replace(img_url, '')

From 098946e6509cf6e2d33dcb3028340411fb09d94a Mon Sep 17 00:00:00 2001
From: Audionut
Date: Sat, 21 Dec 2024 15:27:34 +1000
Subject: [PATCH 721/741] Change webdl dupe matching

When it matches, it counts as a dupe, rather than having false
mismatches excluded as dupes.
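The effect is easiest to see on a concrete, made-up release name, normalized
the way normalize_filename reads after PATCH 724 (later in this series). This
is an illustrative sketch, not code from the patch:

    name = "Show.S01E02.1080p.WEB-DL.DDP5.1.H.264-GROUP"  # hypothetical
    normalized = name.lower().replace("-", " ").replace("  ", " ").replace(".", " ")
    # -> 'show s01e02 1080p web dl ddp5 1 h 264 group'
    # WEB-DL, WEB.DL and WEBDL each normalize so that one of the spellings
    # checked by this change matches, and the candidate is then kept as a dupe:
    assert any(v in normalized for v in ("web-dl", "webdl", "web dl"))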
--- src/trackers/COMMON.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 828b8ac04..e773cc6ed 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -643,6 +643,7 @@ async def filter_dupes(self, dupes, meta): target_resolution = meta.get("resolution") tag = meta.get("tag").lower() is_dvd = meta['is_disc'] == "DVD" + web_dl = meta.get('type') == "WEBDL" attribute_checks = [ { @@ -663,12 +664,6 @@ async def filter_dupes(self, dupes, meta): "condition": lambda each: "uhd" in each.lower(), "exclude_msg": lambda each: f"Excluding result due to 'UHD' mismatch: {each}" }, - { - "key": "webdl", - "uuid_flag": "web-dl" in meta.get('name', '').lower(), - "condition": lambda each: "webdl" in each.lower() or "web-dl" in each.lower(), - "exclude_msg": lambda each: f"Excluding result due to 'WEBDL' mismatch: {each}" - }, { "key": "hdtv", "uuid_flag": "hdtv" in meta.get('name', '').lower(), @@ -746,6 +741,9 @@ def process_exclusion(each): log_exclusion(f"Encoder '{has_encoder_in_name}' mismatch", each) return False + if web_dl and ("web-dl" in normalized or "webdl" in normalized or "web dl" in normalized): + return False + console.log(f"[debug] Passed all checks: {each}") return False From 2ff3415119a7dec88426cd5def3d2dcefe5510af Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 21 Dec 2024 17:07:32 +1000 Subject: [PATCH 722/741] HUNO - only ban non x265 dvdrip --- src/trackers/HUNO.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index fe0bc44b6..8eabb871c 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -287,14 +287,10 @@ async def is_plex_friendly(self, meta): return 0 async def search_existing(self, meta, disctype): - if meta['video_codec'] != "HEVC" and (meta['type'] == "ENCODE" or meta['type'] == "WEBRIP"): + if meta['video_codec'] != "HEVC" and (meta['type'] == "ENCODE" or meta['type'] == "WEBRIP" or meta['type'] == "DVDRIP"): console.print('[bold red]Only x265/HEVC encodes are allowed') meta['skipping'] = "HUNO" return - if meta['type'] == "DVDRIP": - console.print('[bold red]DVDRIPs are not allowed') - meta['skipping'] = "HUNO" - return dupes = [] console.print("[yellow]Searching for existing torrents on HUNO...") From e4ebd4eca382e923cb531d08f767609b91c608c2 Mon Sep 17 00:00:00 2001 From: Hielito <36553765+Hielito2@users.noreply.github.com> Date: Sat, 21 Dec 2024 02:37:27 -0600 Subject: [PATCH 723/741] Update LT.py 1.- Add signature 2.- Remove "AKA" from title (titles should not include AKA) 3.- If original_language is Spanish, use AKA title instead if available --- src/trackers/LT.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/trackers/LT.py b/src/trackers/LT.py index 0f1cae61c..62a1091ce 100644 --- a/src/trackers/LT.py +++ b/src/trackers/LT.py @@ -27,7 +27,7 @@ def __init__(self, config): self.source_flag = 'Lat-Team "Poder Latino"' self.upload_url = 'https://lat-team.com/api/torrents/upload' self.search_url = 'https://lat-team.com/api/torrents/filter' - self.signature = '' + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" self.banned_groups = [""] pass @@ -78,7 +78,7 @@ async def get_res_id(self, resolution): return resolution_id async def edit_name(self, meta): - lt_name = meta['name'].replace('Dual-Audio', '').replace('Dubbed', '').replace(' ', ' ').strip() + lt_name = 
meta['name'].replace('Dual-Audio', '').replace('Dubbed', '').replace(meta['aka'], '').replace(' ', ' ').strip() if meta['type'] != 'DISC': # DISC don't have mediainfo # Check if is HYBRID (Copied from BLU.py) if 'hybrid' in meta.get('uuid').lower(): @@ -86,6 +86,9 @@ async def edit_name(self, meta): lt_name = lt_name.replace('REPACK', 'Hybrid REPACK') else: lt_name = lt_name.replace(meta['resolution'], f"Hybrid {meta['resolution']}") + # Check if original language is "es" if true replace title for AKA if available + if meta.get('original_language') == 'es' and meta.get('aka') != "": + lt_name = lt_name.replace(meta.get('title'), meta.get('aka').replace('AKA', '')).strip() # Check if audio Spanish exists # Get all the audios 'es-419' or 'es' audios = [ @@ -191,7 +194,7 @@ async def upload(self, meta, disctype): async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on LT...") params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'tmdbId': meta['tmdb'], From ea8e50aa39d251ef5dab3381de1671c616b1fcf9 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 21 Dec 2024 18:58:49 +1000 Subject: [PATCH 724/741] replace - in dupe checking missed this from last commit --- src/trackers/COMMON.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index e773cc6ed..5d2ed2532 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -641,7 +641,7 @@ async def filter_dupes(self, dupes, meta): target_season = meta.get("season") target_episode = meta.get("episode") target_resolution = meta.get("resolution") - tag = meta.get("tag").lower() + tag = meta.get("tag").lower().replace("-", " ") is_dvd = meta['is_disc'] == "DVD" web_dl = meta.get('type') == "WEBDL" @@ -762,7 +762,7 @@ def normalize_filename(self, filename): Normalize a filename for easier matching. Retain season/episode information in the format SxxExx. 
""" - normalized = filename.lower().replace("-", " -").replace(" ", " ").replace(".", " ") + normalized = filename.lower().replace("-", " ").replace(" ", " ").replace(".", " ") return normalized From 11b87c126122af435bb2d47f4a1200370f3b2dbe Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 21 Dec 2024 19:28:52 +1000 Subject: [PATCH 725/741] Image hosting rely on top domain only --- src/trackers/BHD.py | 42 ++++++++++++++++++++++++++++++------------ src/trackers/MTV.py | 37 +++++++++++++++++++++++++++---------- 2 files changed, 57 insertions(+), 22 deletions(-) diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index 5d2f670bb..717879b6a 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -33,26 +33,32 @@ def __init__(self, config): self.banned_groups = ['Sicario', 'TOMMY', 'x0r', 'nikt0', 'FGT', 'd3g', 'MeGusta', 'YIFY', 'tigole', 'TEKNO3D', 'C4K', 'RARBG', '4K4U', 'EASports', 'ReaLHD', 'Telly', 'AOC', 'WKS', 'SasukeducK'] pass + def match_host(self, hostname, approved_hosts): + for approved_host in approved_hosts: + if hostname == approved_host or hostname.endswith(f".{approved_host}"): + return approved_host + return hostname + async def upload(self, meta, disctype): common = COMMON(config=self.config) await self.upload_with_retry(meta, common) async def upload_with_retry(self, meta, common, img_host_index=1): url_host_mapping = { - "i.ibb.co": "imgbb", + "ibb.co": "imgbb", "ptpimg.me": "ptpimg", - "img100.pixhost.to": "pixhost", - "images2.imgbox.com": "imgbox", + "pixhost.to": "pixhost", + "imgbox.com": "imgbox", } approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb', 'pixhost'] - images_reuploaded = False - normalized_approved_hosts = set(approved_image_hosts + list(url_host_mapping.keys())) # noqa F841 + for image in meta['image_list']: raw_url = image['raw_url'] parsed_url = urlparse(raw_url) hostname = parsed_url.netloc - mapped_host = url_host_mapping.get(hostname, hostname) + mapped_host = self.match_host(hostname, url_host_mapping.keys()) + mapped_host = url_host_mapping.get(mapped_host, mapped_host) if meta['debug']: if mapped_host in approved_image_hosts: console.print(f"[green]URL '{raw_url}' is correctly matched to approved host '{mapped_host}'.") @@ -60,7 +66,10 @@ async def upload_with_retry(self, meta, common, img_host_index=1): console.print(f"[red]URL '{raw_url}' is not recognized as part of an approved host.") if all( - url_host_mapping.get(urlparse(image['raw_url']).netloc, urlparse(image['raw_url']).netloc) in approved_image_hosts + url_host_mapping.get( + self.match_host(urlparse(image['raw_url']).netloc, url_host_mapping.keys()), + self.match_host(urlparse(image['raw_url']).netloc, url_host_mapping.keys()), + ) in approved_image_hosts for image in meta['image_list'] ): console.print("[green]Images are already hosted on an approved image host. 
Skipping re-upload.") @@ -179,10 +188,10 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb', 'pixhost'] url_host_mapping = { - "i.ibb.co": "imgbb", + "ibb.co": "imgbb", "ptpimg.me": "ptpimg", - "img100.pixhost.to": "pixhost", - "images2.imgbox.com": "imgbox", + "pixhost.to": "pixhost", + "imgbox.com": "imgbox", } retry_mode = False @@ -288,13 +297,22 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts raw_url = image['raw_url'] parsed_url = urlparse(raw_url) hostname = parsed_url.netloc - mapped_host = url_host_mapping.get(hostname, hostname) + mapped_host = self.match_host(hostname, url_host_mapping.keys()) + mapped_host = url_host_mapping.get(mapped_host, mapped_host) if mapped_host not in approved_image_hosts: console.print(f"[red]Unsupported image host detected in URL '{raw_url}'. Please use one of the approved image hosts.") return meta[new_images_key], True, images_reuploaded # Trigger retry_mode if switching hosts - return meta[new_images_key], False, images_reuploaded + if all( + url_host_mapping.get( + self.match_host(urlparse(image['raw_url']).netloc, url_host_mapping.keys()), + self.match_host(urlparse(image['raw_url']).netloc, url_host_mapping.keys()), + ) in approved_image_hosts + for image in meta[new_images_key] + ): + + return meta[new_images_key], False, images_reuploaded async def get_cat_id(self, category_name): category_id = { diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 017611f3e..cbdec5a18 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -40,6 +40,12 @@ def __init__(self, config): ] pass + def match_host(self, hostname, approved_hosts): + for approved_host in approved_hosts: + if hostname == approved_host or hostname.endswith(f".{approved_host}"): + return approved_host + return hostname + async def upload(self, meta, disctype): common = COMMON(config=self.config) cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/MTV.pkl") @@ -94,18 +100,17 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb'] url_host_mapping = { - "i.ibb.co": "imgbb", + "ibb.co": "imgbb", "ptpimg.me": "ptpimg", - "images2.imgbox.com": "imgbox", + "imgbox.com": "imgbox", } - images_reuploaded = False - normalized_approved_hosts = set(approved_image_hosts + list(url_host_mapping.keys())) # noqa F841 for image in meta['image_list']: raw_url = image['raw_url'] parsed_url = urlparse(raw_url) hostname = parsed_url.netloc - mapped_host = url_host_mapping.get(hostname, hostname) + mapped_host = self.match_host(hostname, url_host_mapping.keys()) + mapped_host = url_host_mapping.get(mapped_host, mapped_host) if meta['debug']: if mapped_host in approved_image_hosts: console.print(f"[green]URL '{raw_url}' is correctly matched to approved host '{mapped_host}'.") @@ -113,7 +118,10 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): console.print(f"[red]URL '{raw_url}' is not recognized as part of an approved host.") if all( - url_host_mapping.get(urlparse(image['raw_url']).netloc, urlparse(image['raw_url']).netloc) in approved_image_hosts + url_host_mapping.get( + self.match_host(urlparse(image['raw_url']).netloc, url_host_mapping.keys()), + self.match_host(urlparse(image['raw_url']).netloc, url_host_mapping.keys()), + ) in approved_image_hosts for image in meta['image_list'] ): console.print("[green]Images are already hosted on an approved image 
host. Skipping re-upload.") @@ -208,9 +216,9 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb'] url_host_mapping = { - "i.ibb.co": "imgbb", + "ibb.co": "imgbb", "ptpimg.me": "ptpimg", - "images2.imgbox.com": "imgbox", + "imgbox.com": "imgbox", } retry_mode = False @@ -316,13 +324,22 @@ async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts raw_url = image['raw_url'] parsed_url = urlparse(raw_url) hostname = parsed_url.netloc - mapped_host = url_host_mapping.get(hostname, hostname) + mapped_host = self.match_host(hostname, url_host_mapping.keys()) + mapped_host = url_host_mapping.get(mapped_host, mapped_host) if mapped_host not in approved_image_hosts: console.print(f"[red]Unsupported image host detected in URL '{raw_url}'. Please use one of the approved image hosts.") return meta[new_images_key], True, images_reuploaded # Trigger retry_mode if switching hosts - return meta[new_images_key], False, images_reuploaded + if all( + url_host_mapping.get( + self.match_host(urlparse(image['raw_url']).netloc, url_host_mapping.keys()), + self.match_host(urlparse(image['raw_url']).netloc, url_host_mapping.keys()), + ) in approved_image_hosts + for image in meta[new_images_key] + ): + + return meta[new_images_key], False, images_reuploaded async def edit_desc(self, meta): base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() From ba2a8ff0b343ad8c85274bb107b45e41f8be7ae6 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 21 Dec 2024 21:00:45 +1000 Subject: [PATCH 726/741] imgbb fall back to thumb image when medium is not in response --- src/prep.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/prep.py b/src/prep.py index 6e11ea8f6..33d4f8563 100644 --- a/src/prep.py +++ b/src/prep.py @@ -3312,7 +3312,7 @@ def upload_image_task(self, args): console.print("[yellow]imgbb failed, trying next image host") return {'status': 'failed', 'reason': 'imgbb upload failed'} - img_url = response_data['data']['medium']['url'] + img_url = response_data['data'].get('medium', {}).get('url') or response_data['data']['thumb']['url'] raw_url = response_data['data']['image']['url'] web_url = response_data['data']['url_viewer'] From 5c853ee59b9f591cf0530545575a8198d8d2f9a3 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sat, 21 Dec 2024 23:17:29 +1000 Subject: [PATCH 727/741] fix MTV config error fixes https://github.com/Audionut/Upload-Assistant/issues/217 --- src/trackers/MTV.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index cbdec5a18..2fa49744d 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -59,7 +59,7 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): torrent = Torrent.read(torrent_path) if torrent.piece_size > 8388608: - tracker_config = self.config['TRACKERS'].get(MTV, {}) + tracker_config = self.config['TRACKERS'].get(self.tracker, {}) if str(tracker_config.get('skip_if_rehash', 'false')).lower() == "true": console.print("[red]Piece size is OVER 8M and does not work on MTV. 
Generating a new .torrent") From 20b63de7e61ec4b66fb18058eee5305108f143df Mon Sep 17 00:00:00 2001 From: Hielito <36553765+Hielito2@users.noreply.github.com> Date: Sat, 21 Dec 2024 18:52:03 -0600 Subject: [PATCH 728/741] Update LT.py Fix When audio track has not title it will be a empty dic "{}" and will raise an exception trying to .lower() it --- src/trackers/LT.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/trackers/LT.py b/src/trackers/LT.py index 62a1091ce..0a698c165 100644 --- a/src/trackers/LT.py +++ b/src/trackers/LT.py @@ -95,7 +95,7 @@ async def edit_name(self, meta): audio for audio in meta['mediainfo']['media']['track'][2:] if audio.get('@type') == 'Audio' and audio.get('Language') in {'es-419', 'es'} - and "commentary" not in audio.get('Title').lower() + and "commentary" not in str(audio.get('Title', '')).lower() ] if len(audios) > 0: # If there is at least 1 audio spanish lt_name = lt_name From dd80e9f20149363ad1254e45605d1fd674acd469 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 22 Dec 2024 15:38:04 +1000 Subject: [PATCH 729/741] Skip scene lookup if scene in meta --- src/prep.py | 98 ++++++++++++++++++++++++++--------------------------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/src/prep.py b/src/prep.py index 33d4f8563..0fd20475a 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1303,60 +1303,60 @@ def is_scene(self, video, meta, imdb=None): base = os.path.splitext(base)[0] base = urllib.parse.quote(base) url = f"https://api.srrdb.com/v1/search/r:{base}" + if 'scene' not in meta: + try: + response = requests.get(url, timeout=30) + response_json = response.json() + + if int(response_json.get('resultsCount', 0)) > 0: + first_result = response_json['results'][0] + meta['scene_name'] = first_result['release'] + video = f"{first_result['release']}.mkv" + scene = True + if scene and meta.get('isdir', False) and meta.get('queue') is not None: + meta['keep_folder'] = True + + # NFO Download Handling + if first_result.get("hasNFO") == "yes": + try: + release = first_result['release'] + release_lower = release.lower() + nfo_url = f"https://www.srrdb.com/download/file/{release}/{release_lower}.nfo" + + # Define path and create directory + save_path = os.path.join(meta['base_dir'], 'tmp', meta['uuid']) + os.makedirs(save_path, exist_ok=True) + nfo_file_path = os.path.join(save_path, f"{release_lower}.nfo") + + # Download the NFO file + nfo_response = requests.get(nfo_url, timeout=30) + if nfo_response.status_code == 200: + with open(nfo_file_path, 'wb') as f: + f.write(nfo_response.content) + meta['nfo'] = True + meta['auto_nfo'] = True + console.print(f"[green]NFO downloaded to {nfo_file_path}") + else: + console.print("[yellow]NFO file not available for download.") + except Exception as e: + console.print("[yellow]Failed to download NFO file:", e) - try: - response = requests.get(url, timeout=30) - response_json = response.json() - - if int(response_json.get('resultsCount', 0)) > 0: - first_result = response_json['results'][0] - meta['scene_name'] = first_result['release'] - video = f"{first_result['release']}.mkv" - scene = True - if scene and meta.get('isdir', False) and meta.get('queue') is not None: - meta['keep_folder'] = True - - # NFO Download Handling - if first_result.get("hasNFO") == "yes": + # IMDb Handling try: - release = first_result['release'] - release_lower = release.lower() - nfo_url = f"https://www.srrdb.com/download/file/{release}/{release_lower}.nfo" - - # Define path and create directory - save_path = 
os.path.join(meta['base_dir'], 'tmp', meta['uuid']) - os.makedirs(save_path, exist_ok=True) - nfo_file_path = os.path.join(save_path, f"{release_lower}.nfo") - - # Download the NFO file - nfo_response = requests.get(nfo_url, timeout=30) - if nfo_response.status_code == 200: - with open(nfo_file_path, 'wb') as f: - f.write(nfo_response.content) - meta['nfo'] = True - meta['auto_nfo'] = True - console.print(f"[green]NFO downloaded to {nfo_file_path}") - else: - console.print("[yellow]NFO file not available for download.") - except Exception as e: - console.print("[yellow]Failed to download NFO file:", e) + r = requests.get(f"https://api.srrdb.com/v1/imdb/{base}") + r = r.json() - # IMDb Handling - try: - r = requests.get(f"https://api.srrdb.com/v1/imdb/{base}") - r = r.json() - - if r['releases'] != [] and imdb is None: - imdb = r['releases'][0].get('imdb', imdb) if r['releases'][0].get('imdb') is not None else imdb - console.print(f"[green]SRRDB: Matched to {first_result['release']}") - except Exception as e: - console.print("[yellow]Failed to fetch IMDb information:", e) + if r['releases'] != [] and imdb is None: + imdb = r['releases'][0].get('imdb', imdb) if r['releases'][0].get('imdb') is not None else imdb + console.print(f"[green]SRRDB: Matched to {first_result['release']}") + except Exception as e: + console.print("[yellow]Failed to fetch IMDb information:", e) - else: - console.print("[yellow]SRRDB: No match found") + else: + console.print("[yellow]SRRDB: No match found") - except Exception as e: - console.print("[yellow]SRRDB: No match found, or request has timed out", e) + except Exception as e: + console.print("[yellow]SRRDB: No match found, or request has timed out", e) return video, scene, imdb From ab07dbba1d3b919161dfb7f720e6851abf46d4d9 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 22 Dec 2024 15:40:23 +1000 Subject: [PATCH 730/741] Set base_dir in meta before moving to prep --- src/prep.py | 1 - upload.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/prep.py b/src/prep.py index 0fd20475a..53f8df54b 100644 --- a/src/prep.py +++ b/src/prep.py @@ -426,7 +426,6 @@ async def gather_prep(self, meta, mode): if int(task_limit) > 0: meta['task_limit'] = task_limit meta['mode'] = mode - base_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) meta['isdir'] = os.path.isdir(meta['path']) base_dir = meta['base_dir'] meta['saved_description'] = False diff --git a/upload.py b/upload.py index 3d7e15bbe..c0e8ddb4b 100644 --- a/upload.py +++ b/upload.py @@ -211,7 +211,7 @@ async def process_meta(meta, base_dir): if str(ua).lower() == "true": meta['unattended'] = True console.print("[yellow]Running in Auto Mode") - + meta['base_dir'] = base_dir prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config) meta = await prep.gather_prep(meta=meta, mode='cli') if not meta: From 746124bebef93b12703959542e00a88bae4e75ab Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 22 Dec 2024 16:02:47 +1000 Subject: [PATCH 731/741] Add option to tonemap HDR images --- data/example-config.py | 8 +++- src/prep.py | 95 ++++++++++++++++++++++++++++++++++-------- upload.py | 2 +- 3 files changed, 85 insertions(+), 20 deletions(-) diff --git a/data/example-config.py b/data/example-config.py index 3934a9f90..743075b67 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -25,6 +25,11 @@ # Number of screenshots to capture "screens": "6", + # Tonemap HDR screenshots and set task limit when tonemapping + # When tonemapping, out of memory errors 
are more likely to occur with higher task limits
+    "tone_map": False,
+    "tone_task_limit": "1",
+
     # Number of cutoff screenshots
     # If there are at least this many screenshots already, perhaps pulled from existing
     # description, skip creating and uploading any further screenshots.
@@ -32,7 +37,8 @@
 
     # multi processing task limit
     # When capturing/optimizing images, limit to this many concurrent tasks
-    # defaults to 'os.cpu_count()'
+    # Causes issues on UNIX-based OS when task_limit > 1
+    # defaults to os.cpu_count() if this value is not set
     "task_limit": "1",
 
     # Providing the option to change the size of the screenshot thumbnails where supported.
diff --git a/src/prep.py b/src/prep.py
index 53f8df54b..d3a0200b5 100644
--- a/src/prep.py
+++ b/src/prep.py
@@ -425,6 +425,10 @@ async def gather_prep(self, meta, mode):
         task_limit = self.config['DEFAULT'].get('task_limit', "0")
         if int(task_limit) > 0:
             meta['task_limit'] = task_limit
+        meta['tone_map'] = self.config['DEFAULT'].get('tone_map', False)
+        tone_task_limit = self.config['DEFAULT'].get('tone_task_limit', "0")
+        if int(tone_task_limit) > 0:
+            meta['tone_task_limit'] = tone_task_limit
         meta['mode'] = mode
         meta['isdir'] = os.path.isdir(meta['path'])
         base_dir = meta['base_dir']
@@ -1408,9 +1412,19 @@ def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs,
         if meta['debug'] and not force_screenshots:
             console.print(f"[bold yellow]Saving Screens... Total needed: {self.screens}, Existing: {total_existing}, To capture: {num_screens}")
-        capture_results = []
+
+        tone_map = meta.get('tone_map', False)
+        if tone_map and "HDR" in meta['hdr']:
+            hdr_tonemap = True
+        else:
+            hdr_tonemap = False
+        capture_tasks = []
+        capture_results = []
+        if hdr_tonemap:
+            task_limit = int(meta.get('tone_task_limit'))
+        else:
+            task_limit = int(meta.get('task_limit', os.cpu_count()))
 
         if use_vs:
             from src.vs import vs_screengn
@@ -1429,7 +1443,8 @@ def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs,
                     ss_times[i],
                     os.path.abspath(f"{base_dir}/tmp/{folder_id}/{sanitized_filename}-{len(existing_indices) + i}.png"),
                     keyframe,
-                    loglevel
+                    loglevel,
+                    hdr_tonemap
                 )
                 for i in range(num_screens + 1)
             ]
@@ -1504,7 +1519,7 @@ def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs,
                     try:
                         os.remove(image_path)
                         random_time = random.uniform(0, length)
-                        self.capture_disc_task((file, random_time, image_path, keyframe, loglevel))
+                        self.capture_disc_task((file, random_time, image_path, keyframe, loglevel, hdr_tonemap))
                         self.optimize_image_task((image_path, config))
                         new_size = os.path.getsize(image_path)
                         valid_image = False
@@ -1546,17 +1561,33 @@ def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs,
             console.print(f"[green]Successfully captured {len(valid_results)} screenshots.")
 
     def capture_disc_task(self, task):
-        file, ss_time, image_path, keyframe, loglevel = task
+        file, ss_time, image_path, keyframe, loglevel, hdr_tonemap = task
         try:
-            (
-                ffmpeg
-                .input(file, ss=ss_time, skip_frame=keyframe)
+            ff = ffmpeg.input(file, ss=ss_time, skip_frame=keyframe)
+
+            if hdr_tonemap:
+                ff = (
+                    ff
+                    .filter('zscale', transfer='linear')
+                    .filter('tonemap', tonemap='mobius', desat=8.0)
+                    .filter('zscale', transfer='bt709')
+                    .filter('format', 'rgb24')
+                )
+
+            command = (
+                ff
                 .output(image_path, vframes=1, pix_fmt="rgb24")
                 .overwrite_output()
                 .global_args('-loglevel', loglevel)
-                .run()
             )
+
+            command.run(capture_stdout=True, capture_stderr=True)
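# --- Illustrative aside, not part of the patch ---
# The ffmpeg-python chain above should correspond roughly to this CLI call
# (file names are placeholders; the filter values mirror the patch):
#   ffmpeg -ss <time> -skip_frame <keyframe> -i <file> \
#     -vf "zscale=transfer=linear,tonemap=tonemap=mobius:desat=8.0,zscale=transfer=bt709,format=rgb24" \
#     -vframes 1 -pix_fmt rgb24 -y <image>.png
# zscale first converts to linear light, tonemap=mobius compresses the HDR
# highlights, and the second zscale brings the frame back to BT.709 for an
# SDR screenshot.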
+ return image_path + except ffmpeg.Error as e: + error_output = e.stderr.decode('utf-8') + console.print(f"[red]FFmpeg error capturing screenshot: {error_output}[/red]") + return None except Exception as e: console.print(f"[red]Error capturing screenshot: {e}[/red]") return None @@ -1845,9 +1876,18 @@ def use_tqdm(): else: ss_times = self.valid_ss_time([], num_screens + 1, length) + tone_map = meta.get('tone_map', False) + if tone_map and "HDR" in meta['hdr']: + hdr_tonemap = True + else: + hdr_tonemap = False + capture_tasks = [] capture_results = [] - task_limit = int(meta.get('task_limit', os.cpu_count())) + if hdr_tonemap: + task_limit = int(meta.get('tone_task_limit')) + else: + task_limit = int(meta.get('task_limit', os.cpu_count())) existing_images = 0 for i in range(num_screens): @@ -1861,7 +1901,7 @@ def use_tqdm(): for i in range(num_screens + 1): image_path = os.path.abspath(f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png") if not os.path.exists(image_path) or meta.get('retake', False): - capture_tasks.append((path, ss_times[i], image_path, width, height, w_sar, h_sar, loglevel)) + capture_tasks.append((path, ss_times[i], image_path, width, height, w_sar, h_sar, loglevel, hdr_tonemap)) elif meta['debug']: console.print(f"[yellow]Skipping existing screenshot: {image_path}") @@ -1873,7 +1913,10 @@ def use_tqdm(): with get_context("spawn").Pool(processes=min(len(capture_tasks), task_limit)) as pool: try: for result in pool.imap_unordered(self.capture_screenshot, capture_tasks): - capture_results.append(result) + if isinstance(result, str) and result.startswith("Error:"): + console.print(f"[red]Capture Error: {result}") + else: + capture_results.append(result) pbar.update(1) finally: pool.close() @@ -1951,7 +1994,7 @@ def use_tqdm(): try: os.remove(image_path) random_time = random.uniform(0, length) - self.capture_screenshot((path, random_time, image_path, width, height, w_sar, h_sar, loglevel)) + self.capture_screenshot((path, random_time, image_path, width, height, w_sar, h_sar, loglevel, hdr_tonemap)) self.optimize_image_task((image_path, config)) new_size = os.path.getsize(image_path) valid_image = False @@ -2017,9 +2060,8 @@ def valid_ss_time(self, ss_times, num_screens, length, manual_frames=None): return ss_times def capture_screenshot(self, args): - path, ss_time, image_path, width, height, w_sar, h_sar, loglevel = args + path, ss_time, image_path, width, height, w_sar, h_sar, loglevel, hdr_tonemap = args try: - # Validate inputs if width <= 0 or height <= 0: return "Error: Invalid width or height for scaling" @@ -2030,14 +2072,31 @@ def capture_screenshot(self, args): if w_sar != 1 or h_sar != 1: ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) + if hdr_tonemap: + ff = ( + ff + .filter('zscale', transfer='linear') + .filter('tonemap', tonemap='mobius', desat=8.0) + .filter('zscale', transfer='bt709') + .filter('format', 'rgb24') + ) + command = ( ff - .output(image_path, vframes=1, pix_fmt="rgb24") + .output( + image_path, + vframes=1, + pix_fmt="rgb24" + ) .overwrite_output() .global_args('-loglevel', loglevel) ) - command.run() + try: + command.run(capture_stdout=True, capture_stderr=True) + except ffmpeg.Error as e: + error_output = e.stderr.decode('utf-8') + return f"Error: {error_output}" if not os.path.exists(image_path) or os.path.getsize(image_path) == 0: return f"Error: Screenshot not generated or is empty at {image_path}" @@ -3662,7 +3721,7 @@ async def get_name(self, meta): console.log(f"CATEGORY: {meta['category']}") 
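# --- Editor's example (annotation, not part of the patch) ---
# The screenshot hunks above share one concurrency pattern: a config-driven
# cap on a spawn-context multiprocessing pool, with workers signalling
# failure by returning an "Error: ..." string that is filtered out of the
# collected results. A small self-contained sketch of that pattern; the
# worker function and task list are placeholders.

import os
from multiprocessing import get_context

def fake_capture(n):
    # Stand-in for capture_screenshot: return a path on success, a sentinel on failure
    return f"Error: task {n} failed" if n == 2 else f"shot-{n}.png"

if __name__ == '__main__':
    task_limit = int(os.environ.get('TASK_LIMIT', os.cpu_count()))
    tasks = list(range(5))
    results = []
    with get_context('spawn').Pool(processes=min(len(tasks), task_limit)) as pool:
        for result in pool.imap_unordered(fake_capture, tasks):
            if isinstance(result, str) and result.startswith("Error:"):
                print(f"capture error: {result}")
            else:
                results.append(result)
    print(results)
# --- end editor's example ---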
console.log(f"TYPE: {meta['type']}") console.log("[cyan]get_name meta:") - console.log(meta) + # console.log(meta) # YAY NAMING FUN if meta['category'] == "MOVIE": # MOVIE SPECIFIC diff --git a/upload.py b/upload.py index c0e8ddb4b..6375097ee 100644 --- a/upload.py +++ b/upload.py @@ -26,7 +26,7 @@ from rich.style import Style -cli_ui.setup(color='always', title="L4G's Upload Assistant") +cli_ui.setup(color='always', title="Audionut's Upload Assistant") base_dir = os.path.abspath(os.path.dirname(__file__)) From 5ab7020272e1b149090aca22c74d7103662b5687 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 22 Dec 2024 17:57:53 +1000 Subject: [PATCH 732/741] fix manual frames --- src/prep.py | 73 ++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 52 insertions(+), 21 deletions(-) diff --git a/src/prep.py b/src/prep.py index d3a0200b5..244002e2f 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1387,6 +1387,7 @@ def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs, sanitized_filename = self.sanitize_filename(filename) length = 0 file = None + frame_rate = None for each in bdinfo['files']: int_length = sum(int(float(x)) * 60 ** i for i, x in enumerate(reversed(each['length'].split(':')))) if int_length > length: @@ -1396,6 +1397,14 @@ def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs, if name.lower() == each['file'].lower(): file = os.path.join(root, name) + if 'video' in bdinfo and bdinfo['video']: + fps_string = bdinfo['video'][0].get('fps', None) + if fps_string: + try: + frame_rate = float(fps_string.split(' ')[0]) # Extract and convert to float + except ValueError: + console.print("[red]Error: Unable to parse frame rate from bdinfo['video'][0]['fps']") + keyframe = 'nokey' if "VC-1" in bdinfo['video'][0]['codec'] or bdinfo['video'][0]['hdr_dv'] != "" else 'none' os.chdir(f"{base_dir}/tmp/{folder_id}") @@ -1435,7 +1444,7 @@ def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs, else: loglevel = 'quiet' - ss_times = self.valid_ss_time([], num_screens + 1, length) + ss_times = self.valid_ss_time([], num_screens + 1, length, frame_rate) existing_indices = {int(p.split('-')[-1].split('.')[0]) for p in existing_screens} capture_tasks = [ ( @@ -1625,6 +1634,7 @@ def dvd_screenshots(self, meta, disc_num, num_screens=None, retry_cap=None): dar = float(track.display_aspect_ratio) width = float(track.width) height = float(track.height) + frame_rate = float(track.frame_rate) if par < 1: new_height = dar * height sar = width / new_height @@ -1675,7 +1685,7 @@ def _is_vob_good(n, loops, num_screens): main_set = meta['discs'][disc_num]['main_set'][1:] if len(meta['discs'][disc_num]['main_set']) > 1 else meta['discs'][disc_num]['main_set'] os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") voblength, n = _is_vob_good(0, 0, num_screens) - ss_times = self.valid_ss_time([], num_screens + 1, voblength) + ss_times = self.valid_ss_time([], num_screens + 1, voblength, frame_rate) tasks = [] task_limit = int(meta.get('task_limit', os.cpu_count())) for i in range(num_screens + 1): @@ -1867,14 +1877,22 @@ def use_tqdm(): os.chdir(f"{base_dir}/tmp/{folder_id}") if manual_frames: - manual_frames = [int(frame) for frame in manual_frames] + if meta['debug']: + console.print(f"[yellow]Using manual frames: {manual_frames}") + manual_frames = [int(frame) for frame in manual_frames.split(',')] ss_times = [frame / frame_rate for frame in manual_frames] - - if len(ss_times) < num_screens: - random_times = self.valid_ss_time(ss_times, 
num_screens - len(ss_times), length) - ss_times.extend(random_times) else: - ss_times = self.valid_ss_time([], num_screens + 1, length) + ss_times = [] + + ss_times = self.valid_ss_time( + ss_times, + num_screens, + length, + frame_rate, + exclusion_zone=500 + ) + if meta['debug']: + console.print(f"[green]Final list of frames for screenshots: {ss_times}") tone_map = meta.get('tone_map', False) if tone_map and "HDR" in meta['hdr']: @@ -2039,25 +2057,38 @@ def use_tqdm(): finish_time = time.time() console.print(f"Screenshots processed in {finish_time - start_time:.4f} seconds") - def valid_ss_time(self, ss_times, num_screens, length, manual_frames=None): - if manual_frames: - ss_times.extend(manual_frames[:num_screens]) # Use only as many as needed - console.print(f"[green]Using provided manual frame numbers for screenshots: {ss_times}") - return ss_times + def valid_ss_time(self, ss_times, num_screens, length, frame_rate, exclusion_zone=None): + total_screens = num_screens + 1 + exclusion_zone = exclusion_zone or length / 10 / total_screens + result_times = ss_times.copy() + attempts = 0 + max_attempts = 100 - # Generate random times if manual frames are not provided - while len(ss_times) < num_screens: + while len(result_times) < total_screens and attempts < max_attempts: + attempts += 1 valid_time = True - sst = random.randint(round(length / 5), round(4 * length / 5)) # Adjust range for more spread out times - for each in ss_times: - tolerance = length / 10 / num_screens - if abs(sst - each) <= tolerance: + frame = random.randint(round(length / 5), round(4 * length / 5)) + time = frame / frame_rate + + for existing_time in result_times: + if abs(frame - existing_time * frame_rate) <= exclusion_zone: valid_time = False break + if valid_time: - ss_times.append(sst) + result_times.append(time) + + if len(result_times) < total_screens: + remaining = total_screens - len(result_times) + start_frame = round(length / 5) + end_frame = round(4 * length / 5) + step = (end_frame - start_frame) / (remaining + 1) + + for i in range(remaining): + frame = start_frame + step * (i + 1) + result_times.append(frame / frame_rate) - return ss_times + return sorted(result_times) def capture_screenshot(self, args): path, ss_time, image_path, width, height, w_sar, h_sar, loglevel, hdr_tonemap = args From 4ebda9cfe8818ba44782477d7f91118ac6a2424c Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 22 Dec 2024 20:31:33 +1000 Subject: [PATCH 733/741] Make the ss_time function more robust --- src/prep.py | 50 +++++++++++++++++++++++++------------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/src/prep.py b/src/prep.py index 244002e2f..8ab865c88 100644 --- a/src/prep.py +++ b/src/prep.py @@ -2059,36 +2059,36 @@ def use_tqdm(): def valid_ss_time(self, ss_times, num_screens, length, frame_rate, exclusion_zone=None): total_screens = num_screens + 1 - exclusion_zone = exclusion_zone or length / 10 / total_screens + + if exclusion_zone is None: + exclusion_zone = max(length / (3 * total_screens), length / 15) + result_times = ss_times.copy() - attempts = 0 - max_attempts = 100 - - while len(result_times) < total_screens and attempts < max_attempts: - attempts += 1 - valid_time = True - frame = random.randint(round(length / 5), round(4 * length / 5)) - time = frame / frame_rate - - for existing_time in result_times: - if abs(frame - existing_time * frame_rate) <= exclusion_zone: - valid_time = False - break + section_size = (round(4 * length / 5) - round(length / 5)) / total_screens * 1.3 + 
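# Editor's note at this step (worked example, not part of the patch):
# screenshots are drawn from the middle 60% of the runtime
# (length/5 .. 4*length/5). That span is divided into total_screens windows,
# each widened to 1.3x its base size but stepped at 0.9x of the widened
# size, so adjacent windows overlap and picks near a shared edge remain
# possible. E.g. length=3600s and total_screens=7: span 2160s, base window
# ~308.6s, widened ~401.1s, stride ~361.0s, window starts at 720s, 1081s,
# ..., 2886s. Manually supplied frames (parsed upstream from a
# comma-separated string and divided by frame_rate) arrive pre-seeded in
# ss_times, and the exclusion check below keeps random picks away from them;
# the default exclusion zone max(length/(3*total_screens), length/15) is
# 240s in this example, which is why a crowded window can exhaust its 50
# attempts and fall back to the window midpoint.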
section_starts = [round(length / 5) + i * (section_size * 0.9) for i in range(total_screens)] + + for section_index in range(total_screens): + valid_time = False + attempts = 0 + start_frame = round(section_starts[section_index] * frame_rate) + end_frame = round((section_starts[section_index] + section_size) * frame_rate) + + while not valid_time and attempts < 50: + attempts += 1 + frame = random.randint(start_frame, end_frame) + time = frame / frame_rate - if valid_time: - result_times.append(time) + if all(abs(frame - existing_time * frame_rate) > exclusion_zone * frame_rate for existing_time in result_times): + result_times.append(time) + valid_time = True - if len(result_times) < total_screens: - remaining = total_screens - len(result_times) - start_frame = round(length / 5) - end_frame = round(4 * length / 5) - step = (end_frame - start_frame) / (remaining + 1) + if not valid_time: + midpoint_frame = (start_frame + end_frame) // 2 + result_times.append(midpoint_frame / frame_rate) - for i in range(remaining): - frame = start_frame + step * (i + 1) - result_times.append(frame / frame_rate) + result_times = sorted(result_times) - return sorted(result_times) + return result_times def capture_screenshot(self, args): path, ss_time, image_path, width, height, w_sar, h_sar, loglevel, hdr_tonemap = args From 3691d55d46015a83d04135b1ba8ac79a44ff2612 Mon Sep 17 00:00:00 2001 From: Audionut Date: Sun, 22 Dec 2024 23:06:01 +1000 Subject: [PATCH 734/741] save tracker state after editing input tags --- src/prep.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/src/prep.py b/src/prep.py index 8ab865c88..63f7fe3f9 100644 --- a/src/prep.py +++ b/src/prep.py @@ -714,8 +714,18 @@ async def process_tracker(tracker_name, meta): console.print(f"Metadata processed in {meta_finish_time - meta_start_time:.2f} seconds") parser = Args(config) helper = UploadHelper() + common = COMMON(config=config) + tracker_setup = TRACKER_SETUP(config=config) + enabled_trackers = tracker_setup.trackers_enabled(meta) + if "saved_trackers" not in meta: + meta['trackers'] = enabled_trackers + else: + meta['trackers'] = meta['saved_trackers'] confirm = helper.get_confirmation(meta) while confirm is False: + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: + json.dump(meta, f, indent=4) + meta['saved_trackers'] = meta['trackers'] editargs = cli_ui.ask_string("Input args that need correction e.g. 
(--tag NTb --category tv --tmdb 12345)")
                editargs = (meta['path'],) + tuple(editargs.split())
                if meta.get('debug', False):
@@ -723,19 +733,13 @@
                meta, help, before_args = parser.parse(editargs, meta)
                meta['edit'] = True
                meta = await self.gather_prep(meta=meta, mode='cli')
-                with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f:
-                    json.dump(meta, f, indent=4)
                meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await self.get_name(meta)
                confirm = helper.get_confirmation(meta)

-            common = COMMON(config=config)
-            tracker_setup = TRACKER_SETUP(config=config)
-            enabled_trackers = tracker_setup.trackers_enabled(meta)
-
            tracker_status = {}
            successful_trackers = 0

-            for tracker_name in enabled_trackers:
+            for tracker_name in meta['trackers']:
                disctype = meta.get('disctype', None)
                tracker_name = tracker_name.replace(" ", "").upper().strip()

From 2b5055a5e27fcb4f34e5e87f078fe5d9f393f68a Mon Sep 17 00:00:00 2001
From: Audionut
Date: Mon, 23 Dec 2024 14:37:52 +1000
Subject: [PATCH 735/741] MTV skip hash should have been default false

---
 src/trackers/MTV.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py
index 2fa49744d..fc5aa09d3 100644
--- a/src/trackers/MTV.py
+++ b/src/trackers/MTV.py
@@ -60,7 +60,7 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1):

        if torrent.piece_size > 8388608:
            tracker_config = self.config['TRACKERS'].get(self.tracker, {})
-            if str(tracker_config.get('skip_if_rehash', 'false')).lower() == "true":
+            if str(tracker_config.get('skip_if_rehash', 'false')).lower() == "false":
                console.print("[red]Piece size is OVER 8M and does not work on MTV. Generating a new .torrent")
                meta['max_piece_size'] = '8'

From b560108edccd7972aa1e9316d9e031402e96d3fd Mon Sep 17 00:00:00 2001
From: Audionut
Date: Mon, 23 Dec 2024 14:57:26 +1000
Subject: [PATCH 736/741] Catch disc info in bbcode

---
 src/bbcode.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/bbcode.py b/src/bbcode.py
index e013694f8..97e8b2150 100644
--- a/src/bbcode.py
+++ b/src/bbcode.py
@@ -70,6 +70,7 @@ def clean_ptp_description(self, desc, is_disc):

        elif is_disc == "BDMV":
            desc = re.sub(r"\[mediainfo\][\s\S]*?\[\/mediainfo\]", "", desc)
+            desc = re.sub(r"DISC INFO:[\s\S]*?(\n\n|$)", "", desc, flags=re.IGNORECASE)
            desc = re.sub(r"Disc Title:[\s\S]*?(\n\n|$)", "", desc, flags=re.IGNORECASE)
            desc = re.sub(r"Disc Size:[\s\S]*?(\n\n|$)", "", desc, flags=re.IGNORECASE)
            desc = re.sub(r"Protection:[\s\S]*?(\n\n|$)", "", desc, flags=re.IGNORECASE)

From 1357cf8b1e11f785848aa3f48ab01b87b8b5b2ad Mon Sep 17 00:00:00 2001
From: Audionut
Date: Mon, 23 Dec 2024 19:31:08 +1000
Subject: [PATCH 737/741] Make tvmaze work better

specifying --tvmaze now correctly overrides and skips searching
return the first result to speed up searching
when --daily, still search all results and allow selecting from the returned results
---
 src/args.py |   2 +
 src/prep.py | 152 ++++++++++++++++++++++++++++------------------------
 upload.py   |   2 +-
 3 files changed, 86 insertions(+), 70 deletions(-)

diff --git a/src/args.py b/src/args.py
index c18e75885..5f5249aa6 100644
--- a/src/args.py
+++ b/src/args.py
@@ -248,6 +248,8 @@ def parse(self, args, meta):
                            meta[key] = ""
                    elif key in ["manual_episode_title"]:
                        meta[key] = value
+                    elif key in ["tvmaze_manual"]:
+                        meta[key] = value
                    else:
                        meta[key] = meta.get(key, None)
                if key in ('trackers'):
diff --git a/src/prep.py b/src/prep.py
index 63f7fe3f9..41e0220a1 100644
---
a/src/prep.py +++ b/src/prep.py @@ -4712,82 +4712,96 @@ async def search_tvmaze(self, filename, year, imdbID, tvdbID, meta): print(f"Error: tvdbID is not a valid integer. Received: {tvdbID}") tvdbID = 0 - tvmazeID = 0 - results = [] - - if imdbID is None: - imdbID = '0' - - if int(tvdbID) != 0: - tvdb_resp = self._make_tvmaze_request("https://api.tvmaze.com/lookup/shows", {"thetvdb": tvdbID}, meta) - if tvdb_resp: - results.append(tvdb_resp) - if int(imdbID) != 0: - imdb_resp = self._make_tvmaze_request("https://api.tvmaze.com/lookup/shows", {"imdb": f"tt{imdbID}"}, meta) - if imdb_resp: - results.append(imdb_resp) - search_resp = self._make_tvmaze_request("https://api.tvmaze.com/search/shows", {"q": filename}, meta) - if search_resp: - if isinstance(search_resp, list): - results.extend([each['show'] for each in search_resp if 'show' in each]) + if meta.get('tvmaze_manual'): + tvmazeID = int(meta['tvmaze_manual']) + return tvmazeID, imdbID, tvdbID + else: + tvmazeID = 0 + results = [] + + if imdbID is None: + imdbID = '0' + + if meta['manual_date'] is None: + if int(tvdbID) != 0: + tvdb_resp = self._make_tvmaze_request("https://api.tvmaze.com/lookup/shows", {"thetvdb": tvdbID}, meta) + if tvdb_resp: + results.append(tvdb_resp) + else: + if int(imdbID) != 0: + imdb_resp = self._make_tvmaze_request("https://api.tvmaze.com/lookup/shows", {"imdb": f"tt{imdbID}"}, meta) + if imdb_resp: + results.append(imdb_resp) + else: + search_resp = self._make_tvmaze_request("https://api.tvmaze.com/search/shows", {"q": filename}, meta) + if search_resp: + if isinstance(search_resp, list): + results.extend([each['show'] for each in search_resp if 'show' in each]) + else: + results.append(search_resp) else: - results.append(search_resp) + if int(tvdbID) != 0: + tvdb_resp = self._make_tvmaze_request("https://api.tvmaze.com/lookup/shows", {"thetvdb": tvdbID}, meta) + if tvdb_resp: + results.append(tvdb_resp) + if int(imdbID) != 0: + imdb_resp = self._make_tvmaze_request("https://api.tvmaze.com/lookup/shows", {"imdb": f"tt{imdbID}"}, meta) + if imdb_resp: + results.append(imdb_resp) + search_resp = self._make_tvmaze_request("https://api.tvmaze.com/search/shows", {"q": filename}, meta) + if search_resp: + if isinstance(search_resp, list): + results.extend([each['show'] for each in search_resp if 'show' in each]) + else: + results.append(search_resp) - if year not in (None, ''): - results = [show for show in results if str(show.get('premiered', '')).startswith(str(year))] + if year not in (None, ''): + results = [show for show in results if str(show.get('premiered', '')).startswith(str(year))] - seen = set() - unique_results = [] - for show in results: - if show['id'] not in seen: - seen.add(show['id']) - unique_results.append(show) - results = unique_results + seen = set() + unique_results = [] + for show in results: + if show['id'] not in seen: + seen.add(show['id']) + unique_results.append(show) + results = unique_results - if not results: - if meta['debug']: - print("No results found.") - return tvmazeID, imdbID, tvdbID + if not results: + if meta['debug']: + print("No results found.") + return tvmazeID, imdbID, tvdbID - if meta.get('tvmaze_manual'): - tvmaze_manual_id = int(meta['tvmaze_manual']) - selected_show = next((show for show in results if show['id'] == tvmaze_manual_id), None) - if selected_show: - tvmazeID = selected_show['id'] - print(f"Selected manual show: {selected_show.get('name')} (TVmaze ID: {tvmazeID})") + if meta['manual_date'] is not None: + print("Search results:") + for idx, show in 
enumerate(results): + console.print(f"[bold red]{idx + 1}[/bold red]. [green]{show.get('name', 'Unknown')} (TVmaze ID:[/green] [bold red]{show['id']}[/bold red])") + console.print(f"[yellow] Premiered: {show.get('premiered', 'Unknown')}[/yellow]") + console.print(f" Externals: {json.dumps(show.get('externals', {}), indent=2)}") + + while True: + try: + choice = int(input(f"Enter the number of the correct show (1-{len(results)}) or 0 to skip: ")) + if choice == 0: + print("Skipping selection.") + break + if 1 <= choice <= len(results): + selected_show = results[choice - 1] + tvmazeID = selected_show['id'] + print(f"Selected show: {selected_show.get('name')} (TVmaze ID: {tvmazeID})") + break + else: + print(f"Invalid choice. Please choose a number between 1 and {len(results)}, or 0 to skip.") + except ValueError: + print("Invalid input. Please enter a number.") else: - print(f"Manual TVmaze ID {tvmaze_manual_id} not found in results.") - elif meta['manual_date'] is not None: - print("Search results:") - for idx, show in enumerate(results): - console.print(f"[bold red]{idx + 1}[/bold red]. [green]{show.get('name', 'Unknown')} (TVmaze ID:[/green] [bold red]{show['id']}[/bold red])") - console.print(f"[yellow] Premiered: {show.get('premiered', 'Unknown')}[/yellow]") - console.print(f" Externals: {json.dumps(show.get('externals', {}), indent=2)}") - - while True: - try: - choice = int(input(f"Enter the number of the correct show (1-{len(results)}) or 0 to skip: ")) - if choice == 0: - print("Skipping selection.") - break - if 1 <= choice <= len(results): - selected_show = results[choice - 1] - tvmazeID = selected_show['id'] - print(f"Selected show: {selected_show.get('name')} (TVmaze ID: {tvmazeID})") - break - else: - print(f"Invalid choice. Please choose a number between 1 and {len(results)}, or 0 to skip.") - except ValueError: - print("Invalid input. 
Please enter a number.") - else: - selected_show = results[0] - tvmazeID = selected_show['id'] - if meta['debug']: - print(f"Automatically selected show: {selected_show.get('name')} (TVmaze ID: {tvmazeID})") + selected_show = results[0] + tvmazeID = selected_show['id'] + if meta['debug']: + print(f"Automatically selected show: {selected_show.get('name')} (TVmaze ID: {tvmazeID})") - if meta['debug']: - print(f"Returning results - TVmaze ID: {tvmazeID}, IMDb ID: {imdbID}, TVDB ID: {tvdbID}") - return tvmazeID, imdbID, tvdbID + if meta['debug']: + print(f"Returning results - TVmaze ID: {tvmazeID}, IMDb ID: {imdbID}, TVDB ID: {tvdbID}") + return tvmazeID, imdbID, tvdbID def _make_tvmaze_request(self, url, params, meta): if meta['debug']: diff --git a/upload.py b/upload.py index 6375097ee..8fc5ab4be 100644 --- a/upload.py +++ b/upload.py @@ -161,7 +161,7 @@ def merge_meta(meta, saved_meta, path): 'trackers', 'dupe', 'debug', 'anon', 'category', 'type', 'screens', 'nohash', 'manual_edition', 'imdb', 'tmdb_manual', 'mal', 'manual', 'hdb', 'ptp', 'blu', 'no_season', 'no_aka', 'no_year', 'no_dub', 'no_tag', 'no_seed', 'client', 'desclink', 'descfile', 'desc', 'draft', 'modq', 'region', 'freeleech', 'personalrelease', 'unattended', 'manual_season', 'manual_episode', 'torrent_creation', 'qbit_tag', 'qbit_cat', - 'skip_imghost_upload', 'imghost', 'manual_source', 'webdv', 'hardcoded-subs', 'dual_audio', 'manual_type' + 'skip_imghost_upload', 'imghost', 'manual_source', 'webdv', 'hardcoded-subs', 'dual_audio', 'manual_type', 'tvmaze_manual' ] sanitized_saved_meta = {} for key, value in saved_meta.items(): From 226a4e77d29e037194451751337522a77a20e387 Mon Sep 17 00:00:00 2001 From: Audionut Date: Mon, 23 Dec 2024 19:35:46 +1000 Subject: [PATCH 738/741] Update console feedback --- src/prep.py | 5 +++-- src/uphelper.py | 7 ++++--- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/src/prep.py b/src/prep.py index 41e0220a1..142f4ba4e 100644 --- a/src/prep.py +++ b/src/prep.py @@ -780,9 +780,10 @@ async def process_tracker(tracker_name, meta): dupes = await ptp.search_existing(groupID, meta, disctype) if 'skipping' not in meta or meta['skipping'] is None: dupes = await common.filter_dupes(dupes, meta) - meta, is_dupe = helper.dupe_check(dupes, meta) + meta, is_dupe = helper.dupe_check(dupes, meta, tracker_name) if is_dupe: - console.print(f"[yellow]Tracker '{tracker_name}' has confirmed dupes.[/yellow]") + console.print(f"[red]Skipping upload on {tracker_name}[/red]") + print() tracker_status[tracker_name]['dupe'] = True elif meta['skipping']: tracker_status[tracker_name]['skipped'] = True diff --git a/src/uphelper.py b/src/uphelper.py index 0334dfde4..f3a07213f 100644 --- a/src/uphelper.py +++ b/src/uphelper.py @@ -6,7 +6,7 @@ class UploadHelper: - def dupe_check(self, dupes, meta): + def dupe_check(self, dupes, meta, tracker_name): if not dupes: console.print("[green]No dupes found") meta['upload'] = True @@ -15,12 +15,13 @@ def dupe_check(self, dupes, meta): console.print() dupe_text = "\n".join([d['name'] if isinstance(d, dict) else d for d in dupes]) console.print() - cli_ui.info_section(cli_ui.bold, "Check if these are actually dupes!") + cli_ui.info_section(cli_ui.bold, f"Check if these are actually dupes from {tracker_name}!") cli_ui.info(dupe_text) if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): if meta.get('dupe', False) is False: - upload = cli_ui.ask_yes_no("Upload Anyways?", default=False) + print() + upload = cli_ui.ask_yes_no(f"Upload to 
{tracker_name} anyway?", default=False)
            else:
                upload = True
        else:

From 2c677db8cd4b6209d794ee2f8b594d3a7e02378d Mon Sep 17 00:00:00 2001
From: Audionut
Date: Mon, 23 Dec 2024 21:50:20 +1000
Subject: [PATCH 739/741] BHD add more approved image hosts

This should allow these hosts when they are caught from other descriptions
---
 src/trackers/BHD.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py
index 717879b6a..d3a2361a2 100644
--- a/src/trackers/BHD.py
+++ b/src/trackers/BHD.py
@@ -49,9 +49,11 @@ async def upload_with_retry(self, meta, common, img_host_index=1):
            "ptpimg.me": "ptpimg",
            "pixhost.to": "pixhost",
            "imgbox.com": "imgbox",
+            "beyondhd.co": "bhd",
+            "imagebam.com": "bam",
        }

-        approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb', 'pixhost']
+        approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb', 'pixhost', 'bhd', 'bam']

        for image in meta['image_list']:
            raw_url = image['raw_url']

From 374b2c5f0197a87b3f3cf070aac8ad21a89f8c84 Mon Sep 17 00:00:00 2001
From: Audionut
Date: Mon, 23 Dec 2024 23:56:51 +1000
Subject: [PATCH 740/741] NBL prohibit discs

---
 src/trackers/NBL.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/trackers/NBL.py b/src/trackers/NBL.py
index 5eb39ea09..0f58e2fb7 100644
--- a/src/trackers/NBL.py
+++ b/src/trackers/NBL.py
@@ -84,6 +84,10 @@ async def search_existing(self, meta, disctype):
            console.print("[red]Only TV Is allowed at NBL")
            meta['skipping'] = "NBL"
            return
+        if meta.get('is_disc') is not None:
+            console.print('[bold red]This site does not allow raw discs')
+            meta['skipping'] = "NBL"
+            return
        dupes = []
        console.print("[yellow]Searching for existing torrents on NBL...")
        if int(meta.get('tvmaze_id', 0)) != 0:

From a1b1683f079f5936252f0706aaeddafa1e50279c Mon Sep 17 00:00:00 2001
From: Eric Lay
Date: Tue, 24 Dec 2024 10:08:13 -0600
Subject: [PATCH 741/741] ADD - HHD support (#220)

* Update trackersetup.py: added HHD support

* Create HHD.py: added HHD.py

* Fix resolution ids: updated to match the fixed resolution ids on the backend

* lint

---------

Co-authored-by: Audionut
---
 src/trackers/HHD.py | 214 ++++++++++++++++++++++++++++++++++++++++++++
 src/trackersetup.py |   5 +-
 2 files changed, 217 insertions(+), 2 deletions(-)
 create mode 100644 src/trackers/HHD.py

diff --git a/src/trackers/HHD.py b/src/trackers/HHD.py
new file mode 100644
index 000000000..67630ea9b
--- /dev/null
+++ b/src/trackers/HHD.py
@@ -0,0 +1,214 @@
+# -*- coding: utf-8 -*-
+# import discord
+import asyncio
+import requests
+from str2bool import str2bool
+import platform
+import bencodepy
+import os
+import glob
+
+from src.trackers.COMMON import COMMON
+from src.console import console
+
+
+class HHD():
+    def __init__(self, config):
+        self.config = config
+        self.tracker = 'HHD'
+        self.source_flag = 'HHD'
+        self.upload_url = 'https://homiehelpdesk.net/api/torrents/upload'
+        self.search_url = 'https://homiehelpdesk.net/api/torrents/filter'
+        self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]"
+        self.banned_groups = [
+            'aXXo', 'BONE', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'dAV1nci', 'd3g', 'DNL', 'FaNGDiNG0', 'GalaxyTV', 'HD2DVD', 'HDTime', 'iHYTECH', 'ION10',
+            'iPlanet', 'KiNGDOM', 'LAMA', 'MeGusta', 'mHD', 'mSD', 'NaNi', 'NhaNc3', 'nHD', 'nikt0', 'nSD', 'OFT', 'PRODJi', 'RARBG', 'Rifftrax', 'SANTi', 'SasukeducK',
+            'ShAaNiG', 'Sicario', 'STUTTERSHIT', 'TGALAXY', 'TORRENTGALAXY', 'TSP', 'TSPxL', 'ViSION', 'VXT', 'WAF', 'WKS', 'x0r', 'YAWNiX', 'YIFY',
'YTS', 'PSA'] + pass + + async def get_cat_id(self, category_name): + category_id = { + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '0') + return category_id + + async def get_type_id(self, type): + type_id = { + 'DISC': '1', + 'REMUX': '2', + 'ENCODE': '3', + 'WEBDL': '4', + 'WEBRIP': '5', + 'HDTV': '6', + }.get(type, '0') + return type_id + + async def get_res_id(self, resolution): + resolution_id = { + '4320p': '1', + '2160p': '2', + '1440p': '3', + '1080p': '3', + '1080i': '4', + '720p': '5', + '576p': '6', + '576i': '7', + '480p': '8', + '480i': '9', + 'Other': '10' + }.get(resolution, '10') + return resolution_id + + async def upload(self, meta, disctype): + common = COMMON(config=self.config) + await common.edit_torrent(meta, self.tracker, self.source_flag) + cat_id = await self.get_cat_id(meta['category']) + type_id = await self.get_type_id(meta['type']) + resolution_id = await self.get_res_id(meta['resolution']) + await common.unit3d_edit_desc(meta, self.tracker, self.signature) + region_id = await common.unit3d_region_ids(meta.get('region')) + distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + anon = 0 + else: + anon = 1 + + if meta['bdinfo'] is not None: + mi_dump = None + bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + else: + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + bd_dump = None + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') + files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") + data = { + 'name': meta['name'], + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, + } + # Internal + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: + if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + data['internal'] = 1 + + if region_id != 0: + data['region_id'] = region_id + if distributor_id != 0: + data['distributor_id'] = distributor_id + if meta.get('category') == "TV": + data['season_number'] = meta.get('season_int', '0') + data['episode_number'] = meta.get('episode_int', '0') + headers = { + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' + } + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + } + + if meta['debug'] is False: + response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + 
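# Editor's note at this step (annotation, not part of the patch): this is the
# usual UNIT3D upload shape seen across this repo's trackers -- the API key
# travels as the 'api_token' query parameter, the .torrent (plus an optional
# .nfo) goes as multipart 'files', and the metadata ids and flags go in the
# form 'data'. The bare except around response.json() below guards against
# the API returning a non-JSON body even when the upload itself succeeded.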
try:
+                console.print(response.json())
+            except Exception:
+                console.print("It may have uploaded, go check")
+                return
+        else:
+            console.print("[cyan]Request Data:")
+            console.print(data)
+        open_torrent.close()
+
+    async def search_existing(self, meta, disctype):
+        dupes = []
+        console.print("[yellow]Searching for existing torrents on HHD...")
+        params = {
+            'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(),
+            'tmdbId': meta['tmdb'],
+            'categories[]': await self.get_cat_id(meta['category']),
+            'types[]': await self.get_type_id(meta['type']),
+            'resolutions[]': await self.get_res_id(meta['resolution']),
+            'name': ""
+        }
+        if meta.get('edition', "") != "":
+            params['name'] = params['name'] + f" {meta['edition']}"
+        try:
+            response = requests.get(url=self.search_url, params=params)
+            response = response.json()
+            for each in response['data']:
+                result = [each][0]['attributes']['name']
+                # difference = SequenceMatcher(None, meta['clean_name'], result).ratio()
+                # if difference >= 0.05:
+                dupes.append(result)
+        except Exception:
+            console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect')
+            await asyncio.sleep(5)
+
+        return dupes
+
+    async def search_torrent_page(self, meta, disctype):
+        torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent"
+        Name = meta['name']
+        quoted_name = f'"{Name}"'
+
+        params = {
+            'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(),
+            'name': quoted_name
+        }
+
+        try:
+            response = requests.get(url=self.search_url, params=params)
+            response.raise_for_status()
+            response_data = response.json()
+
+            if response_data['data'] and isinstance(response_data['data'], list):
+                details_link = response_data['data'][0]['attributes'].get('details_link')
+
+                if details_link:
+                    with open(torrent_file_path, 'rb') as open_torrent:
+                        torrent_data = open_torrent.read()
+
+                    torrent = bencodepy.decode(torrent_data)
+                    torrent[b'comment'] = details_link.encode('utf-8')
+                    updated_torrent_data = bencodepy.encode(torrent)
+
+                    with open(torrent_file_path, 'wb') as updated_torrent_file:
+                        updated_torrent_file.write(updated_torrent_data)
+
+                    return details_link
+                else:
+                    return None
+            else:
+                return None
+
+        except requests.exceptions.RequestException as e:
+            print(f"An error occurred during the request: {e}")
+            return None
diff --git a/src/trackersetup.py b/src/trackersetup.py
index 8357a0a65..1f4787993 100644
--- a/src/trackersetup.py
+++ b/src/trackersetup.py
@@ -39,6 +39,7 @@
 from src.trackers.ULCX import ULCX
 from src.trackers.SPD import SPD
 from src.trackers.YOINK import YOINK
+from src.trackers.HHD import HHD
 import cli_ui
 from src.console import console

@@ -91,7 +92,7 @@ def check_banned_group(self, tracker, banned_group_list, meta):
 tracker_class_map = {
     'ACM': ACM, 'AITHER': AITHER, 'AL': AL, 'ANT': ANT, 'BHD': BHD, 'BHDTV': BHDTV, 'BLU': BLU, 'CBR': CBR,
-    'FNP': FNP, 'FL': FL, 'HDB': HDB, 'HDT': HDT, 'HP': HP, 'HUNO': HUNO, 'JPTV': JPTV, 'LCD': LCD,
+    'FNP': FNP, 'FL': FL, 'HDB': HDB, 'HDT': HDT, 'HHD': HHD, 'HP': HP, 'HUNO': HUNO, 'JPTV': JPTV, 'LCD': LCD,
     'LST': LST, 'LT': LT, 'MTV': MTV, 'NBL': NBL, 'OE': OE, 'OTW': OTW, 'PSS': PSS, 'PTP': PTP, 'PTER': PTER,
     'R4E': R4E, 'RF': RF, 'RTF': RTF, 'SHRI': SHRI, 'SN': SN, 'SPD': SPD, 'STC': STC, 'STT': STT, 'THR': THR,
     'TIK': TIK, 'TL': TL, 'TVC': TVC, 'TTG': TTG, 'ULCX': ULCX, 'UTP': UTP, 'YOINK': YOINK,
@@ -105,7 +106,7 @@ def check_banned_group(self, tracker, banned_group_list, meta):
 }
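# --- Editor's note (annotation, not part of the patch) ---
# Wiring a new UNIT3D-style tracker into this codebase touches three spots
# in src/trackersetup.py, exactly as the HHD hunks here do; 'XYZ' below is a
# hypothetical tracker name used purely for illustration.
#
#     from src.trackers.XYZ import XYZ   # 1. import the tracker class
#     tracker_class_map['XYZ'] = XYZ     # 2. map the CLI/config name to it
#     api_trackers.add('XYZ')            # 3. list it among the API trackers
#
# A matching config block under config['TRACKERS']['XYZ'] supplies the
# 'api_key' and 'announce_url', following the entries in data/example-config.py.
# --- end editor's note ---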
api_trackers = { - 'ACM', 'AITHER', 'AL', 'BHD', 'BLU', 'CBR', 'FNP', 'HUNO', 'JPTV', 'LCD', 'LST', 'LT', + 'ACM', 'AITHER', 'AL', 'BHD', 'BLU', 'CBR', 'FNP', 'HHD', 'HUNO', 'JPTV', 'LCD', 'LST', 'LT', 'OE', 'OTW', 'PSS', 'RF', 'R4E', 'SHRI', 'STC', 'STT', 'TIK', 'ULCX', 'UTP', 'YOINK' }
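A quick usage sketch (editor's addition, not part of the patch series): the new HHD class keeps the same async id-mapping helpers as the other UNIT3D trackers, so they can be exercised in isolation. The minimal config dict below is an assumption for illustration; in the application it comes from the parsed data/config.py, and the snippet is assumed to run from the repository root.

    import asyncio
    from src.trackers.HHD import HHD

    # placeholder config; only the keys HHD.__init__ stores are provided
    config = {'TRACKERS': {'HHD': {'api_key': 'xxx', 'announce_url': 'https://homiehelpdesk.net/announce/xxx'}}}
    hhd = HHD(config)

    async def main():
        print(await hhd.get_cat_id('MOVIE'))   # -> '1'
        print(await hhd.get_type_id('WEBDL'))  # -> '4'
        print(await hhd.get_res_id('2160p'))   # -> '2'

    asyncio.run(main())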