Refactor filelist description handling
This should be far more robust in handling varying config settings, but it's not well tested.
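The refactor splits description building into two passes: a first pass that generates and uploads any missing screenshots (caching the URLs in meta), and a second pass that writes the BBCode description from those cached URLs. A minimal sketch of that pattern, with hypothetical stand-ins (make_screens, upload_screens, mediainfo_bbcode) for the real prep and parser calls:

def make_screens(path):
    # Stub standing in for prep.screenshots: pretend one screenshot per file.
    return [f"{path}.png"]

def upload_screens(files):
    # Stub standing in for prep.upload_screens: pretend the host returned URLs.
    return [{"web_url": f"https://img.example/{f}", "raw_url": f"https://img.example/raw/{f}"} for f in files]

def mediainfo_bbcode(path):
    # Stub standing in for the MediaInfo parse/format step.
    return f"Video: H.264 / Audio: AAC ({path})"

def build_description(filelist, meta, file_limit=5, char_limit=5000):
    # Pass 1: ensure every file after the first has cached image URLs.
    for i, file in enumerate(filelist[1:], start=1):
        key = f"new_images_file_{i}"
        if not meta.get(key):
            meta[key] = upload_screens(make_screens(file))

    # Pass 2: write BBCode from the cache, spoilering files past the limit
    # and falling back to bare filenames once the character budget is spent.
    out, count, spoiler_open = [], 0, False
    for i, file in enumerate(filelist):
        if i >= file_limit and not spoiler_open:
            out.append("[center][spoiler=Other files]\n")
            spoiler_open = True
        if i > 0 and count < char_limit:
            line = f"[center][spoiler={file}]{mediainfo_bbcode(file)}[/spoiler][/center]\n"
        else:
            line = f"[center]{file}\n[/center]\n"
        out.append(line)
        count += len(line)
    if spoiler_open:
        out.append("[/spoiler][/center]\n")
    return "".join(out)

print(build_description(["a.mkv", "b.mkv"], {}))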
Audionut committed Oct 31, 2024
1 parent 512601d commit 0d85349
Showing 1 changed file with 72 additions and 134 deletions.
206 changes: 72 additions & 134 deletions src/trackers/COMMON.py
@@ -210,158 +210,94 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, desc
max_char_limit = char_limit  # Character limit
other_files_spoiler_open = False  # Track if "Other files" spoiler has been opened

# First Pass: Create and Upload Images for Each File
for i, file in enumerate(filelist):
    if i > 0:
        new_images_key = f'new_images_file_{i}'
        if new_images_key not in meta or not meta[new_images_key]:
            # Proceed with image generation if not already present
            meta[new_images_key] = []
            new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png")

            # If no screenshots exist, create them
            if not new_screens:
                if meta['debug']:
                    console.print(f"[yellow]No existing screenshots for {new_images_key}; generating new ones.")
                s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, multi_screens + 1, True, None))
                s.start()
                while s.is_alive():
                    await asyncio.sleep(1)
                new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png")

            # Upload generated screenshots
            if new_screens:
                uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]})
                meta[new_images_key] = []
                for img in uploaded_images:
                    meta[new_images_key].append({
                        'img_url': img['img_url'],
                        'raw_url': img['raw_url'],
                        'web_url': img['web_url']
                    })

# Save updated meta
meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json"
with open(meta_filename, 'w') as f:
    json.dump(meta, f, indent=4)

# Second Pass: Process MediaInfo and Write Descriptions
if len(filelist) > 1:
    for i, file in enumerate(filelist):
        # Extract filename directly from the file path
        filename = os.path.splitext(os.path.basename(file.strip()))[0]

        # If we are beyond the file limit, add all further files in a spoiler
        if i >= file_limit:
            if not other_files_spoiler_open:
                descfile.write("[center][spoiler=Other files]\n")
                char_count += len("[center][spoiler=Other files]\n")
                other_files_spoiler_open = True

        # Write filename in BBCode format with MediaInfo in spoiler if not the first file
        if i > 0 and char_count < max_char_limit:
            mi_dump = MediaInfo.parse(file, output="STRING", full=False, mediainfo_options={'inform_version': '1'})
            parsed_mediainfo = self.parser.parse_mediainfo(mi_dump)
            formatted_bbcode = self.parser.format_bbcode(parsed_mediainfo)
            descfile.write(f"[center][spoiler={filename}]{formatted_bbcode}[/spoiler][/center]\n")
            char_count += len(f"[center][spoiler={filename}]{formatted_bbcode}[/spoiler][/center]\n")
        else:
            descfile.write(f"[center]{filename}\n[/center]\n")
            char_count += len(f"[center]{filename}\n[/center]\n")

        # Write images if they exist
        new_images_key = f'new_images_file_{i}'
        if i == 0:  # For the first file, use 'image_list' key
            images = meta['image_list']
            if images:
                descfile.write("[center]")
                char_count += len("[center]")
                if file_limit == 1:
                    multi_screens = len(images)  # Use all images if only one file
                for img_index in range(min(multi_screens, len(images))):
                    web_url = images[img_index]['web_url']
                    raw_url = images[img_index]['raw_url']
                    image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]"
                    descfile.write(image_str)
                    char_count += len(image_str)
                descfile.write("[/center]\n\n")
                char_count += len("[/center]\n\n")
        elif new_images_key in meta and meta[new_images_key]:
            descfile.write("[center]")
            char_count += len("[center]")
            for img in meta[new_images_key]:
                web_url = img['web_url']
                raw_url = img['raw_url']
                image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]"
                descfile.write(image_str)
                char_count += len(image_str)
            descfile.write("[/center]\n\n")
            char_count += len("[/center]\n\n")

if other_files_spoiler_open:
    descfile.write("[/spoiler][/center]\n")
@@ -779,6 +715,7 @@ class MediaInfoParser:
"belarusian": "https://ptpimg.me/iushg1.png",
"bengali": "https://ptpimg.me/jq996n.png",
"bosnian": "https://ptpimg.me/19t9rv.png",
"brazilian": "https://ptpimg.me/p8sgla.png",
"bulgarian": "https://ptpimg.me/un9dc6.png",
"catalan": "https://ptpimg.me/v4h5bf.png",
"chinese": "https://ptpimg.me/ea3yv3.png",
Expand Down Expand Up @@ -814,6 +751,7 @@ class MediaInfoParser:
"kurdish": "https://ptpimg.me/g290wo.png",
"kyrgyz": "https://ptpimg.me/336unh.png",
"lao": "https://ptpimg.me/n3nan1.png",
"latin american": "https://ptpimg.me/11350x.png",
"latvian": "https://ptpimg.me/3x2y1b.png",
"lithuanian": "https://ptpimg.me/b444z8.png",
"luxembourgish": "https://ptpimg.me/52x189.png",
