diff --git a/.flake8 b/.flake8
new file mode 100644
index 000000000..9270bb728
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,2 @@
+[flake8]
+max-line-length = 6000
\ No newline at end of file
diff --git a/.github/workflows/.flake8 b/.github/workflows/.flake8
new file mode 100644
index 000000000..229297b69
--- /dev/null
+++ b/.github/workflows/.flake8
@@ -0,0 +1,2 @@
+[flake8]
+max-line-length = 6000
diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml
index 8a5d04241..8312fea9d 100644
--- a/.github/workflows/docker-image.yml
+++ b/.github/workflows/docker-image.yml
@@ -1,8 +1,10 @@
-name: Create and publish a Docker image
+name: Create and publish Docker images
 
 on:
-  push:
-    branches: ['master']
+  release:
+    types:
+      - published
+  workflow_dispatch:
 
 env:
   REGISTRY: ghcr.io
@@ -40,18 +42,31 @@ jobs:
         run: |
           REPO_NAME=${{ env.IMAGE_NAME }}
          echo "LOWER_CASE_REPO_NAME=${REPO_NAME,,}" >> $GITHUB_ENV
-
-      - name: Get short commit id
-        id: get_short_commit_id
+
+      - name: Get release version or branch name
+        id: get_version_or_branch
         run: |
-          echo "SHA_SHORT=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
-
+          if [ "${{ github.event_name }}" == "release" ]; then
+            RELEASE_VERSION=${{ github.event.release.tag_name }}
+            if [ -z "$RELEASE_VERSION" ]; then
+              echo "RELEASE_VERSION is empty. Please ensure a release tag is provided."
+              exit 1
+            fi
+            echo "VERSION=${RELEASE_VERSION}" >> $GITHUB_ENV
+          elif [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
+            BRANCH_NAME=${{ github.ref_name }}
+            echo "VERSION=${BRANCH_NAME}" >> $GITHUB_ENV
+          else
+            echo "Unsupported event: ${{ github.event_name }}"
+            exit 1
+          fi
+
       - name: Build and push Docker image
         uses: docker/build-push-action@v3
         with:
           context: .
           push: true
-          tags: ${{ steps.meta.outputs.tags }}, ${{ env.REGISTRY }}/${{ env.LOWER_CASE_REPO_NAME }}:${{ env.SHA_SHORT }}
+          tags: ${{ env.REGISTRY }}/${{ env.LOWER_CASE_REPO_NAME }}:${{ env.VERSION }}, ${{ steps.meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
           cache-from: type=gha
-          cache-to: type=gha,mode=max
+          cache-to: type=gha,mode=max
\ No newline at end of file
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
new file mode 100644
index 000000000..85d13a19a
--- /dev/null
+++ b/.github/workflows/lint.yml
@@ -0,0 +1,33 @@
+name: Lint
+
+on:
+  push:
+    branches:
+      - develop
+      - master
+  pull_request:
+    branches:
+      - master
+      - develop
+  workflow_dispatch:
+
+jobs:
+  lint:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v3
+
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.x'
+
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install flake8
+
+      - name: Run linter
+        run: flake8 .
\ No newline at end of file
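The new `lint.yml` gate runs flake8 over the repository on pushes and pull requests to `develop`/`master`. As a minimal illustration (not part of the diff itself), these are the kinds of default flake8 findings the rest of this changeset cleans up in `cogs/commands.py` — E711 `== None` comparisons and E722 bare `except` clauses:

```python
# Illustrative only; mirrors fixes applied in cogs/commands.py below.
value = None

# Before (flagged as E711, "comparison to None should be 'if cond is None:'"):
#   if value == None: ...
if value is None:  # identity check, the idiom flake8 expects
    pass

try:
    risky = 1 / 0
except Exception:  # E722 fixed: name an exception class instead of a bare "except:"
    risky = None
```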
diff --git a/.gitignore b/.gitignore
index 38ae89b4c..4943e5364 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,4 +8,5 @@ data/cookies/*.pickle
 .vscode/
 __pycache__/
 tmp/*
-.wdm/
\ No newline at end of file
+.wdm/
+.DS_Store
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
index 4c3e3ccba..0dfb7af08 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,19 +1,35 @@
-FROM alpine:latest
+FROM python:3.12
 
-# add mono repo and mono
-RUN apk add --no-cache mono --repository http://dl-cdn.alpinelinux.org/alpine/edge/testing
+# Update the package list and install system dependencies including mono
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    ffmpeg \
+    mediainfo=23.04-1 \
+    git \
+    g++ \
+    cargo \
+    mktorrent \
+    rustc \
+    mono-complete \
+    nano && \
+    rm -rf /var/lib/apt/lists/*
 
-# install requirements
-RUN apk add --no-cache --upgrade ffmpeg mediainfo python3 git py3-pip python3-dev g++ cargo mktorrent rust
-RUN pip3 install wheel
+# Set up a virtual environment to isolate our Python dependencies
+RUN python -m venv /venv
+ENV PATH="/venv/bin:$PATH"
 
-WORKDIR Upload-Assistant
+# Install wheel and other Python dependencies
+RUN pip install --upgrade pip wheel
 
-# install reqs
+# Set the working directory in the container
+WORKDIR /Upload-Assistant
+
+# Copy the Python requirements file and install Python dependencies
 COPY requirements.txt .
-RUN pip3 install -r requirements.txt
+RUN pip install -r requirements.txt
 
-# copy everything
+# Copy the rest of the application's code
 COPY . .
 
-ENTRYPOINT ["python3", "/Upload-Assistant/upload.py"]
\ No newline at end of file
+# Set the entry point for the container
+ENTRYPOINT ["python", "/Upload-Assistant/upload.py"]
\ No newline at end of file
diff --git a/README.md b/README.md
index 596a3303a..57516e115 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,7 @@
+[![Create and publish a Docker image](https://github.com/Audionut/Upload-Assistant/actions/workflows/docker-image.yml/badge.svg?branch=master)](https://github.com/Audionut/Upload-Assistant/actions/workflows/docker-image.yml)
+
+Discord support https://discord.gg/QHHAZu7e2A
+
 # L4G's Upload Assistant
 
 A simple tool to take the work out of uploading.
@@ -6,42 +10,42 @@ A simple tool to take the work out of uploading.
 - Generates and Parses MediaInfo/BDInfo.
 - Generates and Uploads screenshots.
 - Uses srrdb to fix scene filenames
-  - Can grab descriptions from PTP (automatically on filename match or arg) / BLU (arg)
+  - Can grab descriptions from PTP/BLU/Aither/LST/OE (with config option automatically on filename match, or using arg)
+  - Can strip existing screenshots from descriptions to skip screenshot generation and uploading
 - Obtains TMDb/IMDb/MAL identifiers.
 - Converts absolute to season episode numbering for Anime
 - Generates custom .torrents without useless top level folders/nfos.
 - Can re-use existing torrents instead of hashing new
 - Generates proper name for your upload using Mediainfo/BDInfo and TMDb/IMDb conforming to site rules
 - Checks for existing releases already on site
-  - Uploads to PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/STT/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN
+  - Uploads to ACM/Aither/AL/ANT/BHD/BHDTV/BLU/CBR/FNP/FL/HDB/HDT/HP/HUNO/JPTV/LCD/LST/LT/MTV/NBL/OE/OTW/PSS/PTP/PTER/PTT/RF/R4E(limited)/RTF/SHRI/SN/SPD/STC/STT/TLC/THR/TL/TVC/TTG/ULCX/UTP/YOINK
 - Adds to your client with fast resume, seeding instantly (rtorrent/qbittorrent/deluge/watch folder)
 - ALL WITH MINIMAL INPUT!
 - Currently works with .mkv/.mp4/Blu-ray/DVD/HD-DVDs
-
+Built with updated BDInfoCLI from https://github.com/rokibhasansagar/BDInfoCLI-ng
 
 ## Coming Soon:
 - Features
-
 ## **Setup:**
-- **REQUIRES AT LEAST PYTHON 3.7 AND PIP3**
+- **REQUIRES AT LEAST PYTHON 3.12 AND PIP3**
 - Needs [mono](https://www.mono-project.com/) on linux systems for BDInfo
 - Also needs MediaInfo and ffmpeg installed on your system
   - On Windows systems, ffmpeg must be added to PATH (https://windowsloop.com/install-ffmpeg-windows-10/)
   - On linux systems, get it from your favorite package manager
-- Clone the repo to your system `git clone https://github.com/L4GSP1KE/Upload-Assistant.git`
+- Clone the repo to your system `git clone https://github.com/Audionut/Upload-Assistant.git`
  - or download a zip of the source
 - Copy and Rename `data/example-config.py` to `data/config.py`
-- Edit `config.py` to use your information (more detailed information in the [wiki](https://github.com/L4GSP1KE/Upload-Assistant/wiki))
+- Edit `config.py` to use your information (more detailed information in the [wiki](https://github.com/Audionut/Upload-Assistant/wiki))
   - tmdb_api (v3) key can be obtained from https://developers.themoviedb.org/3/getting-started/introduction
   - image host api keys can be obtained from their respective sites
 - Install necessary python modules `pip3 install --user -U -r requirements.txt`
 
-**Additional Resources are found in the [wiki](https://github.com/L4GSP1KE/Upload-Assistant/wiki)**
+**Additional Resources are found in the [wiki](https://github.com/Audionut/Upload-Assistant/wiki)**
 
 Feel free to contact me if you need help, I'm not that hard to find.
 
@@ -49,10 +53,11 @@
 - To update first navigate into the Upload-Assistant directory: `cd Upload-Assistant`
 - Run a `git pull` to grab latest updates
 - Run `python3 -m pip install --user -U -r requirements.txt` to ensure dependencies are up to date
+- Or download a fresh zip and overwrite existing files
 
 ## **CLI Usage:**
 `python3 upload.py /downloads/path/to/content --args`
 Args are OPTIONAL, for a list of acceptable args, pass `--help`
 
 ## **Docker Usage:**
-  Visit our wonderful [docker usage wiki page](https://github.com/L4GSP1KE/Upload-Assistant/wiki/Docker)
+  Visit our wonderful [docker usage wiki page](https://github.com/Audionut/Upload-Assistant/wiki/Docker)
diff --git a/bin/BDInfo/BDInfo.exe b/bin/BDInfo/BDInfo.exe
index e2462867e..82e0a6f86 100644
Binary files a/bin/BDInfo/BDInfo.exe and b/bin/BDInfo/BDInfo.exe differ
diff --git a/bin/BDInfo/System.Resources.Extensions.dll b/bin/BDInfo/System.Resources.Extensions.dll
new file mode 100644
index 000000000..939c9f582
Binary files /dev/null and b/bin/BDInfo/System.Resources.Extensions.dll differ
diff --git a/cogs/commands.py b/cogs/commands.py
index 52efe6ac2..c40d66123 100644
--- a/cogs/commands.py
+++ b/cogs/commands.py
@@ -1,4 +1,3 @@
-from discord.ext.commands.errors import CommandInvokeError
 from src.prep import Prep
 from src.args import Args
 from src.clients import Clients
@@ -8,27 +7,25 @@
 from src.trackers.AITHER import AITHER
 from src.trackers.STC import STC
 from src.trackers.LCD import LCD
-from data.config import config
+from src.trackers.CBR import CBR
+from data.config import config  # type: ignore
 
-import discord
-from discord.ext import commands
+import discord  # type: ignore
+from discord.ext import commands  # type: ignore
 import os
 from datetime import datetime
 import asyncio
 import json
-import shutil
 import multiprocessing
 from pathlib import Path
 from glob import glob
 import argparse
 
-
 class Commands(commands.Cog):
     def __init__(self, bot):
         self.bot = bot
-
     @commands.Cog.listener()
     async def on_guild_join(self, guild):
         """
@@ -45,7 +42,7 @@ async def upload(self, ctx, path, *args, message_id=0, search_args=tuple()):
             return
         parser = Args(config)
-        if path == None:
+        if path is None:
             await ctx.send("Missing Path")
             return
         elif path.lower() == "-h":
@@ -60,17 +57,17 @@ async def upload(self, ctx, path, *args, message_id=0, search_args=tuple()):
         try:
             args = (meta['path'],) + args + search_args
             meta, help, before_args = parser.parse(args, meta)
-        except SystemExit as error:
+        except SystemExit:
             await ctx.send(f"Invalid argument detected, use `{config['DISCORD']['command_prefix']}args` for list of valid args")
             return
-        if meta['imghost'] == None:
+        if meta['imghost'] is None:
             meta['imghost'] = config['DEFAULT']['img_host_1']
         # if not meta['unattended']:
         #     ua = config['DEFAULT'].get('auto_mode', False)
         #     if str(ua).lower() == "true":
         #         meta['unattended'] = True
         prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config)
-        preparing_embed = discord.Embed(title=f"Preparing to upload:", description=f"```{path}```", color=0xffff00)
+        preparing_embed = discord.Embed(title="Preparing to upload:", description=f"```{path}```", color=0xffff00)
         if message_id == 0:
             message = await ctx.send(embed=preparing_embed)
             meta['embed_msg_id'] = message.id
@@ -86,7 +83,6 @@ async def upload(self, ctx, path, *args, message_id=0, search_args=tuple()):
         else:
             await ctx.send("Invalid Path")
 
-
     @commands.command()
     async def args(self, ctx):
         f"""
@@ -102,57 +98,7 @@ async def args(self, ctx):
             await ctx.send(f"```{help[1991:]}```")
         else:
             await ctx.send(help.format_help())
-        # await ctx.send("""
-        # ```Optional arguments:
-
-        #   -s, --screens [SCREENS]
-        #     Number of screenshots
-        #   -c, --category [{movie,tv,fanres}]
-        #     Category
-        #   -t, --type [{disc,remux,encode,webdl,web-dl,webrip,hdtv}]
-        #     Type
-        #   -res, --resolution [{2160p,1080p,1080i,720p,576p,576i,480p,480i,8640p,4320p,other}]
-        #     Resolution
-        #   -tmdb, --tmdb [TMDB]
-        #     TMDb ID
-        #   -g, --tag [TAG]
-        #     Group Tag
-        #   -serv, --service [SERVICE]
-        #     Streaming Service
-        #   -edition, --edition [EDITION]
-        #     Edition
-        #   -d, --desc [DESC]
-        #     Custom Description (string)
-        #   -nfo, --nfo
-        #     Use .nfo in directory for description
-        #   -k, --keywords [KEYWORDS]
-        #     Add comma seperated keywords e.g. 'keyword, keyword2, etc'
-        #   -reg, --region [REGION]
-        #     Region for discs
-        #   -a, --anon   Upload anonymously
-        #   -st, --stream   Stream Optimized Upload
-        #   -debug, --debug   Debug Mode```""")
-
-
-        # @commands.group(invoke_without_command=True)
-        # async def foo(self, ctx):
-        #     """
-        #     check out my subcommands!
-        #     """
-        #     await ctx.send('check out my subcommands!')
-
-        # @foo.command(aliases=['an_alias'])
-        # async def bar(self, ctx):
-        #     """
-        #     I have an alias!, I also belong to command 'foo'
-        #     """
-        #     await ctx.send('foo bar!')
-
-
-
-
-
+
     @commands.command()
     async def edit(self, ctx, uuid=None, *args):
         """
@@ -160,7 +106,7 @@ async def edit(self, ctx, uuid=None, *args):
         """
         if ctx.channel.id != int(config['DISCORD']['discord_channel_id']):
             return
-        if uuid == None:
+        if uuid is None:
             await ctx.send("Missing ID, please try again using the ID in the footer")
         parser = Args(config)
         base_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
@@ -171,7 +117,7 @@ async def edit(self, ctx, uuid=None, *args):
         except FileNotFoundError:
             await ctx.send("ID not found, please try again using the ID in the footer")
             return
-        prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config) 
+        prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config)
         try:
             args = (meta['path'],) + args
             meta, help, before_args = parser.parse(args, meta)
@@ -182,15 +128,10 @@ async def edit(self, ctx, uuid=None, *args):
             new_msg = await msg.channel.send(f"Editing {meta['uuid']}")
             meta['embed_msg_id'] = new_msg.id
             meta['edit'] = True
-            meta = await prep.gather_prep(meta=meta, mode="discord") 
+            meta = await prep.gather_prep(meta=meta, mode="discord")
             meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await prep.get_name(meta)
             await self.send_embed_and_upload(ctx, meta)
-
-
-
-
-
     @commands.group(invoke_without_command=True)
     async def search(self, ctx, *, args=None):
         """
@@ -205,14 +146,14 @@ async def search(self, ctx, *, args=None):
                 args = args.replace(search_terms, '')
                 while args.startswith(" "):
                     args = args[1:]
-            except SystemExit as error:
+            except SystemExit:
                 await ctx.send(f"Invalid argument detected, use `{config['DISCORD']['command_prefix']}args` for list of valid args")
                 return
         if ctx.channel.id != int(config['DISCORD']['discord_channel_id']):
             return
         search = Search(config=config)
-        if search_terms == None:
+        if search_terms is None:
             await ctx.send("Missing search term(s)")
             return
         files_total = await search.searchFile(search_terms)
@@ -233,14 +174,12 @@ async def search(self, ctx, *, args=None):
             message = await ctx.send(embed=embed)
             await message.add_reaction(config['DISCORD']['discord_emojis']['UPLOAD'])
             channel = message.channel
-
             def check(reaction, user):
                 if reaction.message.id == message.id:
-                    if str(user.id) == config['DISCORD']['admin_id']: 
+                    if str(user.id) == config['DISCORD']['admin_id']:
                         if str(reaction.emoji) == config['DISCORD']['discord_emojis']['UPLOAD']:
                             return reaction
-
             try:
                 await self.bot.wait_for("reaction_add", timeout=120, check=check)
@@ -249,8 +188,6 @@ def check(reaction, user):
             else:
                 await self.upload(ctx, files_total[0], search_args=tuple(args.split(" ")), message_id=message.id)
 
-
-
     @search.command()
     async def dir(self, ctx, *, args=None):
         """
@@ -265,14 +202,14 @@ async def dir(self, ctx, *, args=None):
                 args = args.replace(search_terms, '')
                 while args.startswith(" "):
                     args = args[1:]
-            except SystemExit as error:
+            except SystemExit:
                 await ctx.send(f"Invalid argument detected, use `{config['DISCORD']['command_prefix']}args` for list of valid args")
                 return
         if ctx.channel.id != int(config['DISCORD']['discord_channel_id']):
             return
         search = Search(config=config)
-        if search_terms == None:
+        if search_terms is None:
             await ctx.send("Missing search term(s)")
             return
         folders_total = await search.searchFolder(search_terms)
@@ -294,13 +231,11 @@ async def dir(self, ctx, *, args=None):
             await message.add_reaction(config['DISCORD']['discord_emojis']['UPLOAD'])
             channel = message.channel
-
             def check(reaction, user):
                 if reaction.message.id == message.id:
-                    if str(user.id) == config['DISCORD']['admin_id']: 
+                    if str(user.id) == config['DISCORD']['admin_id']:
                         if str(reaction.emoji) == config['DISCORD']['discord_emojis']['UPLOAD']:
                             return reaction
-
             try:
                 await self.bot.wait_for("reaction_add", timeout=120, check=check)
@@ -310,39 +245,31 @@ def check(reaction, user):
                 await self.upload(ctx, path=folders_total[0], search_args=tuple(args.split(" ")), message_id=message.id)
             # await ctx.send(folders_total)
             return
-
-
-
-
-
-
-
-
-    async def send_embed_and_upload(self,ctx,meta):
+
+    async def send_embed_and_upload(self, ctx, meta):
         prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config)
         meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await prep.get_name(meta)
-
-        if meta.get('uploaded_screens', False) == False:
+
+        if meta.get('uploaded_screens', False) is False:
             if meta.get('embed_msg_id', '0') != '0':
                 message = await ctx.fetch_message(meta['embed_msg_id'])
                 await message.edit(embed=discord.Embed(title="Uploading Screenshots", color=0xffff00))
             else:
                 message = await ctx.send(embed=discord.Embed(title="Uploading Screenshots", color=0xffff00))
                 meta['embed_msg_id'] = message.id
-
+
             channel = message.channel.id
             return_dict = multiprocessing.Manager().dict()
-            u = multiprocessing.Process(target = prep.upload_screens, args=(meta, meta['screens'], 1, 0, meta['screens'], [], return_dict))
+            u = multiprocessing.Process(target=prep.upload_screens, args=(meta, meta['screens'], 1, 0, meta['screens'], [], return_dict))
             u.start()
-            while u.is_alive() == True:
+            while u.is_alive() is True:
                 await asyncio.sleep(3)
             meta['image_list'] = return_dict['image_list']
             if meta['debug']:
                 print(meta['image_list'])
             meta['uploaded_screens'] = True
-        #Create base .torrent
-
+        # Create base .torrent
         if len(glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent")) == 0:
             if meta.get('embed_msg_id', '0') != '0':
                 message = await ctx.fetch_message(int(meta['embed_msg_id']))
@@ -351,15 +278,15 @@ async def send_embed_and_upload(self,ctx,meta):
                 message = await ctx.send(embed=discord.Embed(title="Creating .torrent", color=0xffff00))
                 meta['embed_msg_id'] = message.id
                 channel = message.channel
-            if meta['nohash'] == False:
-                if meta.get('torrenthash', None) != None:
-                    reuse_torrent = await client.find_existing_torrent(meta)
-                    if reuse_torrent != None:
+            if meta['nohash'] is False:
+                if meta.get('torrenthash', None) is not None:
+                    reuse_torrent = await client.find_existing_torrent(meta)  # noqa F821
+                    if reuse_torrent is not None:
                         prep.create_base_from_existing_torrent(reuse_torrent, meta['base_dir'], meta['uuid'])
 
-                p = multiprocessing.Process(target = prep.create_torrent, args=(meta, Path(meta['path'])))
+                p = multiprocessing.Process(target=prep.create_torrent, args=(meta, Path(meta['path'])))
                 p.start()
-                while p.is_alive() == True:
+                while p.is_alive() is True:
                     await asyncio.sleep(5)
 
             if int(meta.get('randomized', 0)) >= 1:
@@ -367,8 +294,7 @@ async def send_embed_and_upload(self,ctx,meta):
         else:
             meta['client'] = 'none'
 
-
-        #Format for embed
+        # Format for embed
         if meta['tag'] == "":
             tag = ""
         else:
             tag = f" / {meta['tag'][1:]}"
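The screenshot-upload and torrent-creation hunks above share one pattern: push the blocking work into a `multiprocessing.Process`, then poll `is_alive()` with `await asyncio.sleep(...)` so the Discord event loop keeps servicing other commands. A self-contained sketch of that pattern (function names here are illustrative, not from the codebase):

```python
import asyncio
import multiprocessing


def heavy_job(return_dict):
    # Stand-in for prep.upload_screens / prep.create_torrent: CPU- or
    # IO-bound work that must not block the bot's event loop.
    return_dict['image_list'] = ['screen1.png', 'screen2.png']


async def run_heavy_job():
    return_dict = multiprocessing.Manager().dict()
    p = multiprocessing.Process(target=heavy_job, args=(return_dict,))
    p.start()
    while p.is_alive():
        await asyncio.sleep(3)  # yield instead of p.join(), keeping the loop responsive
    return dict(return_dict)


if __name__ == '__main__':
    print(asyncio.run(run_heavy_job()))
```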
url=f"https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}", description=meta['overview'], color=0x0080ff, timestamp=datetime.utcnow()) + embed = discord.Embed( + title=f"Upload: {meta['title']}", + url=f"https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}", + description=meta['overview'], + color=0x0080ff, + timestamp=datetime.utcnow() + ) embed.add_field(name="Links", value=f"[TMDB](https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}){imdb}{tvdb}") embed.add_field(name=f"{res} / {meta['type']}{tag}", value=f"```{meta['name']}```", inline=False) if missing != []: - embed.add_field(name=f"POTENTIALLY MISSING INFORMATION:", value="\n".join(missing), inline=False) + embed.add_field(name="POTENTIALLY MISSING INFORMATION:", value="\n".join(missing), inline=False) embed.set_thumbnail(url=f"https://image.tmdb.org/t/p/original{meta['poster']}") embed.set_footer(text=meta['uuid']) - embed.set_author(name="L4G's Upload Assistant", url="https://github.com/L4GSP1KE/Upload-Assistant", icon_url="https://images2.imgbox.com/6e/da/dXfdgNYs_o.png") - + embed.set_author(name="L4G's Upload Assistant", url="https://github.com/Audionut/Upload-Assistant", icon_url="https://images2.imgbox.com/6e/da/dXfdgNYs_o.png") + message = await ctx.fetch_message(meta['embed_msg_id']) await message.edit(embed=embed) - if meta.get('trackers', None) != None: + if meta.get('trackers', None) is not None: trackers = meta['trackers'] else: trackers = config['TRACKERS']['default_trackers'] @@ -419,21 +351,24 @@ async def send_embed_and_upload(self,ctx,meta): await asyncio.sleep(0.3) if "LCD" in each.replace(' ', ''): await message.add_reaction(config['DISCORD']['discord_emojis']['LCD']) - await asyncio.sleep(0.3) + await asyncio.sleep(0.3) + if "CBR" in each.replace(' ', ''): + await message.add_reaction(config['DISCORD']['discord_emojis']['CBR']) + await asyncio.sleep(0.3) await message.add_reaction(config['DISCORD']['discord_emojis']['MANUAL']) await asyncio.sleep(0.3) await message.add_reaction(config['DISCORD']['discord_emojis']['CANCEL']) await asyncio.sleep(0.3) await message.add_reaction(config['DISCORD']['discord_emojis']['UPLOAD']) - #Save meta to json - with open (f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: + # Save meta to json + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: json.dump(meta, f, indent=4) f.close() - + def check(reaction, user): if reaction.message.id == meta['embed_msg_id']: - if str(user.id) == config['DISCORD']['admin_id']: + if str(user.id) == config['DISCORD']['admin_id']: if str(reaction.emoji) == config['DISCORD']['discord_emojis']['UPLOAD']: return reaction if str(reaction.emoji) == config['DISCORD']['discord_emojis']['CANCEL']: @@ -451,7 +386,7 @@ def check(reaction, user): await msg.clear_reactions() await msg.edit(embed=timeout_embed) return - except: + except Exception: print("timeout after edit") pass except CancelException: @@ -460,20 +395,9 @@ def check(reaction, user): await msg.clear_reactions() await msg.edit(embed=cancel_embed) return - # except ManualException: - # msg = await ctx.fetch_message(meta['embed_msg_id']) - # await msg.clear_reactions() - # archive_url = await prep.package(meta) - # if archive_url == False: - # archive_fail_embed = discord.Embed(title="Unable to upload prep files", description=f"The files can be found at `tmp/{meta['title']}.tar`", color=0xff0000) - # await msg.edit(embed=archive_fail_embed) - # else: - # archive_embed = discord.Embed(title="Files can be found 
at:",description=f"{archive_url} or `tmp/{meta['title']}.tar`", color=0x00ff40) - # await msg.edit(embed=archive_embed) - # return else: - - #Check which are selected and upload to them + + # Check which are selected and upload to them msg = await ctx.fetch_message(message.id) tracker_list = list() tracker_emojis = config['DISCORD']['discord_emojis'] @@ -484,59 +408,59 @@ def check(reaction, user): tracker = list(config['DISCORD']['discord_emojis'].keys())[list(config['DISCORD']['discord_emojis'].values()).index(str(each))] if tracker not in ("UPLOAD"): tracker_list.append(tracker) - + upload_embed_description = ' / '.join(tracker_list) upload_embed = discord.Embed(title=f"Uploading `{meta['name']}` to:", description=upload_embed_description, color=0x00ff40) await msg.edit(embed=upload_embed) await msg.clear_reactions() - - - client = Clients(config=config) if "MANUAL" in tracker_list: for manual_tracker in tracker_list: manual_tracker = manual_tracker.replace(" ", "") if manual_tracker.upper() == "BLU": - blu = BLU(config=config) + blu = BLU(config=config) await blu.edit_desc(meta) if manual_tracker.upper() == "BHD": bhd = BHD(config=config) - await bhd.edit_desc(meta) + await bhd.edit_desc(meta) if manual_tracker.upper() == "AITHER": aither = AITHER(config=config) - await aither.edit_desc(meta) + await aither.edit_desc(meta) if manual_tracker.upper() == "STC": stc = STC(config=config) - await stc.edit_desc(meta) + await stc.edit_desc(meta) if manual_tracker.upper() == "LCD": lcd = LCD(config=config) - await lcd.edit_desc(meta) + await lcd.edit_desc(meta) + if manual_tracker.upper() == "CBR": + cbr = CBR(config=config) + await cbr.edit_desc(meta) archive_url = await prep.package(meta) upload_embed_description = upload_embed_description.replace('MANUAL', '~~MANUAL~~') - if archive_url == False: + if archive_url is False: upload_embed = discord.Embed(title=f"Uploaded `{meta['name']}` to:", description=upload_embed_description, color=0xff0000) upload_embed.add_field(name="Unable to upload prep files", value=f"The files can be found at `tmp/{meta['title']}.tar`") await msg.edit(embed=upload_embed) else: upload_embed = discord.Embed(title=f"Uploaded `{meta['name']}` to:", description=upload_embed_description, color=0x00ff40) - upload_embed.add_field(name="Files can be found at:",value=f"{archive_url} or `tmp/{meta['uuid']}`") + upload_embed.add_field(name="Files can be found at:", value=f"{archive_url} or `tmp/{meta['uuid']}`") await msg.edit(embed=upload_embed) if "BLU" in tracker_list: blu = BLU(config=config) dupes = await blu.search_existing(meta) meta = await self.dupe_embed(dupes, meta, tracker_emojis, channel) - if meta['upload'] == True: + if meta['upload'] is True: await blu.upload(meta) await client.add_to_client(meta, "BLU") upload_embed_description = upload_embed_description.replace('BLU', '~~BLU~~') upload_embed = discord.Embed(title=f"Uploaded `{meta['name']}` to:", description=upload_embed_description, color=0x00ff40) - await msg.edit(embed=upload_embed) + await msg.edit(embed=upload_embed) if "BHD" in tracker_list: bhd = BHD(config=config) dupes = await bhd.search_existing(meta) meta = await self.dupe_embed(dupes, meta, tracker_emojis, channel) - if meta['upload'] == True: + if meta['upload'] is True: await bhd.upload(meta) await client.add_to_client(meta, "BHD") upload_embed_description = upload_embed_description.replace('BHD', '~~BHD~~') @@ -546,46 +470,54 @@ def check(reaction, user): aither = AITHER(config=config) dupes = await aither.search_existing(meta) meta = await 
self.dupe_embed(dupes, meta, tracker_emojis, channel) - if meta['upload'] == True: + if meta['upload'] is True: await aither.upload(meta) await client.add_to_client(meta, "AITHER") upload_embed_description = upload_embed_description.replace('AITHER', '~~AITHER~~') upload_embed = discord.Embed(title=f"Uploaded `{meta['name']}` to:", description=upload_embed_description, color=0x00ff40) - await msg.edit(embed=upload_embed) + await msg.edit(embed=upload_embed) if "STC" in tracker_list: stc = STC(config=config) dupes = await stc.search_existing(meta) meta = await self.dupe_embed(dupes, meta, tracker_emojis, channel) - if meta['upload'] == True: + if meta['upload'] is True: await stc.upload(meta) await client.add_to_client(meta, "STC") upload_embed_description = upload_embed_description.replace('STC', '~~STC~~') upload_embed = discord.Embed(title=f"Uploaded `{meta['name']}` to:", description=upload_embed_description, color=0x00ff40) - await msg.edit(embed=upload_embed) + await msg.edit(embed=upload_embed) if "LCD" in tracker_list: lcd = LCD(config=config) dupes = await lcd.search_existing(meta) meta = await self.dupe_embed(dupes, meta, tracker_emojis, channel) - if meta['upload'] == True: + if meta['upload'] is True: await lcd.upload(meta) await client.add_to_client(meta, "LCD") upload_embed_description = upload_embed_description.replace('LCD', '~~LCD~~') upload_embed = discord.Embed(title=f"Uploaded `{meta['name']}` to:", description=upload_embed_description, color=0x00ff40) - await msg.edit(embed=upload_embed) + await msg.edit(embed=upload_embed) + if "CBR" in tracker_list: + cbr = CBR(config=config) + dupes = await cbr.search_existing(meta) + meta = await self.dupe_embed(dupes, meta, tracker_emojis, channel) + if meta['upload'] is True: + await cbr.upload(meta) + await client.add_to_client(meta, "CBR") + upload_embed_description = upload_embed_description.replace('CBR', '~~CBR~~') + upload_embed = discord.Embed(title=f"Uploaded `{meta['name']}` to:", description=upload_embed_description, color=0x00ff40) + await msg.edit(embed=upload_embed) return None - - - + async def dupe_embed(self, dupes, meta, emojis, channel): if not dupes: print("No dupes found") - meta['upload'] = True + meta['upload'] = True return meta else: dupe_text = "\n\nā€¢".join(dupes) dupe_text = f"```ā€¢{dupe_text}```" - embed = discord.Embed(title="Are these dupes?", description=dupe_text, color=0xff0000) - embed.set_footer(text=f"{emojis['CANCEL']} to abort upload | {emojis['UPLOAD']} to upload anyways") + embed = discord.Embed(title="Check if these are actually dupes!", description=dupe_text, color=0xff0000) + embed.set_footer(text=f"{emojis['CANCEL']} to abort upload | {emojis['UPLOAD']} to upload anyways") message = await channel.send(embed=embed) await message.add_reaction(emojis['CANCEL']) await asyncio.sleep(0.3) @@ -593,7 +525,7 @@ async def dupe_embed(self, dupes, meta, emojis, channel): def check(reaction, user): if reaction.message.id == message.id: - if str(user.id) == config['DISCORD']['admin_id']: + if str(user.id) == config['DISCORD']['admin_id']: if str(reaction.emoji) == emojis['UPLOAD']: return reaction if str(reaction.emoji) == emojis['CANCEL']: @@ -607,7 +539,7 @@ def check(reaction, user): try: await channel.send(f"{meta['uuid']} timed out") meta['upload'] = False - except: + except Exception: return except CancelException: await channel.send(f"{meta['title']} cancelled") @@ -627,19 +559,18 @@ async def get_missing(self, meta): missing.append('--imdb') if isinstance(meta['potential_missing'], list) and 
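`dupe_embed` and the search commands all gate uploads behind the same reaction pattern: a `check` predicate that only accepts the configured admin reacting on the exact message, passed to `bot.wait_for("reaction_add", ...)`. A condensed sketch, assuming a discord.py `commands.Bot` and the emoji/admin values from `data/config.py` (the helper name is hypothetical, and where the real code raises `CancelException` from inside `check`, this sketch returns a boolean instead):

```python
import asyncio


async def await_admin_confirmation(bot, message, emojis, admin_id, timeout=120):
    """Return True on UPLOAD, False on CANCEL or timeout."""
    def check(reaction, user):
        # Only the configured admin reacting on this exact message counts.
        return (reaction.message.id == message.id
                and str(user.id) == admin_id
                and str(reaction.emoji) in (emojis['UPLOAD'], emojis['CANCEL']))

    try:
        reaction, _user = await bot.wait_for("reaction_add", timeout=timeout, check=check)
    except asyncio.TimeoutError:
        return False
    return str(reaction.emoji) == emojis['UPLOAD']
```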
@@ -627,19 +559,18 @@ async def get_missing(self, meta):
                 missing.append('--imdb')
         if isinstance(meta['potential_missing'], list) and len(meta['potential_missing']) > 0:
             for each in meta['potential_missing']:
-                if meta.get(each, '').replace(' ', '') == "": 
+                if meta.get(each, '').replace(' ', '') == "":
                     missing.append(f"--{each}")
         return missing
 
+
 def setup(bot):
     bot.add_cog(Commands(bot))
 
-
-
 class CancelException(Exception):
     pass
 
+
 class ManualException(Exception):
     pass
diff --git a/data/example-config.py b/data/example-config.py
index c1b37e8dc..743075b67 100644
--- a/data/example-config.py
+++ b/data/example-config.py
@@ -1,234 +1,407 @@
 config = {
-    "DEFAULT" : {
-
+    "DEFAULT": {
+
         # ------ READ THIS ------
         # Any lines starting with the # symbol are commented and will not be used.
         # If you change any of these options, remove the #
         # -----------------------
 
-        "tmdb_api" : "tmdb_api key",
-        "imgbb_api" : "imgbb api key",
-        "ptpimg_api" : "ptpimg api key",
-        "lensdump_api" : "lensdump api key",
+        "tmdb_api": "tmdb_api key",
+        "imgbb_api": "imgbb api key",
+        "ptpimg_api": "ptpimg api key",
+        "lensdump_api": "lensdump api key",
+        "ptscreens_api": "ptscreens api key",
+        "oeimg_api": "oeimg api key",
 
         # Order of image hosts, and backup image hosts
         "img_host_1": "imgbb",
         "img_host_2": "ptpimg",
         "img_host_3": "imgbox",
-        "img_host_4": "pixhost", 
+        "img_host_4": "pixhost",
         "img_host_5": "lensdump",
+        "img_host_6": "ptscreens",
+        "img_host_7": "oeimg",
+
+        # Number of screenshots to capture
+        "screens": "6",
+
+        # Tonemap HDR screenshots and set task limit when tonemapping
+        # When tonemapping, out of memory errors are more likely to occur with higher task limits
+        "tone_map": False,
+        "tone_task_limit": "1",
+
+        # Number of cutoff screenshots
+        # If there are at least this many screenshots already, perhaps pulled from an existing
+        # description, skip creating and uploading any further screenshots.
+        "cutoff_screens": "3",
+
+        # Multiprocessing task limit
+        # When capturing/optimizing images, limit to this many concurrent tasks
+        # Causes issues on UNIX-based OSes when task_limit > 1
+        # Defaults to os.cpu_count() if this value is not set
+        "task_limit": "1",
+
+        # Providing the option to change the size of the screenshot thumbnails where supported.
+        # Default is 350, ie [img=350]
+        "thumbnail_size": "350",
+
+        # Number of screenshots to use for each (ALL) disc/episode when uploading packs to supported sites.
+        # 0 equals old behavior where only the original description and images are added.
+        # This setting also affects PTP, however PTP requires at least 2 images for each.
+        # PTP will always use a *minimum* of 2, regardless of what is set here.
+        "multiScreens": "2",
+
+        # The below options for packed content do not affect PTP. PTP has a set standard.
+
+        # When uploading packs, you can specify a different screenshot thumbnail size, default 300.
+        "pack_thumb_size": "300",
+
+        # Description character count (including bbcode) cutoff for UNIT3D sites when **season packs only**.
+        # After hitting this limit, only filenames and screenshots will be used for any ADDITIONAL files
+        # still to be added to the description. You can set this small, like 50, to only ever
+        # print filenames and screenshots for each file; no mediainfo will be printed.
+        # UNIT3D sites have a hard character limit for descriptions. A little over 17000
+        # worked fine in a forum post at BLU. If the description is still under charLimit, the next full
+        # description will be added before this cutoff is respected.
+        "charLimit": "14000",
+
+        # How many files in a season pack will be added to the description before using an additional spoiler tag.
+        # Any other files past this limit will be hidden/added all within a spoiler tag.
+        "fileLimit": "2",
+
+        # Absolute limit on processed files in packs. You might not want to upload images for a large number of episodes
+        "processLimit": "10",
+
+        # Providing the option to add a header, in bbcode, above the screenshot section where supported
+        # "screenshot_header": "[center] SCREENSHOTS [/center]",
 
-        "screens" : "6",
         # Enable lossless PNG Compression (True/False)
-        "optimize_images" : True,
+        "optimize_images": True,
+        # Use only half the available CPU cores to avoid memory allocation errors
+        # Only when using lossless compression
+        "shared_seedbox": False,
 
         # The name of your default torrent client, set in the torrent client sections below
-        "default_torrent_client" : "Client1",
+        "default_torrent_client": "Client1",
 
         # Play the bell sound effect when asking for confirmation
-        "sfx_on_prompt" : True,
+        "sfx_on_prompt": True,
+
+        # Run an API search after upload to find the permalink and insert as comment in torrent
+        # Needs a 5 second wait to ensure the API is updated
+        "get_permalink": False,
+
+        # How many trackers need to pass successful checking to continue with the upload process
+        # Default = 1. If 1 (or more) tracker/s pass banned_group and dupe checking, uploading will continue
+        # If fewer than this number of trackers pass the checking, exit immediately.
+        "tracker_pass_checks": "1",
     },
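For reference, the DEFAULT comments above imply fallbacks such as `os.cpu_count()` for `task_limit`. A hypothetical consumer of these settings — the key names match `example-config.py`, but the snippet itself is illustrative rather than repo code:

```python
import os

from data.config import config

defaults = config['DEFAULT']
task_limit = int(defaults.get('task_limit', os.cpu_count() or 1))  # per the comment: cpu_count() when unset
cutoff_screens = int(defaults.get('cutoff_screens', 0))            # skip capture once this many already exist
char_limit = int(defaults.get('charLimit', 14000))                 # UNIT3D description cutoff for season packs
```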
     "TRACKERS": {
         # Which trackers do you want to upload to?
-        "default_trackers" : "BLU, BHD, AITHER, STC, STT, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV",
+        # Available trackers: ACM, AITHER, AL, ANT, BHD, BHDTV, BLU, CBR, FNP, HDB, HDT, HP, HUNO, LCD, LST, LT, MTV, NBL, OE, OTW, PSS, PTER, PTP, PTT, R4E, RF, RTF, SN, STC, STT, THR, TIK, TL, ULCX, UTP, YOINK
+        # Remove the trackers from the default_trackers list that are not used, to save being asked every time
+        "default_trackers": "ACM, AITHER, AL, ANT, BHD, BHDTV, BLU, CBR, FNP, HDB, HDT, HP, HUNO, LCD, LST, LT, MTV, NBL, OE, OTW, PSS, PTER, PTP, PTT, R4E, RF, RTF, SN, STC, STT, THR, TIK, TL, ULCX, UTP, YOINK",
+
+        "ACM": {
+            "api_key": "ACM api key",
+            "announce_url": "https://asiancinema.me/announce/customannounceurl",
+            # "anon" : False,
 
-        "BLU" : {
-            "useAPI" : False, # Set to True if using BLU
-            "api_key" : "BLU api key",
-            "announce_url" : "https://blutopia.cc/announce/customannounceurl",
+            # FOR INTERNAL USE ONLY:
+            # "internal" : True,
+            # "internal_groups" : ["What", "Internal", "Groups", "Are", "You", "In"],
+        },
+        "AITHER": {
+            "useAPI": False,  # Set to True if using Aither for automatic ID searching
+            "api_key": "AITHER api key",
+            "announce_url": "https://aither.cc/announce/customannounceurl",
+            # "anon" : False,
+            # "modq" : False  ## Not working yet
+        },
+        "AL": {
+            "api_key": "AL api key",
+            "announce_url": "https://animelovers.club/announce/customannounceurl",
             # "anon" : False
         },
-        "BHD" : {
-            "api_key" : "BHD api key",
-            "announce_url" : "https://beyond-hd.me/announce/customannounceurl",
-            "draft_default" : "True",
+        "ANT": {
+            "api_key": "ANT api key",
+            "announce_url": "https://anthelion.me/announce/customannounceurl",
+            # "anon" : False
+        },
+        "BHD": {
+            "api_key": "BHD api key",
+            "announce_url": "https://beyond-hd.me/announce/customannounceurl",
+            "draft_default": "True",  # Send to drafts
             # "anon" : False
         },
         "BHDTV": {
             "api_key": "found under https://www.bit-hdtv.com/my.php",
             "announce_url": "https://trackerr.bit-hdtv.com/announce",
-            #passkey found under https://www.bit-hdtv.com/my.php
+            # passkey found under https://www.bit-hdtv.com/my.php
             "my_announce_url": "https://trackerr.bit-hdtv.com/passkey/announce",
             # "anon" : "False"
         },
-        "PTP" : {
-            "useAPI" : False, # Set to True if using PTP
-            "add_web_source_to_desc" : True,
-            "ApiUser" : "ptp api user",
-            "ApiKey" : 'ptp api key',
-            "username" : "",
-            "password" : "",
-            "announce_url" : ""
-        },
-        "AITHER" :{
-            "api_key" : "AITHER api key",
-            "announce_url" : "https://aither.cc/announce/customannounceurl",
+        "BLU": {
+            "useAPI": False,  # Set to True if using BLU for automatic ID searching
+            "api_key": "BLU api key",
+            "announce_url": "https://blutopia.cc/announce/customannounceurl",
+            # "anon" : False,
+            # "modq" : False  ## Not working yet
+        },
+        "CBR": {
+            "api_key": "CBR api key",
+            "announce_url": "https://capybarabr.com/announce/customannounceurl",
+            # "anon" : False
+        },
+        "FL": {
+            "username": "FL username",
+            "passkey": "FL passkey",
+            "uploader_name": "https://filelist.io/Custom_Announce_URL",
+            # "anon": False,
+        },
+        "FNP": {
+            "api_key": "FNP api key",
+            "announce_url": "https://fearnopeer.com/announce/customannounceurl",
+            # "anon" : "False"
+        },
+        "HDB": {
+            "useAPI": False,  # Set to True if using HDB for automatic ID searching
+            "username": "HDB username",
+            "passkey": "HDB passkey",
+            "announce_url": "https://hdbits.org/announce/Custom_Announce_URL",
+            # "anon": False,
+            "img_rehost": True,
+        },
+        "HDT": {
+            "username": "username",
+            "password": "password",
+            "my_announce_url": "https://hdts-announce.ru/announce.php?pid=",
+            # "anon" : "False"
+            "announce_url": "https://hdts-announce.ru/announce.php",  # DO NOT EDIT THIS LINE
+        },
+        "HP": {
+            "api_key": "HP",
+            "announce_url": "https://hidden-palace.net/announce/customannounceurl",
             # "anon" : False
         },
-        "R4E" :{
-            "api_key" : "R4E api key",
-            "announce_url" : "https://racing4everyone.eu/announce/customannounceurl",
+        "HUNO": {
+            "api_key": "HUNO api key",
+            "announce_url": "https://hawke.uno/announce/customannounceurl",
             # "anon" : False
         },
-        "HUNO" : {
-            "api_key" : "HUNO api key",
-            "announce_url" : "https://hawke.uno/announce/customannounceurl",
+        "JPTV": {
+            "api_key": "JPTV api key",
+            "announce_url": "https://jptv.club/announce/customannounceurl",
+            # "anon" : False
+        },
+        "LCD": {
+            "api_key": "LCD api key",
+            "announce_url": "https://locadora.cc/announce/customannounceurl",
+            # "anon" : False
+        },
+        "LST": {
+            "useAPI": False,  # Set to True if using LST for automatic ID searching
+            "api_key": "LST api key",
+            "announce_url": "https://lst.gg/announce/customannounceurl",
+            # "anon" : False,
+            # "modq" : False,  # Send to modq for staff approval
+            # "draft" : False  # Send to drafts
+        },
+        "LT": {
+            "api_key": "LT api key",
+            "announce_url": "https://lat-team.com/announce/customannounceurl",
             # "anon" : False
         },
         "MTV": {
-            'api_key' : 'get from security page',
-            'username' : '',
-            'password' : '',
-            'announce_url' : "get from https://www.morethantv.me/upload.php",
-            'anon' : False,
+            'api_key': 'get from security page',
+            'username': '',
+            'password': '',
+            'announce_url': "get from https://www.morethantv.me/upload.php",
+            # 'anon': False,
             # 'otp_uri' : 'OTP URI, read the following for more information https://github.com/google/google-authenticator/wiki/Key-Uri-Format'
+            'skip_if_rehash': False,  # Skip uploading to MTV if it would require a torrent rehash because existing piece size > 8 MiB
+            'prefer_mtv_torrent': False,  # Iterate over found torrents and prefer MTV suitable torrents if found.
         },
-        "STC" :{
-            "api_key" : "STC",
-            "announce_url" : "https://skipthecommericals.xyz/announce/customannounceurl",
-            # "anon" : False
+        "NBL": {
+            "api_key": "NBL api key",
+            "announce_url": "https://nebulance.io/customannounceurl",
         },
-        "STT" :{
-            "api_key" : "STC",
-            "announce_url" : "https://stt.xyz/announce/customannounceurl",
+        "OE": {
+            "useAPI": False,  # Set to True if using OE for automatic ID searching
+            "api_key": "OE api key",
+            "announce_url": "https://onlyencodes.cc/announce/customannounceurl",
             # "anon" : False
         },
-        "SN": {
-            "api_key": "6Z1tMrXzcYpIeSdGZueQWqb3BowlS6YuIoZLHe3dvIqkSfY0Ws5SHx78oGSTazG0jQ1agduSqe07FPPE8sdWTg",
-            "announce_url": "https://tracker.swarmazon.club:8443//announce",
+        "OTW": {
+            "api_key": "OTW api key",
+            "announce_url": "https://oldtoons.world/announce/customannounceurl",
+            # "anon" : False
         },
-        "HP" :{
-            "api_key" : "HP",
-            "announce_url" : "https://hidden-palace.net/announce/customannounceurl",
+        "PSS": {
+            "api_key": "PSS api key",
+            "announce_url": "https://privatesilverscreen.cc/announce/customannounceurl",
             # "anon" : False
         },
-        "ACM" :{
-            "api_key" : "ACM api key",
-            "announce_url" : "https://asiancinema.me/announce/customannounceurl",
+        "PTER": {  # Does not appear to be working at all
+            "passkey": 'passkey',
+            "img_rehost": False,
+            "username": "",
+            "password": "",
+            "ptgen_api": "",
+            # "anon": True,
+        },
+        "PTP": {
+            "useAPI": False,  # Set to True if using PTP for automatic ID searching
+            "add_web_source_to_desc": True,
+            "ApiUser": "ptp api user",
+            "ApiKey": 'ptp api key',
+            "username": "",
+            "password": "",
+            "announce_url": ""
+        },
+        "PTT": {
+            "api_key": "PTT api key",
+            "announce_url": "https://polishtorrent.top/announce/customannounceurl",
             # "anon" : False,
-
-            # FOR INTERNAL USE ONLY:
-            # "internal" : True,
-            # "internal_groups" : ["What", "Internal", "Groups", "Are", "You", "In"],
-        },
-        "NBL" : {
-            "api_key" : "NBL api key",
-            "announce_url" : "https://nebulance.io/customannounceurl",
         },
-        "ANT" :{
-            "api_key" : "ANT api key",
-            "announce_url" : "https://anthelion.me/announce/customannounceurl",
+        "R4E": {
+            "api_key": "R4E api key",
+            "announce_url": "https://racing4everyone.eu/announce/customannounceurl",
             # "anon" : False
         },
-        "THR" : {
-            "username" : "username",
-            "password" : "password",
-            "img_api" : "get this from the forum post",
-            "announce_url" : "http://www.torrenthr.org/announce.php?passkey=yourpasskeyhere",
-            "pronfo_api_key" : "pronfo api key",
-            "pronfo_theme" : "pronfo theme code",
-            "pronfo_rapi_id" : "pronfo remote api id",
+        "RF": {
+            "api_key": "RF api key",
+            "announce_url": "https://reelflix.xyz/announce/customannounceurl",
             # "anon" : False
         },
-        "LCD" : {
-            "api_key" : "LCD api key",
-            "announce_url" : "https://locadora.cc/announce/customannounceurl",
+        "RTF": {
+            "username": "username",
+            "password": "password",
+            "api_key": 'get_it_by_running_/api/ login command from https://retroflix.club/api/doc',
+            "announce_url": "get from upload page",
+            # "anon": True
+        },
+        "SHRI": {
+            "api_key": "SHRI api key",
+            "announce_url": "https://shareisland.org/announce/customannounceurl",
+            # "anon" : "False"
+        },
+        "SN": {
+            "api_key": "SN",
+            "announce_url": "https://tracker.swarmazon.club:8443//announce",
+        },
+        "SPD": {
+            "api_key": "SPEEDAPP API KEY",
+            "announce_url": "https://ramjet.speedapp.io//announce",
+        },
+        "STC": {
+            "api_key": "STC",
+            "announce_url": "https://skipthecommericals.xyz/announce/customannounceurl",
             # "anon" : False
         },
-        "LST" : {
-            "api_key" : "LST api key",
-            "announce_url" : "https://lst.gg/announce/customannounceurl",
+        "STT": {
+            "api_key": "STT",
+            "announce_url": "https://stt.xyz/announce/customannounceurl",
             # "anon" : False
         },
-        "LT" : {
-            "api_key" : "LT api key",
-            "announce_url" : "https://lat-team.com/announce/customannounceurl",
+        "THR": {
+            "username": "username",
+            "password": "password",
+            "img_api": "get this from the forum post",
+            "announce_url": "http://www.torrenthr.org/announce.php?passkey=yourpasskeyhere",
+            "pronfo_api_key": "pronfo api key",
+            "pronfo_theme": "pronfo theme code",
+            "pronfo_rapi_id": "pronfo remote api id",
             # "anon" : False
         },
-        "PTER" : {
-            "passkey":'passkey',
-            "img_rehost" : False,
-            "username" : "",
-            "password" : "",
-            "ptgen_api": "",
-            "anon": True,
+        "TIK": {
+            "useAPI": False,  # Set to True if using TIK for automatic ID searching, won't work great until folder searching is added to UNIT3D API
+            "api_key": "TIK api key",
+            "announce_url": "https://cinematik.net/announce/",
+            # "anon": False,
+            # "modq": True,  # Not working for now, ignored unless correct class
         },
         "TL": {
             "announce_key": "TL announce key",
         },
-        "TDC" :{
-            "api_key" : "TDC api key",
-            "announce_url" : "https://thedarkcommunity.cc/announce/customannounceurl",
-            # "anon" : "False"
-        },
-        "HDT" : {
-            "username" : "username",
-            "password" : "password",
-            "my_announce_url": "https://hdts-announce.ru/announce.php?pid=",
-            # "anon" : "False"
-            "announce_url" : "https://hdts-announce.ru/announce.php", #DO NOT EDIT THIS LINE
-        },
-        "OE" : {
-            "api_key" : "OE api key",
-            "announce_url" : "https://onlyencodes.cc/announce/customannounceurl",
+        "TTG": {
+            "username": "username",
+            "password": "password",
+            "login_question": "login_question",
+            "login_answer": "login_answer",
+            "user_id": "user_id",
+            "announce_url": "https://totheglory.im/announce/",
             # "anon" : False
         },
-        "RTF": {
-            "api_key": 'get_it_by_running_/api/ login command from https://retroflix.club/api/doc',
-            "announce_url": "get from upload page",
-            # "tag": "RetroFlix, nd",
-            "anon": True
+        "TVC": {
+            "api_key": "TVC API Key",
+            "announce_url": "https://tvchaosuk.com/announce/",
+            # "anon": "False"
+        },
+        "ULCX": {
+            "api_key": "ULCX api key",
+            "announce_url": "https://upload.cx/announce/customannounceurl",
+            # "anon" : False,
         },
-        "RF" : {
-            "api_key" : "RF api key",
-            "announce_url" : "https://reelflix.xyz/announce/customannounceurl",
+        # "UNIT3D_TEMPLATE": {
+        #     "api_key": "UNIT3D_TEMPLATE api key",
+        #     "announce_url": "https://domain.tld/announce/customannounceurl",
+        #     # "anon" : False,
+        #     # "modq" : False  ## Not working yet
+        # },
+        "UTP": {
+            "api_key": "UTP api key",
+            "announce_url": "https://UTP/announce/customannounceurl",
             # "anon" : False
         },
-        "MANUAL" : {
-            # Uncomment and replace link with filebrowser (https://github.com/filebrowser/filebrowser) link to the Upload-Assistant directory, this will link to your filebrowser instead of uploading to uguu.se
-            # "filebrowser" : "https://domain.tld/filebrowser/files/Upload-Assistant/"
+        "YOINK": {
+            "api_key": "YOINK api key",
+            "announce_url": "https://yoinked.org/announce/customannounceurl",
+            # "anon" : "False"
         },
     },
-
-    "TORRENT_CLIENTS" : {
-        # Name your torrent clients here, for example, this example is named "Client1"
-        "Client1" : {
-            "torrent_client" : "qbit",
-            "qbit_url" : "http://127.0.0.1",
-            "qbit_port" : "8080",
-            "qbit_user" : "username",
-            "qbit_pass" : "password",
+    # enable_search set to True will automatically try to find a suitable hash to save having to rehash when creating torrents
+    # Should use the qbit API, but will also use the torrent_storage_dir to find suitable hashes
+    # If you find issues, use the "--debug" argument to print out some related details
+    "TORRENT_CLIENTS": {
+        # Name your torrent clients here; for example, this one is named "Client1" and is set as default_torrent_client above
+        # All options relate to the webui; make sure you have the webui secured if it has WAN access
+        # See https://github.com/Audionut/Upload-Assistant/wiki
+        "Client1": {
+            "torrent_client": "qbit",
+            # "enable_search": True,
+            "qbit_url": "http://127.0.0.1",
+            "qbit_port": "8080",
+            "qbit_user": "username",
+            "qbit_pass": "password",
+            # "torrent_storage_dir": "path/to/BT_backup folder"  ## use double-backslash on windows eg: "C:\\client\\backup"
 
             # Remote path mapping (docker/etc.) CASE SENSITIVE
-            # "local_path" : "/LocalPath",
-            # "remote_path" : "/RemotePath"
-        },
-        "qbit_sample" : {
-            "torrent_client" : "qbit",
-            "enable_search" : True,
-            "qbit_url" : "http://127.0.0.1",
-            "qbit_port" : "8080",
-            "qbit_user" : "username",
-            "qbit_pass" : "password",
-            # "torrent_storage_dir" : "path/to/BT_backup folder"
-            # "qbit_tag" : "tag",
-            # "qbit_cat" : "category"
-
+            # "local_path": "/LocalPath",
+            # "remote_path": "/RemotePath"
+        },
+        "qbit_sample": {
+            "torrent_client": "qbit",
+            "enable_search": True,
+            "qbit_url": "http://127.0.0.1",
+            "qbit_port": "8080",
+            "qbit_user": "username",
+            "qbit_pass": "password",
+            # "torrent_storage_dir": "path/to/BT_backup folder"
+            # "qbit_tag": "tag",
+            # "qbit_cat": "category"
+
             # Content Layout for adding .torrents: "Original"(recommended)/"Subfolder"/"NoSubfolder"
-            "content_layout" : "Original"
-
+            "content_layout": "Original"
+
             # Enable automatic torrent management if listed path(s) are present in the path
-            # If using remote path mapping, use remote path
-            # For using multiple paths, use a list ["path1", "path2"]
+            # If using remote path mapping, use remote path
+            # For using multiple paths, use a list ["path1", "path2"]
             # "automatic_management_paths" : ""
-
-
-            # Remote path mapping (docker/etc.) CASE SENSITIVE
             # "local_path" : "E:\\downloads\\tv",
             # "remote_path" : "/remote/downloads/tv"
@@ -237,9 +410,9 @@
             # "VERIFY_WEBUI_CERTIFICATE" : True
         },
-        "rtorrent_sample" : {
-            "torrent_client" : "rtorrent",
-            "rtorrent_url" : "https://user:password@server.host.tld:443/username/rutorrent/plugins/httprpc/action.php",
+        "rtorrent_sample": {
+            "torrent_client": "rtorrent",
+            "rtorrent_url": "https://user:password@server.host.tld:443/username/rutorrent/plugins/httprpc/action.php",
             # "torrent_storage_dir" : "path/to/session folder",
             # "rtorrent_label" : "Add this label to all uploads"
 
@@ -248,54 +421,47 @@
             # "remote_path" : "/RemotePath"
         },
-        "deluge_sample" : {
-            "torrent_client" : "deluge",
-            "deluge_url" : "localhost",
-            "deluge_port" : "8080",
-            "deluge_user" : "username",
-            "deluge_pass" : "password",
+        "deluge_sample": {
+            "torrent_client": "deluge",
+            "deluge_url": "localhost",
+            "deluge_port": "8080",
+            "deluge_user": "username",
+            "deluge_pass": "password",
             # "torrent_storage_dir" : "path/to/session folder",
-
+
             # Remote path mapping (docker/etc.) CASE SENSITIVE
             # "local_path" : "/LocalPath",
             # "remote_path" : "/RemotePath"
         },
-        "watch_sample" : {
-            "torrent_client" : "watch",
-            "watch_folder" : "/Path/To/Watch/Folder"
+        "watch_sample": {
+            "torrent_client": "watch",
+            "watch_folder": "/Path/To/Watch/Folder"
         },
     },
+    "DISCORD": {
+        "discord_bot_token": "discord bot token",
+        "discord_bot_description": "L4G's Upload Assistant",
+        "command_prefix": "!",
+        "discord_channel_id": "discord channel id for use",
+        "admin_id": "your discord user id",
 
-
-
-
-
-
-    "DISCORD" :{
-        "discord_bot_token" : "discord bot token",
-        "discord_bot_description" : "L4G's Upload Assistant",
-        "command_prefix" : "!",
-        "discord_channel_id" : "discord channel id for use",
-        "admin_id" : "your discord user id",
-
-        "search_dir" : "Path/to/downloads/folder/ this is used for search",
+        "search_dir": "Path/to/downloads/folder/ this is used for search",
         # Alternatively, search multiple folders:
         # "search_dir" : [
         #     "/downloads/dir1",
         #     "/data/dir2",
        # ]
-        "discord_emojis" : {
-            "BLU": "šŸ’™",
-            "BHD": "šŸŽ‰",
-            "AITHER": "šŸ›«",
-            "STC": "šŸ“ŗ",
-            "ACM": "šŸ™",
-            "MANUAL" : "šŸ“©",
-            "UPLOAD" : "āœ…",
-            "CANCEL" : "šŸš«"
+        "discord_emojis": {
+            "BLU": "šŸ’™",
+            "BHD": "šŸŽ‰",
+            "AITHER": "šŸ›«",
+            "STC": "šŸ“ŗ",
+            "ACM": "šŸ™",
+            "MANUAL": "šŸ“©",
+            "UPLOAD": "āœ…",
+            "CANCEL": "šŸš«"
         }
     }
 }
-
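The `Client1` block above maps directly onto a qBittorrent WebUI login. As a rough sketch of what those four fields amount to — using the third-party `qbittorrent-api` package, which is not a dependency of this repo and is shown only to clarify the settings:

```python
import qbittorrentapi  # pip install qbittorrent-api; illustrative, not in requirements.txt

from data.config import config

c = config['TORRENT_CLIENTS']['Client1']
client = qbittorrentapi.Client(
    host=c['qbit_url'],
    port=int(c['qbit_port']),
    username=c['qbit_user'],
    password=c['qbit_pass'],
)
client.auth_log_in()  # raises qbittorrentapi.LoginFailed on bad credentials
print(client.app.version)
```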
help="Comma-separated frame numbers to use as screenshots", type=str, default=None) parser.add_argument('-c', '--category', nargs='*', required=False, help="Category [MOVIE, TV, FANRES]", choices=['movie', 'tv', 'fanres']) - parser.add_argument('-t', '--type', nargs='*', required=False, help="Type [DISC, REMUX, ENCODE, WEBDL, WEBRIP, HDTV]", choices=['disc', 'remux', 'encode', 'webdl', 'web-dl', 'webrip', 'hdtv']) - parser.add_argument('--source', nargs='*', required=False, help="Source [Blu-ray, BluRay, DVD, HDDVD, WEB, HDTV, UHDTV]", choices=['Blu-ray', 'BluRay', 'DVD', 'HDDVD', 'WEB', 'HDTV', 'UHDTV'], dest="manual_source") + parser.add_argument('-t', '--type', nargs='*', required=False, help="Type [DISC, REMUX, ENCODE, WEBDL, WEBRIP, HDTV, DVDRIP]", choices=['disc', 'remux', 'encode', 'webdl', 'web-dl', 'webrip', 'hdtv', 'dvdrip'], dest="manual_type") + parser.add_argument('--source', nargs='*', required=False, help="Source [Blu-ray, BluRay, DVD, HDDVD, WEB, HDTV, UHDTV, LaserDisc, DCP]", choices=['Blu-ray', 'BluRay', 'DVD', 'HDDVD', 'WEB', 'HDTV', 'UHDTV', 'LaserDisc', 'DCP'], dest="manual_source") parser.add_argument('-res', '--resolution', nargs='*', required=False, help="Resolution [2160p, 1080p, 1080i, 720p, 576p, 576i, 480p, 480i, 8640p, 4320p, OTHER]", choices=['2160p', '1080p', '1080i', '720p', '576p', '576i', '480p', '480i', '8640p', '4320p', 'other']) parser.add_argument('-tmdb', '--tmdb', nargs='*', required=False, help="TMDb ID", type=str, dest='tmdb_manual') parser.add_argument('-imdb', '--imdb', nargs='*', required=False, help="IMDb ID", type=str) parser.add_argument('-mal', '--mal', nargs='*', required=False, help="MAL ID", type=str) + parser.add_argument('-tvmaze', '--tvmaze', nargs='*', required=False, help="TVMAZE ID", type=str, dest='tvmaze_manual') + parser.add_argument('-tvdb', '--tvdb', nargs='*', required=False, help="TVDB ID", type=str, dest='tvdb_manual') parser.add_argument('-g', '--tag', nargs='*', required=False, help="Group Tag", type=str) parser.add_argument('-serv', '--service', nargs='*', required=False, help="Streaming Service", type=str) parser.add_argument('-dist', '--distributor', nargs='*', required=False, help="Disc Distributor e.g.(Criterion, BFI, etc.)", type=str) - parser.add_argument('-edition', '--edition', '--repack', nargs='*', required=False, help="Edition/Repack String e.g.(Director's Cut, Uncut, Hybrid, REPACK, REPACK3)", type=str, dest='manual_edition', default="") + parser.add_argument('-edition', '--edition', '--repack', nargs='*', required=False, help="Edition/Repack String e.g.(Director's Cut, Uncut, Hybrid, REPACK, REPACK3)", type=str, dest='manual_edition', default=None) parser.add_argument('-season', '--season', nargs='*', required=False, help="Season (number)", type=str) parser.add_argument('-episode', '--episode', nargs='*', required=False, help="Episode (number)", type=str) + parser.add_argument('-met', '--manual-episode-title', nargs=1, required=False, help="Set episode title, empty = empty", type=str, dest="manual_episode_title") parser.add_argument('-daily', '--daily', nargs=1, required=False, help="Air date of this episode (YYYY-MM-DD)", type=datetime.date.fromisoformat, dest="manual_date") parser.add_argument('--no-season', dest='no_season', action='store_true', required=False, help="Remove Season from title") parser.add_argument('--no-year', dest='no_year', action='store_true', required=False, help="Remove Year from title") parser.add_argument('--no-aka', dest='no_aka', action='store_true', required=False, help="Remove 
AKA from title") parser.add_argument('--no-dub', dest='no_dub', action='store_true', required=False, help="Remove Dubbed from title") parser.add_argument('--no-tag', dest='no_tag', action='store_true', required=False, help="Remove Group Tag from title") + parser.add_argument('--no-edition', dest='no_edition', action='store_true', required=False, help="Remove Edition from title") + parser.add_argument('--dual-audio', dest='dual_audio', action='store_true', required=False, help="Add Dual-Audio to the title") + parser.add_argument('-ol', '--original-language', dest='manual_language', nargs='*', required=False, help="Set original audio language") parser.add_argument('-ns', '--no-seed', action='store_true', required=False, help="Do not add torrent to the client") - parser.add_argument('-year', '--year', dest='manual_year', nargs='?', required=False, help="Year", type=int, default=0) + parser.add_argument('-year', '--year', dest='manual_year', nargs='?', required=False, help="Override the year found", type=int, default=0) parser.add_argument('-ptp', '--ptp', nargs='*', required=False, help="PTP torrent id/permalink", type=str) parser.add_argument('-blu', '--blu', nargs='*', required=False, help="BLU torrent id/link", type=str) + parser.add_argument('-aither', '--aither', nargs='*', required=False, help="Aither torrent id/link", type=str) + parser.add_argument('-lst', '--lst', nargs='*', required=False, help="LST torrent id/link", type=str) + parser.add_argument('-oe', '--oe', nargs='*', required=False, help="OE torrent id/link", type=str) + parser.add_argument('-tik', '--tik', nargs='*', required=False, help="TIK torrent id/link", type=str) parser.add_argument('-hdb', '--hdb', nargs='*', required=False, help="HDB torrent id/link", type=str) + parser.add_argument('--foreign', dest='foreign', action='store_true', required=False, help="Set for TIK Foreign category") + parser.add_argument('--opera', dest='opera', action='store_true', required=False, help="Set for TIK Opera & Musical category") + parser.add_argument('--asian', dest='asian', action='store_true', required=False, help="Set for TIK Asian category") + parser.add_argument('-disctype', '--disctype', nargs='*', required=False, help="Type of disc for TIK (BD100, BD66, BD50, BD25, NTSC DVD9, NTSC DVD5, PAL DVD9, PAL DVD5, Custom, 3D)", type=str) + parser.add_argument('--untouched', dest='untouched', action='store_true', required=False, help="Set when a completely untouched disc at TIK") parser.add_argument('-d', '--desc', nargs='*', required=False, help="Custom Description (string)") + parser.add_argument('-manual_dvds', '--manual_dvds', nargs='*', required=False, help="Override the default number of DVD's (eg: use 2xDVD9+DVD5 instead)", type=str, dest='manual_dvds', default="") parser.add_argument('-pb', '--desclink', nargs='*', required=False, help="Custom Description (link to hastebin/pastebin)") parser.add_argument('-df', '--descfile', nargs='*', required=False, help="Custom Description (path to file)") - parser.add_argument('-ih', '--imghost', nargs='*', required=False, help="Image Host", choices=['imgbb', 'ptpimg', 'imgbox', 'pixhost', 'lensdump']) + parser.add_argument('-ih', '--imghost', nargs='*', required=False, help="Image Host", choices=['imgbb', 'ptpimg', 'imgbox', 'pixhost', 'lensdump', 'ptscreens', 'oeimg']) parser.add_argument('-siu', '--skip-imagehost-upload', dest='skip_imghost_upload', action='store_true', required=False, help="Skip Uploading to an image host") parser.add_argument('-th', '--torrenthash', nargs='*', 
required=False, help="Torrent Hash to re-use from your client's session directory") parser.add_argument('-nfo', '--nfo', action='store_true', required=False, help="Use .nfo in directory for description") parser.add_argument('-k', '--keywords', nargs='*', required=False, help="Add comma seperated keywords e.g. 'keyword, keyword2, etc'") + parser.add_argument('-kf', '--keep-folder', action='store_true', required=False, help="Keep the folder containing the single file. Works only when supplying a directory as input. For uploads with poor filenames, like some scene.") parser.add_argument('-reg', '--region', nargs='*', required=False, help="Region for discs") parser.add_argument('-a', '--anon', action='store_true', required=False, help="Upload anonymously") parser.add_argument('-st', '--stream', action='store_true', required=False, help="Stream Optimized Upload") parser.add_argument('-webdv', '--webdv', action='store_true', required=False, help="Contains a Dolby Vision layer converted using dovi_tool") parser.add_argument('-hc', '--hardcoded-subs', action='store_true', required=False, help="Contains hardcoded subs", dest="hardcoded-subs") parser.add_argument('-pr', '--personalrelease', action='store_true', required=False, help="Personal Release") - parser.add_argument('-sdc','--skip-dupe-check', action='store_true', required=False, help="Pass if you know this is a dupe (Skips dupe check)", dest="dupe") + parser.add_argument('-sdc', '--skip-dupe-check', action='store_true', required=False, help="Pass if you know this is a dupe (Skips dupe check)", dest="dupe") parser.add_argument('-debug', '--debug', action='store_true', required=False, help="Debug Mode, will run through all the motions providing extra info, but will not upload to trackers.") parser.add_argument('-ffdebug', '--ffdebug', action='store_true', required=False, help="Will show info from ffmpeg while taking screenshots.") parser.add_argument('-m', '--manual', action='store_true', required=False, help="Manual Mode. 
Returns link to ddl screens/base.torrent") + parser.add_argument('-mps', '--max-piece-size', nargs='*', required=False, help="Set max piece size allowed in MiB for default torrent creation (default 256 MiB)", choices=['2', '4', '8', '16', '32', '64', '128', '256']) parser.add_argument('-nh', '--nohash', action='store_true', required=False, help="Don't hash .torrent") parser.add_argument('-rh', '--rehash', action='store_true', required=False, help="DO hash .torrent") - parser.add_argument('-ps', '--piece-size-max', dest='piece_size_max', nargs='*', required=False, help="Maximum piece size in MiB", choices=[1, 2, 4, 8, 16], type=int) - parser.add_argument('-dr', '--draft', action='store_true', required=False, help="Send to drafts (BHD)") - parser.add_argument('-tc', '--torrent-creation', dest='torrent_creation', nargs='*', required=False, help="What tool should be used to create the base .torrent", choices=['torf', 'torrenttools', 'mktorrent']) + parser.add_argument('-dr', '--draft', action='store_true', required=False, help="Send to drafts (BHD, LST)") + parser.add_argument('-mq', '--modq', action='store_true', required=False, help="Send to modQ") parser.add_argument('-client', '--client', nargs='*', required=False, help="Use this torrent client instead of default") parser.add_argument('-qbt', '--qbit-tag', dest='qbit_tag', nargs='*', required=False, help="Add to qbit with this tag") parser.add_argument('-qbc', '--qbit-cat', dest='qbit_cat', nargs='*', required=False, help="Add to qbit with this category") @@ -78,13 +96,25 @@ def parse(self, args, meta): parser.add_argument('-tk', '--trackers', nargs='*', required=False, help="Upload to these trackers, space seperated (--trackers blu bhd)") parser.add_argument('-rt', '--randomized', nargs='*', required=False, help="Number of extra, torrents with random infohash", default=0) parser.add_argument('-ua', '--unattended', action='store_true', required=False, help=argparse.SUPPRESS) + parser.add_argument('-uac', '--unattended-confirm', action='store_true', required=False, help=argparse.SUPPRESS) parser.add_argument('-vs', '--vapoursynth', action='store_true', required=False, help="Use vapoursynth for screens (requires vs install)") parser.add_argument('-cleanup', '--cleanup', action='store_true', required=False, help="Clean up tmp directory") - parser.add_argument('-fl', '--freeleech', nargs='*', required=False, help="Freeleech Percentage", default=0, dest="freeleech") + parser.add_argument('--infohash', nargs='*', required=False, help="V1 Info Hash") args, before_args = parser.parse_known_args(input) args = vars(args) # console.print(args) + if meta.get('manual_frames') is not None: + try: + # Join the list into a single string, split by commas, and convert to integers + meta['manual_frames'] = [int(time.strip()) for time in meta['manual_frames'].split(',')] + # console.print(f"Processed manual_frames: {meta['manual_frames']}") + except ValueError: + console.print("[red]Invalid format for manual_frames. 
Please provide a comma-separated list of integers.") + console.print(f"Processed manual_frames: {meta['manual_frames']}") + sys.exit(1) + else: + meta['manual_frames'] = None # Explicitly set it to None if not provided if len(before_args) >= 1 and not os.path.exists(' '.join(args['path'])): for each in before_args: args['path'].append(each) @@ -94,16 +124,16 @@ def parse(self, args, meta): break else: break - - if meta.get('tmdb_manual') != None or meta.get('imdb') != None: + + if meta.get('tmdb_manual') is not None or meta.get('imdb') is not None: meta['tmdb_manual'] = meta['imdb'] = None for key in args: value = args.get(key) if value not in (None, []): if isinstance(value, list): value2 = self.list_to_string(value) - if key == 'type': - meta[key] = value2.upper().replace('-','') + if key == 'manual_type': + meta['manual_type'] = value2.upper().replace('-', '') elif key == 'tag': meta[key] = f"-{value2}" elif key == 'screens': @@ -121,7 +151,7 @@ def parse(self, args, meta): parsed = urllib.parse.urlparse(value2) try: meta['ptp'] = urllib.parse.parse_qs(parsed.query)['torrentid'][0] - except: + except Exception: console.print('[red]Your terminal ate part of the url, please surround in quotes next time, or pass only the torrentid') console.print('[red]Continuing without -ptp') else: @@ -134,17 +164,69 @@ def parse(self, args, meta): if blupath.endswith('/'): blupath = blupath[:-1] meta['blu'] = blupath.split('/')[-1] - except: + except Exception: console.print('[red]Unable to parse id from url') console.print('[red]Continuing without --blu') else: meta['blu'] = value2 + elif key == 'aither': + if value2.startswith('http'): + parsed = urllib.parse.urlparse(value2) + try: + aitherpath = parsed.path + if aitherpath.endswith('/'): + aitherpath = aitherpath[:-1] + meta['aither'] = aitherpath.split('/')[-1] + except Exception: + console.print('[red]Unable to parse id from url') + console.print('[red]Continuing without --aither') + else: + meta['aither'] = value2 + elif key == 'lst': + if value2.startswith('http'): + parsed = urllib.parse.urlparse(value2) + try: + lstpath = parsed.path + if lstpath.endswith('/'): + lstpath = lstpath[:-1] + meta['lst'] = lstpath.split('/')[-1] + except Exception: + console.print('[red]Unable to parse id from url') + console.print('[red]Continuing without --lst') + else: + meta['lst'] = value2 + elif key == 'oe': + if value2.startswith('http'): + parsed = urllib.parse.urlparse(value2) + try: + oepath = parsed.path + if oepath.endswith('/'): + oepath = oepath[:-1] + meta['oe'] = oepath.split('/')[-1] + except Exception: + console.print('[red]Unable to parse id from url') + console.print('[red]Continuing without --oe') + else: + meta['oe'] = value2 + elif key == 'tik': + if value2.startswith('http'): + parsed = urllib.parse.urlparse(value2) + try: + tikpath = parsed.path + if tikpath.endswith('/'): + tikpath = tikpath[:-1] + meta['tik'] = tikpath.split('/')[-1] + except Exception: + console.print('[red]Unable to parse id from url') + console.print('[red]Continuing without --tik') + else: + meta['tik'] = value2 elif key == 'hdb': if value2.startswith('http'): parsed = urllib.parse.urlparse(value2) try: meta['hdb'] = urllib.parse.parse_qs(parsed.query)['id'][0] - except: + except Exception: console.print('[red]Your terminal ate part of the url, please surround in quotes next time, or pass only the torrentid') console.print('[red]Continuing without -hdb') else: @@ -156,10 +238,18 @@ def parse(self, args, meta): meta[key] = value elif key in ("manual_edition"): 
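
The aither/lst/oe/tik/hdb branches above all repeat one URL-to-id extraction pattern. A minimal sketch of that shared logic follows, with a hypothetical helper name (the project keeps the branches inline, so this is illustrative only):

```python
import urllib.parse

def id_from_url_or_value(value):
    # Mirrors the repeated aither/lst/oe/tik branches: accept either a
    # bare torrent id or a tracker URL whose last path segment is the id.
    if not value.startswith('http'):
        return value
    path = urllib.parse.urlparse(value).path.rstrip('/')
    return path.split('/')[-1]

assert id_from_url_or_value("12345") == "12345"
assert id_from_url_or_value("https://aither.cc/torrents/12345/") == "12345"
```
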
meta[key] = value + elif key in ("manual_dvds"): + meta[key] = value elif key in ("freeleech"): meta[key] = 100 elif key in ("tag") and value == []: meta[key] = "" + elif key in ["manual_episode_title"] and value == []: + meta[key] = "" + elif key in ["manual_episode_title"]: + meta[key] = value + elif key in ["tvmaze_manual"]: + meta[key] = value else: meta[key] = meta.get(key, None) if key in ('trackers'): @@ -168,17 +258,15 @@ def parse(self, args, meta): # parser.print_help() return meta, parser, before_args - def list_to_string(self, list): if len(list) == 1: return str(list[0]) try: result = " ".join(list) - except: + except Exception: result = "None" return result - def parse_tmdb_id(self, id, category): id = id.lower().lstrip() if id.startswith('tv'): @@ -190,18 +278,3 @@ def parse_tmdb_id(self, id, category): else: id = id return category, id - - - - - - - - - - - - - - - diff --git a/src/bbcode.py b/src/bbcode.py index 8ddff33c8..97e8b2150 100644 --- a/src/bbcode.py +++ b/src/bbcode.py @@ -1,6 +1,7 @@ import re import html import urllib.parse +from src.console import console # Bold - KEEP # Italic - KEEP @@ -36,63 +37,111 @@ def __init__(self): pass def clean_ptp_description(self, desc, is_disc): + # console.print("[yellow]Cleaning PTP description...") + # Convert Bullet Points to - desc = desc.replace("•", "-") # Unescape html desc = html.unescape(desc) - # End my suffering desc = desc.replace('\r\n', '\n') # Remove url tags with PTP/HDB links - url_tags = re.findall("(\[url[\=\]]https?:\/\/passthepopcorn\.m[^\]]+)([^\[]+)(\[\/url\])?", desc, flags=re.IGNORECASE) - url_tags = url_tags + re.findall("(\[url[\=\]]https?:\/\/hdbits\.o[^\]]+)([^\[]+)(\[\/url\])?", desc, flags=re.IGNORECASE) - if url_tags != []: + url_tags = re.findall( + r"(?:\[url(?:=|\])[^\]]*https?:\/\/passthepopcorn\.m[^\]]*\]|\bhttps?:\/\/passthepopcorn\.m[^\s]+)", + desc, + flags=re.IGNORECASE, + ) + url_tags += re.findall(r"(\[url[\=\]]https?:\/\/hdbits\.o[^\]]+)([^\[]+)(\[\/url\])?", desc, flags=re.IGNORECASE) + if url_tags: for url_tag in url_tags: url_tag = ''.join(url_tag) - url_tag_removed = re.sub("(\[url[\=\]]https?:\/\/passthepopcorn\.m[^\]]+])", "", url_tag, flags=re.IGNORECASE) - url_tag_removed = re.sub("(\[url[\=\]]https?:\/\/hdbits\.o[^\]]+])", "", url_tag_removed, flags=re.IGNORECASE) + url_tag_removed = re.sub(r"(\[url[\=\]]https?:\/\/passthepopcorn\.m[^\]]+])", "", url_tag, flags=re.IGNORECASE) + url_tag_removed = re.sub(r"(\[url[\=\]]https?:\/\/hdbits\.o[^\]]+])", "", url_tag_removed, flags=re.IGNORECASE) url_tag_removed = url_tag_removed.replace("[/url]", "") desc = desc.replace(url_tag, url_tag_removed) - # Remove links to PTP + # Remove links to PTP/HDB desc = desc.replace('http://passthepopcorn.me', 'PTP').replace('https://passthepopcorn.me', 'PTP') desc = desc.replace('http://hdbits.org', 'HDB').replace('https://hdbits.org', 'HDB') - # Remove Mediainfo Tags / Attempt to regex out mediainfo - mediainfo_tags = re.findall("\[mediainfo\][\s\S]*?\[\/mediainfo\]", desc) - if len(mediainfo_tags) >= 1: - desc = re.sub("\[mediainfo\][\s\S]*?\[\/mediainfo\]", "", desc) - elif is_disc != "BDMV": - desc = re.sub("(^general\nunique)(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) - desc = re.sub("(^general\ncomplete)(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) - desc = re.sub("(^(Format[\s]{2,}:))(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) - desc = re.sub("(^(video|audio|text)( #\d+)?\nid)(.*?)^$", "", desc, flags=re.MULTILINE | 
re.IGNORECASE | re.DOTALL) - desc = re.sub("(^(menu)( #\d+)?\n)(.*?)^$", "", f"{desc}\n\n", flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) - elif any(x in is_disc for x in ["BDMV", "DVD"]): - return "" - + if is_disc == "DVD": + desc = re.sub(r"\[mediainfo\][\s\S]*?\[\/mediainfo\]", "", desc) + + elif is_disc == "BDMV": + desc = re.sub(r"\[mediainfo\][\s\S]*?\[\/mediainfo\]", "", desc) + desc = re.sub(r"DISC INFO:[\s\S]*?(\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"Disc Title:[\s\S]*?(\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"Disc Size:[\s\S]*?(\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"Protection:[\s\S]*?(\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"BD-Java:[\s\S]*?(\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"BDInfo:[\s\S]*?(\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"PLAYLIST REPORT:[\s\S]*?(?=\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"Name:[\s\S]*?(\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"Length:[\s\S]*?(\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"Size:[\s\S]*?(\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"Total Bitrate:[\s\S]*?(\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"VIDEO:[\s\S]*?(?=\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"AUDIO:[\s\S]*?(?=\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"SUBTITLES:[\s\S]*?(?=\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"Codec\s+Bitrate\s+Description[\s\S]*?(?=\n\n|$)", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"Codec\s+Language\s+Bitrate\s+Description[\s\S]*?(?=\n\n|$)", "", desc, flags=re.IGNORECASE) + + else: + desc = re.sub(r"\[mediainfo\][\s\S]*?\[\/mediainfo\]", "", desc) + desc = re.sub(r"(^general\nunique)(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) + desc = re.sub(r"(^general\ncomplete)(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) + desc = re.sub(r"(^(Format[\s]{2,}:))(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) + desc = re.sub(r"(^(video|audio|text)( #\d+)?\nid)(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) + desc = re.sub(r"(^(menu)( #\d+)?\n)(.*?)^$", "", f"{desc}\n\n", flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) + + desc = re.sub( + r"\[b\](.*?)(Matroska|DTS|AVC|x264|Progressive|23\.976 fps|16:9|[0-9]+x[0-9]+|[0-9]+ MiB|[0-9]+ Kbps|[0-9]+ bits|cabac=.*?/ aq=.*?|\d+\.\d+ Mbps)\[/b\]", + "", + desc, + flags=re.IGNORECASE | re.DOTALL, + ) + desc = re.sub( + r"(Matroska|DTS|AVC|x264|Progressive|23\.976 fps|16:9|[0-9]+x[0-9]+|[0-9]+ MiB|[0-9]+ Kbps|[0-9]+ bits|cabac=.*?/ aq=.*?|\d+\.\d+ Mbps|[0-9]+\s+channels|[0-9]+\.[0-9]+\s+KHz|[0-9]+ KHz|[0-9]+\s+bits)", + "", + desc, + flags=re.IGNORECASE | re.DOTALL, + ) + desc = re.sub( + r"\[u\](Format|Bitrate|Channels|Sampling Rate|Resolution):\[/u\]\s*\d*.*?", + "", + desc, + flags=re.IGNORECASE, + ) + desc = re.sub( + r"^\s*\d+\s*(channels|KHz|bits)\s*$", + "", + desc, + flags=re.MULTILINE | re.IGNORECASE, + ) + + desc = re.sub(r"^\s+$", "", desc, flags=re.MULTILINE) + desc = re.sub(r"\n{2,}", "\n", desc) # Convert Quote tags: - desc = re.sub("\[quote.*?\]", "[code]", desc) + desc = re.sub(r"\[quote.*?\]", "[code]", desc) desc = desc.replace("[/quote]", "[/code]") - + # Remove Alignments: - desc = re.sub("\[align=.*?\]", "", desc) + desc = re.sub(r"\[align=.*?\]", "", desc) desc = desc.replace("[/align]", "") # Remove size tags - desc = re.sub("\[size=.*?\]", "", desc) + desc = 
re.sub(r"\[size=.*?\]", "", desc) desc = desc.replace("[/size]", "") # Remove Videos - desc = re.sub("\[video\][\s\S]*?\[\/video\]", "", desc) + desc = re.sub(r"\[video\][\s\S]*?\[\/video\]", "", desc) # Remove Staff tags - desc = re.sub("\[staff[\s\S]*?\[\/staff\]", "", desc) - + desc = re.sub(r"\[staff[\s\S]*?\[\/staff\]", "", desc) - #Remove Movie/Person/User/hr/Indent + # Remove Movie/Person/User/hr/Indent remove_list = [ '[movie]', '[/movie]', '[artist]', '[/artist]', @@ -103,34 +152,59 @@ def clean_ptp_description(self, desc, is_disc): ] for each in remove_list: desc = desc.replace(each, '') - - #Catch Stray Images - comps = re.findall("\[comparison=[\s\S]*?\[\/comparison\]", desc) - hides = re.findall("\[hide[\s\S]*?\[\/hide\]", desc) + + # Catch Stray Images and Prepare Image List + imagelist = [] + excluded_urls = set() + + source_encode_comps = re.findall(r"\[comparison=Source, Encode\][\s\S]*", desc, flags=re.IGNORECASE) + source_vs_encode_sections = re.findall(r"Source Vs Encode:[\s\S]*", desc, flags=re.IGNORECASE) + specific_cases = source_encode_comps + source_vs_encode_sections + + # Extract URLs and update excluded_urls + for block in specific_cases: + urls = re.findall(r"(https?:\/\/[^\s\[\]]+\.(?:png|jpg))", block, flags=re.IGNORECASE) + excluded_urls.update(urls) + desc = desc.replace(block, '') + + # General [comparison=...] handling + comps = re.findall(r"\[comparison=[\s\S]*?\[\/comparison\]", desc, flags=re.IGNORECASE) + hides = re.findall(r"\[hide[\s\S]*?\[\/hide\]", desc, flags=re.IGNORECASE) comps.extend(hides) nocomp = desc + + # Exclude URLs from exculed array fom `nocomp` + for url in excluded_urls: + nocomp = nocomp.replace(url, '') + comp_placeholders = [] # Replace comparison/hide tags with placeholder because sometimes uploaders use comp images as loose images - for i in range(len(comps)): - nocomp = nocomp.replace(comps[i], '') - desc = desc.replace(comps[i], f"COMPARISON_PLACEHOLDER-{i} ") - comp_placeholders.append(comps[i]) - + for i, comp in enumerate(comps): + nocomp = nocomp.replace(comp, '') + desc = desc.replace(comp, f"COMPARISON_PLACEHOLDER-{i} ") + comp_placeholders.append(comp) # Remove Images in IMG tags: - desc = re.sub("\[img\][\s\S]*?\[\/img\]", "", desc, flags=re.IGNORECASE) - desc = re.sub("\[img=[\s\S]*?\]", "", desc, flags=re.IGNORECASE) - # Replace Images - loose_images = re.findall("(https?:\/\/.*\.(?:png|jpg))", nocomp, flags=re.IGNORECASE) - if len(loose_images) >= 1: - for image in loose_images: - desc = desc.replace(image, '') + desc = re.sub(r"\[img\][\s\S]*?\[\/img\]", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"\[img=[\s\S]*?\]", "", desc, flags=re.IGNORECASE) + + # Extract loose images and add to imagelist as dictionaries + loose_images = re.findall(r"(https?:\/\/[^\s\[\]]+\.(?:png|jpg))", nocomp, flags=re.IGNORECASE) + for img_url in loose_images: + if img_url not in excluded_urls: # Only include URLs not part of excluded sections + image_dict = { + 'img_url': img_url, + 'raw_url': img_url, + 'web_url': img_url + } + imagelist.append(image_dict) + desc = desc.replace(img_url, '') + # Re-place comparisons - if comp_placeholders != []: - for i, comp in enumerate(comp_placeholders): - comp = re.sub("\[\/?img[\s\S]*?\]", "",comp, flags=re.IGNORECASE) - desc = desc.replace(f"COMPARISON_PLACEHOLDER-{i} ", comp) + for i, comp in enumerate(comp_placeholders): + comp = re.sub(r"\[\/?img[\s\S]*?\]", "", comp, flags=re.IGNORECASE) + desc = desc.replace(f"COMPARISON_PLACEHOLDER-{i} ", comp) # Convert hides with multiple images to 
comparison desc = self.convert_collapse_to_comparison(desc, "hide", hides) @@ -142,25 +216,26 @@ def clean_ptp_description(self, desc, is_disc): desc = desc.replace('\n', '', 1) desc = desc.strip('\n') - if desc.replace('\n', '') == '': - return "" - return desc + if desc.replace('\n', '').strip() == '': + console.print("[yellow]Description is empty after cleaning.") + return "", imagelist + + return desc, imagelist - def clean_unit3d_description(self, desc, site): - # Unescape html + # Unescape HTML desc = html.unescape(desc) - # End my suffering + # Replace carriage returns with newlines desc = desc.replace('\r\n', '\n') # Remove links to site site_netloc = urllib.parse.urlparse(site).netloc - site_regex = f"(\[url[\=\]]https?:\/\/{site_netloc}/[^\]]+])([^\[]+)(\[\/url\])?" + site_regex = rf"(\[url[\=\]]https?:\/\/{site_netloc}/[^\]]+])([^\[]+)(\[\/url\])?" site_url_tags = re.findall(site_regex, desc) - if site_url_tags != []: + if site_url_tags: for site_url_tag in site_url_tags: site_url_tag = ''.join(site_url_tag) - url_tag_regex = f"(\[url[\=\]]https?:\/\/{site_netloc}[^\]]+])" + url_tag_regex = rf"(\[url[\=\]]https?:\/\/{site_netloc}[^\]]+])" url_tag_removed = re.sub(url_tag_regex, "", site_url_tag) url_tag_removed = url_tag_removed.replace("[/url]", "") desc = desc.replace(site_url_tag, url_tag_removed) @@ -168,98 +243,108 @@ def clean_unit3d_description(self, desc, site): desc = desc.replace(site_netloc, site_netloc.split('.')[0]) # Temporarily hide spoiler tags - spoilers = re.findall("\[spoiler[\s\S]*?\[\/spoiler\]", desc) + spoilers = re.findall(r"\[spoiler[\s\S]*?\[\/spoiler\]", desc) nospoil = desc spoiler_placeholders = [] for i in range(len(spoilers)): nospoil = nospoil.replace(spoilers[i], '') desc = desc.replace(spoilers[i], f"SPOILER_PLACEHOLDER-{i} ") spoiler_placeholders.append(spoilers[i]) - - # Get Images from outside spoilers - imagelist = [] - url_tags = re.findall("\[url=[\s\S]*?\[\/url\]", desc) - if url_tags != []: - for tag in url_tags: - image = re.findall("\[img[\s\S]*?\[\/img\]", tag) - if len(image) == 1: - image_dict = {} - img_url = image[0].lower().replace('[img]', '').replace('[/img]', '') - image_dict['img_url'] = image_dict['raw_url'] = re.sub("\[img[\s\S]*\]", "", img_url) - url_tag = tag.replace(image[0], '') - image_dict['web_url'] = re.match("\[url=[\s\S]*?\]", url_tag, flags=re.IGNORECASE)[0].lower().replace('[url=', '')[:-1] - imagelist.append(image_dict) - desc = desc.replace(tag, '') - # Remove bot signatures - desc = desc.replace("[img=35]https://blutopia/favicon.ico[/img] [b]Uploaded Using [url=https://github.com/HDInnovations/UNIT3D]UNIT3D[/url] Auto Uploader[/b] [img=35]https://blutopia/favicon.ico[/img]", '') - desc = re.sub("\[center\].*Created by L4G's Upload Assistant.*\[\/center\]", "", desc, flags=re.IGNORECASE) + # Get Images from [img] tags and remove them from the description + imagelist = [] + img_tags = re.findall(r"\[img[^\]]*\](.*?)\[/img\]", desc, re.IGNORECASE) + if img_tags: + for img_url in img_tags: + image_dict = { + 'img_url': img_url.strip(), + 'raw_url': img_url.strip(), + 'web_url': img_url.strip(), + } + imagelist.append(image_dict) + # Remove the [img] tag and its contents from the description + desc = re.sub(rf"\[img[^\]]*\]{re.escape(img_url)}\[/img\]", '', desc, flags=re.IGNORECASE) + + # Now, remove matching URLs from [URL] tags + for img in imagelist: + img_url = re.escape(img['img_url']) + desc = re.sub(rf"\[URL={img_url}\]\[/URL\]", '', desc, flags=re.IGNORECASE) + desc = 
re.sub(rf"\[URL={img_url}\]\[img[^\]]*\]{img_url}\[/img\]\[/URL\]", '', desc, flags=re.IGNORECASE) + + # Filter out bot images from imagelist + bot_image_urls = [ + "https://blutopia.xyz/favicon.ico", # Example bot image URL + "https://i.ibb.co/2NVWb0c/uploadrr.webp", + "https://blutopia/favicon.ico", + "https://ptpimg.me/606tk4.png", + # Add any other known bot image URLs here + ] + imagelist = [ + img for img in imagelist + if img['img_url'] not in bot_image_urls and not re.search(r'thumbs', img['img_url'], re.IGNORECASE) + ] - # Replace spoiler tags - if spoiler_placeholders != []: + # Restore spoiler tags + if spoiler_placeholders: for i, spoiler in enumerate(spoiler_placeholders): desc = desc.replace(f"SPOILER_PLACEHOLDER-{i} ", spoiler) - # Check for empty [center] tags - centers = re.findall("\[center[\s\S]*?\[\/center\]", desc) - if centers != []: + # Check for and clean up empty [center] tags + centers = re.findall(r"\[center[\s\S]*?\[\/center\]", desc) + if centers: for center in centers: - full_center = center - replace = ['[center]', ' ', '\n', '[/center]'] - for each in replace: - center = center.replace(each, '') - if center == "": - desc = desc.replace(full_center, '') + # If [center] contains only whitespace or empty tags, remove the entire tag + cleaned_center = re.sub(r'\[center\]\s*\[\/center\]', '', center) + cleaned_center = re.sub(r'\[center\]\s+', '[center]', cleaned_center) + cleaned_center = re.sub(r'\s*\[\/center\]', '[/center]', cleaned_center) + if cleaned_center == '[center][/center]': + desc = desc.replace(center, '') + else: + desc = desc.replace(center, cleaned_center.strip()) - # Convert Comparison spoilers to [comparison=] - desc = self.convert_collapse_to_comparison(desc, "spoiler", spoilers) - - # Strip blank lines: - desc = desc.strip('\n') - desc = re.sub("\n\n+", "\n\n", desc) - while desc.startswith('\n'): - desc = desc.replace('\n', '', 1) - desc = desc.strip('\n') + # Remove bot signatures + bot_signature_regex = r""" + \[center\]\s*\[img=\d+\]https:\/\/blutopia\.xyz\/favicon\.ico\[\/img\]\s*\[b\] + Uploaded\sUsing\s\[url=https:\/\/github\.com\/HDInnovations\/UNIT3D\]UNIT3D\[\/url\]\s + Auto\sUploader\[\/b\]\s*\[img=\d+\]https:\/\/blutopia\.xyz\/favicon\.ico\[\/img\]\s*\[\/center\]| + \[center\]\s*\[b\]Uploaded\sUsing\s\[url=https:\/\/github\.com\/HDInnovations\/UNIT3D\]UNIT3D\[\/url\] + \sAuto\sUploader\[\/b\]\s*\[\/center\]| + \[center\]\[url=https:\/\/github\.com\/z-ink\/uploadrr\]\[img=\d+\]https:\/\/i\.ibb\.co\/2NVWb0c\/uploadrr\.webp\[\/img\]\[\/url\]\[\/center\] + """ + desc = re.sub(bot_signature_regex, "", desc, flags=re.IGNORECASE | re.VERBOSE) + desc = re.sub(r"\[center\].*Created by L4G's Upload Assistant.*\[\/center\]", "", desc, flags=re.IGNORECASE) + + # Remove leftover [img] or [URL] tags in the description + desc = re.sub(r"\[img\][\s\S]*?\[\/img\]", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"\[img=[\s\S]*?\]", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"\[URL=[\s\S]*?\]\[\/URL\]", "", desc, flags=re.IGNORECASE) + + # Strip trailing whitespace and newlines: + desc = desc.rstrip() if desc.replace('\n', '') == '': return "", imagelist return desc, imagelist - - - - - - - - - - - - - - - def convert_pre_to_code(self, desc): desc = desc.replace('[pre]', '[code]') desc = desc.replace('[/pre]', '[/code]') return desc - def convert_hide_to_spoiler(self, desc): desc = desc.replace('[hide', '[spoiler') desc = desc.replace('[/hide]', '[/spoiler]') return desc - + def convert_spoiler_to_hide(self, desc): desc = 
desc.replace('[spoiler', '[hide') desc = desc.replace('[/spoiler]', '[/hide]') return desc def remove_spoiler(self, desc): - desc = re.sub("\[\/?spoiler[\s\S]*?\]", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"\[\/?spoiler[\s\S]*?\]", "", desc, flags=re.IGNORECASE) return desc - + def convert_spoiler_to_code(self, desc): desc = desc.replace('[spoiler', '[code') desc = desc.replace('[/spoiler]', '[/code]') @@ -269,15 +354,15 @@ def convert_code_to_quote(self, desc): desc = desc.replace('[code', '[quote') desc = desc.replace('[/code]', '[/quote]') return desc - + def convert_comparison_to_collapse(self, desc, max_width): - comparisons = re.findall("\[comparison=[\s\S]*?\[\/comparison\]", desc) + comparisons = re.findall(r"\[comparison=[\s\S]*?\[\/comparison\]", desc) for comp in comparisons: line = [] output = [] comp_sources = comp.split(']', 1)[0].replace('[comparison=', '').replace(' ', '').split(',') comp_images = comp.split(']', 1)[1].replace('[/comparison]', '').replace(',', '\n').replace(' ', '\n') - comp_images = re.findall("(https?:\/\/.*\.(?:png|jpg))", comp_images, flags=re.IGNORECASE) + comp_images = re.findall(r"(https?:\/\/.*\.(?:png|jpg))", comp_images, flags=re.IGNORECASE) screens_per_line = len(comp_sources) img_size = int(max_width / screens_per_line) if img_size > 350: @@ -295,15 +380,14 @@ def convert_comparison_to_collapse(self, desc, max_width): desc = desc.replace(comp, new_bbcode) return desc - def convert_comparison_to_centered(self, desc, max_width): - comparisons = re.findall("\[comparison=[\s\S]*?\[\/comparison\]", desc) + comparisons = re.findall(r"\[comparison=[\s\S]*?\[\/comparison\]", desc) for comp in comparisons: line = [] output = [] comp_sources = comp.split(']', 1)[0].replace('[comparison=', '').replace(' ', '').split(',') comp_images = comp.split(']', 1)[1].replace('[/comparison]', '').replace(',', '\n').replace(' ', '\n') - comp_images = re.findall("(https?:\/\/.*\.(?:png|jpg))", comp_images, flags=re.IGNORECASE) + comp_images = re.findall(r"(https?:\/\/.*\.(?:png|jpg))", comp_images, flags=re.IGNORECASE) screens_per_line = len(comp_sources) img_size = int(max_width / screens_per_line) if img_size > 350: @@ -326,17 +410,17 @@ def convert_collapse_to_comparison(self, desc, spoiler_hide, collapses): if collapses != []: for i in range(len(collapses)): tag = collapses[i] - images = re.findall("\[img[\s\S]*?\[\/img\]", tag, flags=re.IGNORECASE) + images = re.findall(r"\[img[\s\S]*?\[\/img\]", tag, flags=re.IGNORECASE) if len(images) >= 6: comp_images = [] final_sources = [] for image in images: - image_url = re.sub("\[img[\s\S]*\]", "", image.replace('[/img]', ''), flags=re.IGNORECASE) + image_url = re.sub(r"\[img[\s\S]*\]", "", image.replace('[/img]', ''), flags=re.IGNORECASE) comp_images.append(image_url) if spoiler_hide == "spoiler": - sources = re.match("\[spoiler[\s\S]*?\]", tag)[0].replace('[spoiler=', '')[:-1] + sources = re.match(r"\[spoiler[\s\S]*?\]", tag)[0].replace('[spoiler=', '')[:-1] elif spoiler_hide == "hide": - sources = re.match("\[hide[\s\S]*?\]", tag)[0].replace('[hide=', '')[:-1] + sources = re.match(r"\[hide[\s\S]*?\]", tag)[0].replace('[hide=', '')[:-1] sources = re.sub("comparison", "", sources, flags=re.IGNORECASE) for each in ['vs', ',', '|']: sources = sources.split(each) @@ -348,4 +432,4 @@ def convert_collapse_to_comparison(self, desc, spoiler_hide, collapses): final_sources = ', '.join(final_sources) spoil2comp = f"[comparison={final_sources}]{comp_images}[/comparison]" desc = desc.replace(tag, spoil2comp) - return desc \ 
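
Note the changed contract in this file: clean_ptp_description now returns a (description, imagelist) pair instead of a bare string, so every call site must unpack two values. A toy, self-contained stand-in showing the same return shape (not the project's implementation):

```python
import re

def clean_description(desc):
    # Toy stand-in: pull loose .png/.jpg URLs out of the text and return
    # them in the same dict shape the real cleaner builds.
    imagelist = [
        {'img_url': url, 'raw_url': url, 'web_url': url}
        for url in re.findall(r"https?://[^\s\[\]]+\.(?:png|jpg)", desc, flags=re.IGNORECASE)
    ]
    for img in imagelist:
        desc = desc.replace(img['img_url'], '')
    return desc.strip(), imagelist

text, images = clean_description("Plot summary.\nhttps://ptpimg.me/abc123.png")
assert images[0]['raw_url'].endswith('.png')
```
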
No newline at end of file + return desc diff --git a/src/clients.py b/src/clients.py index c8d5fcba1..cbd9e3b06 100644 --- a/src/clients.py +++ b/src/clients.py @@ -4,7 +4,7 @@ import bencode import os import qbittorrentapi -from deluge_client import DelugeRPCClient, LocalDelugeRPCClient +from deluge_client import DelugeRPCClient import base64 from pyrobase.parts import Bunch import errno @@ -12,10 +12,8 @@ import ssl import shutil import time - - -from src.console import console - +from src.console import console +import re class Clients(): @@ -25,31 +23,30 @@ class Clients(): def __init__(self, config): self.config = config pass - async def add_to_client(self, meta, tracker): torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]{meta['clean_name']}.torrent" - if meta.get('no_seed', False) == True: - console.print(f"[bold red]--no-seed was passed, so the torrent will not be added to the client") - console.print(f"[bold yellow]Add torrent manually to the client") + if meta.get('no_seed', False) is True: + console.print("[bold red]--no-seed was passed, so the torrent will not be added to the client") + console.print("[bold yellow]Add torrent manually to the client") return if os.path.exists(torrent_path): torrent = Torrent.read(torrent_path) else: return - if meta.get('client', None) == None: + if meta.get('client', None) is None: default_torrent_client = self.config['DEFAULT']['default_torrent_client'] else: default_torrent_client = meta['client'] if meta.get('client', None) == 'none': return if default_torrent_client == "none": - return + return client = self.config['TORRENT_CLIENTS'][default_torrent_client] torrent_client = client['torrent_client'] - + local_path, remote_path = await self.remote_path_map(meta) - + console.print(f"[bold green]Adding to {torrent_client}") if torrent_client.lower() == "rtorrent": self.rtorrent(meta['path'], torrent_path, torrent, meta, local_path, remote_path, client) @@ -57,125 +54,223 @@ async def add_to_client(self, meta, tracker): await self.qbittorrent(meta['path'], torrent, local_path, remote_path, client, meta['is_disc'], meta['filelist'], meta) elif torrent_client.lower() == "deluge": if meta['type'] == "DISC": - path = os.path.dirname(meta['path']) + path = os.path.dirname(meta['path']) # noqa F841 self.deluge(meta['path'], torrent_path, torrent, local_path, remote_path, client, meta) elif torrent_client.lower() == "watch": shutil.copy(torrent_path, client['watch_folder']) return - - async def find_existing_torrent(self, meta): - if meta.get('client', None) == None: + if meta.get('client', None) is None: default_torrent_client = self.config['DEFAULT']['default_torrent_client'] else: default_torrent_client = meta['client'] if meta.get('client', None) == 'none' or default_torrent_client == 'none': return None + client = self.config['TORRENT_CLIENTS'][default_torrent_client] - torrent_storage_dir = client.get('torrent_storage_dir', None) - torrent_client = client.get('torrent_client', None).lower() - if torrent_storage_dir == None and torrent_client != "watch": + torrent_storage_dir = client.get('torrent_storage_dir') + torrent_client = client.get('torrent_client', '').lower() + + if torrent_storage_dir is None and torrent_client != "watch": console.print(f'[bold red]Missing torrent_storage_dir for {default_torrent_client}') return None - elif not os.path.exists(str(torrent_storage_dir)) and torrent_client != "watch": + if not os.path.exists(str(torrent_storage_dir)) and torrent_client != "watch": console.print(f"[bold red]Invalid 
torrent_storage_dir path: [bold yellow]{torrent_storage_dir}") - torrenthash = None - if torrent_storage_dir != None and os.path.exists(torrent_storage_dir): - if meta.get('torrenthash', None) != None: - valid, torrent_path = await self.is_valid_torrent(meta, f"{torrent_storage_dir}/{meta['torrenthash']}.torrent", meta['torrenthash'], torrent_client, print_err=True) + + prefer_small_pieces = meta.get('prefer_small_pieces', False) + best_match = None # Track the best match for fallback if prefer_small_pieces is enabled + + # Iterate through pre-specified hashes + for hash_key in ['torrenthash', 'ext_torrenthash']: + hash_value = meta.get(hash_key) + if hash_value: + valid, torrent_path = await self.is_valid_torrent( + meta, f"{torrent_storage_dir}/{hash_value}.torrent", + hash_value, torrent_client, client, print_err=True + ) if valid: - torrenthash = meta['torrenthash'] - elif meta.get('ext_torrenthash', None) != None: - valid, torrent_path = await self.is_valid_torrent(meta, f"{torrent_storage_dir}/{meta['ext_torrenthash']}.torrent", meta['ext_torrenthash'], torrent_client, print_err=True) + if not prefer_small_pieces: + console.print(f"[green]Found a valid torrent: [bold yellow]{hash_value}") + return torrent_path + + # Get piece size and update the best match + torrent = Torrent.read(torrent_path) + piece_size = torrent.piece_size + if piece_size <= 8388608: + console.print(f"[green]Found a valid torrent with preferred piece size: [bold yellow]{hash_value}") + return torrent_path + + if best_match is None or piece_size < best_match['piece_size']: + best_match = {'torrenthash': hash_value, 'torrent_path': torrent_path, 'piece_size': piece_size} + console.print(f"[yellow]Storing valid torrent as best match: [bold yellow]{hash_value}") + + # Search the client if no pre-specified hash matches + if torrent_client == 'qbit' and client.get('enable_search'): + found_hash = await self.search_qbit_for_torrent(meta, client) + if found_hash: + valid, torrent_path = await self.is_valid_torrent( + meta, f"{torrent_storage_dir}/{found_hash}.torrent", found_hash, torrent_client, client, print_err=False + ) if valid: - torrenthash = meta['ext_torrenthash'] - if torrent_client == 'qbit' and torrenthash == None and client.get('enable_search') == True: - torrenthash = await self.search_qbit_for_torrent(meta, client) - if not torrenthash: - console.print("[bold yellow]No Valid .torrent found") - if not torrenthash: - return None - torrent_path = f"{torrent_storage_dir}/{torrenthash}.torrent" - valid2, torrent_path = await self.is_valid_torrent(meta, torrent_path, torrenthash, torrent_client, print_err=False) - if valid2: - return torrent_path - + # Continue checking other torrents if `prefer_small_pieces` is enabled + if not prefer_small_pieces: + console.print(f"[green]Found a valid torrent from client search: [bold yellow]{found_hash}") + return torrent_path + + # Get piece size and update the best match + torrent = Torrent.read(torrent_path) + piece_size = torrent.piece_size + if piece_size <= 8388608: + console.print(f"[green]Found a valid torrent with preferred piece size from client search: [bold yellow]{found_hash}") + return torrent_path + + if best_match is None or piece_size < best_match['piece_size']: + best_match = {'torrenthash': found_hash, 'torrent_path': torrent_path, 'piece_size': piece_size} + console.print(f"[yellow]Storing valid torrent from client search as best match: [bold yellow]{found_hash}") + + # Use best match if no preferred torrent found + if prefer_small_pieces and best_match: 
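
The reuse logic above treats piece sizes at or below 8 MiB (8388608 bytes) as immediately acceptable and otherwise keeps the smallest-piece candidate as a fallback. A minimal sketch of that selection rule, assuming torf (already a project dependency) and hypothetical candidate paths:

```python
from torf import Torrent

PREFERRED_MAX_PIECE_SIZE = 8388608  # 8 MiB, matching the checks above

def pick_reusable_torrent(candidate_paths):
    # First candidate at/below the preferred piece size wins outright;
    # otherwise fall back to the smallest-piece candidate seen.
    best = None
    for path in candidate_paths:
        piece_size = Torrent.read(path).piece_size
        if piece_size <= PREFERRED_MAX_PIECE_SIZE:
            return path
        if best is None or piece_size < best[1]:
            best = (path, piece_size)
    return best[0] if best else None
```
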
+ console.print(f"[yellow]Using best match torrent with hash: [bold yellow]{best_match['torrenthash']}") + return best_match['torrent_path'] + + console.print("[bold yellow]No Valid .torrent found") return None - - async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client, print_err=False): + async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client, client, print_err=False): valid = False wrong_file = False - err_print = "" + + # Normalize the torrent hash based on the client if torrent_client in ('qbit', 'deluge'): torrenthash = torrenthash.lower().strip() torrent_path = torrent_path.replace(torrenthash.upper(), torrenthash) elif torrent_client == 'rtorrent': torrenthash = torrenthash.upper().strip() torrent_path = torrent_path.replace(torrenthash.upper(), torrenthash) + if meta['debug']: - console.log(torrent_path) + console.log(f"Torrent path after normalization: {torrent_path}") + + # Check if torrent file exists if os.path.exists(torrent_path): - torrent = Torrent.read(torrent_path) - # Reuse if disc and basename matches - if meta.get('is_disc', None) != None: + try: + torrent = Torrent.read(torrent_path) + except Exception as e: + console.print(f'[bold red]Error reading torrent file: {e}') + return valid, torrent_path + + # Reuse if disc and basename matches or --keep-folder was specified + if meta.get('is_disc', None) is not None or (meta['keep_folder'] and meta['isdir']): + torrent_name = torrent.metainfo['info']['name'] + if meta['uuid'] != torrent_name: + console.print("Modified file structure, skipping hash") + valid = False torrent_filepath = os.path.commonpath(torrent.files) if os.path.basename(meta['path']) in torrent_filepath: valid = True + if meta['debug']: + console.log(f"Torrent is valid based on disc/basename or keep-folder: {valid}") + # If one file, check for folder - if len(torrent.files) == len(meta['filelist']) == 1: + elif len(torrent.files) == len(meta['filelist']) == 1: if os.path.basename(torrent.files[0]) == os.path.basename(meta['filelist'][0]): if str(torrent.files[0]) == os.path.basename(torrent.files[0]): valid = True - else: - wrong_file = True + else: + wrong_file = True + if meta['debug']: + console.log(f"Single file match status: valid={valid}, wrong_file={wrong_file}") + # Check if number of files matches number of videos elif len(torrent.files) == len(meta['filelist']): torrent_filepath = os.path.commonpath(torrent.files) actual_filepath = os.path.commonpath(meta['filelist']) local_path, remote_path = await self.remote_path_map(meta) + if local_path.lower() in meta['path'].lower() and local_path.lower() != remote_path.lower(): - actual_filepath = torrent_path.replace(local_path, remote_path) - actual_filepath = torrent_path.replace(os.sep, '/') + actual_filepath = actual_filepath.replace(local_path, remote_path).replace(os.sep, '/') + if meta['debug']: - console.log(f"torrent_filepath: {torrent_filepath}") - console.log(f"actual_filepath: {actual_filepath}") + console.log(f"Torrent_filepath: {torrent_filepath}") + console.log(f"Actual_filepath: {actual_filepath}") + if torrent_filepath in actual_filepath: valid = True + if meta['debug']: + console.log(f"Multiple file match status: valid={valid}") + else: console.print(f'[bold yellow]{torrent_path} was not found') + + # Additional checks if the torrent is valid so far if valid: if os.path.exists(torrent_path): - reuse_torrent = Torrent.read(torrent_path) - if (reuse_torrent.pieces >= 7000 and reuse_torrent.piece_size < 8388608) or (reuse_torrent.pieces >= 
4000 and reuse_torrent.piece_size < 4194304): # Allow up to 7k pieces at 8MiB or 4k pieces at 4MiB or less - err_print = "[bold yellow]Too many pieces exist in current hash. REHASHING" - valid = False - elif reuse_torrent.piece_size < 32768: - err_print = "[bold yellow]Piece size too small to reuse" - valid = False - elif wrong_file == True: - err_print = "[bold red] Provided .torrent has files that were not expected" + try: + reuse_torrent = Torrent.read(torrent_path) + torrent_storage_dir_valid = torrent_path + torrent_file_size_kib = os.path.getsize(torrent_storage_dir_valid) / 1024 + if meta['debug']: + console.log(f"Checking piece size, count and size: pieces={reuse_torrent.pieces}, piece_size={reuse_torrent.piece_size}, size={torrent_file_size_kib}") + + # Piece size and count validations + if not meta.get('prefer_small_pieces', False): + if reuse_torrent.pieces >= 8000 and reuse_torrent.piece_size < 8388608: + console.print("[bold yellow]Too many pieces detected") + valid = False + elif reuse_torrent.pieces >= 5000 and reuse_torrent.piece_size < 4194304: + console.print("[bold yellow]Too many pieces detected") + valid = False + elif reuse_torrent.pieces >= 12000: + console.print("[bold yellow]Too many pieces detected") + valid = False + elif reuse_torrent.piece_size < 32768: + console.print("[bold yellow]Piece size too small to reuse") + valid = False + elif torrent_file_size_kib > 250: + console.print("[bold yellow]Torrent file size exceeds 250 KiB") + valid = False + elif wrong_file: + console.print("[bold red] Provided .torrent has files that were not expected") + valid = False + else: + console.print(f"[bold green]REUSING .torrent with infohash: [bold yellow]{torrenthash}") + except Exception as e: + console.print(f'[bold red]Error checking reuse torrent: {e}') valid = False - else: - err_print = f'[bold green]REUSING .torrent with infohash: [bold yellow]{torrenthash}' + + if meta['debug']: + console.log(f"Final validity after piece checks: valid={valid}") else: - err_print = '[bold yellow]Unwanted Files/Folders Identified' - if print_err: - console.print(err_print) - return valid, torrent_path + console.print("[bold yellow]Unwanted Files/Folders Identified") + return valid, torrent_path async def search_qbit_for_torrent(self, meta, client): console.print("[green]Searching qbittorrent for an existing .torrent") torrent_storage_dir = client.get('torrent_storage_dir', None) - if torrent_storage_dir == None and client.get("torrent_client", None) != "watch": + + if meta['debug']: + if torrent_storage_dir: + console.print(f"Torrent storage directory found: {torrent_storage_dir}") + else: + console.print("No torrent storage directory found.") + + if torrent_storage_dir is None and client.get("torrent_client", None) != "watch": console.print(f"[bold red]Missing torrent_storage_dir for {self.config['DEFAULT']['default_torrent_client']}") return None try: - qbt_client = qbittorrentapi.Client(host=client['qbit_url'], port=client['qbit_port'], username=client['qbit_user'], password=client['qbit_pass'], VERIFY_WEBUI_CERTIFICATE=client.get('VERIFY_WEBUI_CERTIFICATE', True)) + qbt_client = qbittorrentapi.Client( + host=client['qbit_url'], + port=client['qbit_port'], + username=client['qbit_user'], + password=client['qbit_pass'], + VERIFY_WEBUI_CERTIFICATE=client.get('VERIFY_WEBUI_CERTIFICATE', True) + ) qbt_client.auth_log_in() + if meta['debug']: + console.print("We logged into qbittorrent") except qbittorrentapi.LoginFailed: console.print("[bold red]INCORRECT QBIT LOGIN CREDENTIALS") return 
None @@ -183,12 +278,18 @@ async def search_qbit_for_torrent(self, meta, client): console.print("[bold red]APIConnectionError: INCORRECT HOST/PORT") return None - # Remote path map if needed + # Remote path mapping if needed remote_path_map = False local_path, remote_path = await self.remote_path_map(meta) if local_path.lower() in meta['path'].lower() and local_path.lower() != remote_path.lower(): remote_path_map = True + if meta['debug']: + console.print("Remote path mapping found!") + console.print(f"Local path: {local_path}") + console.print(f"Remote path: {remote_path}") + # Iterate through torrents and evaluate + best_match = None torrents = qbt_client.torrents.info() for torrent in torrents: try: @@ -198,33 +299,39 @@ async def search_qbit_for_torrent(self, meta, client): console.print(torrent) console.print_exception() continue + # Apply remote-to-local path mapping if remote_path_map: - torrent_path = torrent_path.replace(remote_path, local_path) + if not torrent_path.startswith(local_path): + torrent_path = torrent_path.replace(remote_path, local_path) + if torrent_path.startswith(f"{local_path}/{local_path.split('/')[-1]}"): + torrent_path = torrent_path.replace(f"{local_path}/{local_path.split('/')[-1]}", local_path) torrent_path = torrent_path.replace(os.sep, '/').replace('/', os.sep) if meta['is_disc'] in ("", None) and len(meta['filelist']) == 1: - if torrent_path == meta['filelist'][0] and len(torrent.files) == len(meta['filelist']): - valid, torrent_path = await self.is_valid_torrent(meta, f"{torrent_storage_dir}/{torrent.hash}.torrent", torrent.hash, 'qbit', print_err=False) - if valid: - console.print(f"[green]Found a matching .torrent with hash: [bold yellow]{torrent.hash}") - return torrent.hash - elif meta['path'] == torrent_path: - valid, torrent_path = await self.is_valid_torrent(meta, f"{torrent_storage_dir}/{torrent.hash}.torrent", torrent.hash, 'qbit', print_err=False) - if valid: - console.print(f"[green]Found a matching .torrent with hash: [bold yellow]{torrent.hash}") - return torrent.hash - return None - - - - - - - + if torrent_path.lower() != meta['filelist'][0].lower() or len(torrent.files) != len(meta['filelist']): + continue + elif os.path.normpath(meta['path']).lower() != os.path.normpath(torrent_path).lower(): + continue + # Check piece size if prefer_small_pieces is enabled + torrent_file_path = os.path.join(torrent_storage_dir, f"{torrent.hash}.torrent") + torrent_data = Torrent.read(torrent_file_path) + piece_size = torrent_data.piece_size + if meta.get('prefer_small_pieces', False): + if best_match is None or piece_size < best_match['piece_size']: + valid, torrent_path = await self.is_valid_torrent(meta, f"{torrent_storage_dir}/{torrent.hash}.torrent", torrent.hash, 'qbit', client, print_err=False) + if valid: + best_match = {'hash': torrent.hash, 'torrent_path': torrent_path, 'piece_size': piece_size} + else: + return torrent.hash + # Return the best match if prefer_small_pieces is enabled and no direct match was found + if best_match: + console.print(f"[green]Using best match torrent with hash: {best_match['hash']}") + return best_match['hash'] + return None def rtorrent(self, path, torrent_path, torrent, meta, local_path, remote_path, client): rtorrent = xmlrpc.client.Server(client['rtorrent_url'], context=ssl._create_stdlib_context()) @@ -234,19 +341,17 @@ def rtorrent(self, path, torrent_path, torrent, meta, local_path, remote_path, c except EnvironmentError as exc: console.print("[red]Error making fast-resume data (%s)" % (exc,)) raise - - + 
new_meta = bencode.bencode(fast_resume) if new_meta != metainfo: fr_file = torrent_path.replace('.torrent', '-resume.torrent') console.print("Creating fast resume") bencode.bwrite(fast_resume, fr_file) - isdir = os.path.isdir(path) # if meta['type'] == "DISC": # path = os.path.dirname(path) - #Remote path mount + # Remote path mount modified_fr = False if local_path.lower() in path.lower() and local_path.lower() != remote_path.lower(): path_dir = os.path.dirname(path) @@ -255,17 +360,16 @@ def rtorrent(self, path, torrent_path, torrent, meta, local_path, remote_path, c shutil.copy(fr_file, f"{path_dir}/fr.torrent") fr_file = f"{os.path.dirname(path)}/fr.torrent" modified_fr = True - if isdir == False: + if isdir is False: path = os.path.dirname(path) - - + console.print("[bold yellow]Adding and starting torrent") rtorrent.load.start_verbose('', fr_file, f"d.directory_base.set={path}") time.sleep(1) # Add labels - if client.get('rtorrent_label', None) != None: + if client.get('rtorrent_label', None) is not None: rtorrent.d.custom1.set(torrent.infohash, client['rtorrent_label']) - if meta.get('rtorrent_label') != None: + if meta.get('rtorrent_label') is not None: rtorrent.d.custom1.set(torrent.infohash, meta['rtorrent_label']) # Delete modified fr_file location @@ -275,79 +379,108 @@ def rtorrent(self, path, torrent_path, torrent, meta, local_path, remote_path, c console.print(f"[cyan]Path: {path}") return - async def qbittorrent(self, path, torrent, local_path, remote_path, client, is_disc, filelist, meta): - # infohash = torrent.infohash - #Remote path mount - isdir = os.path.isdir(path) - if not isdir and len(filelist) == 1: - path = os.path.dirname(path) - if len(filelist) != 1: + # Remote path mount + if meta.get('keep_folder'): + # Keep only the root folder (e.g., "D:\\Movies") path = os.path.dirname(path) + else: + # Adjust path based on filelist and directory status + isdir = os.path.isdir(path) + if len(filelist) != 1 or not isdir: + path = os.path.dirname(path) + + # Ensure remote path replacement and normalization if local_path.lower() in path.lower() and local_path.lower() != remote_path.lower(): path = path.replace(local_path, remote_path) path = path.replace(os.sep, '/') - if not path.endswith(os.sep): - path = f"{path}/" - qbt_client = qbittorrentapi.Client(host=client['qbit_url'], port=client['qbit_port'], username=client['qbit_user'], password=client['qbit_pass'], VERIFY_WEBUI_CERTIFICATE=client.get('VERIFY_WEBUI_CERTIFICATE', True)) + + # Ensure trailing slash for qBittorrent + if not path.endswith('/'): + path += '/' + + # Initialize qBittorrent client + qbt_client = qbittorrentapi.Client( + host=client['qbit_url'], + port=client['qbit_port'], + username=client['qbit_user'], + password=client['qbit_pass'], + VERIFY_WEBUI_CERTIFICATE=client.get('VERIFY_WEBUI_CERTIFICATE', True) + ) console.print("[bold yellow]Adding and rechecking torrent") + try: qbt_client.auth_log_in() except qbittorrentapi.LoginFailed: console.print("[bold red]INCORRECT QBIT LOGIN CREDENTIALS") return + + # Check for automatic management auto_management = False am_config = client.get('automatic_management_paths', '') if isinstance(am_config, list): for each in am_config: - if os.path.normpath(each).lower() in os.path.normpath(path).lower(): + if os.path.normpath(each).lower() in os.path.normpath(path).lower(): auto_management = True else: - if os.path.normpath(am_config).lower() in os.path.normpath(path).lower() and am_config.strip() != "": + if os.path.normpath(am_config).lower() in 
os.path.normpath(path).lower() and am_config.strip() != "": auto_management = True qbt_category = client.get("qbit_cat") if not meta.get("qbit_cat") else meta.get('qbit_cat') - content_layout = client.get('content_layout', 'Original') - - qbt_client.torrents_add(torrent_files=torrent.dump(), save_path=path, use_auto_torrent_management=auto_management, is_skip_checking=True, content_layout=content_layout, category=qbt_category) - # Wait for up to 30 seconds for qbit to actually return the download - # there's an async race conditiion within qbt that it will return ok before the torrent is actually added - for _ in range(0, 30): + + # Add the torrent + try: + qbt_client.torrents_add( + torrent_files=torrent.dump(), + save_path=path, + use_auto_torrent_management=auto_management, + is_skip_checking=True, + content_layout=content_layout, + category=qbt_category + ) + except qbittorrentapi.APIConnectionError as e: + console.print(f"[red]Failed to add torrent: {e}") + return + + # Wait for torrent to be added + timeout = 30 + for _ in range(timeout): if len(qbt_client.torrents_info(torrent_hashes=torrent.infohash)) > 0: break await asyncio.sleep(1) + else: + console.print("[red]Torrent addition timed out.") + return + + # Resume and tag torrent qbt_client.torrents_resume(torrent.infohash) - if client.get('qbit_tag', None) != None: - qbt_client.torrents_add_tags(tags=client.get('qbit_tag'), torrent_hashes=torrent.infohash) - if meta.get('qbit_tag') != None: - qbt_client.torrents_add_tags(tags=meta.get('qbit_tag'), torrent_hashes=torrent.infohash) - console.print(f"Added to: {path}") - + if client.get('qbit_tag'): + qbt_client.torrents_add_tags(tags=client['qbit_tag'], torrent_hashes=torrent.infohash) + if meta.get('qbit_tag'): + qbt_client.torrents_add_tags(tags=meta['qbit_tag'], torrent_hashes=torrent.infohash) + console.print(f"Added to: {path}") def deluge(self, path, torrent_path, torrent, local_path, remote_path, client, meta): client = DelugeRPCClient(client['deluge_url'], int(client['deluge_port']), client['deluge_user'], client['deluge_pass']) # client = LocalDelugeRPCClient() client.connect() - if client.connected == True: - console.print("Connected to Deluge") - isdir = os.path.isdir(path) - #Remote path mount + if client.connected is True: + console.print("Connected to Deluge") + isdir = os.path.isdir(path) # noqa F841 + # Remote path mount if local_path.lower() in path.lower() and local_path.lower() != remote_path.lower(): path = path.replace(local_path, remote_path) path = path.replace(os.sep, '/') - + path = os.path.dirname(path) - client.call('core.add_torrent_file', torrent_path, base64.b64encode(torrent.dump()), {'download_location' : path, 'seed_mode' : True}) + client.call('core.add_torrent_file', torrent_path, base64.b64encode(torrent.dump()), {'download_location': path, 'seed_mode': True}) if meta['debug']: console.print(f"[cyan]Path: {path}") else: console.print("[bold red]Unable to connect to deluge") - - - def add_fast_resume(self, metainfo, datapath, torrent): """ Add fast resume data to a metafile dict. 
""" @@ -385,30 +518,94 @@ def add_fast_resume(self, metainfo, datapath, torrent): resume["files"].append(dict( priority=1, mtime=int(os.path.getmtime(filepath)), - completed=(offset+fileinfo["length"]+piece_length-1) // piece_length - - offset // piece_length, + completed=( + (offset + fileinfo["length"] + piece_length - 1) // piece_length - + offset // piece_length + ), )) offset += fileinfo["length"] return metainfo - async def remote_path_map(self, meta): - if meta.get('client', None) == None: + if meta.get('client', None) is None: torrent_client = self.config['DEFAULT']['default_torrent_client'] else: torrent_client = meta['client'] - local_path = list_local_path = self.config['TORRENT_CLIENTS'][torrent_client].get('local_path','/LocalPath') + local_path = list_local_path = self.config['TORRENT_CLIENTS'][torrent_client].get('local_path', '/LocalPath') remote_path = list_remote_path = self.config['TORRENT_CLIENTS'][torrent_client].get('remote_path', '/RemotePath') if isinstance(local_path, list): for i in range(len(local_path)): if os.path.normpath(local_path[i]).lower() in meta['path'].lower(): list_local_path = local_path[i] list_remote_path = remote_path[i] - + local_path = os.path.normpath(list_local_path) remote_path = os.path.normpath(list_remote_path) if local_path.endswith(os.sep): remote_path = remote_path + os.sep - return local_path, remote_path \ No newline at end of file + return local_path, remote_path + + async def get_ptp_from_hash(self, meta): + default_torrent_client = self.config['DEFAULT']['default_torrent_client'] + client = self.config['TORRENT_CLIENTS'][default_torrent_client] + qbt_client = qbittorrentapi.Client( + host=client['qbit_url'], + port=client['qbit_port'], + username=client['qbit_user'], + password=client['qbit_pass'], + VERIFY_WEBUI_CERTIFICATE=client.get('VERIFY_WEBUI_CERTIFICATE', True) + ) + + try: + qbt_client.auth_log_in() + except qbittorrentapi.LoginFailed as e: + console.print(f"[bold red]Login failed while trying to get info hash: {e}") + exit(1) + + info_hash_v1 = meta.get('infohash') + torrents = qbt_client.torrents_info() + found = False + + for torrent in torrents: + if torrent.get('infohash_v1') == info_hash_v1: + comment = torrent.get('comment', "") + + if "https://passthepopcorn.me" in comment: + match = re.search(r'torrentid=(\d+)', comment) + if match: + meta['ptp'] = match.group(1) + console.print(f"[bold cyan]meta['ptp'] set to torrentid: {meta['ptp']}") + + elif "https://aither.cc" in comment: + match = re.search(r'/(\d+)$', comment) + if match: + meta['aither'] = match.group(1) + console.print(f"[bold cyan]meta['aither'] set to ID: {meta['aither']}") + + elif "https://lst.gg" in comment: + match = re.search(r'/(\d+)$', comment) + if match: + meta['lst'] = match.group(1) + console.print(f"[bold cyan]meta['lst'] set to ID: {meta['lst']}") + + elif "https://onlyencodes.cc" in comment: + match = re.search(r'/(\d+)$', comment) + if match: + meta['oe'] = match.group(1) + console.print(f"[bold cyan]meta['oe'] set to ID: {meta['oe']}") + + elif "https://blutopia.cc" in comment: + match = re.search(r'/(\d+)$', comment) + if match: + meta['blu'] = match.group(1) + console.print(f"[bold cyan]meta['blu'] set to ID: {meta['blu']}") + + found = True + break + + if not found: + console.print("[bold red]Torrent with the specified infohash_v1 not found.") + + return meta diff --git a/src/console.py b/src/console.py index 61aeecb04..223c51181 100644 --- a/src/console.py +++ b/src/console.py @@ -1,2 +1,2 @@ -from rich.console import Console 
-console = Console() \ No newline at end of file +from rich.console import Console +console = Console() diff --git a/src/discparse.py b/src/discparse.py index 33d9b8c68..af0826037 100644 --- a/src/discparse.py +++ b/src/discparse.py @@ -9,8 +9,8 @@ import json from src.console import console - - + + class DiscParse(): def __init__(self): pass @@ -28,7 +28,7 @@ async def get_bdinfo(self, discs, folder_id, base_dir, meta_discs): for file in os.listdir(save_dir): if file == f"BD_SUMMARY_{str(i).zfill(2)}.txt": bdinfo_text = save_dir + "/" + file - if bdinfo_text == None or meta_discs == []: + if bdinfo_text is None or meta_discs == []: if os.path.exists(f"{save_dir}/BD_FULL_{str(i).zfill(2)}.txt"): bdinfo_text = os.path.abspath(f"{save_dir}/BD_FULL_{str(i).zfill(2)}.txt") else: @@ -39,7 +39,7 @@ async def get_bdinfo(self, discs, folder_id, base_dir, meta_discs): console.print(f"[bold green]Scanning {path}") proc = await asyncio.create_subprocess_exec('mono', f"{base_dir}/bin/BDInfo/BDInfo.exe", '-w', path, save_dir) await proc.wait() - except: + except Exception: console.print('[bold red]mono not found, please install mono') elif sys.platform.startswith('win32'): @@ -54,7 +54,7 @@ async def get_bdinfo(self, discs, folder_id, base_dir, meta_discs): try: if bdinfo_text == "": for file in os.listdir(save_dir): - if file.startswith(f"BDINFO"): + if file.startswith("BDINFO"): bdinfo_text = save_dir + "/" + file with open(bdinfo_text, 'r') as f: text = f.read() @@ -64,7 +64,7 @@ async def get_bdinfo(self, discs, folder_id, base_dir, meta_discs): result = result2.split("********************", 1) bd_summary = result[0].rstrip(" \n") f.close() - with open(bdinfo_text, 'r') as f: # parse extended BDInfo + with open(bdinfo_text, 'r') as f: # parse extended BDInfo text = f.read() result = text.split("[code]", 3) result2 = result[2].rstrip(" \n") @@ -84,21 +84,19 @@ async def get_bdinfo(self, discs, folder_id, base_dir, meta_discs): with open(f"{save_dir}/BD_SUMMARY_{str(i).zfill(2)}.txt", 'w') as f: f.write(bd_summary.strip()) f.close() - with open(f"{save_dir}/BD_SUMMARY_EXT.txt", 'w') as f: # write extended BDInfo file + with open(f"{save_dir}/BD_SUMMARY_EXT.txt", 'w') as f: # write extended BDInfo file f.write(ext_bd_summary.strip()) f.close() - + bdinfo = self.parse_bdinfo(bd_summary, files[1], path) - + discs[i]['summary'] = bd_summary.strip() discs[i]['bdinfo'] = bdinfo # shutil.rmtree(f"{base_dir}/tmp") else: discs = meta_discs - + return discs, discs[0]['bdinfo'] - - def parse_bdinfo(self, bdinfo_input, files, path): bdinfo = dict() @@ -107,56 +105,56 @@ def parse_bdinfo(self, bdinfo_input, files, path): bdinfo['subtitles'] = list() bdinfo['path'] = path lines = bdinfo_input.splitlines() - for l in lines: + for l in lines: # noqa E741 line = l.strip().lower() if line.startswith("*"): line = l.replace("*", "").strip().lower() if line.startswith("playlist:"): playlist = l.split(':', 1)[1] - bdinfo['playlist'] = playlist.split('.',1)[0].strip() + bdinfo['playlist'] = playlist.split('.', 1)[0].strip() if line.startswith("disc size:"): size = l.split(':', 1)[1] - size = size.split('bytes', 1)[0].replace(',','') - size = float(size)/float(1<<30) + size = size.split('bytes', 1)[0].replace(',', '') + size = float(size) / float(1 << 30) bdinfo['size'] = size if line.startswith("length:"): length = l.split(':', 1)[1] - bdinfo['length'] = length.split('.',1)[0].strip() + bdinfo['length'] = length.split('.', 1)[0].strip() if line.startswith("video:"): split1 = l.split(':', 1)[1] split2 = split1.split('/', 
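# To make the field slicing below concrete, a representative BDInfo "Video:"
# summary line and its split (sample values are illustrative):
sample = "Video: MPEG-4 AVC Video / 24000 kbps / 1080p / 23.976 fps / 16:9 / High Profile 4.1"
fields = [f.strip() for f in sample.split(':', 1)[1].split('/')]
# fields -> ['MPEG-4 AVC Video', '24000 kbps', '1080p', '23.976 fps',
#            '16:9', 'High Profile 4.1']
# For 3D discs BDInfo inserts an extra "... Eye" entry after the bitrate,
# which is why the parser shifts every later index by n = 1 whenever "Eye"
# appears in the third field.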
12) while len(split2) != 9: split2.append("") - n=0 + n = 0 if "Eye" in split2[2].strip(): n = 1 three_dim = split2[2].strip() else: three_dim = "" try: - bit_depth = split2[n+6].strip() - hdr_dv = split2[n+7].strip() - color = split2[n+8].strip() - except: + bit_depth = split2[n + 6].strip() + hdr_dv = split2[n + 7].strip() + color = split2[n + 8].strip() + except Exception: bit_depth = "" hdr_dv = "" color = "" bdinfo['video'].append({ - 'codec': split2[0].strip(), - 'bitrate': split2[1].strip(), - 'res': split2[n+2].strip(), - 'fps': split2[n+3].strip(), - 'aspect_ratio' : split2[n+4].strip(), - 'profile': split2[n+5].strip(), - 'bit_depth' : bit_depth, - 'hdr_dv' : hdr_dv, - 'color' : color, - '3d' : three_dim, - }) + 'codec': split2[0].strip(), + 'bitrate': split2[1].strip(), + 'res': split2[n + 2].strip(), + 'fps': split2[n + 3].strip(), + 'aspect_ratio': split2[n + 4].strip(), + 'profile': split2[n + 5].strip(), + 'bit_depth': bit_depth, + 'hdr_dv': hdr_dv, + 'color': color, + '3d': three_dim, + }) elif line.startswith("audio:"): if "(" in l: - l = l.split("(")[0] - l = l.strip() + l = l.split("(")[0] # noqa E741 + l = l.strip() # noqa E741 split1 = l.split(':', 1)[1] split2 = split1.split('/') n = 0 @@ -166,18 +164,18 @@ def parse_bdinfo(self, bdinfo_input, files, path): else: fuckatmos = "" try: - bit_depth = split2[n+5].strip() - except: + bit_depth = split2[n + 5].strip() + except Exception: bit_depth = "" bdinfo['audio'].append({ - 'language' : split2[0].strip(), - 'codec' : split2[1].strip(), - 'channels' : split2[n+2].strip(), - 'sample_rate' : split2[n+3].strip(), - 'bitrate' : split2[n+4].strip(), - 'bit_depth' : bit_depth, # Also DialNorm, but is not in use anywhere yet + 'language': split2[0].strip(), + 'codec': split2[1].strip(), + 'channels': split2[n + 2].strip(), + 'sample_rate': split2[n + 3].strip(), + 'bitrate': split2[n + 4].strip(), + 'bit_depth': bit_depth, # Also DialNorm, but is not in use anywhere yet 'atmos_why_you_be_like_this': fuckatmos, - }) + }) elif line.startswith("disc title:"): title = l.split(':', 1)[1] bdinfo['title'] = title @@ -195,19 +193,17 @@ def parse_bdinfo(self, bdinfo_input, files, path): stripped = line.split() m2ts = {} bd_file = stripped[0] - time_in = stripped[1] + time_in = stripped[1] # noqa F841 bd_length = stripped[2] - bd_size = stripped[3] - bd_bitrate = stripped[4] + bd_size = stripped[3] # noqa F841 + bd_bitrate = stripped[4] # noqa F841 m2ts['file'] = bd_file m2ts['length'] = bd_length bdinfo['files'].append(m2ts) - except: + except Exception: pass return bdinfo - - """ Parse VIDEO_TS and get mediainfos """ @@ -215,12 +211,10 @@ async def get_dvdinfo(self, discs): for each in discs: path = each.get('path') os.chdir(path) - files = glob(f"VTS_*.VOB") + files = glob("VTS_*.VOB") files.sort() - # Switch to ordered dictionary filesdict = OrderedDict() main_set = [] - # Use ordered dictionary in place of list of lists for file in files: trimmed = file[4:] if trimmed[:2] not in filesdict: @@ -228,35 +222,49 @@ async def get_dvdinfo(self, discs): filesdict[trimmed[:2]].append(trimmed) main_set_duration = 0 for vob_set in filesdict.values(): - # Parse media info for this VOB set - vob_set_mi = MediaInfo.parse(f"VTS_{vob_set[0][:2]}_0.IFO", output='JSON') - vob_set_mi = json.loads(vob_set_mi) - vob_set_duration = vob_set_mi['media']['track'][1]['Duration'] - - - # If the duration of the new vob set > main set by more than 10% then it's our new main set - # This should make it so TV shows pick the first episode - if 
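# A minimal sketch of the main-set selection rule applied here (the helper
# name and durations are illustrative): a candidate VOB set only replaces the
# current main set when it is more than 10% longer, and the first set is
# seeded unconditionally.
def is_new_main_set(candidate_s, main_s, have_main):
    return candidate_s * 1.00 > main_s * 1.10 or not have_main

# is_new_main_set(2880.0, 2700.0, True) -> False  (within 10%, keep current)
# is_new_main_set(3600.0, 2700.0, True) -> True   (more than 10% longer)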
(float(vob_set_duration) * 1.00) > (float(main_set_duration) * 1.10) or len(main_set) < 1: + try: + vob_set_mi = MediaInfo.parse(f"VTS_{vob_set[0][:2]}_0.IFO", output='JSON') + vob_set_mi = json.loads(vob_set_mi) + tracks = vob_set_mi.get('media', {}).get('track', []) + if len(tracks) > 1: + vob_set_duration = tracks[1].get('Duration', "Unknown") + else: + console.print("Warning: Expected track[1] is missing.") + vob_set_duration = "Unknown" + + except Exception as e: + console.print(f"Error processing VOB set: {e}") + vob_set_duration = "Unknown" + + if vob_set_duration == "Unknown" or not vob_set_duration.replace('.', '', 1).isdigit(): + console.print(f"Skipping VOB set due to invalid duration: {vob_set_duration}") + continue + + vob_set_duration_float = float(vob_set_duration) + + # If the duration of the new vob set > main set by more than 10%, it's the new main set + # This should make it so TV shows pick the first episode + if (vob_set_duration_float * 1.00) > (float(main_set_duration) * 1.10) or len(main_set) < 1: main_set = vob_set - main_set_duration = vob_set_duration + main_set_duration = vob_set_duration_float + each['main_set'] = main_set set = main_set[0][:2] each['vob'] = vob = f"{path}/VTS_{set}_1.VOB" each['ifo'] = ifo = f"{path}/VTS_{set}_0.IFO" - each['vob_mi'] = MediaInfo.parse(os.path.basename(vob), output='STRING', full=False, mediainfo_options={'inform_version' : '1'}).replace('\r\n', '\n') - each['ifo_mi'] = MediaInfo.parse(os.path.basename(ifo), output='STRING', full=False, mediainfo_options={'inform_version' : '1'}).replace('\r\n', '\n') - each['vob_mi_full'] = MediaInfo.parse(vob, output='STRING', full=False, mediainfo_options={'inform_version' : '1'}).replace('\r\n', '\n') - each['ifo_mi_full'] = MediaInfo.parse(ifo, output='STRING', full=False, mediainfo_options={'inform_version' : '1'}).replace('\r\n', '\n') - + each['vob_mi'] = MediaInfo.parse(os.path.basename(vob), output='STRING', full=False, mediainfo_options={'inform_version': '1'}).replace('\r\n', '\n') + each['ifo_mi'] = MediaInfo.parse(os.path.basename(ifo), output='STRING', full=False, mediainfo_options={'inform_version': '1'}).replace('\r\n', '\n') + each['vob_mi_full'] = MediaInfo.parse(vob, output='STRING', full=False, mediainfo_options={'inform_version': '1'}).replace('\r\n', '\n') + each['ifo_mi_full'] = MediaInfo.parse(ifo, output='STRING', full=False, mediainfo_options={'inform_version': '1'}).replace('\r\n', '\n') - size = sum(os.path.getsize(f) for f in os.listdir('.') if os.path.isfile(f))/float(1<<30) + size = sum(os.path.getsize(f) for f in os.listdir('.') if os.path.isfile(f)) / float(1 << 30) if size <= 7.95: dvd_size = "DVD9" if size <= 4.37: dvd_size = "DVD5" each['size'] = dvd_size return discs - + async def get_hddvd_info(self, discs): for each in discs: path = each.get('path') @@ -270,6 +278,6 @@ async def get_hddvd_info(self, discs): if file_size > size: largest = file size = file_size - each['evo_mi'] = MediaInfo.parse(os.path.basename(largest), output='STRING', full=False, mediainfo_options={'inform_version' : '1'}) + each['evo_mi'] = MediaInfo.parse(os.path.basename(largest), output='STRING', full=False, mediainfo_options={'inform_version': '1'}) each['largest_evo'] = os.path.abspath(f"{path}/{largest}") return discs diff --git a/src/exceptions.py b/src/exceptions.py index b4c6dbead..e5de6f944 100644 --- a/src/exceptions.py +++ b/src/exceptions.py @@ -7,9 +7,10 @@ def __init__(self, *args, **kwargs): if args: # ... 
pass them to the super constructor super().__init__(*args, **kwargs) - else: # else, the exception was raised without arguments ... - # ... pass the default message to the super constructor - super().__init__(default_message, **kwargs) + else: # else, the exception was raised without arguments ... + # ... pass the default message to the super constructor + super().__init__(default_message, **kwargs) + class UploadException(Exception): def __init__(self, *args, **kwargs): @@ -20,14 +21,18 @@ def __init__(self, *args, **kwargs): if args: # ... pass them to the super constructor super().__init__(*args, **kwargs) - else: # else, the exception was raised without arguments ... - # ... pass the default message to the super constructor - super().__init__(default_message, **kwargs) + else: # else, the exception was raised without arguments ... + # ... pass the default message to the super constructor + super().__init__(default_message, **kwargs) class XEMNotFound(Exception): pass + + class WeirdSystem(Exception): pass + + class ManualDateException(Exception): - pass \ No newline at end of file + pass diff --git a/src/prep.py b/src/prep.py index 7bbbd970a..142f4ba4e 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1,23 +1,30 @@ # -*- coding: utf-8 -*- from src.args import Args from src.console import console -from src.exceptions import * -from src.trackers.PTP import PTP -from src.trackers.BLU import BLU -from src.trackers.HDB import HDB +from src.exceptions import * # noqa: F403 +from src.trackers.PTP import PTP # noqa F401 +from src.trackers.BLU import BLU # noqa F401 +from src.trackers.AITHER import AITHER # noqa F401 +from src.trackers.LST import LST # noqa F401 +from src.trackers.OE import OE # noqa F401 +from src.trackers.HDB import HDB # noqa F401 +from src.trackers.TIK import TIK # noqa F401 from src.trackers.COMMON import COMMON +from src.clients import Clients +from data.config import config +from src.uphelper import UploadHelper +from src.trackersetup import TRACKER_SETUP, tracker_class_map try: import traceback - import nest_asyncio from src.discparse import DiscParse import multiprocessing + from multiprocessing import get_context + from tqdm import tqdm import os - from os.path import basename import re import math - import sys - import distutils.util + from str2bool import str2bool import asyncio from guessit import guessit import ntpath @@ -32,20 +39,24 @@ import pyimgbox from pymediainfo import MediaInfo import tmdbsimple as tmdb - from datetime import datetime, date + from datetime import datetime from difflib import SequenceMatcher + import torf from torf import Torrent import base64 import time import anitopy import shutil from imdb import Cinemagoer - from subprocess import Popen - import subprocess import itertools import cli_ui - from rich.progress import Progress, TextColumn, BarColumn, TimeRemainingColumn + from rich.progress import Progress, TextColumn, BarColumn, TimeRemainingColumn # noqa F401 import platform + import aiohttp + from PIL import Image + import io + from io import BytesIO + import sys except ModuleNotFoundError: console.print(traceback.print_exc()) console.print('[bold red]Missing Module Found. 
Please reinstall required dependancies.') @@ -55,9 +66,6 @@ exit() - - - class Prep(): """ Prepare for upload: @@ -72,272 +80,602 @@ def __init__(self, screens, img_host, config): self.img_host = img_host.lower() tmdb.API_KEY = config['DEFAULT']['tmdb_api'] + async def prompt_user_for_confirmation(self, message: str) -> bool: + try: + response = input(f"{message} (Y/n): ").strip().lower() + if response in ["y", "yes", ""]: + return True + return False + except EOFError: + sys.exit(1) + + async def check_images_concurrently(self, imagelist, meta): + approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb'] + invalid_host_found = False # Track if any image is on a non-approved host + + # Ensure meta['image_sizes'] exists + if 'image_sizes' not in meta: + meta['image_sizes'] = {} + + # Map fixed resolution names to vertical resolutions + resolution_map = { + '8640p': 8640, + '4320p': 4320, + '2160p': 2160, + '1440p': 1440, + '1080p': 1080, + '1080i': 1080, + '720p': 720, + '576p': 576, + '576i': 576, + '480p': 480, + '480i': 480, + } + + # Get expected vertical resolution + expected_resolution_name = meta.get('resolution', None) + expected_vertical_resolution = resolution_map.get(expected_resolution_name, None) + + # If no valid resolution is found, skip processing + if expected_vertical_resolution is None: + console.print("[red]Meta resolution is invalid or missing. Skipping all images.[/red]") + return [] + + # Function to check each image's URL, host, and log resolution + async def check_and_collect(image_dict): + img_url = image_dict.get('raw_url') + if not img_url: + return None + + if "ptpimg.me" in img_url and img_url.startswith("http://"): + img_url = img_url.replace("http://", "https://") + image_dict['raw_url'] = img_url + image_dict['web_url'] = img_url + + # Verify the image link + if await self.check_image_link(img_url): + # Check if the image is hosted on an approved image host + if not any(host in img_url for host in approved_image_hosts): + nonlocal invalid_host_found + invalid_host_found = True # Mark that we found an invalid host + + async with aiohttp.ClientSession() as session: + async with session.get(img_url) as response: + if response.status == 200: + image_content = await response.read() + + try: + image = Image.open(BytesIO(image_content)) + vertical_resolution = image.height + lower_bound = expected_vertical_resolution * 0.70 # 30% below + if meta['is_disc'] == "DVD": + upper_bound = expected_vertical_resolution * 1.30 + else: + upper_bound = expected_vertical_resolution * 1.00 + + if not (lower_bound <= vertical_resolution <= upper_bound): + console.print( + f"[red]Image {img_url} resolution ({vertical_resolution}p) " + f"is outside the allowed range ({int(lower_bound)}-{int(upper_bound)}p). Skipping.[/red]" + ) + return None + + meta['image_sizes'][img_url] = len(image_content) + console.print( + f"Valid image {img_url} with resolution {image.width}x{image.height} " + f"and size {len(image_content) / 1024:.2f} KiB" + ) + except Exception as e: + console.print(f"[red]Failed to process image {img_url}: {e}") + return None + else: + console.print(f"[red]Failed to fetch image {img_url}. 
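A quick worked example of the bounds applied in check_and_collect above, with illustrative numbers: for a 1080p release the accepted screenshot height runs from 30% below the expected height up to exactly the expected height, and only DVD sources get the extra 30% headroom above.

expected = 1080
lower = expected * 0.70            # 756.0
upper_dvd = expected * 1.30        # 1404.0 for DVD sources
upper_other = expected * 1.00      # 1080.0 otherwise
assert lower <= 800 <= upper_other          # an 800px-tall shot passes
assert not (lower <= 1440 <= upper_other)   # an upscaled 1440p shot is rejected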
Skipping.") + + return image_dict + else: + return None + + # Run image verification concurrently + tasks = [check_and_collect(image_dict) for image_dict in imagelist] + results = await asyncio.gather(*tasks) + + # Collect valid images + valid_images = [image for image in results if image is not None] + + # Convert default_trackers string into a list + default_trackers = self.config['TRACKERS'].get('default_trackers', '') + trackers_list = [tracker.strip() for tracker in default_trackers.split(',')] + + # Ensure meta['trackers'] is a list + if meta.get('trackers') is not None: + if isinstance(meta.get('trackers', ''), str): + meta['trackers'] = [tracker.strip() for tracker in meta['trackers'].split(',')] + if 'MTV' in meta.get('trackers', []): + if invalid_host_found: + console.print( + "[red]Warning: Some images are not hosted on an MTV-approved image host. MTV will need new images later.[/red]" + ) + # Issue warning if any valid image is on an unapproved host and MTV is in the trackers list + elif 'MTV' in trackers_list: + if invalid_host_found: + console.print("[red]Warning: Some images are not hosted on an MTV-approved image host. MTV will need new images later.[/red]") + + return valid_images + + async def check_image_link(self, url): + async with aiohttp.ClientSession() as session: + try: + async with session.get(url) as response: + if response.status == 200: + content_type = response.headers.get('Content-Type', '').lower() + if 'image' in content_type: + # Attempt to load the image + image_data = await response.read() + try: + image = Image.open(io.BytesIO(image_data)) + image.verify() # This will check if the image is broken + console.print(f"[green]Image verified successfully: {url}[/green]") + return True + except (IOError, SyntaxError) as e: # noqa #F841 + console.print(f"[red]Image verification failed (corrupt image): {url}[/red]") + return False + else: + console.print(f"[red]Content type is not an image: {url}[/red]") + return False + else: + console.print(f"[red]Failed to retrieve image: {url} (status code: {response.status})[/red]") + return False + except Exception as e: + console.print(f"[red]Exception occurred while checking image: {url} - {str(e)}[/red]") + return False + + async def update_meta_with_unit3d_data(self, meta, tracker_data, tracker_name): + # Unpack the expected 9 elements, ignoring any additional ones + tmdb, imdb, tvdb, mal, desc, category, infohash, imagelist, filename, *rest = tracker_data + + if tmdb not in [None, '0']: + meta['tmdb_manual'] = tmdb + if imdb not in [None, '0']: + meta['imdb'] = str(imdb).zfill(7) + if tvdb not in [None, '0']: + meta['tvdb_id'] = tvdb + if mal not in [None, '0']: + meta['mal'] = mal + if desc not in [None, '0', '']: + meta['description'] = desc + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: + description.write((desc or "") + "\n") + if category.upper() in ['MOVIE', 'TV SHOW', 'FANRES']: + meta['category'] = 'TV' if category.upper() == 'TV SHOW' else category.upper() + + if not meta.get('image_list'): # Only handle images if image_list is not already populated + if imagelist: # Ensure imagelist is not empty before setting + valid_images = await self.check_images_concurrently(imagelist, meta) + if valid_images: + meta['image_list'] = valid_images + if meta.get('image_list'): # Double-check if image_list is set before handling it + if not (meta.get('blu') or meta.get('aither') or meta.get('lst') or meta.get('oe') or meta.get('tik')) or 
meta['unattended']: + await self.handle_image_list(meta, tracker_name) + + if filename: + meta[f'{tracker_name.lower()}_filename'] = filename + + console.print(f"[green]{tracker_name} data successfully updated in meta[/green]") + + async def update_metadata_from_tracker(self, tracker_name, tracker_instance, meta, search_term, search_file_folder): + tracker_key = tracker_name.lower() + manual_key = f"{tracker_key}_manual" + found_match = False + + if tracker_name in ["BLU", "AITHER", "LST", "OE", "TIK"]: + if meta.get(tracker_key) is not None: + console.print(f"[cyan]{tracker_name} ID found in meta, reusing existing ID: {meta[tracker_key]}[/cyan]") + tracker_data = await COMMON(self.config).unit3d_torrent_info( + tracker_name, + tracker_instance.torrent_url, + tracker_instance.search_url, + meta, + id=meta[tracker_key] + ) + else: + console.print(f"[yellow]No ID found in meta for {tracker_name}, searching by file name[/yellow]") + tracker_data = await COMMON(self.config).unit3d_torrent_info( + tracker_name, + tracker_instance.torrent_url, + tracker_instance.search_url, + meta, + file_name=search_term + ) + + if any(item not in [None, '0'] for item in tracker_data[:3]): # Check for valid tmdb, imdb, or tvdb + console.print(f"[green]Valid data found on {tracker_name}, setting meta values[/green]") + await self.update_meta_with_unit3d_data(meta, tracker_data, tracker_name) + found_match = True + else: + console.print(f"[yellow]No valid data found on {tracker_name}[/yellow]") + found_match = False + + elif tracker_name == "PTP": + imdb_id = None + if meta.get('ptp') is None: + imdb_id, ptp_torrent_id, ptp_torrent_hash = await tracker_instance.get_ptp_id_imdb(search_term, search_file_folder, meta) + if ptp_torrent_id: + meta['imdb'] = str(imdb_id).zfill(7) if imdb_id else None + console.print(f"[green]{tracker_name} IMDb ID found: tt{meta['imdb']}[/green]") + + if not meta['unattended']: + if await self.prompt_user_for_confirmation("Do you want to use this ID data from PTP?"): + found_match = True + meta['ptp'] = ptp_torrent_id + ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(ptp_torrent_id, meta, meta.get('is_disc', False)) + meta['description'] = ptp_desc + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: + description.write((ptp_desc or "") + "\n") + + if not meta.get('image_list'): + valid_images = await self.check_images_concurrently(ptp_imagelist, meta) + if valid_images: + meta['image_list'] = valid_images + await self.handle_image_list(meta, tracker_name) + + else: + found_match = False + meta['imdb'] = None + + else: + found_match = True + ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(ptp_torrent_id, meta, meta.get('is_disc', False)) + meta['description'] = ptp_desc + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: + description.write((ptp_desc or "") + "\n") + meta['saved_description'] = True + + if not meta.get('image_list'): + valid_images = await self.check_images_concurrently(ptp_imagelist, meta) + if valid_images: + meta['image_list'] = valid_images + else: + console.print("[yellow]Skipping PTP as no match found[/yellow]") + found_match = False + + else: + ptp_torrent_id = meta['ptp'] + console.print("[cyan]Using specified PTP ID to get IMDb ID[/cyan]") + imdb_id, _, meta['ext_torrenthash'] = await tracker_instance.get_imdb_from_torrent_id(ptp_torrent_id) + if imdb_id: + meta['imdb'] = 
str(imdb_id).zfill(7) + console.print(f"[green]IMDb ID found: tt{meta['imdb']}[/green]") + found_match = True + meta['skipit'] = True + ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(meta['ptp'], meta, meta.get('is_disc', False)) + meta['description'] = ptp_desc + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: + description.write(ptp_desc + "\n") + meta['saved_description'] = True + if not meta.get('image_list'): # Only handle images if image_list is not already populated + valid_images = await self.check_images_concurrently(ptp_imagelist, meta) + if valid_images: + meta['image_list'] = valid_images + console.print("[green]PTP images added to metadata.[/green]") + else: + console.print(f"[yellow]Could not find IMDb ID using PTP ID: {ptp_torrent_id}[/yellow]") + found_match = False + + elif tracker_name == "HDB": + if meta.get('hdb') is not None: + meta[manual_key] = meta[tracker_key] + console.print(f"[cyan]{tracker_name} ID found in meta, reusing existing ID: {meta[tracker_key]}[/cyan]") + + # Use get_info_from_torrent_id function if ID is found in meta + imdb, tvdb_id, hdb_name, meta['ext_torrenthash'] = await tracker_instance.get_info_from_torrent_id(meta[tracker_key]) + + meta['tvdb_id'] = str(tvdb_id) if tvdb_id else meta.get('tvdb_id') + meta['hdb_name'] = hdb_name + found_match = True + + # Skip user confirmation if searching by ID + console.print(f"[green]{tracker_name} data found: IMDb ID: {imdb}, TVDb ID: {meta['tvdb_id']}, HDB Name: {meta['hdb_name']}[/green]") + else: + console.print("[yellow]No ID found in meta for HDB, searching by file name[/yellow]") + + # Use search_filename function if ID is not found in meta + imdb, tvdb_id, hdb_name, meta['ext_torrenthash'], tracker_id = await tracker_instance.search_filename(search_term, search_file_folder, meta) + + meta['tvdb_id'] = str(tvdb_id) if tvdb_id else meta.get('tvdb_id') + meta['hdb_name'] = hdb_name + if tracker_id: + meta[tracker_key] = tracker_id + found_match = True + + if found_match: + if imdb or tvdb_id or hdb_name: + console.print(f"[green]{tracker_name} data found: IMDb ID: {imdb}, TVDb ID: {meta['tvdb_id']}, HDB Name: {meta['hdb_name']}[/green]") + if await self.prompt_user_for_confirmation(f"Do you want to use the ID's found on {tracker_name}?"): + console.print(f"[green]{tracker_name} data retained.[/green]") + else: + console.print(f"[yellow]{tracker_name} data discarded.[/yellow]") + meta[tracker_key] = None + meta['tvdb_id'] = None + meta['hdb_name'] = None + found_match = False + else: + found_match = False + + return meta, found_match + + async def handle_image_list(self, meta, tracker_name): + if meta.get('image_list'): + console.print(f"[cyan]Found the following images from {tracker_name}:") + for img in meta['image_list']: + console.print(f"[blue]{img}[/blue]") + + if meta['unattended']: + keep_images = True + else: + keep_images = await self.prompt_user_for_confirmation(f"Do you want to keep the images found on {tracker_name}?") + if not keep_images: + meta['image_list'] = [] + meta['image_sizes'] = {} + console.print(f"[yellow]Images discarded from {tracker_name}.") + else: + console.print(f"[green]Images retained from {tracker_name}.") async def gather_prep(self, meta, mode): + meta['cutoff'] = int(self.config['DEFAULT'].get('cutoff_screens', 3)) + task_limit = self.config['DEFAULT'].get('task_limit', "0") + if int(task_limit) > 0: + meta['task_limit'] = task_limit + meta['tone_map'] = 
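# The config lookups in gather_prep above default to strings and convert
# lazily, only taking effect when positive. A defensive variant of the same
# pattern, assuming a plain dict-like config section (names illustrative):
def int_setting(section, key, default="0"):
    value = section.get(key, default)
    return int(value) if str(value).strip().isdigit() else 0

# limit = int_setting(config['DEFAULT'], 'task_limit')
# if limit > 0:
#     meta['task_limit'] = limit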
self.config['DEFAULT'].get('tone_map', False) + tone_task_limit = self.config['DEFAULT'].get('tone_task_limit', "0") + if int(tone_task_limit) > 0: + meta['tone_task_limit'] = tone_task_limit meta['mode'] = mode - base_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) meta['isdir'] = os.path.isdir(meta['path']) base_dir = meta['base_dir'] + meta['saved_description'] = False - if meta.get('uuid', None) == None: - folder_id = os.path.basename(meta['path']) - meta['uuid'] = folder_id + folder_id = os.path.basename(meta['path']) + if meta.get('uuid', None) is None: + meta['uuid'] = folder_id if not os.path.exists(f"{base_dir}/tmp/{meta['uuid']}"): Path(f"{base_dir}/tmp/{meta['uuid']}").mkdir(parents=True, exist_ok=True) - + if meta['debug']: console.print(f"[cyan]ID: {meta['uuid']}") - meta['is_disc'], videoloc, bdinfo, meta['discs'] = await self.get_disc(meta) - - # If BD: + + # Debugging information + # console.print(f"Debug: meta['filelist'] before population: {meta.get('filelist', 'Not Set')}") + if meta['is_disc'] == "BDMV": - video, meta['scene'], meta['imdb'] = self.is_scene(meta['path'], meta.get('imdb', None)) - meta['filelist'] = [] + video, meta['scene'], meta['imdb'] = self.is_scene(meta['path'], meta, meta.get('imdb', None)) + meta['filelist'] = [] # No filelist for discs, use path + search_term = os.path.basename(meta['path']) + search_file_folder = 'folder' try: - guess_name = bdinfo['title'].replace('-',' ') - filename = guessit(re.sub("[^0-9a-zA-Z\[\]]+", " ", guess_name), {"excludes" : ["country", "language"]})['title'] + guess_name = bdinfo['title'].replace('-', ' ') + filename = guessit(re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), {"excludes": ["country", "language"]})['title'] untouched_filename = bdinfo['title'] try: meta['search_year'] = guessit(bdinfo['title'])['year'] except Exception: meta['search_year'] = "" except Exception: - guess_name = bdinfo['label'].replace('-',' ') - filename = guessit(re.sub("[^0-9a-zA-Z\[\]]+", " ", guess_name), {"excludes" : ["country", "language"]})['title'] + guess_name = bdinfo['label'].replace('-', ' ') + filename = guessit(re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), {"excludes": ["country", "language"]})['title'] untouched_filename = bdinfo['label'] try: meta['search_year'] = guessit(bdinfo['label'])['year'] except Exception: meta['search_year'] = "" - if meta.get('resolution', None) == None: + if meta.get('resolution', None) is None: meta['resolution'] = self.mi_resolution(bdinfo['video'][0]['res'], guessit(video), width="OTHER", scan="p", height="OTHER", actual_height=0) - # if meta.get('sd', None) == None: meta['sd'] = self.is_sd(meta['resolution']) mi = None - mi_dump = None - #IF DVD + elif meta['is_disc'] == "DVD": - video, meta['scene'], meta['imdb'] = self.is_scene(meta['path'], meta.get('imdb', None)) + video, meta['scene'], meta['imdb'] = self.is_scene(meta['path'], meta, meta.get('imdb', None)) meta['filelist'] = [] - guess_name = meta['discs'][0]['path'].replace('-',' ') - # filename = guessit(re.sub("[^0-9a-zA-Z]+", " ", guess_name))['title'] - filename = guessit(guess_name, {"excludes" : ["country", "language"]})['title'] + search_term = os.path.basename(meta['path']) + search_file_folder = 'folder' + guess_name = meta['discs'][0]['path'].replace('-', ' ') + filename = guessit(guess_name, {"excludes": ["country", "language"]})['title'] untouched_filename = os.path.basename(os.path.dirname(meta['discs'][0]['path'])) try: meta['search_year'] = guessit(meta['discs'][0]['path'])['year'] except 
Exception: meta['search_year'] = "" - if meta.get('edit', False) == False: + if not meta.get('edit', False): mi = self.exportInfo(f"{meta['discs'][0]['path']}/VTS_{meta['discs'][0]['main_set'][0][:2]}_1.VOB", False, meta['uuid'], meta['base_dir'], export_text=False) meta['mediainfo'] = mi else: mi = meta['mediainfo'] - - #NTSC/PAL - meta['dvd_size'] = await self.get_dvd_size(meta['discs']) + + meta['dvd_size'] = await self.get_dvd_size(meta['discs'], meta.get('manual_dvds')) meta['resolution'] = self.get_resolution(guessit(video), meta['uuid'], base_dir) meta['sd'] = self.is_sd(meta['resolution']) + elif meta['is_disc'] == "HDDVD": - video, meta['scene'], meta['imdb'] = self.is_scene(meta['path'], meta.get('imdb', None)) + video, meta['scene'], meta['imdb'] = self.is_scene(meta['path'], meta, meta.get('imdb', None)) meta['filelist'] = [] - guess_name = meta['discs'][0]['path'].replace('-','') - filename = guessit(guess_name, {"excludes" : ["country", "language"]})['title'] + search_term = os.path.basename(meta['path']) + search_file_folder = 'folder' + guess_name = meta['discs'][0]['path'].replace('-', '') + filename = guessit(guess_name, {"excludes": ["country", "language"]})['title'] untouched_filename = os.path.basename(meta['discs'][0]['path']) videopath = meta['discs'][0]['largest_evo'] try: meta['search_year'] = guessit(meta['discs'][0]['path'])['year'] except Exception: meta['search_year'] = "" - if meta.get('edit', False) == False: + if not meta.get('edit', False): mi = self.exportInfo(meta['discs'][0]['largest_evo'], False, meta['uuid'], meta['base_dir'], export_text=False) meta['mediainfo'] = mi else: mi = meta['mediainfo'] meta['resolution'] = self.get_resolution(guessit(video), meta['uuid'], base_dir) meta['sd'] = self.is_sd(meta['resolution']) - #If NOT BD/DVD/HDDVD + else: - videopath, meta['filelist'] = self.get_video(videoloc, meta.get('mode', 'discord')) - video, meta['scene'], meta['imdb'] = self.is_scene(videopath, meta.get('imdb', None)) - guess_name = ntpath.basename(video).replace('-',' ') - filename = guessit(re.sub("[^0-9a-zA-Z\[\]]+", " ", guess_name), {"excludes" : ["country", "language"]}).get("title", guessit(re.sub("[^0-9a-zA-Z]+", " ", guess_name), {"excludes" : ["country", "language"]})["title"]) + videopath, meta['filelist'] = self.get_video(videoloc, meta.get('mode', 'discord')) + search_term = os.path.basename(meta['filelist'][0]) if meta['filelist'] else None + search_file_folder = 'file' + video, meta['scene'], meta['imdb'] = self.is_scene(videopath, meta, meta.get('imdb', None)) + guess_name = ntpath.basename(video).replace('-', ' ') + filename = guessit(re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), {"excludes": ["country", "language"]}).get("title", guessit(re.sub("[^0-9a-zA-Z]+", " ", guess_name), {"excludes": ["country", "language"]})["title"]) untouched_filename = os.path.basename(video) try: meta['search_year'] = guessit(video)['year'] except Exception: meta['search_year'] = "" - - if meta.get('edit', False) == False: + + if not meta.get('edit', False): mi = self.exportInfo(videopath, meta['isdir'], meta['uuid'], base_dir, export_text=True) meta['mediainfo'] = mi else: mi = meta['mediainfo'] - if meta.get('resolution', None) == None: + if meta.get('resolution', None) is None: meta['resolution'] = self.get_resolution(guessit(video), meta['uuid'], base_dir) - # if meta.get('sd', None) == None: meta['sd'] = self.is_sd(meta['resolution']) - - - if " AKA " in filename.replace('.',' '): + if " AKA " in filename.replace('.', ' '): filename = 
filename.split('AKA')[0] meta['filename'] = filename meta['bdinfo'] = bdinfo - - + # Debugging information after population + # console.print(f"Debug: meta['filelist'] after population: {meta.get('filelist', 'Not Set')}") + if 'description' not in meta: + meta['description'] = "" - # Reuse information from other trackers - if str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true": - ptp = PTP(config=self.config) - if meta.get('ptp', None) != None: - meta['ptp_manual'] = meta['ptp'] - meta['imdb'], meta['ext_torrenthash'] = await ptp.get_imdb_from_torrent_id(meta['ptp']) - else: - if meta['is_disc'] in [None, ""]: - ptp_search_term = os.path.basename(meta['filelist'][0]) - search_file_folder = 'file' - else: - search_file_folder = 'folder' - ptp_search_term = os.path.basename(meta['path']) - ptp_imdb, ptp_id, meta['ext_torrenthash'] = await ptp.get_ptp_id_imdb(ptp_search_term, search_file_folder) - if ptp_imdb != None: - meta['imdb'] = ptp_imdb - if ptp_id != None: - meta['ptp'] = ptp_id - - if str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": - hdb = HDB(config=self.config) - if meta.get('ptp', None) == None or meta.get('hdb', None) != None: - hdb_imdb = hdb_tvdb = hdb_id = None - hdb_id = meta.get('hdb') - if hdb_id != None: - meta['hdb_manual'] = hdb_id - hdb_imdb, hdb_tvdb, meta['hdb_name'], meta['ext_torrenthash'] = await hdb.get_info_from_torrent_id(hdb_id) - else: - if meta['is_disc'] in [None, ""]: - hdb_imdb, hdb_tvdb, meta['hdb_name'], meta['ext_torrenthash'], hdb_id = await hdb.search_filename(meta['filelist']) - else: - # Somehow search for disc - pass - if hdb_imdb != None: - meta['imdb'] = str(hdb_imdb) - if hdb_tvdb != None: - meta['tvdb_id'] = str(hdb_tvdb) - if hdb_id != None: - meta['hdb'] = hdb_id - - if str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": - blu = BLU(config=self.config) - if meta.get('blu', None) != None: - meta['blu_manual'] = meta['blu'] - blu_tmdb, blu_imdb, blu_tvdb, blu_mal, blu_desc, blu_category, meta['ext_torrenthash'], blu_imagelist = await COMMON(self.config).unit3d_torrent_info("BLU", blu.torrent_url, meta['blu']) - if blu_tmdb not in [None, '0']: - meta['tmdb_manual'] = blu_tmdb - if blu_imdb not in [None, '0']: - meta['imdb'] = str(blu_imdb) - if blu_tvdb not in [None, '0']: - meta['tvdb_id'] = blu_tvdb - if blu_mal not in [None, '0']: - meta['mal'] = blu_mal - if blu_desc not in [None, '0', '']: - meta['blu_desc'] = blu_desc - if blu_category.upper() in ['MOVIE', 'TV SHOW', 'FANRES']: - if blu_category.upper() == 'TV SHOW': - meta['category'] = 'TV' - else: - meta['category'] = blu_category.upper() - if meta.get('image_list', []) == []: - meta['image_list'] = blu_imagelist - else: - # Seach automatically - pass - - + description_text = meta.get('description', '') + if description_text is None: + description_text = "" + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: + description.write(description_text) + + client = Clients(config=config) + if meta.get('infohash') is not None: + meta = await client.get_ptp_from_hash(meta) + + if not meta.get('image_list'): + # Reuse information from trackers with fallback + found_match = False + + if search_term: + # Check if a specific tracker is already set in meta + tracker_keys = { + 'ptp': 'PTP', + 'hdb': 'HDB', + 'blu': 'BLU', + 'aither': 'AITHER', + 'lst': 'LST', + 'oe': 'OE', + 'tik': 'TIK', + } + specific_tracker = next((tracker_keys[key] for key in 
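# The next(...) expression at this step returns the first tracker whose ID is
# already present in meta, or None; dict insertion order fixes the priority.
# A standalone illustration (values are made up):
# tracker_keys = {'ptp': 'PTP', 'hdb': 'HDB'}
# meta = {'ptp': None, 'hdb': '4321'}
# next((tracker_keys[k] for k in tracker_keys if meta.get(k)), None) -> 'HDB'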
tracker_keys if meta.get(key)), None) + async def process_tracker(tracker_name, meta): + nonlocal found_match + tracker_class = globals().get(tracker_name) + if tracker_class is None: + print(f"Tracker class for {tracker_name} not found.") + return meta + tracker_instance = tracker_class(config=self.config) + try: + updated_meta, match = await self.update_metadata_from_tracker( + tracker_name, tracker_instance, meta, search_term, search_file_folder + ) + if match: + found_match = True + console.print(f"[green]Match found on tracker: {tracker_name}[/green]") + return updated_meta + except aiohttp.ClientSSLError: + print(f"{tracker_name} tracker request failed due to SSL error.") + except requests.exceptions.ConnectionError as conn_err: + print(f"{tracker_name} tracker request failed due to connection error: {conn_err}") + return meta - # Take Screenshots - if meta['is_disc'] == "BDMV": - if meta.get('edit', False) == False: - if meta.get('vapoursynth', False) == True: - use_vs = True + # If a specific tracker is found, process only that tracker + if specific_tracker: + meta = await process_tracker(specific_tracker, meta) else: - use_vs = False - try: - ds = multiprocessing.Process(target=self.disc_screenshots, args=(filename, bdinfo, meta['uuid'], base_dir, use_vs, meta.get('image_list', []), meta.get('ffdebug', False), None)) - ds.start() - while ds.is_alive() == True: - await asyncio.sleep(1) - except KeyboardInterrupt: - ds.terminate() - elif meta['is_disc'] == "DVD": - if meta.get('edit', False) == False: - try: - ds = multiprocessing.Process(target=self.dvd_screenshots, args=(meta, 0, None)) - ds.start() - while ds.is_alive() == True: - await asyncio.sleep(1) - except KeyboardInterrupt: - ds.terminate() - else: - if meta.get('edit', False) == False: - try: - s = multiprocessing.Process(target=self.screenshots, args=(videopath, filename, meta['uuid'], base_dir, meta)) - s.start() - while s.is_alive() == True: - await asyncio.sleep(3) - except KeyboardInterrupt: - s.terminate() - + # Process all trackers with API = true if no specific tracker is set in meta + tracker_order = ["PTP", "BLU", "AITHER", "LST", "OE", "TIK", "HDB"] + for tracker_name in tracker_order: + if not found_match: # Stop checking once a match is found + tracker_config = self.config['TRACKERS'].get(tracker_name, {}) + if str(tracker_config.get('useAPI', 'false')).lower() == "true": + meta = await process_tracker(tracker_name, meta) + if not found_match: + console.print("[yellow]No matches found on any trackers.[/yellow]") + else: + console.print("[yellow]Warning: No valid search term available, skipping tracker updates.[/yellow]") + else: + console.print("Skipping existing search as meta already populated") + console.print("[yellow]Building meta data.....") + if meta['debug']: + meta_start_time = time.time() + if meta.get('manual_language'): + meta['original_langauge'] = meta.get('manual_language').lower() meta['tmdb'] = meta.get('tmdb_manual', None) - if meta.get('type', None) == None: - meta['type'] = self.get_type(video, meta['scene'], meta['is_disc']) - if meta.get('category', None) == None: + meta['type'] = self.get_type(video, meta['scene'], meta['is_disc'], meta) + if meta.get('category', None) is None: meta['category'] = self.get_cat(video) else: meta['category'] = meta['category'].upper() - if meta.get('tmdb', None) == None and meta.get('imdb', None) == None: - meta['category'], meta['tmdb'], meta['imdb'] = self.get_tmdb_imdb_from_mediainfo(mi, meta['category'], meta['is_disc'], meta['tmdb'], meta['imdb']) - 
if meta.get('tmdb', None) == None and meta.get('imdb', None) == None: + if meta.get('tmdb', None) is None and meta.get('imdb', None) is None: + meta['category'], meta['tmdb'], meta['imdb'] = self.get_tmdb_imdb_from_mediainfo(mi, meta['category'], meta['is_disc'], meta['tmdb'], meta['imdb']) + if meta.get('tmdb', None) is None and meta.get('imdb', None) is None: meta = await self.get_tmdb_id(filename, meta['search_year'], meta, meta['category'], untouched_filename) - elif meta.get('imdb', None) != None and meta.get('tmdb_manual', None) == None: + elif meta.get('imdb', None) is not None and meta.get('tmdb_manual', None) is None: meta['imdb_id'] = str(meta['imdb']).replace('tt', '') meta = await self.get_tmdb_from_imdb(meta, filename) else: meta['tmdb_manual'] = meta.get('tmdb', None) - # If no tmdb, use imdb for meta if int(meta['tmdb']) == 0: meta = await self.imdb_other_meta(meta) else: meta = await self.tmdb_other_meta(meta) # Search tvmaze - meta['tvmaze_id'], meta['imdb_id'], meta['tvdb_id'] = await self.search_tvmaze(filename, meta['search_year'], meta.get('imdb_id','0'), meta.get('tvdb_id', 0)) + if meta['category'] == "TV": + meta['tvmaze_id'], meta['imdb_id'], meta['tvdb_id'] = await self.search_tvmaze(filename, meta['search_year'], meta.get('imdb_id', '0'), meta.get('tvdb_id', 0), meta) + else: + meta.setdefault('tvmaze_id', '0') # If no imdb, search for it - if meta.get('imdb_id', None) == None: + if meta.get('imdb_id', None) is None: meta['imdb_id'] = await self.search_imdb(filename, meta['search_year']) - if meta.get('imdb_info', None) == None and int(meta['imdb_id']) != 0: - meta['imdb_info'] = await self.get_imdb_info(meta['imdb_id'], meta) - if meta.get('tag', None) == None: + if meta.get('imdb_info', None) is None and int(meta['imdb_id']) != 0: + meta['imdb_info'] = await self.get_imdb_info_api(meta['imdb_id'], meta) + if meta.get('tag', None) is None: meta['tag'] = self.get_tag(video, meta) else: if not meta['tag'].startswith('-') and meta['tag'] != "": meta['tag'] = f"-{meta['tag']}" - meta = await self.get_season_episode(video, meta) + if meta['category'] == "TV": + meta = await self.get_season_episode(video, meta) meta = await self.tag_override(meta) - + if meta.get('tag') == "-SubsPlease": # SubsPlease-specific + tracks = meta.get('mediainfo').get('media', {}).get('track', []) # Get all tracks + bitrate = tracks[1].get('BitRate', '') if len(tracks) > 1 else '' # Get video bitrate if available + bitrate_oldMediaInfo = tracks[0].get('OverallBitRate', '') if len(tracks) > 0 else '' # For old MediaInfo (< 24.x where video bitrate is empty, use 'OverallBitRate' instead) + meta['episode_title'] = "" + if (bitrate.isdigit() and int(bitrate) >= 8000000) or (bitrate_oldMediaInfo.isdigit() and int(bitrate_oldMediaInfo) >= 8000000): + meta['service'] = "CR" + elif (bitrate.isdigit() or bitrate_oldMediaInfo.isdigit()): # Only assign if at least one bitrate is present, otherwise leave it to user + meta['service'] = "HIDI" meta['video'] = video meta['audio'], meta['channels'], meta['has_commentary'] = self.get_audio_v2(mi, meta, bdinfo) if meta['tag'][1:].startswith(meta['channels']): @@ -345,39 +683,231 @@ async def gather_prep(self, meta, mode): if meta.get('no_tag', False): meta['tag'] = "" meta['3D'] = self.is_3d(mi, bdinfo) - meta['source'], meta['type'] = self.get_source(meta['type'], video, meta['path'], meta['is_disc'], meta) + if meta.get('manual_source', None): + meta['source'] = meta['manual_source'] + _, meta['type'] = self.get_source(meta['type'], video, meta['path'], 
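# The SubsPlease branch above infers the streaming service from video
# bitrate: releases at or above 8 Mb/s are tagged CR, lower (but known)
# bitrates HIDI, and unknown bitrates are left for the user. A standalone
# sketch of that threshold check (the helper name is illustrative):
def subsplease_service(bitrate_str):
    if bitrate_str.isdigit() and int(bitrate_str) >= 8_000_000:
        return "CR"
    if bitrate_str.isdigit():
        return "HIDI"
    return None  # unknown bitrate: leave the service unset

# subsplease_service("12000000") -> 'CR'; subsplease_service("4500000") -> 'HIDI'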
meta['is_disc'], meta, folder_id, base_dir) + else: + meta['source'], meta['type'] = self.get_source(meta['type'], video, meta['path'], meta['is_disc'], meta, folder_id, base_dir) if meta.get('service', None) in (None, ''): meta['service'], meta['service_longname'] = self.get_service(video, meta.get('tag', ''), meta['audio'], meta['filename']) + elif meta.get('service'): + services = self.get_service(get_services_only=True) + meta['service_longname'] = max((k for k, v in services.items() if v == meta['service']), key=len, default=meta['service']) meta['uhd'] = self.get_uhd(meta['type'], guessit(meta['path']), meta['resolution'], meta['path']) meta['hdr'] = self.get_hdr(mi, bdinfo) meta['distributor'] = self.get_distributor(meta['distributor']) - if meta.get('is_disc', None) == "BDMV": #Blu-ray Specific + if meta.get('is_disc', None) == "BDMV": # Blu-ray Specific meta['region'] = self.get_region(bdinfo, meta.get('region', None)) meta['video_codec'] = self.get_video_codec(bdinfo) else: meta['video_encode'], meta['video_codec'], meta['has_encode_settings'], meta['bit_depth'] = self.get_video_encode(mi, meta['type'], bdinfo) - - meta['edition'], meta['repack'] = self.get_edition(meta['path'], bdinfo, meta['filelist'], meta.get('manual_edition')) - if "REPACK" in meta.get('edition', ""): - meta['repack'] = re.search("REPACK[\d]?", meta['edition'])[0] - meta['edition'] = re.sub("REPACK[\d]?", "", meta['edition']).strip().replace(' ', ' ') - - - - #WORK ON THIS + if meta.get('no_edition') is False: + meta['edition'], meta['repack'] = self.get_edition(meta['path'], bdinfo, meta['filelist'], meta.get('manual_edition')) + if "REPACK" in meta.get('edition', ""): + meta['repack'] = re.search(r"REPACK[\d]?", meta['edition'])[0] + meta['edition'] = re.sub(r"REPACK[\d]?", "", meta['edition']).strip().replace(' ', ' ') + else: + meta['edition'] = "" + meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await self.get_name(meta) + if meta['debug']: + meta_finish_time = time.time() + console.print(f"Metadata processed in {meta_finish_time - meta_start_time:.2f} seconds") + parser = Args(config) + helper = UploadHelper() + common = COMMON(config=config) + tracker_setup = TRACKER_SETUP(config=config) + enabled_trackers = tracker_setup.trackers_enabled(meta) + if "saved_trackers" not in meta: + meta['trackers'] = enabled_trackers + else: + meta['trackers'] = meta['saved_trackers'] + confirm = helper.get_confirmation(meta) + while confirm is False: + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: + json.dump(meta, f, indent=4) + meta['saved_trackers'] = meta['trackers'] + editargs = cli_ui.ask_string("Input args that need correction e.g. 
(--tag NTb --category tv --tmdb 12345)") + editargs = (meta['path'],) + tuple(editargs.split()) + if meta.get('debug', False): + editargs += ("--debug",) + meta, help, before_args = parser.parse(editargs, meta) + meta['edit'] = True + meta = await self.gather_prep(meta=meta, mode='cli') + meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await self.get_name(meta) + confirm = helper.get_confirmation(meta) + + tracker_status = {} + successful_trackers = 0 + + for tracker_name in meta['trackers']: + disctype = meta.get('disctype', None) + tracker_name = tracker_name.replace(" ", "").upper().strip() + + if meta['name'].endswith('DUPE?'): + meta['name'] = meta['name'].replace(' DUPE?', '') + + if tracker_name in tracker_class_map: + tracker_class = tracker_class_map[tracker_name](config=config) + tracker_status[tracker_name] = {'banned': False, 'skipped': False, 'dupe': False, 'upload': False} + + if tracker_name in {"THR", "PTP"}: + if meta.get('imdb_id', '0') == '0': + imdb_id = cli_ui.ask_string("Unable to find IMDB id, please enter e.g.(tt1234567)") + meta['imdb_id'] = imdb_id.replace('tt', '').zfill(7) + if tracker_name == "PTP": + console.print("[yellow]Searching for Group ID") + ptp = PTP(config=config) + groupID = await ptp.get_group_by_imdb(meta['imdb_id']) + if groupID is None: + console.print("[yellow]No Existing Group found") + if meta.get('youtube', None) is None or "youtube" not in str(meta.get('youtube', '')): + youtube = cli_ui.ask_string("Unable to find youtube trailer, please link one e.g.(https://www.youtube.com/watch?v=dQw4w9WgXcQ)", default="") + meta['youtube'] = youtube + meta['ptp_groupID'] = groupID + + if tracker_name == "THR": + youtube = cli_ui.ask_string("Unable to find youtube trailer, please link one e.g.(https://www.youtube.com/watch?v=dQw4w9WgXcQ)") + meta['youtube'] = youtube + + if tracker_setup.check_banned_group(tracker_class.tracker, tracker_class.banned_groups, meta): + console.print(f"[red]Tracker '{tracker_name}' is banned. 
Skipping.[/red]") + tracker_status[tracker_name]['banned'] = True + continue + + if tracker_name not in {"THR", "PTP", "TL"}: + dupes = await tracker_class.search_existing(meta, disctype) + elif tracker_name == "PTP": + dupes = await ptp.search_existing(groupID, meta, disctype) + if 'skipping' not in meta or meta['skipping'] is None: + dupes = await common.filter_dupes(dupes, meta) + meta, is_dupe = helper.dupe_check(dupes, meta, tracker_name) + if is_dupe: + console.print(f"[red]Skipping upload on {tracker_name}[/red]") + print() + tracker_status[tracker_name]['dupe'] = True + elif meta['skipping']: + tracker_status[tracker_name]['skipped'] = True + if tracker_name == "MTV": + if not tracker_status[tracker_name]['banned'] and not tracker_status[tracker_name]['skipped'] and not tracker_status[tracker_name]['dupe']: + tracker_config = self.config['TRACKERS'].get(tracker_name, {}) + if str(tracker_config.get('prefer_mtv_torrent', 'false')).lower() == "true": + meta['prefer_small_pieces'] = True + else: + meta['prefer_small_pieces'] = False + if str(tracker_config.get('skip_if_rehash', 'false')).lower() == "true": + torrent_path = os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") + if not os.path.exists(torrent_path): + check_torrent = await client.find_existing_torrent(meta) + if check_torrent: + console.print(f"[yellow]Existing torrent found on {check_torrent}[/yellow]") + self.create_base_from_existing_torrent(check_torrent, meta['base_dir'], meta['uuid']) + torrent = Torrent.read(torrent_path) + if torrent.piece_size > 8388608: + console.print("[yellow]No existing torrent found with piece size lesser than 8MB[/yellow]") + tracker_status[tracker_name]['skipped'] = True + elif os.path.exists(torrent_path): + torrent = Torrent.read(torrent_path) + if torrent.piece_size > 8388608: + console.print("[yellow]Existing torrent found with piece size greater than 8MB[/yellow]") + tracker_status[tracker_name]['skipped'] = True + if meta.get('skipping') is None and not is_dupe and tracker_name == "PTP": + if meta.get('imdb_info', {}) == {}: + meta['imdb_info'] = self.get_imdb_info_api(meta['imdb_id'], meta) + if not meta['debug']: + if not tracker_status[tracker_name]['banned'] and not tracker_status[tracker_name]['skipped'] and not tracker_status[tracker_name]['dupe']: + console.print(f"[bold yellow]Tracker '{tracker_name}' passed all checks.") + if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)): + edit_choice = input("Enter 'y' to upload, or press enter to skip uploading:") + if edit_choice.lower() == 'y': + tracker_status[tracker_name]['upload'] = True + successful_trackers += 1 + else: + tracker_status[tracker_name]['upload'] = False + else: + tracker_status[tracker_name]['upload'] = True + successful_trackers += 1 + else: + tracker_status[tracker_name]['upload'] = True + successful_trackers += 1 + meta['skipping'] = None + else: + if tracker_name == "MANUAL": + successful_trackers += 1 + + meta['tracker_status'] = tracker_status + + if meta['debug']: + console.print("\n[bold]Tracker Processing Summary:[/bold]") + for t_name, status in tracker_status.items(): + banned_status = 'Yes' if status['banned'] else 'No' + skipped_status = 'Yes' if status['skipped'] else 'No' + dupe_status = 'Yes' if status['dupe'] else 'No' + upload_status = 'Yes' if status['upload'] else 'No' + if meta['debug']: + console.print(f"Tracker: {t_name} | Banned: {banned_status} | Skipped: {skipped_status} | Dupe: {dupe_status} | [yellow]Upload:[/yellow] 
{upload_status}") + if meta['debug']: + console.print(f"\n[bold]Trackers Passed all Checks:[/bold] {successful_trackers}") + + meta['skip_uploading'] = int(self.config['DEFAULT'].get('tracker_pass_checks', 1)) + if not meta['debug']: + if successful_trackers < meta['skip_uploading']: + console.print(f"[red]Not enough successful trackers ({successful_trackers}/{meta['skip_uploading']}). EXITING........[/red]") + return + + meta['we_are_uploading'] = True + + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: + json.dump(meta, f, indent=4) + + if 'manual_frames' not in meta: + meta['manual_frames'] = {} + manual_frames = meta['manual_frames'] + # Take Screenshots + if meta['is_disc'] == "BDMV": + if meta.get('edit', False) is False: + if meta.get('vapoursynth', False) is True: + use_vs = True + else: + use_vs = False + try: + ds = multiprocessing.Process(target=self.disc_screenshots, args=(meta, filename, bdinfo, meta['uuid'], base_dir, use_vs, meta.get('image_list', []), meta.get('ffdebug', False), None)) + ds.start() + while ds.is_alive() is True: + await asyncio.sleep(1) + except KeyboardInterrupt: + ds.terminate() + elif meta['is_disc'] == "DVD": + if meta.get('edit', False) is False: + try: + ds = multiprocessing.Process(target=self.dvd_screenshots, args=(meta, 0, None, None)) + ds.start() + while ds.is_alive() is True: + await asyncio.sleep(1) + except KeyboardInterrupt: + ds.terminate() + else: + if meta.get('edit', False) is False: + try: + s = multiprocessing.Process( + target=self.screenshots, + args=(videopath, filename, meta['uuid'], base_dir, meta), # Positional arguments + kwargs={'manual_frames': manual_frames} # Keyword argument + ) + s.start() + while s.is_alive() is True: + await asyncio.sleep(3) + except KeyboardInterrupt: + s.terminate() + + # WORK ON THIS meta.get('stream', False) meta['stream'] = self.stream_optimized(meta['stream']) meta.get('anon', False) meta['anon'] = self.is_anon(meta['anon']) - - - - meta = await self.gen_desc(meta) + if meta['saved_description'] is False: + meta = await self.gen_desc(meta) return meta - - - """ Determine if disc and if so, get bdinfo """ @@ -385,45 +915,45 @@ async def get_disc(self, meta): is_disc = None videoloc = meta['path'] bdinfo = None - bd_summary = None + bd_summary = None # noqa: F841 discs = [] parse = DiscParse() for path, directories, files in os. 
walk(meta['path']): for each in directories: - if each.upper() == "BDMV": #BDMVs + if each.upper() == "BDMV": # BDMVs is_disc = "BDMV" disc = { - 'path' : f"{path}/{each}", - 'name' : os.path.basename(path), - 'type' : 'BDMV', - 'summary' : "", - 'bdinfo' : "" + 'path': f"{path}/{each}", + 'name': os.path.basename(path), + 'type': 'BDMV', + 'summary': "", + 'bdinfo': "" } discs.append(disc) - elif each == "VIDEO_TS": #DVDs + elif each == "VIDEO_TS": # DVDs is_disc = "DVD" disc = { - 'path' : f"{path}/{each}", - 'name' : os.path.basename(path), - 'type' : 'DVD', - 'vob_mi' : '', - 'ifo_mi' : '', - 'main_set' : [], - 'size' : "" + 'path': f"{path}/{each}", + 'name': os.path.basename(path), + 'type': 'DVD', + 'vob_mi': '', + 'ifo_mi': '', + 'main_set': [], + 'size': "" } discs.append(disc) elif each == "HVDVD_TS": is_disc = "HDDVD" disc = { - 'path' : f"{path}/{each}", - 'name' : os.path.basename(path), - 'type' : 'HDDVD', - 'evo_mi' : '', - 'largest_evo' : "" + 'path': f"{path}/{each}", + 'name': os.path.basename(path), + 'type': 'HDDVD', + 'evo_mi': '', + 'largest_evo': "" } discs.append(disc) if is_disc == "BDMV": - if meta.get('edit', False) == False: + if meta.get('edit', False) is False: discs, bdinfo = await parse.get_bdinfo(discs, meta['uuid'], meta['base_dir'], meta.get('discs', [])) else: discs, bdinfo = await parse.get_bdinfo(meta['discs'], meta['uuid'], meta['base_dir'], meta['discs']) @@ -432,6 +962,9 @@ async def get_disc(self, meta): export = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'w', newline="", encoding='utf-8') export.write(discs[0]['ifo_mi']) export.close() + export_clean = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'w', newline="", encoding='utf-8') + export_clean.write(discs[0]['ifo_mi']) + export_clean.close() elif is_disc == "HDDVD": discs = await parse.get_hddvd_info(discs) export = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'w', newline="", encoding='utf-8') @@ -440,9 +973,6 @@ async def get_disc(self, meta): discs = sorted(discs, key=lambda d: d['name']) return is_disc, videoloc, bdinfo, discs - - - """ Get video files @@ -456,7 +986,7 @@ def get_video(self, videoloc, mode): if not file.lower().endswith('sample.mkv') or "!sample" in file.lower(): filelist.append(os.path.abspath(f"{videoloc}{os.sep}{file}")) try: - video = sorted(filelist)[0] + video = sorted(filelist)[0] except IndexError: console.print("[bold red]No Video files found") if mode == 'cli': @@ -467,42 +997,202 @@ def get_video(self, videoloc, mode): filelist = sorted(filelist) return video, filelist - - - - - """ Get and parse mediainfo """ def exportInfo(self, video, isdir, folder_id, base_dir, export_text): - if os.path.exists(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt") == False and export_text != False: + def filter_mediainfo(data): + filtered = { + "creatingLibrary": data.get("creatingLibrary"), + "media": { + "@ref": data["media"]["@ref"], + "track": [] + } + } + + for track in data["media"]["track"]: + if track["@type"] == "General": + filtered["media"]["track"].append({ + "@type": track["@type"], + "UniqueID": track.get("UniqueID", {}), + "VideoCount": track.get("VideoCount", {}), + "AudioCount": track.get("AudioCount", {}), + "TextCount": track.get("TextCount", {}), + "MenuCount": track.get("MenuCount", {}), + "FileExtension": track.get("FileExtension", {}), + "Format": track.get("Format", {}), + "Format_Version": track.get("Format_Version", {}), + "FileSize": track.get("FileSize", {}), + "Duration": track.get("Duration", {}), 
+ "OverallBitRate": track.get("OverallBitRate", {}), + "FrameRate": track.get("FrameRate", {}), + "FrameCount": track.get("FrameCount", {}), + "StreamSize": track.get("StreamSize", {}), + "IsStreamable": track.get("IsStreamable", {}), + "File_Created_Date": track.get("File_Created_Date", {}), + "File_Created_Date_Local": track.get("File_Created_Date_Local", {}), + "File_Modified_Date": track.get("File_Modified_Date", {}), + "File_Modified_Date_Local": track.get("File_Modified_Date_Local", {}), + "Encoded_Application": track.get("Encoded_Application", {}), + "Encoded_Library": track.get("Encoded_Library", {}), + }) + elif track["@type"] == "Video": + filtered["media"]["track"].append({ + "@type": track["@type"], + "StreamOrder": track.get("StreamOrder", {}), + "ID": track.get("ID", {}), + "UniqueID": track.get("UniqueID", {}), + "Format": track.get("Format", {}), + "Format_Profile": track.get("Format_Profile", {}), + "Format_Version": track.get("Format_Version", {}), + "Format_Level": track.get("Format_Level", {}), + "Format_Tier": track.get("Format_Tier", {}), + "HDR_Format": track.get("HDR_Format", {}), + "HDR_Format_Version": track.get("HDR_Format_Version", {}), + "HDR_Format_String": track.get("HDR_Format_String", {}), + "HDR_Format_Profile": track.get("HDR_Format_Profile", {}), + "HDR_Format_Level": track.get("HDR_Format_Level", {}), + "HDR_Format_Settings": track.get("HDR_Format_Settings", {}), + "HDR_Format_Compression": track.get("HDR_Format_Compression", {}), + "HDR_Format_Compatibility": track.get("HDR_Format_Compatibility", {}), + "CodecID": track.get("CodecID", {}), + "CodecID_Hint": track.get("CodecID_Hint", {}), + "Duration": track.get("Duration", {}), + "BitRate": track.get("BitRate", {}), + "Width": track.get("Width", {}), + "Height": track.get("Height", {}), + "Stored_Height": track.get("Stored_Height", {}), + "Sampled_Width": track.get("Sampled_Width", {}), + "Sampled_Height": track.get("Sampled_Height", {}), + "PixelAspectRatio": track.get("PixelAspectRatio", {}), + "DisplayAspectRatio": track.get("DisplayAspectRatio", {}), + "FrameRate_Mode": track.get("FrameRate_Mode", {}), + "FrameRate": track.get("FrameRate", {}), + "FrameRate_Num": track.get("FrameRate_Num", {}), + "FrameRate_Den": track.get("FrameRate_Den", {}), + "FrameCount": track.get("FrameCount", {}), + "Standard": track.get("Standard", {}), + "ColorSpace": track.get("ColorSpace", {}), + "ChromaSubsampling": track.get("ChromaSubsampling", {}), + "ChromaSubsampling_Position": track.get("ChromaSubsampling_Position", {}), + "BitDepth": track.get("BitDepth", {}), + "ScanType": track.get("ScanType", {}), + "ScanOrder": track.get("ScanOrder", {}), + "Delay": track.get("Delay", {}), + "Delay_Source": track.get("Delay_Source", {}), + "StreamSize": track.get("StreamSize", {}), + "Language": track.get("Language", {}), + "Default": track.get("Default", {}), + "Forced": track.get("Forced", {}), + "colour_description_present": track.get("colour_description_present", {}), + "colour_description_present_Source": track.get("colour_description_present_Source", {}), + "colour_range": track.get("colour_range", {}), + "colour_range_Source": track.get("colour_range_Source", {}), + "colour_primaries": track.get("colour_primaries", {}), + "colour_primaries_Source": track.get("colour_primaries_Source", {}), + "transfer_characteristics": track.get("transfer_characteristics", {}), + "transfer_characteristics_Source": track.get("transfer_characteristics_Source", {}), + "transfer_characteristics_Original": 
track.get("transfer_characteristics_Original", {}), + "matrix_coefficients": track.get("matrix_coefficients", {}), + "matrix_coefficients_Source": track.get("matrix_coefficients_Source", {}), + "MasteringDisplay_ColorPrimaries": track.get("MasteringDisplay_ColorPrimaries", {}), + "MasteringDisplay_ColorPrimaries_Source": track.get("MasteringDisplay_ColorPrimaries_Source", {}), + "MasteringDisplay_Luminance": track.get("MasteringDisplay_Luminance", {}), + "MasteringDisplay_Luminance_Source": track.get("MasteringDisplay_Luminance_Source", {}), + "MaxCLL": track.get("MaxCLL", {}), + "MaxCLL_Source": track.get("MaxCLL_Source", {}), + "MaxFALL": track.get("MaxFALL", {}), + "MaxFALL_Source": track.get("MaxFALL_Source", {}), + "Encoded_Library_Settings": track.get("Encoded_Library_Settings", {}), + }) + elif track["@type"] == "Audio": + filtered["media"]["track"].append({ + "@type": track["@type"], + "StreamOrder": track.get("StreamOrder", {}), + "ID": track.get("ID", {}), + "UniqueID": track.get("UniqueID", {}), + "Format": track.get("Format", {}), + "Format_Version": track.get("Format_Version", {}), + "Format_Profile": track.get("Format_Profile", {}), + "Format_Settings": track.get("Format_Settings", {}), + "Format_Commercial_IfAny": track.get("Format_Commercial_IfAny", {}), + "Format_Settings_Endianness": track.get("Format_Settings_Endianness", {}), + "Format_AdditionalFeatures": track.get("Format_AdditionalFeatures", {}), + "CodecID": track.get("CodecID", {}), + "Duration": track.get("Duration", {}), + "BitRate_Mode": track.get("BitRate_Mode", {}), + "BitRate": track.get("BitRate", {}), + "Channels": track.get("Channels", {}), + "ChannelPositions": track.get("ChannelPositions", {}), + "ChannelLayout": track.get("ChannelLayout", {}), + "Channels_Original": track.get("Channels_Original", {}), + "ChannelLayout_Original": track.get("ChannelLayout_Original", {}), + "SamplesPerFrame": track.get("SamplesPerFrame", {}), + "SamplingRate": track.get("SamplingRate", {}), + "SamplingCount": track.get("SamplingCount", {}), + "FrameRate": track.get("FrameRate", {}), + "FrameCount": track.get("FrameCount", {}), + "Compression_Mode": track.get("Compression_Mode", {}), + "Delay": track.get("Delay", {}), + "Delay_Source": track.get("Delay_Source", {}), + "Video_Delay": track.get("Video_Delay", {}), + "StreamSize": track.get("StreamSize", {}), + "Title": track.get("Title", {}), + "Language": track.get("Language", {}), + "ServiceKind": track.get("ServiceKind", {}), + "Default": track.get("Default", {}), + "Forced": track.get("Forced", {}), + "extra": track.get("extra", {}), + }) + elif track["@type"] == "Text": + filtered["media"]["track"].append({ + "@type": track["@type"], + "@typeorder": track.get("@typeorder", {}), + "StreamOrder": track.get("StreamOrder", {}), + "ID": track.get("ID", {}), + "UniqueID": track.get("UniqueID", {}), + "Format": track.get("Format", {}), + "CodecID": track.get("CodecID", {}), + "Duration": track.get("Duration", {}), + "BitRate": track.get("BitRate", {}), + "FrameRate": track.get("FrameRate", {}), + "FrameCount": track.get("FrameCount", {}), + "ElementCount": track.get("ElementCount", {}), + "StreamSize": track.get("StreamSize", {}), + "Title": track.get("Title", {}), + "Language": track.get("Language", {}), + "Default": track.get("Default", {}), + "Forced": track.get("Forced", {}), + }) + elif track["@type"] == "Menu": + filtered["media"]["track"].append({ + "@type": track["@type"], + "extra": track.get("extra", {}), + }) + return filtered + + if not 
os.path.exists(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt") and export_text:
             console.print("[bold yellow]Exporting MediaInfo...")
-            #MediaInfo to text
-            if isdir == False:
+            if not isdir:
                 os.chdir(os.path.dirname(video))
-            media_info = MediaInfo.parse(video, output="STRING", full=False, mediainfo_options={'inform_version' : '1'})
+            media_info = MediaInfo.parse(video, output="STRING", full=False, mediainfo_options={'inform_version': '1'})
             with open(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt", 'w', newline="", encoding='utf-8') as export:
                 export.write(media_info)
-                export.close()
             with open(f"{base_dir}/tmp/{folder_id}/MEDIAINFO_CLEANPATH.txt", 'w', newline="", encoding='utf-8') as export_cleanpath:
                 export_cleanpath.write(media_info.replace(video, os.path.basename(video)))
-                export_cleanpath.close()
             console.print("[bold green]MediaInfo Exported.")
-        if os.path.exists(f"{base_dir}/tmp/{folder_id}/MediaInfo.json.txt") == False:
-            #MediaInfo to JSON
-            media_info = MediaInfo.parse(video, output="JSON", mediainfo_options={'inform_version' : '1'})
-            export = open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", 'w', encoding='utf-8')
-            export.write(media_info)
-            export.close()
+        # Parse and filter the JSON MediaInfo only if it has not already been
+        # written on a previous run.
+        if not os.path.exists(f"{base_dir}/tmp/{folder_id}/MediaInfo.json"):
+            media_info_json = MediaInfo.parse(video, output="JSON", mediainfo_options={'inform_version': '1'})
+            media_info_dict = json.loads(media_info_json)
+            filtered_info = filter_mediainfo(media_info_dict)
+            with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", 'w', encoding='utf-8') as export:
+                json.dump(filtered_info, export, indent=4)
+        with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", 'r', encoding='utf-8') as f:
             mi = json.load(f)
-
-        return mi
-
-
+        return mi
 
     """
     Get Resolution
     """
@@ -514,20 +1204,27 @@ def get_resolution(self, guess, folder_id, base_dir):
         try:
             width = mi['media']['track'][1]['Width']
             height = mi['media']['track'][1]['Height']
-        except:
+        except Exception:
             width = 0
             height = 0
         framerate = mi['media']['track'][1].get('FrameRate', '')
         try:
             scan = mi['media']['track'][1]['ScanType']
-        except:
+        except Exception:
             scan = "Progressive"
         if scan == "Progressive":
             scan = "p"
+        elif scan == "Interlaced":
+            scan = 'i'
        elif framerate == "25.000":
             scan = "p"
         else:
-            scan = "i"
+            # Fallback using regex on meta['uuid'] - mainly for HUNO fun and games.
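+            # A "1080p"/"720p"/"2160p" token in the release name is taken as
+            # evidence of progressive video; interlaced sources are normally
+            # tagged 1080i/576i instead, so anything without a p-token falls
+            # back to "i" below.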
+ match = re.search(r'\b(1080p|720p|2160p)\b', folder_id, re.IGNORECASE) + if match: + scan = "p" # Assume progressive based on common resolution markers + else: + scan = "i" # Default to interlaced if no indicators are found width_list = [3840, 2560, 1920, 1280, 1024, 854, 720, 15360, 7680, 0] height_list = [2160, 1440, 1080, 720, 576, 540, 480, 8640, 4320, 0] width = self.closest(width_list, int(width)) @@ -549,56 +1246,54 @@ def closest(self, lst, K): res = each break return res - + # return lst[min(range(len(lst)), key = lambda i: abs(lst[i]-K))] def mi_resolution(self, res, guess, width, scan, height, actual_height): res_map = { - "3840x2160p" : "2160p", "2160p" : "2160p", - "2560x1440p" : "1440p", "1440p" : "1440p", - "1920x1080p" : "1080p", "1080p" : "1080p", - "1920x1080i" : "1080i", "1080i" : "1080i", - "1280x720p" : "720p", "720p" : "720p", - "1280x540p" : "720p", "1280x576p" : "720p", - "1024x576p" : "576p", "576p" : "576p", - "1024x576i" : "576i", "576i" : "576i", - "854x480p" : "480p", "480p" : "480p", - "854x480i" : "480i", "480i" : "480i", - "720x576p" : "576p", "576p" : "576p", - "720x576i" : "576i", "576i" : "576i", - "720x480p" : "480p", "480p" : "480p", - "720x480i" : "480i", "480i" : "480i", - "15360x8640p" : "8640p", "8640p" : "8640p", - "7680x4320p" : "4320p", "4320p" : "4320p", - "OTHER" : "OTHER"} + "3840x2160p": "2160p", "2160p": "2160p", + "2560x1440p": "1440p", "1440p": "1440p", + "1920x1080p": "1080p", "1080p": "1080p", + "1920x1080i": "1080i", "1080i": "1080i", + "1280x720p": "720p", "720p": "720p", + "1280x540p": "720p", "1280x576p": "720p", + "1024x576p": "576p", "576p": "576p", + "1024x576i": "576i", "576i": "576i", + "854x480p": "480p", "480p": "480p", + "854x480i": "480i", "480i": "480i", + "720x576p": "576p", "576p": "576p", + "720x576i": "576i", "576i": "576i", + "720x480p": "480p", "480p": "480p", + "720x480i": "480i", "480i": "480i", + "15360x8640p": "8640p", "8640p": "8640p", + "7680x4320p": "4320p", "4320p": "4320p", + "OTHER": "OTHER"} resolution = res_map.get(res, None) if actual_height == 540: resolution = "OTHER" - if resolution == None: - try: + if resolution is None: + try: resolution = guess['screen_size'] - except: + except Exception: width_map = { - '3840p' : '2160p', - '2560p' : '1550p', - '1920p' : '1080p', - '1920i' : '1080i', - '1280p' : '720p', - '1024p' : '576p', - '1024i' : '576i', - '854p' : '480p', - '854i' : '480i', - '720p' : '576p', - '720i' : '576i', - '15360p' : '4320p', - 'OTHERp' : 'OTHER' + '3840p': '2160p', + '2560p': '1550p', + '1920p': '1080p', + '1920i': '1080i', + '1280p': '720p', + '1024p': '576p', + '1024i': '576i', + '854p': '480p', + '854i': '480i', + '720p': '576p', + '720i': '576i', + '15360p': '4320p', + 'OTHERp': 'OTHER' } resolution = width_map.get(f"{width}{scan}", "OTHER") resolution = self.mi_resolution(resolution, guess, width, scan, height, actual_height) - + return resolution - - def is_sd(self, resolution): if resolution in ("480i", "480p", "576i", "576p", "540p"): @@ -610,47 +1305,94 @@ def is_sd(self, resolution): """ Is a scene release? 
""" - def is_scene(self, video, imdb=None): + def is_scene(self, video, meta, imdb=None): scene = False base = os.path.basename(video) base = os.path.splitext(base)[0] base = urllib.parse.quote(base) url = f"https://api.srrdb.com/v1/search/r:{base}" - try: - response = requests.get(url, timeout=30) - response = response.json() - if int(response.get('resultsCount', 0)) != 0: - video = f"{response['results'][0]['release']}.mkv" - scene = True - r = requests.get(f"https://api.srrdb.com/v1/imdb/{base}") - r = r.json() - if r['releases'] != [] and imdb == None: - imdb = r['releases'][0].get('imdb', imdb) if r['releases'][0].get('imdb') is not None else imdb - console.print(f"[green]SRRDB: Matched to {response['results'][0]['release']}") - except Exception: - video = video - scene = False - console.print("[yellow]SRRDB: No match found, or request has timed out") - return video, scene, imdb - - + if 'scene' not in meta: + try: + response = requests.get(url, timeout=30) + response_json = response.json() + + if int(response_json.get('resultsCount', 0)) > 0: + first_result = response_json['results'][0] + meta['scene_name'] = first_result['release'] + video = f"{first_result['release']}.mkv" + scene = True + if scene and meta.get('isdir', False) and meta.get('queue') is not None: + meta['keep_folder'] = True + + # NFO Download Handling + if first_result.get("hasNFO") == "yes": + try: + release = first_result['release'] + release_lower = release.lower() + nfo_url = f"https://www.srrdb.com/download/file/{release}/{release_lower}.nfo" + + # Define path and create directory + save_path = os.path.join(meta['base_dir'], 'tmp', meta['uuid']) + os.makedirs(save_path, exist_ok=True) + nfo_file_path = os.path.join(save_path, f"{release_lower}.nfo") + + # Download the NFO file + nfo_response = requests.get(nfo_url, timeout=30) + if nfo_response.status_code == 200: + with open(nfo_file_path, 'wb') as f: + f.write(nfo_response.content) + meta['nfo'] = True + meta['auto_nfo'] = True + console.print(f"[green]NFO downloaded to {nfo_file_path}") + else: + console.print("[yellow]NFO file not available for download.") + except Exception as e: + console.print("[yellow]Failed to download NFO file:", e) + # IMDb Handling + try: + r = requests.get(f"https://api.srrdb.com/v1/imdb/{base}") + r = r.json() + if r['releases'] != [] and imdb is None: + imdb = r['releases'][0].get('imdb', imdb) if r['releases'][0].get('imdb') is not None else imdb + console.print(f"[green]SRRDB: Matched to {first_result['release']}") + except Exception as e: + console.print("[yellow]Failed to fetch IMDb information:", e) + else: + console.print("[yellow]SRRDB: No match found") + except Exception as e: + console.print("[yellow]SRRDB: No match found, or request has timed out", e) + return video, scene, imdb """ Generate Screenshots """ + def sanitize_filename(self, filename): + # Replace invalid characters like colons with an underscore + return re.sub(r'[<>:"/\\|?*]', '_', filename) + + def disc_screenshots(self, meta, filename, bdinfo, folder_id, base_dir, use_vs, image_list, ffdebug, num_screens=None, force_screenshots=False): + if 'image_list' not in meta: + meta['image_list'] = [] + existing_images = [img for img in meta['image_list'] if isinstance(img, dict) and img.get('img_url', '').startswith('http')] - def disc_screenshots(self, filename, bdinfo, folder_id, base_dir, use_vs, image_list, ffdebug, num_screens=None): - if num_screens == None: + if len(existing_images) >= meta.get('cutoff') and not force_screenshots: + 
console.print("[yellow]There are already at least {} images in the image list. Skipping additional screenshots.".format(meta.get('cutoff'))) + return + + if num_screens is None: num_screens = self.screens if num_screens == 0 or len(image_list) >= num_screens: return - #Get longest m2ts - length = 0 + + sanitized_filename = self.sanitize_filename(filename) + length = 0 + file = None + frame_rate = None for each in bdinfo['files']: int_length = sum(int(float(x)) * 60 ** i for i, x in enumerate(reversed(each['length'].split(':')))) if int_length > length: @@ -658,96 +1400,247 @@ def disc_screenshots(self, filename, bdinfo, folder_id, base_dir, use_vs, image_ for root, dirs, files in os.walk(bdinfo['path']): for name in files: if name.lower() == each['file'].lower(): - file = f"{root}/{name}" - - - if "VC-1" in bdinfo['video'][0]['codec'] or bdinfo['video'][0]['hdr_dv'] != "": - keyframe = 'nokey' + file = os.path.join(root, name) + + if 'video' in bdinfo and bdinfo['video']: + fps_string = bdinfo['video'][0].get('fps', None) + if fps_string: + try: + frame_rate = float(fps_string.split(' ')[0]) # Extract and convert to float + except ValueError: + console.print("[red]Error: Unable to parse frame rate from bdinfo['video'][0]['fps']") + + keyframe = 'nokey' if "VC-1" in bdinfo['video'][0]['codec'] or bdinfo['video'][0]['hdr_dv'] != "" else 'none' + + os.chdir(f"{base_dir}/tmp/{folder_id}") + existing_screens = glob.glob(f"{sanitized_filename}-*.png") + total_existing = len(existing_screens) + len(existing_images) + if not force_screenshots: + num_screens = max(0, self.screens - total_existing) else: - keyframe = 'none' + num_screens = num_screens - os.chdir(f"{base_dir}/tmp/{folder_id}") - i = len(glob.glob(f"{filename}-*.png")) - if i >= num_screens: - i = num_screens - console.print('[bold green]Reusing screenshots') + if num_screens == 0 and not force_screenshots: + console.print('[bold green]Reusing existing screenshots. No additional screenshots needed.') + return + + if meta['debug'] and not force_screenshots: + console.print(f"[bold yellow]Saving Screens... 
Total needed: {self.screens}, Existing: {total_existing}, To capture: {num_screens}") + + tone_map = meta.get('tone_map', False) + if tone_map and "HDR" in meta['hdr']: + hdr_tonemap = True + else: + hdr_tonemap = False + + capture_tasks = [] + capture_results = [] + if hdr_tonemap: + task_limit = int(meta.get('tone_task_limit')) else: - console.print("[bold yellow]Saving Screens...") - if use_vs == True: - from src.vs import vs_screengn - vs_screengn(source=file, encode=None, filter_b_frames=False, num=num_screens, dir=f"{base_dir}/tmp/{folder_id}/") + task_limit = int(meta.get('task_limit', os.cpu_count())) + + if use_vs: + from src.vs import vs_screengn + vs_screengn(source=file, encode=None, filter_b_frames=False, num=num_screens, dir=f"{base_dir}/tmp/{folder_id}/") + else: + if meta.get('ffdebug', False): + loglevel = 'verbose' else: - if bool(ffdebug) == True: - loglevel = 'verbose' - debug = False - else: - loglevel = 'quiet' - debug = True - with Progress( - TextColumn("[bold green]Saving Screens..."), - BarColumn(), - "[cyan]{task.completed}/{task.total}", - TimeRemainingColumn() - ) as progress: - screen_task = progress.add_task("[green]Saving Screens...", total=num_screens + 1) - ss_times = [] - for i in range(num_screens + 1): - image = f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png" + loglevel = 'quiet' + + ss_times = self.valid_ss_time([], num_screens + 1, length, frame_rate) + existing_indices = {int(p.split('-')[-1].split('.')[0]) for p in existing_screens} + capture_tasks = [ + ( + file, + ss_times[i], + os.path.abspath(f"{base_dir}/tmp/{folder_id}/{sanitized_filename}-{len(existing_indices) + i}.png"), + keyframe, + loglevel, + hdr_tonemap + ) + for i in range(num_screens + 1) + ] + + with get_context("spawn").Pool(processes=min(len(capture_tasks), task_limit)) as pool: + try: + capture_results = list( + tqdm( + pool.imap_unordered(self.capture_disc_task, capture_tasks), + total=len(capture_tasks), + desc="Capturing Screenshots", + ascii=True, + dynamic_ncols=False + ) + ) + finally: + pool.close() + pool.join() + + if capture_results: + if len(capture_tasks) > num_screens: + smallest = min(capture_results, key=os.path.getsize) + if meta['debug']: + console.print(f"[yellow]Removing smallest image: {smallest} ({os.path.getsize(smallest)} bytes)[/yellow]") + os.remove(smallest) + capture_results.remove(smallest) + optimized_results = [] + optimize_tasks = [(result, self.config) for result in capture_results if result and os.path.exists(result)] + with get_context("spawn").Pool(processes=min(len(optimize_tasks), task_limit)) as pool: + try: + optimized_results = list( + tqdm( + pool.imap_unordered(self.optimize_image_task, optimize_tasks), + total=len(optimize_tasks), + desc="Optimizing Images", + ascii=True, + dynamic_ncols=False + ) + ) + finally: + pool.close() + pool.join() + + valid_results = [] + remaining_retakes = [] + for image_path in optimized_results: + if "Error" in image_path: + console.print(f"[red]{image_path}") + continue + + retake = False + image_size = os.path.getsize(image_path) + if image_size <= 75000: + console.print(f"[yellow]Image {image_path} is incredibly small, retaking.") + retake = True + time.sleep(1) + elif image_size <= 31000000 and self.img_host == "imgbb" and not retake: + pass + elif image_size <= 10000000 and self.img_host in ["imgbox", "pixhost"] and not retake: + pass + elif self.img_host in ["ptpimg", "lensdump", "ptscreens", "oeimg"] and not retake: + pass + elif not retake: + console.print("[red]Image too large for your image host, 
retaking.") + retake = True + time.sleep(1) + + if retake: + retry_attempts = 3 + for attempt in range(1, retry_attempts + 1): + console.print(f"[yellow]Retaking screenshot for: {image_path} (Attempt {attempt}/{retry_attempts})[/yellow]") try: - ss_times = self.valid_ss_time(ss_times, num_screens+1, length) - ( - ffmpeg - .input(file, ss=ss_times[-1], skip_frame=keyframe) - .output(image, vframes=1, pix_fmt="rgb24") - .overwrite_output() - .global_args('-loglevel', loglevel) - .run(quiet=debug) - ) - except Exception: - console.print(traceback.format_exc()) - - self.optimize_images(image) - if os.path.getsize(Path(image)) <= 31000000 and self.img_host == "imgbb": - i += 1 - elif os.path.getsize(Path(image)) <= 10000000 and self.img_host in ["imgbox", 'pixhost']: - i += 1 - elif os.path.getsize(Path(image)) <= 75000: - console.print("[bold yellow]Image is incredibly small, retaking") - time.sleep(1) - elif self.img_host == "ptpimg": - i += 1 - elif self.img_host == "lensdump": - i += 1 - else: - console.print("[red]Image too large for your image host, retaking") - time.sleep(1) - progress.advance(screen_task) - #remove smallest image - smallest = "" - smallestsize = 99 ** 99 - for screens in glob.glob1(f"{base_dir}/tmp/{folder_id}/", f"{filename}-*"): - screensize = os.path.getsize(screens) - if screensize < smallestsize: - smallestsize = screensize - smallest = screens - os.remove(smallest) - - def dvd_screenshots(self, meta, disc_num, num_screens=None): - if num_screens == None: - num_screens = self.screens - if num_screens == 0 or (len(meta.get('image_list', [])) >= num_screens and disc_num == 0): + os.remove(image_path) + random_time = random.uniform(0, length) + self.capture_disc_task((file, random_time, image_path, keyframe, loglevel, hdr_tonemap)) + self.optimize_image_task((image_path, config)) + new_size = os.path.getsize(image_path) + valid_image = False + + if new_size > 75000 and new_size <= 31000000 and self.img_host == "imgbb": + console.print(f"[green]Successfully retaken screenshot for: {image_path} ({new_size} bytes)[/green]") + valid_image = True + elif new_size > 75000 and new_size <= 10000000 and self.img_host in ["imgbox", "pixhost"]: + console.print(f"[green]Successfully retaken screenshot for: {image_path} ({new_size} bytes)[/green]") + valid_image = True + elif new_size > 75000 and self.img_host in ["ptpimg", "lensdump", "ptscreens", "oeimg"]: + console.print(f"[green]Successfully retaken screenshot for: {image_path} ({new_size} bytes)[/green]") + valid_image = True + + if valid_image: + valid_results.append(image_path) + break + else: + console.print(f"[red]Retaken image {image_path} does not meet the size requirements for {self.img_host}. Retrying...[/red]") + except Exception as e: + console.print(f"[red]Error retaking screenshot for {image_path}: {e}[/red]") + else: + console.print(f"[red]All retry attempts failed for {image_path}. 
Skipping.[/red]") + remaining_retakes.append(image_path) + else: + valid_results.append(image_path) + + if remaining_retakes: + console.print(f"[red]The following images could not be retaken successfully: {remaining_retakes}[/red]") + + for image_path in valid_results: + img_dict = { + 'img_url': image_path, + 'raw_url': image_path, + 'web_url': image_path + } + meta['image_list'].append(img_dict) + + console.print(f"[green]Successfully captured {len(valid_results)} screenshots.") + + def capture_disc_task(self, task): + file, ss_time, image_path, keyframe, loglevel, hdr_tonemap = task + try: + ff = ffmpeg.input(file, ss=ss_time, skip_frame=keyframe) + + if hdr_tonemap: + ff = ( + ff + .filter('zscale', transfer='linear') + .filter('tonemap', tonemap='mobius', desat=8.0) + .filter('zscale', transfer='bt709') + .filter('format', 'rgb24') + ) + + command = ( + ff + .output(image_path, vframes=1, pix_fmt="rgb24") + .overwrite_output() + .global_args('-loglevel', loglevel) + ) + + command.run(capture_stdout=True, capture_stderr=True) + + return image_path + except ffmpeg.Error as e: + error_output = e.stderr.decode('utf-8') + console.print(f"[red]FFmpeg error capturing screenshot: {error_output}[/red]") + return None + except Exception as e: + console.print(f"[red]Error capturing screenshot: {e}[/red]") + return None + + def dvd_screenshots(self, meta, disc_num, num_screens=None, retry_cap=None): + if 'image_list' not in meta: + meta['image_list'] = [] + existing_images = [img for img in meta['image_list'] if isinstance(img, dict) and img.get('img_url', '').startswith('http')] + + if len(existing_images) >= meta.get('cutoff') and not retry_cap: + console.print("[yellow]There are already at least {} images in the image list. Skipping additional screenshots.".format(meta.get('cutoff'))) return - ifo_mi = MediaInfo.parse(f"{meta['discs'][disc_num]['path']}/VTS_{meta['discs'][disc_num]['main_set'][0][:2]}_0.IFO", mediainfo_options={'inform_version' : '1'}) + + if num_screens is None: + num_screens = self.screens - len(existing_images) + if num_screens == 0 or (len(meta.get('image_list', [])) >= self.screens and disc_num == 0): + return + + if len(glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][disc_num]['name']}-*.png")) >= num_screens: + i = num_screens + console.print('[bold green]Reusing screenshots') + return + + ifo_mi = MediaInfo.parse(f"{meta['discs'][disc_num]['path']}/VTS_{meta['discs'][disc_num]['main_set'][0][:2]}_0.IFO", mediainfo_options={'inform_version': '1'}) sar = 1 for track in ifo_mi.tracks: if track.track_type == "Video": - length = float(track.duration)/1000 + if isinstance(track.duration, str): + durations = [float(d) for d in track.duration.split(' / ')] + length = max(durations) / 1000 # Use the longest duration + else: + length = float(track.duration) / 1000 # noqa #F841 # Convert to seconds + par = float(track.pixel_aspect_ratio) dar = float(track.display_aspect_ratio) width = float(track.width) height = float(track.height) + frame_rate = float(track.frame_rate) if par < 1: - # multiply that dar by the height and then do a simple width / height new_height = dar * height sar = width / new_height w_sar = 1 @@ -756,292 +1649,565 @@ def dvd_screenshots(self, meta, disc_num, num_screens=None): sar = par w_sar = sar h_sar = 1 - - main_set_length = len(meta['discs'][disc_num]['main_set']) - if main_set_length >= 3: - main_set = meta['discs'][disc_num]['main_set'][1:-1] - elif main_set_length == 2: - main_set = meta['discs'][disc_num]['main_set'][1:] - elif 
main_set_length == 1: - main_set = meta['discs'][disc_num]['main_set'] - n = 0 + + def _is_vob_good(n, loops, num_screens): + max_loops = 6 + fallback_duration = 300 + valid_tracks = [] + + while loops < max_loops: + try: + vob_mi = MediaInfo.parse( + f"{meta['discs'][disc_num]['path']}/VTS_{main_set[n]}", + output='JSON' + ) + vob_mi = json.loads(vob_mi) + + for track in vob_mi.get('media', {}).get('track', []): + duration = float(track.get('Duration', 0)) + width = track.get('Width') + height = track.get('Height') + + if duration > 1 and width and height: # Minimum 1-second track + valid_tracks.append({ + 'duration': duration, + 'track_index': n + }) + + if valid_tracks: + # Sort by duration, take longest track + longest_track = max(valid_tracks, key=lambda x: x['duration']) + return longest_track['duration'], longest_track['track_index'] + + except Exception as e: + console.print(f"[red]Error parsing VOB {n}: {e}") + + n = (n + 1) % len(main_set) + loops += 1 + + return fallback_duration, 0 + + main_set = meta['discs'][disc_num]['main_set'][1:] if len(meta['discs'][disc_num]['main_set']) > 1 else meta['discs'][disc_num]['main_set'] os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") - i = 0 - if len(glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][disc_num]['name']}-*.png")) >= num_screens: - i = num_screens - console.print('[bold green]Reusing screenshots') - else: - if bool(meta.get('ffdebug', False)) == True: - loglevel = 'verbose' - debug = False - looped = 0 - retake = False - with Progress( - TextColumn("[bold green]Saving Screens..."), - BarColumn(), - "[cyan]{task.completed}/{task.total}", - TimeRemainingColumn() - ) as progress: - screen_task = progress.add_task("[green]Saving Screens...", total=num_screens + 1) - ss_times = [] - for i in range(num_screens + 1): - if n >= len(main_set): - n = 0 - if n >= num_screens: - n -= num_screens - image = f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][disc_num]['name']}-{i}.png" - if not os.path.exists(image) or retake != False: - retake = False - loglevel = 'quiet' - debug = True - if bool(meta.get('debug', False)): - loglevel = 'error' - debug = False - def _is_vob_good(n, loops, num_screens): - voblength = 300 - vob_mi = MediaInfo.parse(f"{meta['discs'][disc_num]['path']}/VTS_{main_set[n]}", output='JSON') - vob_mi = json.loads(vob_mi) - try: - voblength = float(vob_mi['media']['track'][1]['Duration']) - return voblength, n - except Exception: - try: - voblength = float(vob_mi['media']['track'][2]['Duration']) - return voblength, n - except Exception: - n += 1 - if n >= len(main_set): - n = 0 - if n >= num_screens: - n -= num_screens - if loops < 6: - loops = loops + 1 - voblength, n = _is_vob_good(n, loops, num_screens) - return voblength, n - else: - return 300, n - try: - voblength, n = _is_vob_good(n, 0, num_screens) - img_time = random.randint(round(voblength/5) , round(voblength - voblength/5)) - ss_times = self.valid_ss_time(ss_times, num_screens+1, voblength) - ff = ffmpeg.input(f"{meta['discs'][disc_num]['path']}/VTS_{main_set[n]}", ss=ss_times[-1]) - if w_sar != 1 or h_sar != 1: - ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) - ( - ff - .output(image, vframes=1, pix_fmt="rgb24") - .overwrite_output() - .global_args('-loglevel', loglevel) - .run(quiet=debug) - ) - except Exception: - console.print(traceback.format_exc()) - self.optimize_images(image) - n += 1 - try: - if os.path.getsize(Path(image)) <= 31000000 and self.img_host == "imgbb": - i += 1 - elif 
os.path.getsize(Path(image)) <= 10000000 and self.img_host in ["imgbox", 'pixhost']: - i += 1 - elif os.path.getsize(Path(image)) <= 75000: - console.print("[yellow]Image is incredibly small (and is most likely to be a single color), retaking") - retake = True - time.sleep(1) - elif self.img_host == "ptpimg": - i += 1 - elif self.img_host == "lensdump": - i += 1 - else: - console.print("[red]Image too large for your image host, retaking") - retake = True - time.sleep(1) - looped = 0 - except Exception: - if looped >= 25: - console.print('[red]Failed to take screenshots') - exit() - looped += 1 - progress.advance(screen_task) - #remove smallest image - smallest = "" - smallestsize = 99**99 - for screens in glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}/", f"{meta['discs'][disc_num]['name']}-*"): - screensize = os.path.getsize(screens) - if screensize < smallestsize: - smallestsize = screensize - smallest = screens - os.remove(smallest) - - def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=None): - if num_screens == None: - num_screens = self.screens - len(meta.get('image_list', [])) - if num_screens == 0: - # or len(meta.get('image_list', [])) >= num_screens: + voblength, n = _is_vob_good(0, 0, num_screens) + ss_times = self.valid_ss_time([], num_screens + 1, voblength, frame_rate) + tasks = [] + task_limit = int(meta.get('task_limit', os.cpu_count())) + for i in range(num_screens + 1): + image = f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][disc_num]['name']}-{i}.png" + input_file = f"{meta['discs'][disc_num]['path']}/VTS_{main_set[i % len(main_set)]}" + tasks.append((input_file, image, ss_times[i], meta, width, height, w_sar, h_sar)) + + with get_context("spawn").Pool(processes=min(num_screens + 1, task_limit)) as pool: + try: + results = list(tqdm(pool.imap_unordered(self.capture_dvd_screenshot, tasks), total=len(tasks), desc="Capturing Screenshots", ascii=True, dynamic_ncols=False)) + finally: + pool.close() + pool.join() + + if len(glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}/", f"{meta['discs'][disc_num]['name']}-*")) > num_screens: + smallest = None + smallest_size = float('inf') + for screens in glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}/", f"{meta['discs'][disc_num]['name']}-*"): + screen_path = os.path.join(f"{meta['base_dir']}/tmp/{meta['uuid']}/", screens) + try: + screen_size = os.path.getsize(screen_path) + if screen_size < smallest_size: + smallest_size = screen_size + smallest = screen_path + except FileNotFoundError: + console.print(f"[red]File not found: {screen_path}[/red]") # Handle potential edge cases + continue + + if smallest: + if meta['debug']: + console.print(f"[yellow]Removing smallest image: {smallest} ({smallest_size} bytes)[/yellow]") + os.remove(smallest) + + optimize_tasks = [(image, self.config) for image in results if image and os.path.exists(image)] + + with get_context("spawn").Pool(processes=min(len(optimize_tasks), task_limit)) as pool: + try: + optimize_results = list( # noqa F841 + tqdm( + pool.imap_unordered(self.optimize_image_task, optimize_tasks), + total=len(optimize_tasks), + desc="Optimizing Images", + ascii=True, + dynamic_ncols=False + ) + ) + finally: + pool.close() + pool.join() + + valid_results = [] + retry_attempts = 3 + + for image in optimize_results: + if "Error" in image: + console.print(f"[red]{image}") + continue + + retry_cap = False + image_size = os.path.getsize(image) + if image_size <= 120000: + console.print(f"[yellow]Image {image} is incredibly small, retaking.") + 
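+                # A PNG this small (<= 120 kB) is almost always a blank or
+                # single-colour frame rather than real picture content, so it
+                # is flagged and re-captured at a fresh timestamp below.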
retry_cap = True + time.sleep(1) + + if retry_cap: + for attempt in range(1, retry_attempts + 1): + console.print(f"[yellow]Retaking screenshot for: {image} (Attempt {attempt}/{retry_attempts})[/yellow]") + try: + os.remove(image) + except Exception as e: + console.print(f"[red]Failed to delete {image}: {e}[/red]") + break + + image_index = int(image.rsplit('-', 1)[-1].split('.')[0]) + input_file = f"{meta['discs'][disc_num]['path']}/VTS_{main_set[image_index % len(main_set)]}" + adjusted_time = random.uniform(0, voblength) + + try: + self.capture_dvd_screenshot((input_file, image, adjusted_time, meta, width, height, w_sar, h_sar)) + retaken_size = os.path.getsize(image) + + if retaken_size > 75000: + console.print(f"[green]Successfully retaken screenshot for: {image} ({retaken_size} bytes)[/green]") + valid_results.append(image) + break + else: + console.print(f"[red]Retaken image {image} is still too small. Retrying...[/red]") + except Exception as e: + console.print(f"[red]Error capturing screenshot for {input_file} at {adjusted_time}: {e}[/red]") + + else: + console.print(f"[red]All retry attempts failed for {image}. Skipping.[/red]") + else: + valid_results.append(image) + + for image in valid_results: + img_dict = { + 'img_url': image, + 'raw_url': image, + 'web_url': image + } + meta['image_list'].append(img_dict) + + console.print(f"[green]Successfully captured {len(optimize_results)} screenshots.") + + def capture_dvd_screenshot(self, task): + input_file, image, seek_time, meta, width, height, w_sar, h_sar = task + + if os.path.exists(image): + console.print(f"[green]Screenshot already exists: {image}[/green]") + return image + + try: + loglevel = 'verbose' if meta.get('ffdebug', False) else 'quiet' + media_info = MediaInfo.parse(input_file) + video_duration = next((track.duration for track in media_info.tracks if track.track_type == "Video"), None) + + if video_duration and seek_time > video_duration: + seek_time = max(0, video_duration - 1) + + ff = ffmpeg.input(input_file, ss=seek_time) + if w_sar != 1 or h_sar != 1: + ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) + + try: + ff.output(image, vframes=1, pix_fmt="rgb24").overwrite_output().global_args('-loglevel', loglevel, '-accurate_seek').run() + except ffmpeg._run.Error as e: + stderr_output = e.stderr.decode() if e.stderr else "No stderr output available" + console.print(f"[red]Error capturing screenshot for {input_file} at {seek_time}s: {stderr_output}[/red]") + if os.path.exists(image): + return image + else: + console.print(f"[red]Screenshot creation failed for {image}[/red]") + return None + + except Exception as e: + console.print(f"[red]Error capturing screenshot for {input_file} at {seek_time}s: {e}[/red]") + return None + + def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=None, force_screenshots=False, manual_frames=None): + def use_tqdm(): + """Check if the environment supports TTY (interactive progress bar).""" + return sys.stdout.isatty() + + if meta['debug']: + start_time = time.time() + if 'image_list' not in meta: + meta['image_list'] = [] + + existing_images = [img for img in meta['image_list'] if isinstance(img, dict) and img.get('img_url', '').startswith('http')] + + if len(existing_images) >= meta.get('cutoff') and not force_screenshots: + console.print("[yellow]There are already at least {} images in the image list. 
Skipping additional screenshots.".format(meta.get('cutoff'))) return - with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", encoding='utf-8') as f: - mi = json.load(f) - video_track = mi['media']['track'][1] - length = video_track.get('Duration', mi['media']['track'][0]['Duration']) - width = float(video_track.get('Width')) - height = float(video_track.get('Height')) - par = float(video_track.get('PixelAspectRatio', 1)) - dar = float(video_track.get('DisplayAspectRatio')) - - if par == 1: - sar = w_sar = h_sar = 1 - elif par < 1: - new_height = dar * height - sar = width / new_height - w_sar = 1 - h_sar = sar + + if num_screens is None: + num_screens = self.screens - len(existing_images) + if num_screens <= 0: + return + + try: + with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", encoding='utf-8') as f: + mi = json.load(f) + video_track = mi['media']['track'][1] + length = video_track.get('Duration', mi['media']['track'][0]['Duration']) + width = float(video_track.get('Width')) + height = float(video_track.get('Height')) + par = float(video_track.get('PixelAspectRatio', 1)) + dar = float(video_track.get('DisplayAspectRatio')) + frame_rate = float(video_track.get('FrameRate', 24.0)) + + if par == 1: + sar = w_sar = h_sar = 1 + elif par < 1: + new_height = dar * height + sar = width / new_height + w_sar = 1 + h_sar = sar + else: + sar = w_sar = par + h_sar = 1 + length = round(float(length)) + except (FileNotFoundError, KeyError, ValueError) as e: + console.print(f"[red]Error processing MediaInfo.json: {e}") + return + + loglevel = 'verbose' if meta.get('ffdebug', False) else 'quiet' + os.chdir(f"{base_dir}/tmp/{folder_id}") + + if manual_frames: + if meta['debug']: + console.print(f"[yellow]Using manual frames: {manual_frames}") + manual_frames = [int(frame) for frame in manual_frames.split(',')] + ss_times = [frame / frame_rate for frame in manual_frames] + else: + ss_times = [] + + ss_times = self.valid_ss_time( + ss_times, + num_screens, + length, + frame_rate, + exclusion_zone=500 + ) + if meta['debug']: + console.print(f"[green]Final list of frames for screenshots: {ss_times}") + + tone_map = meta.get('tone_map', False) + if tone_map and "HDR" in meta['hdr']: + hdr_tonemap = True + else: + hdr_tonemap = False + + capture_tasks = [] + capture_results = [] + if hdr_tonemap: + task_limit = int(meta.get('tone_task_limit')) + else: + task_limit = int(meta.get('task_limit', os.cpu_count())) + + existing_images = 0 + for i in range(num_screens): + image_path = os.path.abspath(f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png") + if os.path.exists(image_path) and not meta.get('retake', False): + existing_images += 1 + + if existing_images == num_screens and not meta.get('retake', False): + console.print("[yellow]The correct number of screenshots already exists. Skipping capture process.") + else: + for i in range(num_screens + 1): + image_path = os.path.abspath(f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png") + if not os.path.exists(image_path) or meta.get('retake', False): + capture_tasks.append((path, ss_times[i], image_path, width, height, w_sar, h_sar, loglevel, hdr_tonemap)) + elif meta['debug']: + console.print(f"[yellow]Skipping existing screenshot: {image_path}") + + if not capture_tasks: + console.print("[yellow]All screenshots already exist. 
Skipping capture process.") else: - sar = w_sar = par - h_sar = 1 - length = round(float(length)) - os.chdir(f"{base_dir}/tmp/{folder_id}") - i = 0 - if len(glob.glob(f"{filename}-*.png")) >= num_screens: - i = num_screens - console.print('[bold green]Reusing screenshots') + if use_tqdm(): + with tqdm(total=len(capture_tasks), desc="Capturing Screenshots", ascii=True, dynamic_ncols=False) as pbar: + with get_context("spawn").Pool(processes=min(len(capture_tasks), task_limit)) as pool: + try: + for result in pool.imap_unordered(self.capture_screenshot, capture_tasks): + if isinstance(result, str) and result.startswith("Error:"): + console.print(f"[red]Capture Error: {result}") + else: + capture_results.append(result) + pbar.update(1) + finally: + pool.close() + pool.join() + else: + console.print("[blue]Non-TTY environment detected. Progress bar disabled.") + with get_context("spawn").Pool(processes=min(len(capture_tasks), task_limit)) as pool: + try: + for i, result in enumerate(pool.imap_unordered(self.capture_screenshot, capture_tasks), 1): + capture_results.append(result) + console.print(f"Processed {i}/{len(capture_tasks)} screenshots") + finally: + pool.close() + pool.join() + + if capture_results and (len(capture_results) + existing_images) > num_screens and not force_screenshots: + smallest = min(capture_results, key=os.path.getsize) + if meta['debug']: + console.print(f"[yellow]Removing smallest image: {smallest} ({os.path.getsize(smallest)} bytes)[/yellow]") + os.remove(smallest) + capture_results.remove(smallest) + + optimize_tasks = [(result, self.config) for result in capture_results if "Error" not in result] + optimize_results = [] + if optimize_tasks: + if use_tqdm(): + with tqdm(total=len(optimize_tasks), desc="Optimizing Images", ascii=True, dynamic_ncols=False) as pbar: + with get_context("spawn").Pool(processes=min(len(optimize_tasks), task_limit)) as pool: + try: + for result in pool.imap_unordered(self.optimize_image_task, optimize_tasks): + optimize_results.append(result) + pbar.update(1) + finally: + pool.close() + pool.join() else: - loglevel = 'quiet' - debug = True - if bool(meta.get('ffdebug', False)) == True: - loglevel = 'verbose' - debug = False - if meta.get('vapoursynth', False) == True: - from src.vs import vs_screengn - vs_screengn(source=path, encode=None, filter_b_frames=False, num=num_screens, dir=f"{base_dir}/tmp/{folder_id}/") + with get_context("spawn").Pool(processes=min(len(optimize_tasks), task_limit)) as pool: + try: + for i, result in enumerate(pool.imap_unordered(self.optimize_image_task, optimize_tasks), 1): + optimize_results.append(result) + console.print(f"Optimized {i}/{len(optimize_tasks)} images") + finally: + pool.close() + pool.join() + + valid_results = [] + remaining_retakes = [] + for image_path in optimize_results: + if "Error" in image_path: + console.print(f"[red]{image_path}") + continue + + retake = False + image_size = os.path.getsize(image_path) + if not manual_frames: + if image_size <= 75000: + console.print(f"[yellow]Image {image_path} is incredibly small, retaking.") + retake = True + time.sleep(1) + elif image_size <= 31000000 and self.img_host == "imgbb" and not retake: + pass + elif image_size <= 10000000 and self.img_host in ["imgbox", "pixhost"] and not retake: + pass + elif self.img_host in ["ptpimg", "lensdump", "ptscreens", "oeimg"] and not retake: + pass + elif not retake: + console.print("[red]Image too large for your image host, retaking.") + retake = True + time.sleep(1) + + if retake: + retry_attempts = 3 + 
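+                # Each attempt deletes the undersized PNG, captures the frame
+                # again at a new random timestamp, re-optimizes it, and then
+                # re-checks it against the size limits of the configured
+                # image host before accepting it.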
for attempt in range(1, retry_attempts + 1): + console.print(f"[yellow]Retaking screenshot for: {image_path} (Attempt {attempt}/{retry_attempts})[/yellow]") + try: + os.remove(image_path) + random_time = random.uniform(0, length) + self.capture_screenshot((path, random_time, image_path, width, height, w_sar, h_sar, loglevel, hdr_tonemap)) + self.optimize_image_task((image_path, config)) + new_size = os.path.getsize(image_path) + valid_image = False + + if new_size > 75000 and new_size <= 31000000 and self.img_host == "imgbb": + console.print(f"[green]Successfully retaken screenshot for: {image_path} ({new_size} bytes)[/green]") + valid_image = True + elif new_size > 75000 and new_size <= 10000000 and self.img_host in ["imgbox", "pixhost"]: + console.print(f"[green]Successfully retaken screenshot for: {image_path} ({new_size} bytes)[/green]") + valid_image = True + elif new_size > 75000 and self.img_host in ["ptpimg", "lensdump", "ptscreens", "oeimg"]: + console.print(f"[green]Successfully retaken screenshot for: {image_path} ({new_size} bytes)[/green]") + valid_image = True + + if valid_image: + valid_results.append(image_path) + break + else: + console.print(f"[red]Retaken image {image_path} does not meet the size requirements for {self.img_host}. Retrying...[/red]") + except Exception as e: + console.print(f"[red]Error retaking screenshot for {image_path}: {e}[/red]") else: - retake = False - with Progress( - TextColumn("[bold green]Saving Screens..."), - BarColumn(), - "[cyan]{task.completed}/{task.total}", - TimeRemainingColumn() - ) as progress: - ss_times = [] - screen_task = progress.add_task("[green]Saving Screens...", total=num_screens + 1) - for i in range(num_screens + 1): - image = os.path.abspath(f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png") - if not os.path.exists(image) or retake != False: - retake = False - try: - ss_times = self.valid_ss_time(ss_times, num_screens+1, length) - ff = ffmpeg.input(path, ss=ss_times[-1]) - if w_sar != 1 or h_sar != 1: - ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) - ( - ff - .output(image, vframes=1, pix_fmt="rgb24") - .overwrite_output() - .global_args('-loglevel', loglevel) - .run(quiet=debug) - ) - except Exception: - console.print(traceback.format_exc()) - - self.optimize_images(image) - if os.path.getsize(Path(image)) <= 75000: - console.print("[yellow]Image is incredibly small, retaking") - retake = True - time.sleep(1) - if os.path.getsize(Path(image)) <= 31000000 and self.img_host == "imgbb" and retake == False: - i += 1 - elif os.path.getsize(Path(image)) <= 10000000 and self.img_host in ["imgbox", 'pixhost'] and retake == False: - i += 1 - elif self.img_host in ["ptpimg", "lensdump"] and retake == False: - i += 1 - elif self.img_host == "freeimage.host": - console.print("[bold red]Support for freeimage.host has been removed. 
Please remove from your config") - exit() - elif retake == True: - pass - else: - console.print("[red]Image too large for your image host, retaking") - retake = True - time.sleep(1) - else: - i += 1 - progress.advance(screen_task) - #remove smallest image - smallest = "" - smallestsize = 99 ** 99 - for screens in glob.glob1(f"{base_dir}/tmp/{folder_id}/", f"{filename}-*"): - screensize = os.path.getsize(screens) - if screensize < smallestsize: - smallestsize = screensize - smallest = screens - os.remove(smallest) - - def valid_ss_time(self, ss_times, num_screens, length): - valid_time = False - while valid_time != True: - valid_time = True - if ss_times != []: - sst = random.randint(round(length/5), round(length/2)) - for each in ss_times: - tolerance = length / 10 / num_screens - if abs(sst - each) <= tolerance: - valid_time = False - if valid_time == True: - ss_times.append(sst) + console.print(f"[red]All retry attempts failed for {image_path}. Skipping.[/red]") + remaining_retakes.append(image_path) else: - ss_times.append(random.randint(round(length/5), round(length/2))) - return ss_times + valid_results.append(image_path) - def optimize_images(self, image): - if self.config['DEFAULT'].get('optimize_images', True) == True: - if os.path.exists(image): - try: + if remaining_retakes: + console.print(f"[red]The following images could not be retaken successfully: {remaining_retakes}[/red]") + + for image_path in valid_results: + img_dict = { + 'img_url': image_path, + 'raw_url': image_path, + 'web_url': image_path + } + meta['image_list'].append(img_dict) + + console.print(f"[green]Successfully captured {len(valid_results)} screenshots.") + + if meta['debug']: + finish_time = time.time() + console.print(f"Screenshots processed in {finish_time - start_time:.4f} seconds") + + def valid_ss_time(self, ss_times, num_screens, length, frame_rate, exclusion_zone=None): + total_screens = num_screens + 1 + + if exclusion_zone is None: + exclusion_zone = max(length / (3 * total_screens), length / 15) + + result_times = ss_times.copy() + section_size = (round(4 * length / 5) - round(length / 5)) / total_screens * 1.3 + section_starts = [round(length / 5) + i * (section_size * 0.9) for i in range(total_screens)] + + for section_index in range(total_screens): + valid_time = False + attempts = 0 + start_frame = round(section_starts[section_index] * frame_rate) + end_frame = round((section_starts[section_index] + section_size) * frame_rate) + + while not valid_time and attempts < 50: + attempts += 1 + frame = random.randint(start_frame, end_frame) + time = frame / frame_rate + + if all(abs(frame - existing_time * frame_rate) > exclusion_zone * frame_rate for existing_time in result_times): + result_times.append(time) + valid_time = True + + if not valid_time: + midpoint_frame = (start_frame + end_frame) // 2 + result_times.append(midpoint_frame / frame_rate) + + result_times = sorted(result_times) + + return result_times + + def capture_screenshot(self, args): + path, ss_time, image_path, width, height, w_sar, h_sar, loglevel, hdr_tonemap = args + try: + if width <= 0 or height <= 0: + return "Error: Invalid width or height for scaling" + + if ss_time < 0: + return f"Error: Invalid timestamp {ss_time}" + + ff = ffmpeg.input(path, ss=ss_time) + if w_sar != 1 or h_sar != 1: + ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) + + if hdr_tonemap: + ff = ( + ff + .filter('zscale', transfer='linear') + .filter('tonemap', tonemap='mobius', desat=8.0) + .filter('zscale', 
transfer='bt709') + .filter('format', 'rgb24') + ) + + command = ( + ff + .output( + image_path, + vframes=1, + pix_fmt="rgb24" + ) + .overwrite_output() + .global_args('-loglevel', loglevel) + ) + + try: + command.run(capture_stdout=True, capture_stderr=True) + except ffmpeg.Error as e: + error_output = e.stderr.decode('utf-8') + return f"Error: {error_output}" + + if not os.path.exists(image_path) or os.path.getsize(image_path) == 0: + return f"Error: Screenshot not generated or is empty at {image_path}" + + return image_path + except Exception as e: + return f"Error: {str(e)}" + + def optimize_image_task(self, args): + image, config = args + try: + # Extract shared_seedbox and optimize_images from config + optimize_images = config['DEFAULT'].get('optimize_images', True) + shared_seedbox = config['DEFAULT'].get('shared_seedbox', True) + + if optimize_images: + if shared_seedbox: + # Limit the number of threads for oxipng + num_cores = multiprocessing.cpu_count() + max_threads = num_cores // 2 + os.environ['RAYON_NUM_THREADS'] = str(max_threads) + + if os.path.exists(image): pyver = platform.python_version_tuple() if int(pyver[0]) == 3 and int(pyver[1]) >= 7: - import oxipng - if os.path.getsize(image) >= 31000000: + import oxipng + if os.path.getsize(image) >= 16000000: oxipng.optimize(image, level=6) else: - oxipng.optimize(image, level=1) - except: - pass - return + oxipng.optimize(image, level=2) + return image # Return image path if successful + except (KeyboardInterrupt, Exception) as e: + return f"Error: {e}" # Return error message + """ Get type and category """ - def get_type(self, video, scene, is_disc): - filename = os.path.basename(video).lower() - if "remux" in filename: - type = "REMUX" - elif any(word in filename for word in [" web ", ".web.", "web-dl"]): - type = "WEBDL" - elif "webrip" in filename: - type = "WEBRIP" - # elif scene == True: - # type = "ENCODE" - elif "hdtv" in filename: - type = "HDTV" - elif is_disc != None: - type = "DISC" - elif "dvdrip" in filename: - console.print("[bold red]DVDRip Detected, exiting") - exit() + def get_type(self, video, scene, is_disc, meta): + if meta.get('manual_type'): + type = meta.get('manual_type') else: - type = "ENCODE" + filename = os.path.basename(video).lower() + if "remux" in filename: + type = "REMUX" + elif any(word in filename for word in [" web ", ".web.", "web-dl", "webdl"]): + type = "WEBDL" + elif "webrip" in filename: + type = "WEBRIP" + # elif scene == True: + # type = "ENCODE" + elif "hdtv" in filename: + type = "HDTV" + elif is_disc is not None: + type = "DISC" + elif "dvdrip" in filename: + type = "DVDRIP" + # exit() + else: + type = "ENCODE" return type def get_cat(self, video): # if category is None: category = guessit(video.replace('1.0', ''))['type'] if category.lower() == "movie": - category = "MOVIE" #1 + category = "MOVIE" # 1 elif category.lower() in ("tv", "episode"): - category = "TV" #2 + category = "TV" # 2 else: category = "MOVIE" return category async def get_tmdb_from_imdb(self, meta, filename): - if meta.get('tmdb_manual') != None: + if meta.get('tmdb_manual') is not None: meta['tmdb'] = meta['tmdb_manual'] return meta imdb_id = meta['imdb'] @@ -1051,17 +2217,19 @@ async def get_tmdb_from_imdb(self, meta, filename): info = find.info(external_source="imdb_id") if len(info['movie_results']) >= 1: meta['category'] = "MOVIE" - meta['tmdb'] = info['movie_results'][0]['id'] + meta['tmdb'] = info['movie_results'][0]['id'] + meta['original_language'] = info['movie_results'][0].get('original_language') 
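
As a point of reference, the IMDb-to-TMDb resolution this hunk extends reduces to roughly the sketch below; the function name and return shape are illustrative only (the patch writes the same fields onto `meta`), and it assumes `tmdb.API_KEY` is configured as elsewhere in this file.

import tmdbsimple as tmdb

def resolve_imdb(imdb_id):
    # /find/{imdb_id} returns separate movie_results and tv_results buckets;
    # the first hit in either bucket carries the TMDb id and original_language.
    info = tmdb.Find(imdb_id).info(external_source="imdb_id")
    for category, bucket in (("MOVIE", "movie_results"), ("TV", "tv_results")):
        results = info.get(bucket) or []
        if results:
            return {"category": category,
                    "tmdb": results[0]["id"],
                    "original_language": results[0].get("original_language")}
    return {}
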
elif len(info['tv_results']) >= 1: meta['category'] = "TV" - meta['tmdb'] = info['tv_results'][0]['id'] + meta['tmdb'] = info['tv_results'][0]['id'] + meta['original_language'] = info['tv_results'][0].get('original_language') else: - imdb_info = await self.get_imdb_info(imdb_id.replace('tt', ''), meta) + imdb_info = await self.get_imdb_info_api(imdb_id.replace('tt', ''), meta) title = imdb_info.get("title") - if title == None: + if title is None: title = filename year = imdb_info.get('year') - if year == None: + if year is None: year = meta['search_year'] console.print(f"[yellow]TMDb was unable to find anything with that IMDb, searching TMDb for {title}") meta = await self.get_tmdb_id(title, year, meta, meta['category'], imdb_info.get('original title', imdb_info.get('localized title', meta['uuid']))) @@ -1081,11 +2249,11 @@ async def get_tmdb_id(self, filename, search_year, meta, category, untouched_fil search.movie(query=filename, year=search_year) elif category == "TV": search.tv(query=filename, first_air_date_year=search_year) - if meta.get('tmdb_manual') != None: + if meta.get('tmdb_manual') is not None: meta['tmdb'] = meta['tmdb_manual'] else: meta['tmdb'] = search.results[0]['id'] - meta['category'] = category + meta['category'] = category except IndexError: try: if category == "MOVIE": @@ -1104,7 +2272,7 @@ async def get_tmdb_id(self, filename, search_year, meta, category, untouched_fil meta = await self.get_tmdb_id(filename, search_year, meta, category, untouched_filename, attempted) elif attempted == 2: attempted += 1 - meta = await self.get_tmdb_id(anitopy.parse(guessit(untouched_filename, {"excludes" : ["country", "language"]})['title'])['anime_title'], search_year, meta, meta['category'], untouched_filename, attempted) + meta = await self.get_tmdb_id(anitopy.parse(guessit(untouched_filename, {"excludes": ["country", "language"]})['title'])['anime_title'], search_year, meta, meta['category'], untouched_filename, attempted) if meta['tmdb'] in (None, ""): console.print(f"[red]Unable to find TMDb match for {filename}") if meta.get('mode', 'discord') == 'cli': @@ -1115,17 +2283,17 @@ async def get_tmdb_id(self, filename, search_year, meta, category, untouched_fil return meta return meta - + async def tmdb_other_meta(self, meta): - + if meta['tmdb'] == "0": try: - title = guessit(meta['path'], {"excludes" : ["country", "language"]})['title'].lower() + title = guessit(meta['path'], {"excludes": ["country", "language"]})['title'].lower() title = title.split('aka')[0] - meta = await self.get_tmdb_id(guessit(title, {"excludes" : ["country", "language"]})['title'], meta['search_year'], meta) + meta = await self.get_tmdb_id(guessit(title, {"excludes": ["country", "language"]})['title'], meta['search_year'], meta) if meta['tmdb'] == "0": meta = await self.get_tmdb_id(title, "", meta, meta['category']) - except: + except Exception: if meta.get('mode', 'discord') == 'cli': console.print("[bold red]Unable to find tmdb entry. 
Exiting.") exit() @@ -1137,23 +2305,26 @@ async def tmdb_other_meta(self, meta): response = movie.info() meta['title'] = response['title'] if response['release_date']: - meta['year'] = datetime.strptime(response['release_date'],'%Y-%m-%d').year + meta['year'] = datetime.strptime(response['release_date'], '%Y-%m-%d').year else: console.print('[yellow]TMDB does not have a release date, using year from filename instead (if it exists)') meta['year'] = meta['search_year'] external = movie.external_ids() - if meta.get('imdb', None) == None: + if meta.get('imdb', None) is None: imdb_id = external.get('imdb_id', "0") - if imdb_id == "" or imdb_id == None: + if imdb_id == "" or imdb_id is None: meta['imdb_id'] = '0' else: meta['imdb_id'] = str(int(imdb_id.replace('tt', ''))).zfill(7) else: meta['imdb_id'] = str(meta['imdb']).replace('tt', '').zfill(7) - if meta.get('tvdb_id', '0') in ['', ' ', None, 'None', '0']: - meta['tvdb_id'] = external.get('tvdb_id', '0') - if meta['tvdb_id'] in ["", None, " ", "None"]: - meta['tvdb_id'] = '0' + if meta.get('tvdb_manual'): + meta['tvdb_id'] = meta['tvdb_manual'] + else: + if meta.get('tvdb_id', '0') in ['', ' ', None, 'None', '0']: + meta['tvdb_id'] = external.get('tvdb_id', '0') + if meta['tvdb_id'] in ["", None, " ", "None"]: + meta['tvdb_id'] = '0' try: videos = movie.videos() for each in videos.get('results', []): @@ -1162,9 +2333,9 @@ async def tmdb_other_meta(self, meta): break except Exception: console.print('[yellow]Unable to grab videos from TMDb.') - - meta['aka'], original_language = await self.get_imdb_aka(meta['imdb_id']) - if original_language != None: + + meta['aka'], original_language = await self.get_imdb_aka_api(meta['imdb_id'], meta) + if original_language is not None: meta['original_language'] = original_language else: meta['original_language'] = response['original_language'] @@ -1173,9 +2344,12 @@ async def tmdb_other_meta(self, meta): meta['keywords'] = self.get_keywords(movie) meta['genres'] = self.get_genres(response) meta['tmdb_directors'] = self.get_directors(movie) - if meta.get('anime', False) == False: + if meta.get('anime', False) is False: meta['mal_id'], meta['aka'], meta['anime'] = self.get_anime(response, meta) + if meta.get('mal') is not None: + meta['mal_id'] = meta['mal'] meta['poster'] = response.get('poster_path', "") + meta['tmdb_poster'] = response.get('poster_path', "") meta['overview'] = response['overview'] meta['tmdb_type'] = 'Movie' meta['runtime'] = response.get('episode_run_time', 60) @@ -1184,23 +2358,26 @@ async def tmdb_other_meta(self, meta): response = tv.info() meta['title'] = response['name'] if response['first_air_date']: - meta['year'] = datetime.strptime(response['first_air_date'],'%Y-%m-%d').year + meta['year'] = datetime.strptime(response['first_air_date'], '%Y-%m-%d').year else: console.print('[yellow]TMDB does not have a release date, using year from filename instead (if it exists)') meta['year'] = meta['search_year'] external = tv.external_ids() - if meta.get('imdb', None) == None: + if meta.get('imdb', None) is None: imdb_id = external.get('imdb_id', "0") - if imdb_id == "" or imdb_id == None: + if imdb_id == "" or imdb_id is None: meta['imdb_id'] = '0' else: meta['imdb_id'] = str(int(imdb_id.replace('tt', ''))).zfill(7) else: meta['imdb_id'] = str(int(meta['imdb'].replace('tt', ''))).zfill(7) - if meta.get('tvdb_id', '0') in ['', ' ', None, 'None', '0']: - meta['tvdb_id'] = external.get('tvdb_id', '0') - if meta['tvdb_id'] in ["", None, " ", "None"]: - meta['tvdb_id'] = '0' + if 
meta.get('tvdb_manual'): + meta['tvdb_id'] = meta['tvdb_manual'] + else: + if meta.get('tvdb_id', '0') in ['', ' ', None, 'None', '0']: + meta['tvdb_id'] = external.get('tvdb_id', '0') + if meta['tvdb_id'] in ["", None, " ", "None"]: + meta['tvdb_id'] = '0' try: videos = tv.videos() for each in videos.get('results', []): @@ -1211,8 +2388,8 @@ async def tmdb_other_meta(self, meta): console.print('[yellow]Unable to grab videos from TMDb.') # meta['aka'] = f" AKA {response['original_name']}" - meta['aka'], original_language = await self.get_imdb_aka(meta['imdb_id']) - if original_language != None: + meta['aka'], original_language = await self.get_imdb_aka_api(meta['imdb_id'], meta) + if original_language is not None: meta['original_language'] = original_language else: meta['original_language'] = response['original_language'] @@ -1221,6 +2398,8 @@ async def tmdb_other_meta(self, meta): meta['genres'] = self.get_genres(response) meta['tmdb_directors'] = self.get_directors(tv) meta['mal_id'], meta['aka'], meta['anime'] = self.get_anime(response, meta) + if meta.get('mal') is not None: + meta['mal_id'] = meta['mal'] meta['poster'] = response.get('poster_path', '') meta['overview'] = response['overview'] @@ -1237,20 +2416,17 @@ async def tmdb_other_meta(self, meta): meta['aka'] = "" if f"({meta['year']})" in meta['aka']: meta['aka'] = meta['aka'].replace(f"({meta['year']})", "").strip() - - - return meta - + return meta def get_keywords(self, tmdb_info): if tmdb_info is not None: tmdb_keywords = tmdb_info.keywords() if tmdb_keywords.get('keywords') is not None: - keywords=[f"{keyword['name'].replace(',',' ')}" for keyword in tmdb_keywords.get('keywords')] + keywords = [f"{keyword['name'].replace(',', ' ')}" for keyword in tmdb_keywords.get('keywords')] elif tmdb_keywords.get('results') is not None: - keywords=[f"{keyword['name'].replace(',',' ')}" for keyword in tmdb_keywords.get('results')] - return(', '.join(keywords)) + keywords = [f"{keyword['name'].replace(',', ' ')}" for keyword in tmdb_keywords.get('results')] + return (', '.join(keywords)) else: return '' @@ -1258,8 +2434,8 @@ def get_genres(self, tmdb_info): if tmdb_info is not None: tmdb_genres = tmdb_info.get('genres', []) if tmdb_genres is not []: - genres=[f"{genre['name'].replace(',',' ')}" for genre in tmdb_genres] - return(', '.join(genres)) + genres = [f"{genre['name'].replace(',', ' ')}" for genre in tmdb_genres] + return (', '.join(genres)) else: return '' @@ -1286,10 +2462,10 @@ def get_anime(self, response, meta): for each in response['genres']: if each['id'] == 16: animation = True - if response['original_language'] == 'ja' and animation == True: + if response['original_language'] == 'ja' and animation is True: romaji, mal_id, eng_title, season_year, episodes = self.get_romaji(tmdb_name, meta.get('mal', None)) alt_name = f" AKA {romaji}" - + anime = True # mal = AnimeSearch(romaji) # mal_id = mal.results[0].mal_id @@ -1297,12 +2473,12 @@ def get_anime(self, response, meta): mal_id = 0 if meta.get('mal_id', 0) != 0: mal_id = meta.get('mal_id') - if meta.get('mal') not in ('0', 0, None): - mal_id = meta.get('mal', 0) + if meta.get('mal') is not None: + mal_id = meta.get('mal') return mal_id, alt_name, anime def get_romaji(self, tmdb_name, mal): - if mal == None: + if mal is None: mal = 0 tmdb_name = tmdb_name.replace('-', "").replace("The Movie", "") tmdb_name = ' '.join(tmdb_name.split()) @@ -1362,16 +2538,16 @@ def get_romaji(self, tmdb_name, mal): response = requests.post(url, json={'query': query, 'variables': variables}) 
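
The `url`, `query`, and `variables` posted here are defined in the elided context of this hunk; a self-contained approximation of the same AniList request, trimmed to the fields get_romaji actually reads, looks like this (names here are illustrative):

import requests

ANILIST_API = "https://graphql.anilist.co"
SEARCH_QUERY = """
query ($search: String) {
  Page(page: 1) {
    media(search: $search, type: ANIME) {
      idMal
      title { romaji english }
      seasonYear
      episodes
    }
  }
}
"""

def anilist_media(name):
    # AniList's GraphQL endpoint needs no authentication; failures surface as
    # HTTP errors or an "errors" key, hence the broad handling in the patch.
    response = requests.post(ANILIST_API, json={"query": SEARCH_QUERY, "variables": {"search": name}})
    response.raise_for_status()
    return response.json()["data"]["Page"]["media"]
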
            json = response.json()
            media = json['data']['Page']['media']
-        except:
+        except Exception:
            console.print('[red]Failed to get anime specific info from anilist. Continuing without it...')
            media = []
        if media not in (None, []):
-            result = {'title' : {}}
+            result = {'title': {}}
            difference = 0
            for anime in media:
-                search_name = re.sub("[^0-9a-zA-Z\[\]]+", "", tmdb_name.lower().replace(' ', ''))
+                search_name = re.sub(r"[^0-9a-zA-Z\[\]]+", "", tmdb_name.lower().replace(' ', ''))
                for title in anime['title'].values():
-                    if title != None:
+                    if title is not None:
                        title = re.sub(u'[\u3000-\u303f\u3040-\u309f\u30a0-\u30ff\uff00-\uff9f\u4e00-\u9faf\u3400-\u4dbf]+ (?=[A-Za-z ]+–)', "", title.lower().replace(' ', ''), re.U)
                        diff = SequenceMatcher(None, title, search_name).ratio()
                        if diff >= difference:
@@ -1384,7 +2560,7 @@ def get_romaji(self, tmdb_name, mal):
            season_year = result.get('season_year', "")
            episodes = result.get('episodes', 0)
        else:
-            romaji = eng_title = season_year = ""
+            romaji = eng_title = season_year = ""
            episodes = mal_id = 0
        if mal_id in [None, 0]:
            mal_id = mal
@@ -1392,69 +2568,58 @@
            episodes = 0
        return romaji, mal_id, eng_title, season_year, episodes
-
-
-
-
-
-
-
    """
    Mediainfo/Bdinfo > meta
    """
    def get_audio_v2(self, mi, meta, bdinfo):
        extra = dual = ""
        has_commentary = False
-        #Get formats
-        if bdinfo != None: #Disks
+
+        # Get formats
+        if bdinfo is not None:  # Disks
            format_settings = ""
-            format = bdinfo['audio'][0]['codec']
+            format = bdinfo.get('audio', [{}])[0].get('codec', '')
            commercial = format
-            try:
-                additional = bdinfo['audio'][0]['atmos_why_you_be_like_this']
-            except:
-                additional = ""
-            #Channels
-            chan = bdinfo['audio'][0]['channels']
-
+            additional = bdinfo.get('audio', [{}])[0].get('atmos_why_you_be_like_this', '')
-        else:
+            # Channels
+            chan = bdinfo.get('audio', [{}])[0].get('channels', '')
+        else:
            track_num = 2
-            for i in range(len(mi['media']['track'])):
-                t = mi['media']['track'][i]
-                if t['@type'] != "Audio":
-                    pass
-                else:
-                    if t.get('Language', "") == meta['original_language'] and "commentary" not in t.get('Title', '').lower():
-                        track_num = i
-                        break
-            format = mi['media']['track'][track_num]['Format']
-            commercial = mi['media']['track'][track_num].get('Format_Commercial', '')
-            if mi['media']['track'][track_num].get('Language', '') == "zxx":
+            tracks = mi.get('media', {}).get('track', [])
+
+            for i, t in enumerate(tracks):
+                if t.get('@type') != "Audio":
+                    continue
+                if t.get('Language', '') == meta.get('original_language', '') and "commentary" not in (t.get('Title') or '').lower():
+                    track_num = i
+                    break
+
+            track = tracks[track_num] if len(tracks) > track_num else {}
+            format = track.get('Format', '')
+            commercial = track.get('Format_Commercial', '') or track.get('Format_Commercial_IfAny', '')
+
+            if track.get('Language', '') == "zxx":
                meta['silent'] = True
-            try:
-                additional = mi['media']['track'][track_num]['Format_AdditionalFeatures']
-                # format = f"{format} {additional}"
-            except:
-                additional = ""
-            try:
-                format_settings = mi['media']['track'][track_num]['Format_Settings']
-                if format_settings in ['Explicit']:
-                    format_settings = ""
-            except:
+
+            additional = track.get('Format_AdditionalFeatures', '')
+
+            format_settings = track.get('Format_Settings', '')
+            if not isinstance(format_settings, str):
+                format_settings = ""
+            if format_settings in ['Explicit']:
                format_settings = ""
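
The channel naming applied in the next few lines boils down to this pure function (a sketch only; the patch itself works on MediaInfo's `Channels`/`ChannelLayout` values in place):

def channel_label(channels, layout):
    # An "LFE" entry in the layout marks a sub channel: 6 channels -> "5.1".
    if layout and "LFE" in layout:
        return f"{channels - 1}.1"
    if not layout:
        # No layout reported: assume anything beyond stereo hides a sub.
        return f"{channels}.0" if channels <= 2 else f"{channels - 1}.1"
    return f"{channels}.0"
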
-            #Channels
-            channels = mi['media']['track'][track_num].get('Channels_Original', mi['media']['track'][track_num]['Channels'])
+            format_profile = track.get('Format_Profile', '')
+            # Channels
+            channels = track.get('Channels_Original', track.get('Channels'))
            if not str(channels).isnumeric():
-                channels = mi['media']['track'][track_num]['Channels']
+                channels = track.get('Channels')
-            try:
-                channel_layout = mi['media']['track'][track_num]['ChannelLayout']
-            except:
-                try:
-                    channel_layout = mi['media']['track'][track_num]['ChannelLayout_Original']
-                except:
-                    channel_layout = ""
-            if "LFE" in channel_layout:
+            # dict.get() cannot raise a lookup error here, so fall back directly
+            # instead of through a dead except branch
+            channel_layout = track.get('ChannelLayout', '') or track.get('ChannelLayout_Original', '')
+
+            if channel_layout and "LFE" in channel_layout:
                chan = f"{int(channels) - 1}.1"
            elif channel_layout == "":
                if int(channels) <= 2:
@@ -1463,51 +2628,52 @@ def get_audio_v2(self, mi, meta, bdinfo):
                    chan = f"{int(channels) - 1}.1"
                else:
                    chan = f"{channels}.0"
-
-        if meta['original_language'] != 'en':
-            eng, orig = False, False
-            try:
-                for t in mi['media']['track']:
-                    if t['@type'] != "Audio":
-                        pass
-                    else:
+
+        if meta.get('dual_audio', False):
+            dual = "Dual-Audio"
+        else:
+            if not meta.get('original_language', '').startswith('en'):
+                eng, orig = False, False
+                try:
+                    for t in tracks:
+                        if t.get('@type') != "Audio":
+                            continue
+
                        audio_language = t.get('Language', '')
-                        # Check for English Language Track
-                        if audio_language == "en" and "commentary" not in t.get('Title', '').lower():
-                            eng = True
-                        # Check for original Language Track
-                        if audio_language == meta['original_language'] and "commentary" not in t.get('Title', '').lower():
-                            orig = True
-                        # Catch Chinese / Norwegian variants
-                        variants = ['zh', 'cn', 'cmn', 'no', 'nb']
-                        if audio_language in variants and meta['original_language'] in variants:
-                            orig = True
-                        # Check for additional, bloated Tracks
-                        if audio_language != meta['original_language'] and audio_language != "en":
-                            if meta['original_language'] not in variants and audio_language not in variants:
-                                audio_language = "und" if audio_language == "" else audio_language
-                                console.print(f"[bold red]This release has a(n) {audio_language} audio track, and may be considered bloated")
-                                time.sleep(5)
-                if eng and orig == True:
-                    dual = "Dual-Audio"
-                elif eng == True and orig == False and meta['original_language'] not in ['zxx', 'xx', None] and meta.get('no_dub', False) == False:
-                    dual = "Dubbed"
-            except Exception:
-                console.print(traceback.print_exc())
-                pass
-
-
-        for t in mi['media']['track']:
-            if t['@type'] != "Audio":
-                pass
-            else:
-                if "commentary" in t.get('Title', '').lower():
-                    has_commentary = True
-
-
-        #Convert commercial name to naming conventions
+
+                        if isinstance(audio_language, str):
+                            if audio_language.startswith("en") and "commentary" not in (t.get('Title') or '').lower():
+                                eng = True
+
+                            if not audio_language.startswith("en") and audio_language.startswith(meta['original_language']) and "commentary" not in (t.get('Title') or '').lower():
+                                orig = True
+
+                            variants = ['zh', 'cn', 'cmn', 'no', 'nb']
+                            if any(audio_language.startswith(var) for var in variants) and any(meta['original_language'].startswith(var) for var in variants):
+                                orig = True
+
+                        if isinstance(audio_language, str) and audio_language and audio_language != meta['original_language'] and not audio_language.startswith("en"):
+                            audio_language = "und" if audio_language == "" else audio_language
+                            console.print(f"[bold red]This release has a(n) {audio_language} audio track, and may be considered bloated")
+                            time.sleep(5)
+
+                    if eng and orig:
+                        dual = "Dual-Audio"
+                    elif eng and not orig and meta['original_language'] not in ['zxx', 'xx', None] and not meta.get('no_dub', False):
+                        dual = "Dubbed"
+                except Exception:
+                    console.print(traceback.format_exc())
+                    pass
+
+        for t in tracks:
+            if t.get('@type') != "Audio":
+                continue
+
+            if "commentary" in (t.get('Title') or '').lower():
+                has_commentary = True
+
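
Condensed, the Dual-Audio/Dubbed decision above behaves like the following sketch, with `langs` standing in for the audio-track language tags and `orig_lang` for TMDb's original_language (both names are illustrative):

def classify_dual(langs, orig_lang):
    eng = any(lang.startswith("en") for lang in langs)
    orig = any(lang.startswith(orig_lang) for lang in langs
               if not lang.startswith("en"))
    if eng and orig:
        return "Dual-Audio"
    if eng and not orig and orig_lang not in ("zxx", "xx", ""):
        return "Dubbed"  # English present but no original-language track
    return ""
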
"Dual-Audio" + elif eng and not orig and meta['original_language'] not in ['zxx', 'xx', None] and not meta.get('no_dub', False): + dual = "Dubbed" + except Exception: + console.print(traceback.format_exc()) + pass + + for t in tracks: + if t.get('@type') != "Audio": + continue + + if "commentary" in (t.get('Title') or '').lower(): + has_commentary = True + + # Convert commercial name to naming conventions audio = { - #Format "DTS": "DTS", "AAC": "AAC", "AAC LC": "AAC", @@ -1515,20 +2681,17 @@ def get_audio_v2(self, mi, meta, bdinfo): "E-AC-3": "DD+", "MLP FBA": "TrueHD", "FLAC": "FLAC", - "Opus": "OPUS", + "Opus": "Opus", "Vorbis": "VORBIS", "PCM": "LPCM", - - #BDINFO AUDIOS - "LPCM Audio" : "LPCM", - "Dolby Digital Audio" : "DD", - "Dolby Digital Plus Audio" : "DD+", - # "Dolby TrueHD" : "TrueHD", - "Dolby TrueHD Audio" : "TrueHD", - "DTS Audio" : "DTS", - "DTS-HD Master Audio" : "DTS-HD MA", - "DTS-HD High-Res Audio" : "DTS-HD HRA", - "DTS:X Master Audio" : "DTS:X" + "LPCM Audio": "LPCM", + "Dolby Digital Audio": "DD", + "Dolby Digital Plus Audio": "DD+", + "Dolby TrueHD Audio": "TrueHD", + "DTS Audio": "DTS", + "DTS-HD Master Audio": "DTS-HD MA", + "DTS-HD High-Res Audio": "DTS-HD HRA", + "DTS:X Master Audio": "DTS:X" } audio_extra = { "XLL": "-HD MA", @@ -1541,30 +2704,36 @@ def get_audio_v2(self, mi, meta, bdinfo): "Atmos Audio": " Atmos", } format_settings_extra = { - "Dolby Surround EX" : "EX" + "Dolby Surround EX": "EX" } commercial_names = { - "Dolby Digital" : "DD", - "Dolby Digital Plus" : "DD+", - "Dolby TrueHD" : "TrueHD", - "DTS-ES" : "DTS-ES", - "DTS-HD High" : "DTS-HD HRA", - "Free Lossless Audio Codec" : "FLAC", - "DTS-HD Master Audio" : "DTS-HD MA" - } + "Dolby Digital": "DD", + "Dolby Digital Plus": "DD+", + "Dolby TrueHD": "TrueHD", + "DTS-ES": "DTS-ES", + "DTS-HD High": "DTS-HD HRA", + "Free Lossless Audio Codec": "FLAC", + "DTS-HD Master Audio": "DTS-HD MA" + } - search_format = True - for key, value in commercial_names.items(): - if key in commercial: - codec = value - search_format = False - if "Atmos" in commercial or format_extra.get(additional, "") == " Atmos": - extra = " Atmos" + + if isinstance(additional, dict): + additional = "" # Set empty string if additional is a dictionary + + if commercial: + for key, value in commercial_names.items(): + if key in commercial: + codec = value + search_format = False + if "Atmos" in commercial or format_extra.get(additional, "") == " Atmos": + extra = " Atmos" + if search_format: codec = audio.get(format, "") + audio_extra.get(additional, "") extra = format_extra.get(additional, "") + format_settings = format_settings_extra.get(format_settings, "") if format_settings == "EX" and chan == "5.1": format_settings = "EX" @@ -1573,23 +2742,24 @@ def get_audio_v2(self, mi, meta, bdinfo): if codec == "": codec = format - + if format.startswith("DTS"): - if additional.endswith("X"): + if additional and additional.endswith("X"): codec = "DTS:X" chan = f"{int(channels) - 1}.1" - if format == "MPEG Audio": - codec = mi['media']['track'][2].get('CodecID_Hint', '') - + if format == "MPEG Audio": + if format_profile == "Layer 2": + codec = "MP2" + else: + codec = track.get('CodecID_Hint', '') - audio = f"{dual} {codec} {format_settings} {chan}{extra}" + audio = f"{dual} {codec or ''} {format_settings or ''} {chan or ''}{extra or ''}" audio = ' '.join(audio.split()) return audio, chan, has_commentary - def is_3d(self, mi, bdinfo): - if bdinfo != None: + if bdinfo is not None: if bdinfo['video'][0]['3d'] != "": return "3D" else: @@ 
-1599,28 +2769,42 @@ def is_3d(self, mi, bdinfo): def get_tag(self, video, meta): try: - tag = guessit(video)['release_group'] - tag = f"-{tag}" - except: + parsed = guessit(video) + release_group = parsed.get('release_group') + if meta['is_disc'] == "BDMV": + if release_group: + if f"-{release_group}" not in video: + if meta['debug']: + console.print(f"[warning] Invalid release group format: {release_group}") + release_group = None + + tag = f"-{release_group}" if release_group else "" + except Exception as e: + console.print(f"Error while parsing: {e}") tag = "" + if tag == "-": tag = "" - if tag[1:].lower() in ["nogroup", 'nogrp']: + if tag[1:].lower() in ["nogroup", "nogrp"]: tag = "" - return tag + return tag - def get_source(self, type, video, path, is_disc, meta): + def get_source(self, type, video, path, is_disc, meta, folder_id, base_dir): + try: + with open(f'{base_dir}/tmp/{folder_id}/MediaInfo.json', 'r', encoding='utf-8') as f: + mi = json.load(f) + except Exception: + if meta['debug']: + console.print("No mediainfo.json") try: try: source = guessit(video)['source'] - except: + except Exception: try: source = guessit(path)['source'] - except: + except Exception: source = "BluRay" - if meta.get('manual_source', None): - source = meta['manual_source'] if source in ("Blu-ray", "Ultra HD Blu-ray", "BluRay", "BR") or is_disc == "BDMV": if type == "DISC": source = "Blu-ray" @@ -1636,19 +2820,30 @@ def get_source(self, type, video, path, is_disc, meta): if track.track_type == "Video": system = track.standard if system not in ("PAL", "NTSC"): - raise WeirdSystem - except: + raise WeirdSystem # noqa: F405 + except Exception: try: other = guessit(video)['other'] if "PAL" in other: system = "PAL" elif "NTSC" in other: system = "NTSC" - except: + except Exception: system = "" + if system == "" or system is None: + try: + framerate = mi['media']['track'][1].get('FrameRate', '') + if '25' in framerate or '50' in framerate: + system = "PAL" + elif framerate: + system = "NTSC" + else: + system = "" + except Exception: + system = "" finally: - if system == None: - system = "" + if system is None: + system = "" if type == "REMUX": system = f"{system} DVD".strip() source = system @@ -1674,7 +2869,7 @@ def get_uhd(self, type, guess, resolution, path): try: source = guess['Source'] other = guess['Other'] - except: + except Exception: source = "" other = "" uhd = "" @@ -1684,7 +2879,7 @@ def get_uhd(self, type, guess, resolution, path): uhd = "UHD" elif type in ("DISC", "REMUX", "ENCODE", "WEBRIP"): uhd = "" - + if type in ("DISC", "REMUX", "ENCODE") and resolution == "2160p": uhd = "UHD" @@ -1693,7 +2888,7 @@ def get_uhd(self, type, guess, resolution, path): def get_hdr(self, mi, bdinfo): hdr = "" dv = "" - if bdinfo != None: #Disks + if bdinfo is not None: # Disks hdr_mi = bdinfo['video'][0]['hdr_dv'] if "HDR10+" in hdr_mi: hdr = "HDR10+" @@ -1702,9 +2897,9 @@ def get_hdr(self, mi, bdinfo): try: if bdinfo['video'][1]['hdr_dv'] == "Dolby Vision": dv = "DV" - except: + except Exception: pass - else: + else: video_track = mi['media']['track'][1] try: hdr_mi = video_track['colour_primaries'] @@ -1722,13 +2917,13 @@ def get_hdr(self, mi, bdinfo): hdr = "HLG" if hdr != "HLG" and "BT.2020 (10-bit)" in transfer_characteristics: hdr = "WCG" - except: + except Exception: pass try: if "Dolby Vision" in video_track.get('HDR_Format', '') or "Dolby Vision" in video_track.get('HDR_Format_String', ''): dv = "DV" - except: + except Exception: pass hdr = f"{dv} {hdr}".strip() @@ -1736,68 +2931,68 @@ def 
get_hdr(self, mi, bdinfo): def get_region(self, bdinfo, region=None): label = bdinfo.get('label', bdinfo.get('title', bdinfo.get('path', ''))).replace('.', ' ') - if region != None: + if region is not None: region = region.upper() - else: + else: regions = { - 'AFG': 'AFG', 'AIA': 'AIA', 'ALA': 'ALA', 'ALG': 'ALG', 'AND': 'AND', 'ANG': 'ANG', 'ARG': 'ARG', - 'ARM': 'ARM', 'ARU': 'ARU', 'ASA': 'ASA', 'ATA': 'ATA', 'ATF': 'ATF', 'ATG': 'ATG', 'AUS': 'AUS', - 'AUT': 'AUT', 'AZE': 'AZE', 'BAH': 'BAH', 'BAN': 'BAN', 'BDI': 'BDI', 'BEL': 'BEL', 'BEN': 'BEN', - 'BER': 'BER', 'BES': 'BES', 'BFA': 'BFA', 'BHR': 'BHR', 'BHU': 'BHU', 'BIH': 'BIH', 'BLM': 'BLM', - 'BLR': 'BLR', 'BLZ': 'BLZ', 'BOL': 'BOL', 'BOT': 'BOT', 'BRA': 'BRA', 'BRB': 'BRB', 'BRU': 'BRU', - 'BVT': 'BVT', 'CAM': 'CAM', 'CAN': 'CAN', 'CAY': 'CAY', 'CCK': 'CCK', 'CEE': 'CEE', 'CGO': 'CGO', - 'CHA': 'CHA', 'CHI': 'CHI', 'CHN': 'CHN', 'CIV': 'CIV', 'CMR': 'CMR', 'COD': 'COD', 'COK': 'COK', - 'COL': 'COL', 'COM': 'COM', 'CPV': 'CPV', 'CRC': 'CRC', 'CRO': 'CRO', 'CTA': 'CTA', 'CUB': 'CUB', - 'CUW': 'CUW', 'CXR': 'CXR', 'CYP': 'CYP', 'DJI': 'DJI', 'DMA': 'DMA', 'DOM': 'DOM', 'ECU': 'ECU', - 'EGY': 'EGY', 'ENG': 'ENG', 'EQG': 'EQG', 'ERI': 'ERI', 'ESH': 'ESH', 'ESP': 'ESP', 'ETH': 'ETH', - 'FIJ': 'FIJ', 'FLK': 'FLK', 'FRA': 'FRA', 'FRO': 'FRO', 'FSM': 'FSM', 'GAB': 'GAB', 'GAM': 'GAM', - 'GBR': 'GBR', 'GEO': 'GEO', 'GER': 'GER', 'GGY': 'GGY', 'GHA': 'GHA', 'GIB': 'GIB', 'GLP': 'GLP', - 'GNB': 'GNB', 'GRE': 'GRE', 'GRL': 'GRL', 'GRN': 'GRN', 'GUA': 'GUA', 'GUF': 'GUF', 'GUI': 'GUI', - 'GUM': 'GUM', 'GUY': 'GUY', 'HAI': 'HAI', 'HKG': 'HKG', 'HMD': 'HMD', 'HON': 'HON', 'HUN': 'HUN', - 'IDN': 'IDN', 'IMN': 'IMN', 'IND': 'IND', 'IOT': 'IOT', 'IRL': 'IRL', 'IRN': 'IRN', 'IRQ': 'IRQ', - 'ISL': 'ISL', 'ISR': 'ISR', 'ITA': 'ITA', 'JAM': 'JAM', 'JEY': 'JEY', 'JOR': 'JOR', 'JPN': 'JPN', - 'KAZ': 'KAZ', 'KEN': 'KEN', 'KGZ': 'KGZ', 'KIR': 'KIR', 'KNA': 'KNA', 'KOR': 'KOR', 'KSA': 'KSA', - 'KUW': 'KUW', 'KVX': 'KVX', 'LAO': 'LAO', 'LBN': 'LBN', 'LBR': 'LBR', 'LBY': 'LBY', 'LCA': 'LCA', - 'LES': 'LES', 'LIE': 'LIE', 'LKA': 'LKA', 'LUX': 'LUX', 'MAC': 'MAC', 'MAD': 'MAD', 'MAF': 'MAF', - 'MAR': 'MAR', 'MAS': 'MAS', 'MDA': 'MDA', 'MDV': 'MDV', 'MEX': 'MEX', 'MHL': 'MHL', 'MKD': 'MKD', - 'MLI': 'MLI', 'MLT': 'MLT', 'MNG': 'MNG', 'MNP': 'MNP', 'MON': 'MON', 'MOZ': 'MOZ', 'MRI': 'MRI', - 'MSR': 'MSR', 'MTN': 'MTN', 'MTQ': 'MTQ', 'MWI': 'MWI', 'MYA': 'MYA', 'MYT': 'MYT', 'NAM': 'NAM', - 'NCA': 'NCA', 'NCL': 'NCL', 'NEP': 'NEP', 'NFK': 'NFK', 'NIG': 'NIG', 'NIR': 'NIR', 'NIU': 'NIU', - 'NLD': 'NLD', 'NOR': 'NOR', 'NRU': 'NRU', 'NZL': 'NZL', 'OMA': 'OMA', 'PAK': 'PAK', 'PAN': 'PAN', - 'PAR': 'PAR', 'PCN': 'PCN', 'PER': 'PER', 'PHI': 'PHI', 'PLE': 'PLE', 'PLW': 'PLW', 'PNG': 'PNG', - 'POL': 'POL', 'POR': 'POR', 'PRK': 'PRK', 'PUR': 'PUR', 'QAT': 'QAT', 'REU': 'REU', 'ROU': 'ROU', - 'RSA': 'RSA', 'RUS': 'RUS', 'RWA': 'RWA', 'SAM': 'SAM', 'SCO': 'SCO', 'SDN': 'SDN', 'SEN': 'SEN', - 'SEY': 'SEY', 'SGS': 'SGS', 'SHN': 'SHN', 'SIN': 'SIN', 'SJM': 'SJM', 'SLE': 'SLE', 'SLV': 'SLV', - 'SMR': 'SMR', 'SOL': 'SOL', 'SOM': 'SOM', 'SPM': 'SPM', 'SRB': 'SRB', 'SSD': 'SSD', 'STP': 'STP', - 'SUI': 'SUI', 'SUR': 'SUR', 'SWZ': 'SWZ', 'SXM': 'SXM', 'SYR': 'SYR', 'TAH': 'TAH', 'TAN': 'TAN', - 'TCA': 'TCA', 'TGA': 'TGA', 'THA': 'THA', 'TJK': 'TJK', 'TKL': 'TKL', 'TKM': 'TKM', 'TLS': 'TLS', - 'TOG': 'TOG', 'TRI': 'TRI', 'TUN': 'TUN', 'TUR': 'TUR', 'TUV': 'TUV', 'TWN': 'TWN', 'UAE': 'UAE', - 'UGA': 'UGA', 'UKR': 'UKR', 'UMI': 'UMI', 'URU': 'URU', 'USA': 'USA', 'UZB': 'UZB', 'VAN': 
'VAN', - 'VAT': 'VAT', 'VEN': 'VEN', 'VGB': 'VGB', 'VIE': 'VIE', 'VIN': 'VIN', 'VIR': 'VIR', 'WAL': 'WAL', - 'WLF': 'WLF', 'YEM': 'YEM', 'ZAM': 'ZAM', 'ZIM': 'ZIM', "EUR" : "EUR" + 'AFG': 'AFG', 'AIA': 'AIA', 'ALA': 'ALA', 'ALG': 'ALG', 'AND': 'AND', 'ANG': 'ANG', 'ARG': 'ARG', + 'ARM': 'ARM', 'ARU': 'ARU', 'ASA': 'ASA', 'ATA': 'ATA', 'ATF': 'ATF', 'ATG': 'ATG', 'AUS': 'AUS', + 'AUT': 'AUT', 'AZE': 'AZE', 'BAH': 'BAH', 'BAN': 'BAN', 'BDI': 'BDI', 'BEL': 'BEL', 'BEN': 'BEN', + 'BER': 'BER', 'BES': 'BES', 'BFA': 'BFA', 'BHR': 'BHR', 'BHU': 'BHU', 'BIH': 'BIH', 'BLM': 'BLM', + 'BLR': 'BLR', 'BLZ': 'BLZ', 'BOL': 'BOL', 'BOT': 'BOT', 'BRA': 'BRA', 'BRB': 'BRB', 'BRU': 'BRU', + 'BVT': 'BVT', 'CAM': 'CAM', 'CAN': 'CAN', 'CAY': 'CAY', 'CCK': 'CCK', 'CEE': 'CEE', 'CGO': 'CGO', + 'CHA': 'CHA', 'CHI': 'CHI', 'CHN': 'CHN', 'CIV': 'CIV', 'CMR': 'CMR', 'COD': 'COD', 'COK': 'COK', + 'COL': 'COL', 'COM': 'COM', 'CPV': 'CPV', 'CRC': 'CRC', 'CRO': 'CRO', 'CTA': 'CTA', 'CUB': 'CUB', + 'CUW': 'CUW', 'CXR': 'CXR', 'CYP': 'CYP', 'DJI': 'DJI', 'DMA': 'DMA', 'DOM': 'DOM', 'ECU': 'ECU', + 'EGY': 'EGY', 'ENG': 'ENG', 'EQG': 'EQG', 'ERI': 'ERI', 'ESH': 'ESH', 'ESP': 'ESP', 'ETH': 'ETH', + 'FIJ': 'FIJ', 'FLK': 'FLK', 'FRA': 'FRA', 'FRO': 'FRO', 'FSM': 'FSM', 'GAB': 'GAB', 'GAM': 'GAM', + 'GBR': 'GBR', 'GEO': 'GEO', 'GER': 'GER', 'GGY': 'GGY', 'GHA': 'GHA', 'GIB': 'GIB', 'GLP': 'GLP', + 'GNB': 'GNB', 'GRE': 'GRE', 'GRL': 'GRL', 'GRN': 'GRN', 'GUA': 'GUA', 'GUF': 'GUF', 'GUI': 'GUI', + 'GUM': 'GUM', 'GUY': 'GUY', 'HAI': 'HAI', 'HKG': 'HKG', 'HMD': 'HMD', 'HON': 'HON', 'HUN': 'HUN', + 'IDN': 'IDN', 'IMN': 'IMN', 'IND': 'IND', 'IOT': 'IOT', 'IRL': 'IRL', 'IRN': 'IRN', 'IRQ': 'IRQ', + 'ISL': 'ISL', 'ISR': 'ISR', 'ITA': 'ITA', 'JAM': 'JAM', 'JEY': 'JEY', 'JOR': 'JOR', 'JPN': 'JPN', + 'KAZ': 'KAZ', 'KEN': 'KEN', 'KGZ': 'KGZ', 'KIR': 'KIR', 'KNA': 'KNA', 'KOR': 'KOR', 'KSA': 'KSA', + 'KUW': 'KUW', 'KVX': 'KVX', 'LAO': 'LAO', 'LBN': 'LBN', 'LBR': 'LBR', 'LBY': 'LBY', 'LCA': 'LCA', + 'LES': 'LES', 'LIE': 'LIE', 'LKA': 'LKA', 'LUX': 'LUX', 'MAC': 'MAC', 'MAD': 'MAD', 'MAF': 'MAF', + 'MAR': 'MAR', 'MAS': 'MAS', 'MDA': 'MDA', 'MDV': 'MDV', 'MEX': 'MEX', 'MHL': 'MHL', 'MKD': 'MKD', + 'MLI': 'MLI', 'MLT': 'MLT', 'MNG': 'MNG', 'MNP': 'MNP', 'MON': 'MON', 'MOZ': 'MOZ', 'MRI': 'MRI', + 'MSR': 'MSR', 'MTN': 'MTN', 'MTQ': 'MTQ', 'MWI': 'MWI', 'MYA': 'MYA', 'MYT': 'MYT', 'NAM': 'NAM', + 'NCA': 'NCA', 'NCL': 'NCL', 'NEP': 'NEP', 'NFK': 'NFK', 'NIG': 'NIG', 'NIR': 'NIR', 'NIU': 'NIU', + 'NLD': 'NLD', 'NOR': 'NOR', 'NRU': 'NRU', 'NZL': 'NZL', 'OMA': 'OMA', 'PAK': 'PAK', 'PAN': 'PAN', + 'PAR': 'PAR', 'PCN': 'PCN', 'PER': 'PER', 'PHI': 'PHI', 'PLE': 'PLE', 'PLW': 'PLW', 'PNG': 'PNG', + 'POL': 'POL', 'POR': 'POR', 'PRK': 'PRK', 'PUR': 'PUR', 'QAT': 'QAT', 'REU': 'REU', 'ROU': 'ROU', + 'RSA': 'RSA', 'RUS': 'RUS', 'RWA': 'RWA', 'SAM': 'SAM', 'SCO': 'SCO', 'SDN': 'SDN', 'SEN': 'SEN', + 'SEY': 'SEY', 'SGS': 'SGS', 'SHN': 'SHN', 'SIN': 'SIN', 'SJM': 'SJM', 'SLE': 'SLE', 'SLV': 'SLV', + 'SMR': 'SMR', 'SOL': 'SOL', 'SOM': 'SOM', 'SPM': 'SPM', 'SRB': 'SRB', 'SSD': 'SSD', 'STP': 'STP', + 'SUI': 'SUI', 'SUR': 'SUR', 'SWZ': 'SWZ', 'SXM': 'SXM', 'SYR': 'SYR', 'TAH': 'TAH', 'TAN': 'TAN', + 'TCA': 'TCA', 'TGA': 'TGA', 'THA': 'THA', 'TJK': 'TJK', 'TKL': 'TKL', 'TKM': 'TKM', 'TLS': 'TLS', + 'TOG': 'TOG', 'TRI': 'TRI', 'TUN': 'TUN', 'TUR': 'TUR', 'TUV': 'TUV', 'TWN': 'TWN', 'UAE': 'UAE', + 'UGA': 'UGA', 'UKR': 'UKR', 'UMI': 'UMI', 'URU': 'URU', 'USA': 'USA', 'UZB': 'UZB', 'VAN': 'VAN', + 'VAT': 'VAT', 'VEN': 'VEN', 'VGB': 'VGB', 'VIE': 'VIE', 'VIN': 'VIN', 
'VIR': 'VIR', 'WAL': 'WAL', + 'WLF': 'WLF', 'YEM': 'YEM', 'ZAM': 'ZAM', 'ZIM': 'ZIM', "EUR": "EUR" } for key, value in regions.items(): if f" {key} " in label: region = value - - if region == None: + + if region is None: region = "" return region def get_distributor(self, distributor_in): distributor_list = [ - '01 DISTRIBUTION', '100 DESTINATIONS TRAVEL FILM', '101 FILMS', '1FILMS', '2 ENTERTAIN VIDEO', '20TH CENTURY FOX', '2L', '3D CONTENT HUB', '3D MEDIA', '3L FILM', '4DIGITAL', '4DVD', '4K ULTRA HD MOVIES', '4K UHD', '8-FILMS', '84 ENTERTAINMENT', '88 FILMS', '@ANIME', 'ANIME', 'A CONTRACORRIENTE', 'A CONTRACORRIENTE FILMS', 'A&E HOME VIDEO', 'A&E', 'A&M RECORDS', 'A+E NETWORKS', 'A+R', 'A-FILM', 'AAA', 'AB VIDƉO', 'AB VIDEO', 'ABC - (AUSTRALIAN BROADCASTING CORPORATION)', 'ABC', 'ABKCO', 'ABSOLUT MEDIEN', 'ABSOLUTE', 'ACCENT FILM ENTERTAINMENT', 'ACCENTUS', 'ACORN MEDIA', 'AD VITAM', 'ADA', 'ADITYA VIDEOS', 'ADSO FILMS', 'AFM RECORDS', 'AGFA', 'AIX RECORDS', - 'ALAMODE FILM', 'ALBA RECORDS', 'ALBANY RECORDS', 'ALBATROS', 'ALCHEMY', 'ALIVE', 'ALL ANIME', 'ALL INTERACTIVE ENTERTAINMENT', 'ALLEGRO', 'ALLIANCE', 'ALPHA MUSIC', 'ALTERDYSTRYBUCJA', 'ALTERED INNOCENCE', 'ALTITUDE FILM DISTRIBUTION', 'ALUCARD RECORDS', 'AMAZING D.C.', 'AMAZING DC', 'AMMO CONTENT', 'AMUSE SOFT ENTERTAINMENT', 'ANCONNECT', 'ANEC', 'ANIMATSU', 'ANIME HOUSE', 'ANIME LTD', 'ANIME WORKS', 'ANIMEIGO', 'ANIPLEX', 'ANOLIS ENTERTAINMENT', 'ANOTHER WORLD ENTERTAINMENT', 'AP INTERNATIONAL', 'APPLE', 'ARA MEDIA', 'ARBELOS', 'ARC ENTERTAINMENT', 'ARP SƉLECTION', 'ARP SELECTION', 'ARROW', 'ART SERVICE', 'ART VISION', 'ARTE ƉDITIONS', 'ARTE EDITIONS', 'ARTE VIDƉO', - 'ARTE VIDEO', 'ARTHAUS MUSIK', 'ARTIFICIAL EYE', 'ARTSPLOITATION FILMS', 'ARTUS FILMS', 'ASCOT ELITE HOME ENTERTAINMENT', 'ASIA VIDEO', 'ASMIK ACE', 'ASTRO RECORDS & FILMWORKS', 'ASYLUM', 'ATLANTIC FILM', 'ATLANTIC RECORDS', 'ATLAS FILM', 'AUDIO VISUAL ENTERTAINMENT', 'AURO-3D CREATIVE LABEL', 'AURUM', 'AV VISIONEN', 'AV-JET', 'AVALON', 'AVENTI', 'AVEX TRAX', 'AXIOM', 'AXIS RECORDS', 'AYNGARAN', 'BAC FILMS', 'BACH FILMS', 'BANDAI VISUAL', 'BARCLAY', 'BBC', 'BRITISH BROADCASTING CORPORATION', 'BBI FILMS', 'BBI', 'BCI HOME ENTERTAINMENT', 'BEGGARS BANQUET', 'BEL AIR CLASSIQUES', 'BELGA FILMS', 'BELVEDERE', 'BENELUX FILM DISTRIBUTORS', 'BENNETT-WATT MEDIA', 'BERLIN CLASSICS', 'BERLINER PHILHARMONIKER RECORDINGS', 'BEST ENTERTAINMENT', 'BEYOND HOME ENTERTAINMENT', 'BFI VIDEO', 'BFI', 'BRITISH FILM INSTITUTE', 'BFS ENTERTAINMENT', 'BFS', 'BHAVANI', 'BIBER RECORDS', 'BIG HOME VIDEO', 'BILDSTƖRUNG', - 'BILDSTORUNG', 'BILL ZEBUB', 'BIRNENBLATT', 'BIT WEL', 'BLACK BOX', 'BLACK HILL PICTURES', 'BLACK HILL', 'BLACK HOLE RECORDINGS', 'BLACK HOLE', 'BLAQOUT', 'BLAUFIELD MUSIC', 'BLAUFIELD', 'BLOCKBUSTER ENTERTAINMENT', 'BLOCKBUSTER', 'BLU PHASE MEDIA', 'BLU-RAY ONLY', 'BLU-RAY', 'BLURAY ONLY', 'BLURAY', 'BLUE GENTIAN RECORDS', 'BLUE KINO', 'BLUE UNDERGROUND', 'BMG/ARISTA', 'BMG', 'BMGARISTA', 'BMG ARISTA', 'ARISTA', 'ARISTA/BMG', 'ARISTABMG', 'ARISTA BMG', 'BONTON FILM', 'BONTON', 'BOOMERANG PICTURES', 'BOOMERANG', 'BQHL ƉDITIONS', 'BQHL EDITIONS', 'BQHL', 'BREAKING GLASS', 'BRIDGESTONE', 'BRINK', 'BROAD GREEN PICTURES', 'BROAD GREEN', 'BUSCH MEDIA GROUP', 'BUSCH', 'C MAJOR', 'C.B.S.', 'CAICHANG', 'CALIFƓRNIA FILMES', 'CALIFORNIA FILMES', 'CALIFORNIA', 'CAMEO', 'CAMERA OBSCURA', 'CAMERATA', 'CAMP MOTION PICTURES', 'CAMP MOTION', 'CAPELIGHT PICTURES', 'CAPELIGHT', 'CAPITOL', 'CAPITOL RECORDS', 'CAPRICCI', 'CARGO RECORDS', 'CARLOTTA FILMS', 'CARLOTTA', 'CARLOTA', 'CARMEN 
FILM', 'CASCADE', 'CATCHPLAY', 'CAULDRON FILMS', 'CAULDRON', 'CBS TELEVISION STUDIOS', 'CBS', 'CCTV', 'CCV ENTERTAINMENT', 'CCV', 'CD BABY', 'CD LAND', 'CECCHI GORI', 'CENTURY MEDIA', 'CHUAN XUN SHI DAI MULTIMEDIA', 'CINE-ASIA', 'CINƉART', 'CINEART', 'CINEDIGM', 'CINEFIL IMAGICA', 'CINEMA EPOCH', 'CINEMA GUILD', 'CINEMA LIBRE STUDIOS', 'CINEMA MONDO', 'CINEMATIC VISION', 'CINEPLOIT RECORDS', 'CINESTRANGE EXTREME', 'CITEL VIDEO', 'CITEL', 'CJ ENTERTAINMENT', 'CJ', 'CLASSIC MEDIA', 'CLASSICFLIX', 'CLASSICLINE', 'CLAUDIO RECORDS', 'CLEAR VISION', 'CLEOPATRA', 'CLOSE UP', 'CMS MEDIA LIMITED', 'CMV LASERVISION', 'CN ENTERTAINMENT', 'CODE RED', 'COHEN MEDIA GROUP', 'COHEN', 'COIN DE MIRE CINƉMA', 'COIN DE MIRE CINEMA', 'COLOSSEO FILM', 'COLUMBIA', 'COLUMBIA PICTURES', 'COLUMBIA/TRI-STAR', 'TRI-STAR', 'COMMERCIAL MARKETING', 'CONCORD MUSIC GROUP', 'CONCORDE VIDEO', 'CONDOR', 'CONSTANTIN FILM', 'CONSTANTIN', 'CONSTANTINO FILMES', 'CONSTANTINO', 'CONSTRUCTIVE MEDIA SERVICE', 'CONSTRUCTIVE', 'CONTENT ZONE', 'CONTENTS GATE', 'COQUEIRO VERDE', 'CORNERSTONE MEDIA', 'CORNERSTONE', 'CP DIGITAL', 'CREST MOVIES', 'CRITERION', 'CRITERION COLLECTION', 'CC', 'CRYSTAL CLASSICS', 'CULT EPICS', 'CULT FILMS', 'CULT VIDEO', 'CURZON FILM WORLD', 'D FILMS', "D'AILLY COMPANY", 'DAILLY COMPANY', 'D AILLY COMPANY', "D'AILLY", 'DAILLY', 'D AILLY', 'DA CAPO', 'DA MUSIC', "DALL'ANGELO PICTURES", 'DALLANGELO PICTURES', "DALL'ANGELO", 'DALL ANGELO PICTURES', 'DALL ANGELO', 'DAREDO', 'DARK FORCE ENTERTAINMENT', 'DARK FORCE', 'DARK SIDE RELEASING', 'DARK SIDE', 'DAZZLER MEDIA', 'DAZZLER', 'DCM PICTURES', 'DCM', 'DEAPLANETA', 'DECCA', 'DEEPJOY', 'DEFIANT SCREEN ENTERTAINMENT', 'DEFIANT SCREEN', 'DEFIANT', 'DELOS', 'DELPHIAN RECORDS', 'DELPHIAN', 'DELTA MUSIC & ENTERTAINMENT', 'DELTA MUSIC AND ENTERTAINMENT', 'DELTA MUSIC ENTERTAINMENT', 'DELTA MUSIC', 'DELTAMAC CO. 
LTD.', 'DELTAMAC CO LTD', 'DELTAMAC CO', 'DELTAMAC', 'DEMAND MEDIA', 'DEMAND', 'DEP', 'DEUTSCHE GRAMMOPHON', 'DFW', 'DGM', 'DIAPHANA', 'DIGIDREAMS STUDIOS', 'DIGIDREAMS', 'DIGITAL ENVIRONMENTS', 'DIGITAL', 'DISCOTEK MEDIA', 'DISCOVERY CHANNEL', 'DISCOVERY', 'DISK KINO', 'DISNEY / BUENA VISTA', 'DISNEY', 'BUENA VISTA', 'DISNEY BUENA VISTA', 'DISTRIBUTION SELECT', 'DIVISA', 'DNC ENTERTAINMENT', 'DNC', 'DOGWOOF', 'DOLMEN HOME VIDEO', 'DOLMEN', 'DONAU FILM', 'DONAU', 'DORADO FILMS', 'DORADO', 'DRAFTHOUSE FILMS', 'DRAFTHOUSE', 'DRAGON FILM ENTERTAINMENT', 'DRAGON ENTERTAINMENT', 'DRAGON FILM', 'DRAGON', 'DREAMWORKS', 'DRIVE ON RECORDS', 'DRIVE ON', 'DRIVE-ON', 'DRIVEON', 'DS MEDIA', 'DTP ENTERTAINMENT AG', 'DTP ENTERTAINMENT', 'DTP AG', 'DTP', 'DTS ENTERTAINMENT', 'DTS', 'DUKE MARKETING', 'DUKE VIDEO DISTRIBUTION', 'DUKE', 'DUTCH FILMWORKS', 'DUTCH', 'DVD INTERNATIONAL', 'DVD', 'DYBEX', 'DYNAMIC', 'DYNIT', 'E1 ENTERTAINMENT', 'E1', 'EAGLE ENTERTAINMENT', 'EAGLE HOME ENTERTAINMENT PVT.LTD.', 'EAGLE HOME ENTERTAINMENT PVTLTD', 'EAGLE HOME ENTERTAINMENT PVT LTD', 'EAGLE HOME ENTERTAINMENT', 'EAGLE PICTURES', 'EAGLE ROCK ENTERTAINMENT', 'EAGLE ROCK', 'EAGLE VISION MEDIA', 'EAGLE VISION', 'EARMUSIC', 'EARTH ENTERTAINMENT', 'EARTH', 'ECHO BRIDGE ENTERTAINMENT', 'ECHO BRIDGE', 'EDEL GERMANY GMBH', 'EDEL GERMANY', 'EDEL RECORDS', 'EDITION TONFILM', 'EDITIONS MONTPARNASSE', 'EDKO FILMS LTD.', 'EDKO FILMS LTD', 'EDKO FILMS', - 'EDKO', "EIN'S M&M CO", 'EINS M&M CO', "EIN'S M&M", 'EINS M&M', 'ELEA-MEDIA', 'ELEA MEDIA', 'ELEA', 'ELECTRIC PICTURE', 'ELECTRIC', 'ELEPHANT FILMS', 'ELEPHANT', 'ELEVATION', 'EMI', 'EMON', 'EMS', 'EMYLIA', 'ENE MEDIA', 'ENE', 'ENTERTAINMENT IN VIDEO', 'ENTERTAINMENT IN', 'ENTERTAINMENT ONE', 'ENTERTAINMENT ONE FILMS CANADA INC.', 'ENTERTAINMENT ONE FILMS CANADA INC', 'ENTERTAINMENT ONE FILMS CANADA', 'ENTERTAINMENT ONE CANADA INC', 'ENTERTAINMENT ONE CANADA', 'ENTERTAINMENTONE', 'EONE', 'EOS', 'EPIC PICTURES', 'EPIC', 'EPIC RECORDS', 'ERATO', 'EROS', 'ESC EDITIONS', 'ESCAPI MEDIA BV', 'ESOTERIC RECORDINGS', 'ESPN FILMS', 'EUREKA ENTERTAINMENT', 'EUREKA', 'EURO PICTURES', 'EURO VIDEO', 'EUROARTS', 'EUROPA FILMES', 'EUROPA', 'EUROPACORP', 'EUROZOOM', 'EXCEL', 'EXPLOSIVE MEDIA', 'EXPLOSIVE', 'EXTRALUCID FILMS', 'EXTRALUCID', 'EYE SEE MOVIES', 'EYE SEE', 'EYK MEDIA', 'EYK', 'FABULOUS FILMS', 'FABULOUS', 'FACTORIS FILMS', 'FACTORIS', 'FARAO RECORDS', 'FARBFILM HOME ENTERTAINMENT', 'FARBFILM ENTERTAINMENT', 'FARBFILM HOME', 'FARBFILM', 'FEELGOOD ENTERTAINMENT', 'FEELGOOD', 'FERNSEHJUWELEN', 'FILM CHEST', 'FILM MEDIA', 'FILM MOVEMENT', 'FILM4', 'FILMART', 'FILMAURO', 'FILMAX', 'FILMCONFECT HOME ENTERTAINMENT', 'FILMCONFECT ENTERTAINMENT', 'FILMCONFECT HOME', 'FILMCONFECT', 'FILMEDIA', 'FILMJUWELEN', 'FILMOTEKA NARODAWA', 'FILMRISE', 'FINAL CUT ENTERTAINMENT', 'FINAL CUT', 'FIREHOUSE 12 RECORDS', 'FIREHOUSE 12', 'FIRST INTERNATIONAL PRODUCTION', 'FIRST INTERNATIONAL', 'FIRST LOOK STUDIOS', 'FIRST LOOK', 'FLAGMAN TRADE', 'FLASHSTAR FILMES', 'FLASHSTAR', 'FLICKER ALLEY', 'FNC ADD CULTURE', 'FOCUS FILMES', 'FOCUS', 'FOKUS MEDIA', 'FOKUSA', 'FOX PATHE EUROPA', 'FOX PATHE', 'FOX EUROPA', 'FOX/MGM', 'FOX MGM', 'MGM', 'MGM/FOX', 'FOX', 'FPE', 'FRANCE TƉLƉVISIONS DISTRIBUTION', 'FRANCE TELEVISIONS DISTRIBUTION', 'FRANCE TELEVISIONS', 'FRANCE', 'FREE DOLPHIN ENTERTAINMENT', 'FREE DOLPHIN', 'FREESTYLE DIGITAL MEDIA', 'FREESTYLE DIGITAL', 'FREESTYLE', 'FREMANTLE HOME ENTERTAINMENT', 'FREMANTLE ENTERTAINMENT', 'FREMANTLE HOME', 'FREMANTL', 'FRENETIC FILMS', 'FRENETIC', 'FRONTIER WORKS', 'FRONTIER', 
'FRONTIERS MUSIC', 'FRONTIERS RECORDS', 'FS FILM OY', 'FS FILM', 'FULL MOON FEATURES', 'FULL MOON', 'FUN CITY EDITIONS', 'FUN CITY', - 'FUNIMATION ENTERTAINMENT', 'FUNIMATION', 'FUSION', 'FUTUREFILM', 'G2 PICTURES', 'G2', 'GAGA COMMUNICATIONS', 'GAGA', 'GAIAM', 'GALAPAGOS', 'GAMMA HOME ENTERTAINMENT', 'GAMMA ENTERTAINMENT', 'GAMMA HOME', 'GAMMA', 'GARAGEHOUSE PICTURES', 'GARAGEHOUSE', 'GARAGEPLAY (車åŗ«å؛ę؂)', '車åŗ«å؛ę؂', 'GARAGEPLAY (Che Ku Yu Le )', 'GARAGEPLAY', 'Che Ku Yu Le', 'GAUMONT', 'GEFFEN', 'GENEON ENTERTAINMENT', 'GENEON', 'GENEON UNIVERSAL ENTERTAINMENT', 'GENERAL VIDEO RECORDING', 'GLASS DOLL FILMS', 'GLASS DOLL', 'GLOBE MUSIC MEDIA', 'GLOBE MUSIC', 'GLOBE MEDIA', 'GLOBE', 'GO ENTERTAIN', 'GO', 'GOLDEN HARVEST', 'GOOD!MOVIES', 'GOOD! MOVIES', 'GOOD MOVIES', 'GRAPEVINE VIDEO', 'GRAPEVINE', 'GRASSHOPPER FILM', 'GRASSHOPPER FILMS', 'GRASSHOPPER', 'GRAVITAS VENTURES', 'GRAVITAS', 'GREAT MOVIES', 'GREAT', 'GREEN APPLE ENTERTAINMENT', 'GREEN ENTERTAINMENT', 'GREEN APPLE', 'GREEN', 'GREENNARAE MEDIA', 'GREENNARAE', 'GRINDHOUSE RELEASING', 'GRINDHOUSE', 'GRIND HOUSE', 'GRYPHON ENTERTAINMENT', 'GRYPHON', 'GUNPOWDER & SKY', 'GUNPOWDER AND SKY', 'GUNPOWDER SKY', 'GUNPOWDER + SKY', 'GUNPOWDER', 'HANABEE ENTERTAINMENT', 'HANABEE', 'HANNOVER HOUSE', 'HANNOVER', 'HANSESOUND', 'HANSE SOUND', 'HANSE', 'HAPPINET', 'HARMONIA MUNDI', 'HARMONIA', 'HBO', 'HDC', 'HEC', 'HELL & BACK RECORDINGS', 'HELL AND BACK RECORDINGS', 'HELL & BACK', 'HELL AND BACK', "HEN'S TOOTH VIDEO", 'HENS TOOTH VIDEO', "HEN'S TOOTH", 'HENS TOOTH', 'HIGH FLIERS', 'HIGHLIGHT', 'HILLSONG', 'HISTORY CHANNEL', 'HISTORY', 'HK VIDƉO', 'HK VIDEO', 'HK', 'HMH HAMBURGER MEDIEN HAUS', 'HAMBURGER MEDIEN HAUS', 'HMH HAMBURGER MEDIEN', 'HMH HAMBURGER', 'HMH', 'HOLLYWOOD CLASSIC ENTERTAINMENT', 'HOLLYWOOD CLASSIC', 'HOLLYWOOD PICTURES', 'HOLLYWOOD', 'HOPSCOTCH ENTERTAINMENT', 'HOPSCOTCH', 'HPM', 'HƄNNSLER CLASSIC', 'HANNSLER CLASSIC', 'HANNSLER', 'I-CATCHER', 'I CATCHER', 'ICATCHER', 'I-ON NEW MEDIA', 'I ON NEW MEDIA', 'ION NEW MEDIA', 'ION MEDIA', 'I-ON', 'ION', 'IAN PRODUCTIONS', 'IAN', 'ICESTORM', 'ICON FILM DISTRIBUTION', 'ICON DISTRIBUTION', 'ICON FILM', 'ICON', 'IDEALE AUDIENCE', 'IDEALE', 'IFC FILMS', 'IFC', 'IFILM', 'ILLUSIONS UNLTD.', 'ILLUSIONS UNLTD', 'ILLUSIONS', 'IMAGE ENTERTAINMENT', 'IMAGE', 'IMAGEM FILMES', 'IMAGEM', 'IMOVISION', 'IMPERIAL CINEPIX', 'IMPRINT', 'IMPULS HOME ENTERTAINMENT', 'IMPULS ENTERTAINMENT', 'IMPULS HOME', 'IMPULS', 'IN-AKUSTIK', 'IN AKUSTIK', 'INAKUSTIK', 'INCEPTION MEDIA GROUP', 'INCEPTION MEDIA', 'INCEPTION GROUP', 'INCEPTION', 'INDEPENDENT', 'INDICAN', 'INDIE RIGHTS', 'INDIE', 'INDIGO', 'INFO', 'INJOINGAN', 'INKED PICTURES', 'INKED', 'INSIDE OUT MUSIC', 'INSIDE MUSIC', 'INSIDE OUT', 'INSIDE', 'INTERCOM', 'INTERCONTINENTAL VIDEO', 'INTERCONTINENTAL', 'INTERGROOVE', 'INTERSCOPE', 'INVINCIBLE PICTURES', 'INVINCIBLE', 'ISLAND/MERCURY', 'ISLAND MERCURY', 'ISLANDMERCURY', 'ISLAND & MERCURY', 'ISLAND AND MERCURY', 'ISLAND', 'ITN', 'ITV DVD', 'ITV', 'IVC', 'IVE ENTERTAINMENT', 'IVE', 'J&R ADVENTURES', 'J&R', 'JR', 'JAKOB', 'JONU MEDIA', 'JONU', 'JRB PRODUCTIONS', 'JRB', 'JUST BRIDGE ENTERTAINMENT', 'JUST BRIDGE', 'JUST ENTERTAINMENT', 'JUST', 'KABOOM ENTERTAINMENT', 'KABOOM', 'KADOKAWA ENTERTAINMENT', 'KADOKAWA', 'KAIROS', 'KALEIDOSCOPE ENTERTAINMENT', 'KALEIDOSCOPE', 'KAM & RONSON ENTERPRISES', 'KAM & RONSON', 'KAM&RONSON ENTERPRISES', 'KAM&RONSON', 'KAM AND RONSON ENTERPRISES', 'KAM AND RONSON', 'KANA HOME VIDEO', 'KARMA FILMS', 'KARMA', 'KATZENBERGER', 'KAZE', - 'KBS MEDIA', 'KBS', 'KD MEDIA', 'KD', 
'KING MEDIA', 'KING', 'KING RECORDS', 'KINO LORBER', 'KINO', 'KINO SWIAT', 'KINOKUNIYA', 'KINOWELT HOME ENTERTAINMENT/DVD', 'KINOWELT HOME ENTERTAINMENT', 'KINOWELT ENTERTAINMENT', 'KINOWELT HOME DVD', 'KINOWELT ENTERTAINMENT/DVD', 'KINOWELT DVD', 'KINOWELT', 'KIT PARKER FILMS', 'KIT PARKER', 'KITTY MEDIA', 'KNM HOME ENTERTAINMENT', 'KNM ENTERTAINMENT', 'KNM HOME', 'KNM', 'KOBA FILMS', 'KOBA', 'KOCH ENTERTAINMENT', 'KOCH MEDIA', 'KOCH', 'KRAKEN RELEASING', 'KRAKEN', 'KSCOPE', 'KSM', 'KULTUR', "L'ATELIER D'IMAGES", "LATELIER D'IMAGES", "L'ATELIER DIMAGES", 'LATELIER DIMAGES', "L ATELIER D'IMAGES", "L'ATELIER D IMAGES", - 'L ATELIER D IMAGES', "L'ATELIER", 'L ATELIER', 'LATELIER', 'LA AVENTURA AUDIOVISUAL', 'LA AVENTURA', 'LACE GROUP', 'LACE', 'LASER PARADISE', 'LAYONS', 'LCJ EDITIONS', 'LCJ', 'LE CHAT QUI FUME', 'LE PACTE', 'LEDICK FILMHANDEL', 'LEGEND', 'LEOMARK STUDIOS', 'LEOMARK', 'LEONINE FILMS', 'LEONINE', 'LICHTUNG MEDIA LTD', 'LICHTUNG LTD', 'LICHTUNG MEDIA LTD.', 'LICHTUNG LTD.', 'LICHTUNG MEDIA', 'LICHTUNG', 'LIGHTHOUSE HOME ENTERTAINMENT', 'LIGHTHOUSE ENTERTAINMENT', 'LIGHTHOUSE HOME', 'LIGHTHOUSE', 'LIGHTYEAR', 'LIONSGATE FILMS', 'LIONSGATE', 'LIZARD CINEMA TRADE', 'LLAMENTOL', 'LOBSTER FILMS', 'LOBSTER', 'LOGON', 'LORBER FILMS', 'LORBER', 'LOS BANDITOS FILMS', 'LOS BANDITOS', 'LOUD & PROUD RECORDS', 'LOUD AND PROUD RECORDS', 'LOUD & PROUD', 'LOUD AND PROUD', 'LSO LIVE', 'LUCASFILM', 'LUCKY RED', 'LUMIƈRE HOME ENTERTAINMENT', 'LUMIERE HOME ENTERTAINMENT', 'LUMIERE ENTERTAINMENT', 'LUMIERE HOME', 'LUMIERE', 'M6 VIDEO', 'M6', 'MAD DIMENSION', 'MADMAN ENTERTAINMENT', 'MADMAN', 'MAGIC BOX', 'MAGIC PLAY', 'MAGNA HOME ENTERTAINMENT', 'MAGNA ENTERTAINMENT', 'MAGNA HOME', 'MAGNA', 'MAGNOLIA PICTURES', 'MAGNOLIA', 'MAIDEN JAPAN', 'MAIDEN', 'MAJENG MEDIA', 'MAJENG', 'MAJESTIC HOME ENTERTAINMENT', 'MAJESTIC ENTERTAINMENT', 'MAJESTIC HOME', 'MAJESTIC', 'MANGA HOME ENTERTAINMENT', 'MANGA ENTERTAINMENT', 'MANGA HOME', 'MANGA', 'MANTA LAB', 'MAPLE STUDIOS', 'MAPLE', 'MARCO POLO PRODUCTION', 'MARCO POLO', 'MARIINSKY', 'MARVEL STUDIOS', 'MARVEL', 'MASCOT RECORDS', 'MASCOT', 'MASSACRE VIDEO', 'MASSACRE', 'MATCHBOX', 'MATRIX D', 'MAXAM', 'MAYA HOME ENTERTAINMENT', 'MAYA ENTERTAINMENT', 'MAYA HOME', 'MAYAT', 'MDG', 'MEDIA BLASTERS', 'MEDIA FACTORY', 'MEDIA TARGET DISTRIBUTION', 'MEDIA TARGET', 'MEDIAINVISION', 'MEDIATOON', 'MEDIATRES ESTUDIO', 'MEDIATRES STUDIO', 'MEDIATRES', 'MEDICI ARTS', 'MEDICI CLASSICS', 'MEDIUMRARE ENTERTAINMENT', 'MEDIUMRARE', 'MEDUSA', 'MEGASTAR', 'MEI AH', 'MELI MƉDIAS', 'MELI MEDIAS', 'MEMENTO FILMS', 'MEMENTO', 'MENEMSHA FILMS', 'MENEMSHA', 'MERCURY', 'MERCURY STUDIOS', 'MERGE SOFT PRODUCTIONS', 'MERGE PRODUCTIONS', 'MERGE SOFT', 'MERGE', 'METAL BLADE RECORDS', 'METAL BLADE', 'METEOR', 'METRO-GOLDWYN-MAYER', 'METRO GOLDWYN MAYER', 'METROGOLDWYNMAYER', 'METRODOME VIDEO', 'METRODOME', 'METROPOLITAN', 'MFA+', 'MFA', 'MIG FILMGROUP', 'MIG', 'MILESTONE', 'MILL CREEK ENTERTAINMENT', 'MILL CREEK', 'MILLENNIUM MEDIA', 'MILLENNIUM', 'MIRAGE ENTERTAINMENT', 'MIRAGE', 'MIRAMAX', 'MISTERIYA ZVUKA', 'MK2', 'MODE RECORDS', 'MODE', 'MOMENTUM PICTURES', 'MONDO HOME ENTERTAINMENT', 'MONDO ENTERTAINMENT', 'MONDO HOME', 'MONDO MACABRO', 'MONGREL MEDIA', 'MONOLIT', 'MONOLITH VIDEO', 'MONOLITH', 'MONSTER PICTURES', 'MONSTER', 'MONTEREY VIDEO', 'MONTEREY', 'MONUMENT RELEASING', 'MONUMENT', 'MORNINGSTAR', 'MORNING STAR', 'MOSERBAER', 'MOVIEMAX', 'MOVINSIDE', 'MPI MEDIA GROUP', 'MPI MEDIA', 'MPI', 'MR. 
BONGO FILMS', 'MR BONGO FILMS', 'MR BONGO', 'MRG (MERIDIAN)', 'MRG MERIDIAN', 'MRG', 'MERIDIAN', 'MUBI', 'MUG SHOT PRODUCTIONS', 'MUG SHOT', 'MULTIMUSIC', 'MULTI-MUSIC', 'MULTI MUSIC', 'MUSE', 'MUSIC BOX FILMS', 'MUSIC BOX', 'MUSICBOX', 'MUSIC BROKERS', 'MUSIC THEORIES', 'MUSIC VIDEO DISTRIBUTORS', 'MUSIC VIDEO', 'MUSTANG ENTERTAINMENT', 'MUSTANG', 'MVD VISUAL', 'MVD', 'MVD/VSC', 'MVL', 'MVM ENTERTAINMENT', 'MVM', 'MYNDFORM', 'MYSTIC NIGHT PICTURES', 'MYSTIC NIGHT', 'NAMELESS MEDIA', 'NAMELESS', 'NAPALM RECORDS', 'NAPALM', 'NATIONAL ENTERTAINMENT MEDIA', 'NATIONAL ENTERTAINMENT', 'NATIONAL MEDIA', 'NATIONAL FILM ARCHIVE', 'NATIONAL ARCHIVE', 'NATIONAL FILM', 'NATIONAL GEOGRAPHIC', 'NAT GEO TV', 'NAT GEO', 'NGO', 'NAXOS', 'NBCUNIVERSAL ENTERTAINMENT JAPAN', 'NBC UNIVERSAL ENTERTAINMENT JAPAN', 'NBCUNIVERSAL JAPAN', 'NBC UNIVERSAL JAPAN', 'NBC JAPAN', 'NBO ENTERTAINMENT', 'NBO', 'NEOS', 'NETFLIX', 'NETWORK', 'NEW BLOOD', 'NEW DISC', 'NEW KSM', 'NEW LINE CINEMA', 'NEW LINE', 'NEW MOVIE TRADING CO. LTD', 'NEW MOVIE TRADING CO LTD', 'NEW MOVIE TRADING CO', 'NEW MOVIE TRADING', 'NEW WAVE FILMS', 'NEW WAVE', 'NFI', 'NHK', 'NIPPONART', 'NIS AMERICA', 'NJUTAFILMS', 'NOBLE ENTERTAINMENT', 'NOBLE', 'NORDISK FILM', 'NORDISK', 'NORSK FILM', 'NORSK', 'NORTH AMERICAN MOTION PICTURES', 'NOS AUDIOVISUAIS', 'NOTORIOUS PICTURES', 'NOTORIOUS', 'NOVA MEDIA', 'NOVA', 'NOVA SALES AND DISTRIBUTION', 'NOVA SALES & DISTRIBUTION', 'NSM', 'NSM RECORDS', 'NUCLEAR BLAST', 'NUCLEUS FILMS', 'NUCLEUS', 'OBERLIN MUSIC', 'OBERLIN', 'OBRAS-PRIMAS DO CINEMA', 'OBRAS PRIMAS DO CINEMA', 'OBRASPRIMAS DO CINEMA', 'OBRAS-PRIMAS CINEMA', 'OBRAS PRIMAS CINEMA', 'OBRASPRIMAS CINEMA', 'OBRAS-PRIMAS', 'OBRAS PRIMAS', 'OBRASPRIMAS', 'ODEON', 'OFDB FILMWORKS', 'OFDB', 'OLIVE FILMS', 'OLIVE', 'ONDINE', 'ONSCREEN FILMS', 'ONSCREEN', 'OPENING DISTRIBUTION', 'OPERA AUSTRALIA', 'OPTIMUM HOME ENTERTAINMENT', 'OPTIMUM ENTERTAINMENT', 'OPTIMUM HOME', 'OPTIMUM', 'OPUS ARTE', 'ORANGE STUDIO', 'ORANGE', 'ORLANDO EASTWOOD FILMS', 'ORLANDO FILMS', 'ORLANDO EASTWOOD', 'ORLANDO', 'ORUSTAK PICTURES', 'ORUSTAK', 'OSCILLOSCOPE PICTURES', 'OSCILLOSCOPE', 'OUTPLAY', 'PALISADES TARTAN', 'PAN VISION', 'PANVISION', 'PANAMINT CINEMA', 'PANAMINT', 'PANDASTORM ENTERTAINMENT', 'PANDA STORM ENTERTAINMENT', 'PANDASTORM', 'PANDA STORM', 'PANDORA FILM', 'PANDORA', 'PANEGYRIC', 'PANORAMA', 'PARADE DECK FILMS', 'PARADE DECK', 'PARADISE', 'PARADISO FILMS', 'PARADOX', 'PARAMOUNT PICTURES', 'PARAMOUNT', 'PARIS FILMES', 'PARIS FILMS', 'PARIS', 'PARK CIRCUS', 'PARLOPHONE', 'PASSION RIVER', 'PATHE DISTRIBUTION', 'PATHE', 'PBS', 'PEACE ARCH TRINITY', 'PECCADILLO PICTURES', 'PEPPERMINT', 'PHASE 4 FILMS', 'PHASE 4', 'PHILHARMONIA BAROQUE', 'PICTURE HOUSE ENTERTAINMENT', 'PICTURE ENTERTAINMENT', 'PICTURE HOUSE', 'PICTURE', 'PIDAX', - 'PINK FLOYD RECORDS', 'PINK FLOYD', 'PINNACLE FILMS', 'PINNACLE', 'PLAIN', 'PLATFORM ENTERTAINMENT LIMITED', 'PLATFORM ENTERTAINMENT LTD', 'PLATFORM ENTERTAINMENT LTD.', 'PLATFORM ENTERTAINMENT', 'PLATFORM', 'PLAYARTE', 'PLG UK CLASSICS', 'PLG UK', 'PLG', 'POLYBAND & TOPPIC VIDEO/WVG', 'POLYBAND AND TOPPIC VIDEO/WVG', 'POLYBAND & TOPPIC VIDEO WVG', 'POLYBAND & TOPPIC VIDEO AND WVG', 'POLYBAND & TOPPIC VIDEO & WVG', 'POLYBAND AND TOPPIC VIDEO WVG', 'POLYBAND AND TOPPIC VIDEO AND WVG', 'POLYBAND AND TOPPIC VIDEO & WVG', 'POLYBAND & TOPPIC VIDEO', 'POLYBAND AND TOPPIC VIDEO', 'POLYBAND & TOPPIC', 'POLYBAND AND TOPPIC', 'POLYBAND', 'WVG', 'POLYDOR', 'PONY', 'PONY CANYON', 'POTEMKINE', 'POWERHOUSE FILMS', 'POWERHOUSE', 'POWERSTATIOM', 'PRIDE & JOY', 
'PRIDE AND JOY', 'PRINZ MEDIA', 'PRINZ', 'PRIS AUDIOVISUAIS', 'PRO VIDEO', 'PRO-VIDEO', 'PRO-MOTION', 'PRO MOTION', 'PROD. JRB', 'PROD JRB', 'PRODISC', 'PROKINO', 'PROVOGUE RECORDS', 'PROVOGUE', 'PROWARE', 'PULP VIDEO', 'PULP', 'PULSE VIDEO', 'PULSE', 'PURE AUDIO RECORDINGS', 'PURE AUDIO', 'PURE FLIX ENTERTAINMENT', 'PURE FLIX', 'PURE ENTERTAINMENT', 'PYRAMIDE VIDEO', 'PYRAMIDE', 'QUALITY FILMS', 'QUALITY', 'QUARTO VALLEY RECORDS', 'QUARTO VALLEY', 'QUESTAR', 'R SQUARED FILMS', 'R SQUARED', 'RAPID EYE MOVIES', 'RAPID EYE', 'RARO VIDEO', 'RARO', 'RAROVIDEO U.S.', 'RAROVIDEO US', 'RARO VIDEO US', 'RARO VIDEO U.S.', 'RARO U.S.', 'RARO US', 'RAVEN BANNER RELEASING', 'RAVEN BANNER', 'RAVEN', 'RAZOR DIGITAL ENTERTAINMENT', 'RAZOR DIGITAL', 'RCA', 'RCO LIVE', 'RCO', 'RCV', 'REAL GONE MUSIC', 'REAL GONE', 'REANIMEDIA', 'REANI MEDIA', 'REDEMPTION', 'REEL', 'RELIANCE HOME VIDEO & GAMES', 'RELIANCE HOME VIDEO AND GAMES', 'RELIANCE HOME VIDEO', 'RELIANCE VIDEO', 'RELIANCE HOME', 'RELIANCE', 'REM CULTURE', 'REMAIN IN LIGHT', 'REPRISE', 'RESEN', 'RETROMEDIA', 'REVELATION FILMS LTD.', 'REVELATION FILMS LTD', 'REVELATION FILMS', 'REVELATION LTD.', 'REVELATION LTD', 'REVELATION', 'REVOLVER ENTERTAINMENT', 'REVOLVER', 'RHINO MUSIC', 'RHINO', 'RHV', 'RIGHT STUF', 'RIMINI EDITIONS', 'RISING SUN MEDIA', 'RLJ ENTERTAINMENT', 'RLJ', 'ROADRUNNER RECORDS', 'ROADSHOW ENTERTAINMENT', 'ROADSHOW', 'RONE', 'RONIN FLIX', 'ROTANA HOME ENTERTAINMENT', 'ROTANA ENTERTAINMENT', 'ROTANA HOME', 'ROTANA', 'ROUGH TRADE', - 'ROUNDER', 'SAFFRON HILL FILMS', 'SAFFRON HILL', 'SAFFRON', 'SAMUEL GOLDWYN FILMS', 'SAMUEL GOLDWYN', 'SAN FRANCISCO SYMPHONY', 'SANDREW METRONOME', 'SAPHRANE', 'SAVOR', 'SCANBOX ENTERTAINMENT', 'SCANBOX', 'SCENIC LABS', 'SCHRƖDERMEDIA', 'SCHRODERMEDIA', 'SCHRODER MEDIA', 'SCORPION RELEASING', 'SCORPION', 'SCREAM TEAM RELEASING', 'SCREAM TEAM', 'SCREEN MEDIA', 'SCREEN', 'SCREENBOUND PICTURES', 'SCREENBOUND', 'SCREENWAVE MEDIA', 'SCREENWAVE', 'SECOND RUN', 'SECOND SIGHT', 'SEEDSMAN GROUP', 'SELECT VIDEO', 'SELECTA VISION', 'SENATOR', 'SENTAI FILMWORKS', 'SENTAI', 'SEVEN7', 'SEVERIN FILMS', 'SEVERIN', 'SEVILLE', 'SEYONS ENTERTAINMENT', 'SEYONS', 'SF STUDIOS', 'SGL ENTERTAINMENT', 'SGL', 'SHAMELESS', 'SHAMROCK MEDIA', 'SHAMROCK', 'SHANGHAI EPIC MUSIC ENTERTAINMENT', 'SHANGHAI EPIC ENTERTAINMENT', 'SHANGHAI EPIC MUSIC', 'SHANGHAI MUSIC ENTERTAINMENT', 'SHANGHAI ENTERTAINMENT', 'SHANGHAI MUSIC', 'SHANGHAI', 'SHEMAROO', 'SHOCHIKU', 'SHOCK', 'SHOGAKU KAN', 'SHOUT FACTORY', 'SHOUT! 
FACTORY', 'SHOUT', 'SHOUT!', 'SHOWBOX', 'SHOWTIME ENTERTAINMENT', 'SHOWTIME', 'SHRIEK SHOW', 'SHUDDER', 'SIDONIS', 'SIDONIS CALYSTA', 'SIGNAL ONE ENTERTAINMENT', 'SIGNAL ONE', 'SIGNATURE ENTERTAINMENT', 'SIGNATURE', 'SILVER VISION', 'SINISTER FILM', 'SINISTER', 'SIREN VISUAL ENTERTAINMENT', 'SIREN VISUAL', 'SIREN ENTERTAINMENT', 'SIREN', 'SKANI', 'SKY DIGI', - 'SLASHER // VIDEO', 'SLASHER / VIDEO', 'SLASHER VIDEO', 'SLASHER', 'SLOVAK FILM INSTITUTE', 'SLOVAK FILM', 'SFI', 'SM LIFE DESIGN GROUP', 'SMOOTH PICTURES', 'SMOOTH', 'SNAPPER MUSIC', 'SNAPPER', 'SODA PICTURES', 'SODA', 'SONO LUMINUS', 'SONY MUSIC', 'SONY PICTURES', 'SONY', 'SONY PICTURES CLASSICS', 'SONY CLASSICS', 'SOUL MEDIA', 'SOUL', 'SOULFOOD MUSIC DISTRIBUTION', 'SOULFOOD DISTRIBUTION', 'SOULFOOD MUSIC', 'SOULFOOD', 'SOYUZ', 'SPECTRUM', 'SPENTZOS FILM', 'SPENTZOS', 'SPIRIT ENTERTAINMENT', 'SPIRIT', 'SPIRIT MEDIA GMBH', 'SPIRIT MEDIA', 'SPLENDID ENTERTAINMENT', 'SPLENDID FILM', 'SPO', 'SQUARE ENIX', 'SRI BALAJI VIDEO', 'SRI BALAJI', 'SRI', 'SRI VIDEO', 'SRS CINEMA', 'SRS', 'SSO RECORDINGS', 'SSO', 'ST2 MUSIC', 'ST2', 'STAR MEDIA ENTERTAINMENT', 'STAR ENTERTAINMENT', 'STAR MEDIA', 'STAR', 'STARLIGHT', 'STARZ / ANCHOR BAY', 'STARZ ANCHOR BAY', 'STARZ', 'ANCHOR BAY', 'STER KINEKOR', 'STERLING ENTERTAINMENT', 'STERLING', 'STINGRAY', 'STOCKFISCH RECORDS', 'STOCKFISCH', 'STRAND RELEASING', 'STRAND', 'STUDIO 4K', 'STUDIO CANAL', 'STUDIO GHIBLI', 'GHIBLI', 'STUDIO HAMBURG ENTERPRISES', 'HAMBURG ENTERPRISES', 'STUDIO HAMBURG', 'HAMBURG', 'STUDIO S', 'SUBKULTUR ENTERTAINMENT', 'SUBKULTUR', 'SUEVIA FILMS', 'SUEVIA', 'SUMMIT ENTERTAINMENT', 'SUMMIT', 'SUNFILM ENTERTAINMENT', 'SUNFILM', 'SURROUND RECORDS', 'SURROUND', 'SVENSK FILMINDUSTRI', 'SVENSK', 'SWEN FILMES', 'SWEN FILMS', 'SWEN', 'SYNAPSE FILMS', 'SYNAPSE', 'SYNDICADO', 'SYNERGETIC', 'T- SERIES', 'T-SERIES', 'T SERIES', 'TSERIES', 'T.V.P.', 'TVP', 'TACET RECORDS', 'TACET', 'TAI SENG', 'TAI SHENG', 'TAKEONE', 'TAKESHOBO', 'TAMASA DIFFUSION', 'TC ENTERTAINMENT', 'TC', 'TDK', 'TEAM MARKETING', 'TEATRO REAL', 'TEMA DISTRIBUCIONES', 'TEMPE DIGITAL', 'TF1 VIDƉO', 'TF1 VIDEO', 'TF1', 'THE BLU', 'BLU', 'THE ECSTASY OF FILMS', 'THE FILM DETECTIVE', 'FILM DETECTIVE', 'THE JOKERS', 'JOKERS', 'THE ON', 'ON', 'THIMFILM', 'THIM FILM', 'THIM', 'THIRD WINDOW FILMS', 'THIRD WINDOW', '3RD WINDOW FILMS', '3RD WINDOW', 'THUNDERBEAN ANIMATION', 'THUNDERBEAN', 'THUNDERBIRD RELEASING', 'THUNDERBIRD', 'TIBERIUS FILM', 'TIME LIFE', 'TIMELESS MEDIA GROUP', 'TIMELESS MEDIA', 'TIMELESS GROUP', 'TIMELESS', 'TLA RELEASING', 'TLA', 'TOBIS FILM', 'TOBIS', 'TOEI', 'TOHO', 'TOKYO SHOCK', 'TOKYO', 'TONPOOL MEDIEN GMBH', 'TONPOOL MEDIEN', 'TOPICS ENTERTAINMENT', 'TOPICS', 'TOUCHSTONE PICTURES', 'TOUCHSTONE', 'TRANSMISSION FILMS', 'TRANSMISSION', 'TRAVEL VIDEO STORE', 'TRIART', 'TRIGON FILM', 'TRIGON', 'TRINITY HOME ENTERTAINMENT', 'TRINITY ENTERTAINMENT', 'TRINITY HOME', 'TRINITY', 'TRIPICTURES', 'TRI-PICTURES', 'TRI PICTURES', 'TROMA', 'TURBINE MEDIEN', 'TURTLE RECORDS', 'TURTLE', 'TVA FILMS', 'TVA', 'TWILIGHT TIME', 'TWILIGHT', 'TT', 'TWIN CO., LTD.', 'TWIN CO, LTD.', 'TWIN CO., LTD', 'TWIN CO, LTD', 'TWIN CO LTD', 'TWIN LTD', 'TWIN CO.', 'TWIN CO', 'TWIN', 'UCA', 'UDR', 'UEK', 'UFA/DVD', 'UFA DVD', 'UFADVD', 'UGC PH', 'ULTIMATE3DHEAVEN', 'ULTRA', 'UMBRELLA ENTERTAINMENT', 'UMBRELLA', 'UMC', "UNCORK'D ENTERTAINMENT", 'UNCORKD ENTERTAINMENT', 'UNCORK D ENTERTAINMENT', "UNCORK'D", 'UNCORK D', 'UNCORKD', 'UNEARTHED FILMS', 'UNEARTHED', 'UNI DISC', 'UNIMUNDOS', 'UNITEL', 'UNIVERSAL MUSIC', 'UNIVERSAL SONY PICTURES HOME 
ENTERTAINMENT', 'UNIVERSAL SONY PICTURES ENTERTAINMENT', 'UNIVERSAL SONY PICTURES HOME', 'UNIVERSAL SONY PICTURES', 'UNIVERSAL HOME ENTERTAINMENT', 'UNIVERSAL ENTERTAINMENT', - 'UNIVERSAL HOME', 'UNIVERSAL STUDIOS', 'UNIVERSAL', 'UNIVERSE LASER & VIDEO CO.', 'UNIVERSE LASER AND VIDEO CO.', 'UNIVERSE LASER & VIDEO CO', 'UNIVERSE LASER AND VIDEO CO', 'UNIVERSE LASER CO.', 'UNIVERSE LASER CO', 'UNIVERSE LASER', 'UNIVERSUM FILM', 'UNIVERSUM', 'UTV', 'VAP', 'VCI', 'VENDETTA FILMS', 'VENDETTA', 'VERSƁTIL HOME VIDEO', 'VERSƁTIL VIDEO', 'VERSƁTIL HOME', 'VERSƁTIL', 'VERSATIL HOME VIDEO', 'VERSATIL VIDEO', 'VERSATIL HOME', 'VERSATIL', 'VERTICAL ENTERTAINMENT', 'VERTICAL', 'VƉRTICE 360Āŗ', 'VƉRTICE 360', 'VERTICE 360o', 'VERTICE 360', 'VERTIGO BERLIN', 'VƉRTIGO FILMS', 'VƉRTIGO', 'VERTIGO FILMS', 'VERTIGO', 'VERVE PICTURES', 'VIA VISION ENTERTAINMENT', 'VIA VISION', 'VICOL ENTERTAINMENT', 'VICOL', 'VICOM', 'VICTOR ENTERTAINMENT', 'VICTOR', 'VIDEA CDE', 'VIDEO FILM EXPRESS', 'VIDEO FILM', 'VIDEO EXPRESS', 'VIDEO MUSIC, INC.', 'VIDEO MUSIC, INC', 'VIDEO MUSIC INC.', 'VIDEO MUSIC INC', 'VIDEO MUSIC', 'VIDEO SERVICE CORP.', 'VIDEO SERVICE CORP', 'VIDEO SERVICE', 'VIDEO TRAVEL', 'VIDEOMAX', 'VIDEO MAX', 'VII PILLARS ENTERTAINMENT', 'VII PILLARS', 'VILLAGE FILMS', 'VINEGAR SYNDROME', 'VINEGAR', 'VS', 'VINNY MOVIES', 'VINNY', 'VIRGIL FILMS & ENTERTAINMENT', 'VIRGIL FILMS AND ENTERTAINMENT', 'VIRGIL ENTERTAINMENT', 'VIRGIL FILMS', 'VIRGIL', 'VIRGIN RECORDS', 'VIRGIN', 'VISION FILMS', 'VISION', 'VISUAL ENTERTAINMENT GROUP', + '01 DISTRIBUTION', '100 DESTINATIONS TRAVEL FILM', '101 FILMS', '1FILMS', '2 ENTERTAIN VIDEO', '20TH CENTURY FOX', '2L', '3D CONTENT HUB', '3D MEDIA', '3L FILM', '4DIGITAL', '4DVD', '4K ULTRA HD MOVIES', '4K UHD', '8-FILMS', '84 ENTERTAINMENT', '88 FILMS', '@ANIME', 'ANIME', 'A CONTRACORRIENTE', 'A CONTRACORRIENTE FILMS', 'A&E HOME VIDEO', 'A&E', 'A&M RECORDS', 'A+E NETWORKS', 'A+R', 'A-FILM', 'AAA', 'AB VIDƉO', 'AB VIDEO', 'ABC - (AUSTRALIAN BROADCASTING CORPORATION)', 'ABC', 'ABKCO', 'ABSOLUT MEDIEN', 'ABSOLUTE', 'ACCENT FILM ENTERTAINMENT', 'ACCENTUS', 'ACORN MEDIA', 'AD VITAM', 'ADA', 'ADITYA VIDEOS', 'ADSO FILMS', 'AFM RECORDS', 'AGFA', 'AIX RECORDS', + 'ALAMODE FILM', 'ALBA RECORDS', 'ALBANY RECORDS', 'ALBATROS', 'ALCHEMY', 'ALIVE', 'ALL ANIME', 'ALL INTERACTIVE ENTERTAINMENT', 'ALLEGRO', 'ALLIANCE', 'ALPHA MUSIC', 'ALTERDYSTRYBUCJA', 'ALTERED INNOCENCE', 'ALTITUDE FILM DISTRIBUTION', 'ALUCARD RECORDS', 'AMAZING D.C.', 'AMAZING DC', 'AMMO CONTENT', 'AMUSE SOFT ENTERTAINMENT', 'ANCONNECT', 'ANEC', 'ANIMATSU', 'ANIME HOUSE', 'ANIME LTD', 'ANIME WORKS', 'ANIMEIGO', 'ANIPLEX', 'ANOLIS ENTERTAINMENT', 'ANOTHER WORLD ENTERTAINMENT', 'AP INTERNATIONAL', 'APPLE', 'ARA MEDIA', 'ARBELOS', 'ARC ENTERTAINMENT', 'ARP SƉLECTION', 'ARP SELECTION', 'ARROW', 'ART SERVICE', 'ART VISION', 'ARTE ƉDITIONS', 'ARTE EDITIONS', 'ARTE VIDƉO', + 'ARTE VIDEO', 'ARTHAUS MUSIK', 'ARTIFICIAL EYE', 'ARTSPLOITATION FILMS', 'ARTUS FILMS', 'ASCOT ELITE HOME ENTERTAINMENT', 'ASIA VIDEO', 'ASMIK ACE', 'ASTRO RECORDS & FILMWORKS', 'ASYLUM', 'ATLANTIC FILM', 'ATLANTIC RECORDS', 'ATLAS FILM', 'AUDIO VISUAL ENTERTAINMENT', 'AURO-3D CREATIVE LABEL', 'AURUM', 'AV VISIONEN', 'AV-JET', 'AVALON', 'AVENTI', 'AVEX TRAX', 'AXIOM', 'AXIS RECORDS', 'AYNGARAN', 'BAC FILMS', 'BACH FILMS', 'BANDAI VISUAL', 'BARCLAY', 'BBC', 'BRITISH BROADCASTING CORPORATION', 'BBI FILMS', 'BBI', 'BCI HOME ENTERTAINMENT', 'BEGGARS BANQUET', 'BEL AIR CLASSIQUES', 'BELGA FILMS', 'BELVEDERE', 'BENELUX FILM DISTRIBUTORS', 'BENNETT-WATT MEDIA', 'BERLIN 
CLASSICS', 'BERLINER PHILHARMONIKER RECORDINGS', 'BEST ENTERTAINMENT', 'BEYOND HOME ENTERTAINMENT', 'BFI VIDEO', 'BFI', 'BRITISH FILM INSTITUTE', 'BFS ENTERTAINMENT', 'BFS', 'BHAVANI', 'BIBER RECORDS', 'BIG HOME VIDEO', 'BILDSTƖRUNG', + 'BILDSTORUNG', 'BILL ZEBUB', 'BIRNENBLATT', 'BIT WEL', 'BLACK BOX', 'BLACK HILL PICTURES', 'BLACK HILL', 'BLACK HOLE RECORDINGS', 'BLACK HOLE', 'BLAQOUT', 'BLAUFIELD MUSIC', 'BLAUFIELD', 'BLOCKBUSTER ENTERTAINMENT', 'BLOCKBUSTER', 'BLU PHASE MEDIA', 'BLU-RAY ONLY', 'BLU-RAY', 'BLURAY ONLY', 'BLURAY', 'BLUE GENTIAN RECORDS', 'BLUE KINO', 'BLUE UNDERGROUND', 'BMG/ARISTA', 'BMG', 'BMGARISTA', 'BMG ARISTA', 'ARISTA', 'ARISTA/BMG', 'ARISTABMG', 'ARISTA BMG', 'BONTON FILM', 'BONTON', 'BOOMERANG PICTURES', 'BOOMERANG', 'BQHL ƉDITIONS', 'BQHL EDITIONS', 'BQHL', 'BREAKING GLASS', 'BRIDGESTONE', 'BRINK', 'BROAD GREEN PICTURES', 'BROAD GREEN', 'BUSCH MEDIA GROUP', 'BUSCH', 'C MAJOR', 'C.B.S.', 'CAICHANG', 'CALIFƓRNIA FILMES', 'CALIFORNIA FILMES', 'CALIFORNIA', 'CAMEO', 'CAMERA OBSCURA', 'CAMERATA', 'CAMP MOTION PICTURES', 'CAMP MOTION', 'CAPELIGHT PICTURES', 'CAPELIGHT', 'CAPITOL', 'CAPITOL RECORDS', 'CAPRICCI', 'CARGO RECORDS', 'CARLOTTA FILMS', 'CARLOTTA', 'CARLOTA', 'CARMEN FILM', 'CASCADE', 'CATCHPLAY', 'CAULDRON FILMS', 'CAULDRON', 'CBS TELEVISION STUDIOS', 'CBS', 'CCTV', 'CCV ENTERTAINMENT', 'CCV', 'CD BABY', 'CD LAND', 'CECCHI GORI', 'CENTURY MEDIA', 'CHUAN XUN SHI DAI MULTIMEDIA', 'CINE-ASIA', 'CINƉART', 'CINEART', 'CINEDIGM', 'CINEFIL IMAGICA', 'CINEMA EPOCH', 'CINEMA GUILD', 'CINEMA LIBRE STUDIOS', 'CINEMA MONDO', 'CINEMATIC VISION', 'CINEPLOIT RECORDS', 'CINESTRANGE EXTREME', 'CITEL VIDEO', 'CITEL', 'CJ ENTERTAINMENT', 'CJ', 'CLASSIC MEDIA', 'CLASSICFLIX', 'CLASSICLINE', 'CLAUDIO RECORDS', 'CLEAR VISION', 'CLEOPATRA', 'CLOSE UP', 'CMS MEDIA LIMITED', 'CMV LASERVISION', 'CN ENTERTAINMENT', 'CODE RED', 'COHEN MEDIA GROUP', 'COHEN', 'COIN DE MIRE CINƉMA', 'COIN DE MIRE CINEMA', 'COLOSSEO FILM', 'COLUMBIA', 'COLUMBIA PICTURES', 'COLUMBIA/TRI-STAR', 'TRI-STAR', 'COMMERCIAL MARKETING', 'CONCORD MUSIC GROUP', 'CONCORDE VIDEO', 'CONDOR', 'CONSTANTIN FILM', 'CONSTANTIN', 'CONSTANTINO FILMES', 'CONSTANTINO', 'CONSTRUCTIVE MEDIA SERVICE', 'CONSTRUCTIVE', 'CONTENT ZONE', 'CONTENTS GATE', 'COQUEIRO VERDE', 'CORNERSTONE MEDIA', 'CORNERSTONE', 'CP DIGITAL', 'CREST MOVIES', 'CRITERION', 'CRITERION COLLECTION', 'CC', 'CRYSTAL CLASSICS', 'CULT EPICS', 'CULT FILMS', 'CULT VIDEO', 'CURZON FILM WORLD', 'D FILMS', "D'AILLY COMPANY", 'DAILLY COMPANY', 'D AILLY COMPANY', "D'AILLY", 'DAILLY', 'D AILLY', 'DA CAPO', 'DA MUSIC', "DALL'ANGELO PICTURES", 'DALLANGELO PICTURES', "DALL'ANGELO", 'DALL ANGELO PICTURES', 'DALL ANGELO', 'DAREDO', 'DARK FORCE ENTERTAINMENT', 'DARK FORCE', 'DARK SIDE RELEASING', 'DARK SIDE', 'DAZZLER MEDIA', 'DAZZLER', 'DCM PICTURES', 'DCM', 'DEAPLANETA', 'DECCA', 'DEEPJOY', 'DEFIANT SCREEN ENTERTAINMENT', 'DEFIANT SCREEN', 'DEFIANT', 'DELOS', 'DELPHIAN RECORDS', 'DELPHIAN', 'DELTA MUSIC & ENTERTAINMENT', 'DELTA MUSIC AND ENTERTAINMENT', 'DELTA MUSIC ENTERTAINMENT', 'DELTA MUSIC', 'DELTAMAC CO. 
LTD.', 'DELTAMAC CO LTD', 'DELTAMAC CO', 'DELTAMAC', 'DEMAND MEDIA', 'DEMAND', 'DEP', 'DEUTSCHE GRAMMOPHON', 'DFW', 'DGM', 'DIAPHANA', 'DIGIDREAMS STUDIOS', 'DIGIDREAMS', 'DIGITAL ENVIRONMENTS', 'DIGITAL', 'DISCOTEK MEDIA', 'DISCOVERY CHANNEL', 'DISCOVERY', 'DISK KINO', 'DISNEY / BUENA VISTA', 'DISNEY', 'BUENA VISTA', 'DISNEY BUENA VISTA', 'DISTRIBUTION SELECT', 'DIVISA', 'DNC ENTERTAINMENT', 'DNC', 'DOGWOOF', 'DOLMEN HOME VIDEO', 'DOLMEN', 'DONAU FILM', 'DONAU', 'DORADO FILMS', 'DORADO', 'DRAFTHOUSE FILMS', 'DRAFTHOUSE', 'DRAGON FILM ENTERTAINMENT', 'DRAGON ENTERTAINMENT', 'DRAGON FILM', 'DRAGON', 'DREAMWORKS', 'DRIVE ON RECORDS', 'DRIVE ON', 'DRIVE-ON', 'DRIVEON', 'DS MEDIA', 'DTP ENTERTAINMENT AG', 'DTP ENTERTAINMENT', 'DTP AG', 'DTP', 'DTS ENTERTAINMENT', 'DTS', 'DUKE MARKETING', 'DUKE VIDEO DISTRIBUTION', 'DUKE', 'DUTCH FILMWORKS', 'DUTCH', 'DVD INTERNATIONAL', 'DVD', 'DYBEX', 'DYNAMIC', 'DYNIT', 'E1 ENTERTAINMENT', 'E1', 'EAGLE ENTERTAINMENT', 'EAGLE HOME ENTERTAINMENT PVT.LTD.', 'EAGLE HOME ENTERTAINMENT PVTLTD', 'EAGLE HOME ENTERTAINMENT PVT LTD', 'EAGLE HOME ENTERTAINMENT', 'EAGLE PICTURES', 'EAGLE ROCK ENTERTAINMENT', 'EAGLE ROCK', 'EAGLE VISION MEDIA', 'EAGLE VISION', 'EARMUSIC', 'EARTH ENTERTAINMENT', 'EARTH', 'ECHO BRIDGE ENTERTAINMENT', 'ECHO BRIDGE', 'EDEL GERMANY GMBH', 'EDEL GERMANY', 'EDEL RECORDS', 'EDITION TONFILM', 'EDITIONS MONTPARNASSE', 'EDKO FILMS LTD.', 'EDKO FILMS LTD', 'EDKO FILMS', + 'EDKO', "EIN'S M&M CO", 'EINS M&M CO', "EIN'S M&M", 'EINS M&M', 'ELEA-MEDIA', 'ELEA MEDIA', 'ELEA', 'ELECTRIC PICTURE', 'ELECTRIC', 'ELEPHANT FILMS', 'ELEPHANT', 'ELEVATION', 'EMI', 'EMON', 'EMS', 'EMYLIA', 'ENE MEDIA', 'ENE', 'ENTERTAINMENT IN VIDEO', 'ENTERTAINMENT IN', 'ENTERTAINMENT ONE', 'ENTERTAINMENT ONE FILMS CANADA INC.', 'ENTERTAINMENT ONE FILMS CANADA INC', 'ENTERTAINMENT ONE FILMS CANADA', 'ENTERTAINMENT ONE CANADA INC', 'ENTERTAINMENT ONE CANADA', 'ENTERTAINMENTONE', 'EONE', 'EOS', 'EPIC PICTURES', 'EPIC', 'EPIC RECORDS', 'ERATO', 'EROS', 'ESC EDITIONS', 'ESCAPI MEDIA BV', 'ESOTERIC RECORDINGS', 'ESPN FILMS', 'EUREKA ENTERTAINMENT', 'EUREKA', 'EURO PICTURES', 'EURO VIDEO', 'EUROARTS', 'EUROPA FILMES', 'EUROPA', 'EUROPACORP', 'EUROZOOM', 'EXCEL', 'EXPLOSIVE MEDIA', 'EXPLOSIVE', 'EXTRALUCID FILMS', 'EXTRALUCID', 'EYE SEE MOVIES', 'EYE SEE', 'EYK MEDIA', 'EYK', 'FABULOUS FILMS', 'FABULOUS', 'FACTORIS FILMS', 'FACTORIS', 'FARAO RECORDS', 'FARBFILM HOME ENTERTAINMENT', 'FARBFILM ENTERTAINMENT', 'FARBFILM HOME', 'FARBFILM', 'FEELGOOD ENTERTAINMENT', 'FEELGOOD', 'FERNSEHJUWELEN', 'FILM CHEST', 'FILM MEDIA', 'FILM MOVEMENT', 'FILM4', 'FILMART', 'FILMAURO', 'FILMAX', 'FILMCONFECT HOME ENTERTAINMENT', 'FILMCONFECT ENTERTAINMENT', 'FILMCONFECT HOME', 'FILMCONFECT', 'FILMEDIA', 'FILMJUWELEN', 'FILMOTEKA NARODAWA', 'FILMRISE', 'FINAL CUT ENTERTAINMENT', 'FINAL CUT', 'FIREHOUSE 12 RECORDS', 'FIREHOUSE 12', 'FIRST INTERNATIONAL PRODUCTION', 'FIRST INTERNATIONAL', 'FIRST LOOK STUDIOS', 'FIRST LOOK', 'FLAGMAN TRADE', 'FLASHSTAR FILMES', 'FLASHSTAR', 'FLICKER ALLEY', 'FNC ADD CULTURE', 'FOCUS FILMES', 'FOCUS', 'FOKUS MEDIA', 'FOKUSA', 'FOX PATHE EUROPA', 'FOX PATHE', 'FOX EUROPA', 'FOX/MGM', 'FOX MGM', 'MGM', 'MGM/FOX', 'FOX', 'FPE', 'FRANCE TƉLƉVISIONS DISTRIBUTION', 'FRANCE TELEVISIONS DISTRIBUTION', 'FRANCE TELEVISIONS', 'FRANCE', 'FREE DOLPHIN ENTERTAINMENT', 'FREE DOLPHIN', 'FREESTYLE DIGITAL MEDIA', 'FREESTYLE DIGITAL', 'FREESTYLE', 'FREMANTLE HOME ENTERTAINMENT', 'FREMANTLE ENTERTAINMENT', 'FREMANTLE HOME', 'FREMANTL', 'FRENETIC FILMS', 'FRENETIC', 'FRONTIER WORKS', 'FRONTIER', 
'FRONTIERS MUSIC', 'FRONTIERS RECORDS', 'FS FILM OY', 'FS FILM', 'FULL MOON FEATURES', 'FULL MOON', 'FUN CITY EDITIONS', 'FUN CITY', + 'FUNIMATION ENTERTAINMENT', 'FUNIMATION', 'FUSION', 'FUTUREFILM', 'G2 PICTURES', 'G2', 'GAGA COMMUNICATIONS', 'GAGA', 'GAIAM', 'GALAPAGOS', 'GAMMA HOME ENTERTAINMENT', 'GAMMA ENTERTAINMENT', 'GAMMA HOME', 'GAMMA', 'GARAGEHOUSE PICTURES', 'GARAGEHOUSE', 'GARAGEPLAY (車åŗ«å؛ę؂)', '車åŗ«å؛ę؂', 'GARAGEPLAY (Che Ku Yu Le )', 'GARAGEPLAY', 'Che Ku Yu Le', 'GAUMONT', 'GEFFEN', 'GENEON ENTERTAINMENT', 'GENEON', 'GENEON UNIVERSAL ENTERTAINMENT', 'GENERAL VIDEO RECORDING', 'GLASS DOLL FILMS', 'GLASS DOLL', 'GLOBE MUSIC MEDIA', 'GLOBE MUSIC', 'GLOBE MEDIA', 'GLOBE', 'GO ENTERTAIN', 'GO', 'GOLDEN HARVEST', 'GOOD!MOVIES', 'GOOD! MOVIES', 'GOOD MOVIES', 'GRAPEVINE VIDEO', 'GRAPEVINE', 'GRASSHOPPER FILM', 'GRASSHOPPER FILMS', 'GRASSHOPPER', 'GRAVITAS VENTURES', 'GRAVITAS', 'GREAT MOVIES', 'GREAT', 'GREEN APPLE ENTERTAINMENT', 'GREEN ENTERTAINMENT', 'GREEN APPLE', 'GREEN', 'GREENNARAE MEDIA', 'GREENNARAE', 'GRINDHOUSE RELEASING', 'GRINDHOUSE', 'GRIND HOUSE', 'GRYPHON ENTERTAINMENT', 'GRYPHON', 'GUNPOWDER & SKY', 'GUNPOWDER AND SKY', 'GUNPOWDER SKY', 'GUNPOWDER + SKY', 'GUNPOWDER', 'HANABEE ENTERTAINMENT', 'HANABEE', 'HANNOVER HOUSE', 'HANNOVER', 'HANSESOUND', 'HANSE SOUND', 'HANSE', 'HAPPINET', 'HARMONIA MUNDI', 'HARMONIA', 'HBO', 'HDC', 'HEC', 'HELL & BACK RECORDINGS', 'HELL AND BACK RECORDINGS', 'HELL & BACK', 'HELL AND BACK', "HEN'S TOOTH VIDEO", 'HENS TOOTH VIDEO', "HEN'S TOOTH", 'HENS TOOTH', 'HIGH FLIERS', 'HIGHLIGHT', 'HILLSONG', 'HISTORY CHANNEL', 'HISTORY', 'HK VIDƉO', 'HK VIDEO', 'HK', 'HMH HAMBURGER MEDIEN HAUS', 'HAMBURGER MEDIEN HAUS', 'HMH HAMBURGER MEDIEN', 'HMH HAMBURGER', 'HMH', 'HOLLYWOOD CLASSIC ENTERTAINMENT', 'HOLLYWOOD CLASSIC', 'HOLLYWOOD PICTURES', 'HOLLYWOOD', 'HOPSCOTCH ENTERTAINMENT', 'HOPSCOTCH', 'HPM', 'HƄNNSLER CLASSIC', 'HANNSLER CLASSIC', 'HANNSLER', 'I-CATCHER', 'I CATCHER', 'ICATCHER', 'I-ON NEW MEDIA', 'I ON NEW MEDIA', 'ION NEW MEDIA', 'ION MEDIA', 'I-ON', 'ION', 'IAN PRODUCTIONS', 'IAN', 'ICESTORM', 'ICON FILM DISTRIBUTION', 'ICON DISTRIBUTION', 'ICON FILM', 'ICON', 'IDEALE AUDIENCE', 'IDEALE', 'IFC FILMS', 'IFC', 'IFILM', 'ILLUSIONS UNLTD.', 'ILLUSIONS UNLTD', 'ILLUSIONS', 'IMAGE ENTERTAINMENT', 'IMAGE', 'IMAGEM FILMES', 'IMAGEM', 'IMOVISION', 'IMPERIAL CINEPIX', 'IMPRINT', 'IMPULS HOME ENTERTAINMENT', 'IMPULS ENTERTAINMENT', 'IMPULS HOME', 'IMPULS', 'IN-AKUSTIK', 'IN AKUSTIK', 'INAKUSTIK', 'INCEPTION MEDIA GROUP', 'INCEPTION MEDIA', 'INCEPTION GROUP', 'INCEPTION', 'INDEPENDENT', 'INDICAN', 'INDIE RIGHTS', 'INDIE', 'INDIGO', 'INFO', 'INJOINGAN', 'INKED PICTURES', 'INKED', 'INSIDE OUT MUSIC', 'INSIDE MUSIC', 'INSIDE OUT', 'INSIDE', 'INTERCOM', 'INTERCONTINENTAL VIDEO', 'INTERCONTINENTAL', 'INTERGROOVE', 'INTERSCOPE', 'INVINCIBLE PICTURES', 'INVINCIBLE', 'ISLAND/MERCURY', 'ISLAND MERCURY', 'ISLANDMERCURY', 'ISLAND & MERCURY', 'ISLAND AND MERCURY', 'ISLAND', 'ITN', 'ITV DVD', 'ITV', 'IVC', 'IVE ENTERTAINMENT', 'IVE', 'J&R ADVENTURES', 'J&R', 'JR', 'JAKOB', 'JONU MEDIA', 'JONU', 'JRB PRODUCTIONS', 'JRB', 'JUST BRIDGE ENTERTAINMENT', 'JUST BRIDGE', 'JUST ENTERTAINMENT', 'JUST', 'KABOOM ENTERTAINMENT', 'KABOOM', 'KADOKAWA ENTERTAINMENT', 'KADOKAWA', 'KAIROS', 'KALEIDOSCOPE ENTERTAINMENT', 'KALEIDOSCOPE', 'KAM & RONSON ENTERPRISES', 'KAM & RONSON', 'KAM&RONSON ENTERPRISES', 'KAM&RONSON', 'KAM AND RONSON ENTERPRISES', 'KAM AND RONSON', 'KANA HOME VIDEO', 'KARMA FILMS', 'KARMA', 'KATZENBERGER', 'KAZE', + 'KBS MEDIA', 'KBS', 'KD MEDIA', 'KD', 
'KING MEDIA', 'KING', 'KING RECORDS', 'KINO LORBER', 'KINO', 'KINO SWIAT', 'KINOKUNIYA', 'KINOWELT HOME ENTERTAINMENT/DVD', 'KINOWELT HOME ENTERTAINMENT', 'KINOWELT ENTERTAINMENT', 'KINOWELT HOME DVD', 'KINOWELT ENTERTAINMENT/DVD', 'KINOWELT DVD', 'KINOWELT', 'KIT PARKER FILMS', 'KIT PARKER', 'KITTY MEDIA', 'KNM HOME ENTERTAINMENT', 'KNM ENTERTAINMENT', 'KNM HOME', 'KNM', 'KOBA FILMS', 'KOBA', 'KOCH ENTERTAINMENT', 'KOCH MEDIA', 'KOCH', 'KRAKEN RELEASING', 'KRAKEN', 'KSCOPE', 'KSM', 'KULTUR', "L'ATELIER D'IMAGES", "LATELIER D'IMAGES", "L'ATELIER DIMAGES", 'LATELIER DIMAGES', "L ATELIER D'IMAGES", "L'ATELIER D IMAGES", + 'L ATELIER D IMAGES', "L'ATELIER", 'L ATELIER', 'LATELIER', 'LA AVENTURA AUDIOVISUAL', 'LA AVENTURA', 'LACE GROUP', 'LACE', 'LASER PARADISE', 'LAYONS', 'LCJ EDITIONS', 'LCJ', 'LE CHAT QUI FUME', 'LE PACTE', 'LEDICK FILMHANDEL', 'LEGEND', 'LEOMARK STUDIOS', 'LEOMARK', 'LEONINE FILMS', 'LEONINE', 'LICHTUNG MEDIA LTD', 'LICHTUNG LTD', 'LICHTUNG MEDIA LTD.', 'LICHTUNG LTD.', 'LICHTUNG MEDIA', 'LICHTUNG', 'LIGHTHOUSE HOME ENTERTAINMENT', 'LIGHTHOUSE ENTERTAINMENT', 'LIGHTHOUSE HOME', 'LIGHTHOUSE', 'LIGHTYEAR', 'LIONSGATE FILMS', 'LIONSGATE', 'LIZARD CINEMA TRADE', 'LLAMENTOL', 'LOBSTER FILMS', 'LOBSTER', 'LOGON', 'LORBER FILMS', 'LORBER', 'LOS BANDITOS FILMS', 'LOS BANDITOS', 'LOUD & PROUD RECORDS', 'LOUD AND PROUD RECORDS', 'LOUD & PROUD', 'LOUD AND PROUD', 'LSO LIVE', 'LUCASFILM', 'LUCKY RED', 'LUMIƈRE HOME ENTERTAINMENT', 'LUMIERE HOME ENTERTAINMENT', 'LUMIERE ENTERTAINMENT', 'LUMIERE HOME', 'LUMIERE', 'M6 VIDEO', 'M6', 'MAD DIMENSION', 'MADMAN ENTERTAINMENT', 'MADMAN', 'MAGIC BOX', 'MAGIC PLAY', 'MAGNA HOME ENTERTAINMENT', 'MAGNA ENTERTAINMENT', 'MAGNA HOME', 'MAGNA', 'MAGNOLIA PICTURES', 'MAGNOLIA', 'MAIDEN JAPAN', 'MAIDEN', 'MAJENG MEDIA', 'MAJENG', 'MAJESTIC HOME ENTERTAINMENT', 'MAJESTIC ENTERTAINMENT', 'MAJESTIC HOME', 'MAJESTIC', 'MANGA HOME ENTERTAINMENT', 'MANGA ENTERTAINMENT', 'MANGA HOME', 'MANGA', 'MANTA LAB', 'MAPLE STUDIOS', 'MAPLE', 'MARCO POLO PRODUCTION', 'MARCO POLO', 'MARIINSKY', 'MARVEL STUDIOS', 'MARVEL', 'MASCOT RECORDS', 'MASCOT', 'MASSACRE VIDEO', 'MASSACRE', 'MATCHBOX', 'MATRIX D', 'MAXAM', 'MAYA HOME ENTERTAINMENT', 'MAYA ENTERTAINMENT', 'MAYA HOME', 'MAYAT', 'MDG', 'MEDIA BLASTERS', 'MEDIA FACTORY', 'MEDIA TARGET DISTRIBUTION', 'MEDIA TARGET', 'MEDIAINVISION', 'MEDIATOON', 'MEDIATRES ESTUDIO', 'MEDIATRES STUDIO', 'MEDIATRES', 'MEDICI ARTS', 'MEDICI CLASSICS', 'MEDIUMRARE ENTERTAINMENT', 'MEDIUMRARE', 'MEDUSA', 'MEGASTAR', 'MEI AH', 'MELI MƉDIAS', 'MELI MEDIAS', 'MEMENTO FILMS', 'MEMENTO', 'MENEMSHA FILMS', 'MENEMSHA', 'MERCURY', 'MERCURY STUDIOS', 'MERGE SOFT PRODUCTIONS', 'MERGE PRODUCTIONS', 'MERGE SOFT', 'MERGE', 'METAL BLADE RECORDS', 'METAL BLADE', 'METEOR', 'METRO-GOLDWYN-MAYER', 'METRO GOLDWYN MAYER', 'METROGOLDWYNMAYER', 'METRODOME VIDEO', 'METRODOME', 'METROPOLITAN', 'MFA+', 'MFA', 'MIG FILMGROUP', 'MIG', 'MILESTONE', 'MILL CREEK ENTERTAINMENT', 'MILL CREEK', 'MILLENNIUM MEDIA', 'MILLENNIUM', 'MIRAGE ENTERTAINMENT', 'MIRAGE', 'MIRAMAX', 'MISTERIYA ZVUKA', 'MK2', 'MODE RECORDS', 'MODE', 'MOMENTUM PICTURES', 'MONDO HOME ENTERTAINMENT', 'MONDO ENTERTAINMENT', 'MONDO HOME', 'MONDO MACABRO', 'MONGREL MEDIA', 'MONOLIT', 'MONOLITH VIDEO', 'MONOLITH', 'MONSTER PICTURES', 'MONSTER', 'MONTEREY VIDEO', 'MONTEREY', 'MONUMENT RELEASING', 'MONUMENT', 'MORNINGSTAR', 'MORNING STAR', 'MOSERBAER', 'MOVIEMAX', 'MOVINSIDE', 'MPI MEDIA GROUP', 'MPI MEDIA', 'MPI', 'MR. 
BONGO FILMS', 'MR BONGO FILMS', 'MR BONGO', 'MRG (MERIDIAN)', 'MRG MERIDIAN', 'MRG', 'MERIDIAN', 'MUBI', 'MUG SHOT PRODUCTIONS', 'MUG SHOT', 'MULTIMUSIC', 'MULTI-MUSIC', 'MULTI MUSIC', 'MUSE', 'MUSIC BOX FILMS', 'MUSIC BOX', 'MUSICBOX', 'MUSIC BROKERS', 'MUSIC THEORIES', 'MUSIC VIDEO DISTRIBUTORS', 'MUSIC VIDEO', 'MUSTANG ENTERTAINMENT', 'MUSTANG', 'MVD VISUAL', 'MVD', 'MVD/VSC', 'MVL', 'MVM ENTERTAINMENT', 'MVM', 'MYNDFORM', 'MYSTIC NIGHT PICTURES', 'MYSTIC NIGHT', 'NAMELESS MEDIA', 'NAMELESS', 'NAPALM RECORDS', 'NAPALM', 'NATIONAL ENTERTAINMENT MEDIA', 'NATIONAL ENTERTAINMENT', 'NATIONAL MEDIA', 'NATIONAL FILM ARCHIVE', 'NATIONAL ARCHIVE', 'NATIONAL FILM', 'NATIONAL GEOGRAPHIC', 'NAT GEO TV', 'NAT GEO', 'NGO', 'NAXOS', 'NBCUNIVERSAL ENTERTAINMENT JAPAN', 'NBC UNIVERSAL ENTERTAINMENT JAPAN', 'NBCUNIVERSAL JAPAN', 'NBC UNIVERSAL JAPAN', 'NBC JAPAN', 'NBO ENTERTAINMENT', 'NBO', 'NEOS', 'NETFLIX', 'NETWORK', 'NEW BLOOD', 'NEW DISC', 'NEW KSM', 'NEW LINE CINEMA', 'NEW LINE', 'NEW MOVIE TRADING CO. LTD', 'NEW MOVIE TRADING CO LTD', 'NEW MOVIE TRADING CO', 'NEW MOVIE TRADING', 'NEW WAVE FILMS', 'NEW WAVE', 'NFI', 'NHK', 'NIPPONART', 'NIS AMERICA', 'NJUTAFILMS', 'NOBLE ENTERTAINMENT', 'NOBLE', 'NORDISK FILM', 'NORDISK', 'NORSK FILM', 'NORSK', 'NORTH AMERICAN MOTION PICTURES', 'NOS AUDIOVISUAIS', 'NOTORIOUS PICTURES', 'NOTORIOUS', 'NOVA MEDIA', 'NOVA', 'NOVA SALES AND DISTRIBUTION', 'NOVA SALES & DISTRIBUTION', 'NSM', 'NSM RECORDS', 'NUCLEAR BLAST', 'NUCLEUS FILMS', 'NUCLEUS', 'OBERLIN MUSIC', 'OBERLIN', 'OBRAS-PRIMAS DO CINEMA', 'OBRAS PRIMAS DO CINEMA', 'OBRASPRIMAS DO CINEMA', 'OBRAS-PRIMAS CINEMA', 'OBRAS PRIMAS CINEMA', 'OBRASPRIMAS CINEMA', 'OBRAS-PRIMAS', 'OBRAS PRIMAS', 'OBRASPRIMAS', 'ODEON', 'OFDB FILMWORKS', 'OFDB', 'OLIVE FILMS', 'OLIVE', 'ONDINE', 'ONSCREEN FILMS', 'ONSCREEN', 'OPENING DISTRIBUTION', 'OPERA AUSTRALIA', 'OPTIMUM HOME ENTERTAINMENT', 'OPTIMUM ENTERTAINMENT', 'OPTIMUM HOME', 'OPTIMUM', 'OPUS ARTE', 'ORANGE STUDIO', 'ORANGE', 'ORLANDO EASTWOOD FILMS', 'ORLANDO FILMS', 'ORLANDO EASTWOOD', 'ORLANDO', 'ORUSTAK PICTURES', 'ORUSTAK', 'OSCILLOSCOPE PICTURES', 'OSCILLOSCOPE', 'OUTPLAY', 'PALISADES TARTAN', 'PAN VISION', 'PANVISION', 'PANAMINT CINEMA', 'PANAMINT', 'PANDASTORM ENTERTAINMENT', 'PANDA STORM ENTERTAINMENT', 'PANDASTORM', 'PANDA STORM', 'PANDORA FILM', 'PANDORA', 'PANEGYRIC', 'PANORAMA', 'PARADE DECK FILMS', 'PARADE DECK', 'PARADISE', 'PARADISO FILMS', 'PARADOX', 'PARAMOUNT PICTURES', 'PARAMOUNT', 'PARIS FILMES', 'PARIS FILMS', 'PARIS', 'PARK CIRCUS', 'PARLOPHONE', 'PASSION RIVER', 'PATHE DISTRIBUTION', 'PATHE', 'PBS', 'PEACE ARCH TRINITY', 'PECCADILLO PICTURES', 'PEPPERMINT', 'PHASE 4 FILMS', 'PHASE 4', 'PHILHARMONIA BAROQUE', 'PICTURE HOUSE ENTERTAINMENT', 'PICTURE ENTERTAINMENT', 'PICTURE HOUSE', 'PICTURE', 'PIDAX', + 'PINK FLOYD RECORDS', 'PINK FLOYD', 'PINNACLE FILMS', 'PINNACLE', 'PLAIN', 'PLATFORM ENTERTAINMENT LIMITED', 'PLATFORM ENTERTAINMENT LTD', 'PLATFORM ENTERTAINMENT LTD.', 'PLATFORM ENTERTAINMENT', 'PLATFORM', 'PLAYARTE', 'PLG UK CLASSICS', 'PLG UK', 'PLG', 'POLYBAND & TOPPIC VIDEO/WVG', 'POLYBAND AND TOPPIC VIDEO/WVG', 'POLYBAND & TOPPIC VIDEO WVG', 'POLYBAND & TOPPIC VIDEO AND WVG', 'POLYBAND & TOPPIC VIDEO & WVG', 'POLYBAND AND TOPPIC VIDEO WVG', 'POLYBAND AND TOPPIC VIDEO AND WVG', 'POLYBAND AND TOPPIC VIDEO & WVG', 'POLYBAND & TOPPIC VIDEO', 'POLYBAND AND TOPPIC VIDEO', 'POLYBAND & TOPPIC', 'POLYBAND AND TOPPIC', 'POLYBAND', 'WVG', 'POLYDOR', 'PONY', 'PONY CANYON', 'POTEMKINE', 'POWERHOUSE FILMS', 'POWERHOUSE', 'POWERSTATIOM', 'PRIDE & JOY', 
'PRIDE AND JOY', 'PRINZ MEDIA', 'PRINZ', 'PRIS AUDIOVISUAIS', 'PRO VIDEO', 'PRO-VIDEO', 'PRO-MOTION', 'PRO MOTION', 'PROD. JRB', 'PROD JRB', 'PRODISC', 'PROKINO', 'PROVOGUE RECORDS', 'PROVOGUE', 'PROWARE', 'PULP VIDEO', 'PULP', 'PULSE VIDEO', 'PULSE', 'PURE AUDIO RECORDINGS', 'PURE AUDIO', 'PURE FLIX ENTERTAINMENT', 'PURE FLIX', 'PURE ENTERTAINMENT', 'PYRAMIDE VIDEO', 'PYRAMIDE', 'QUALITY FILMS', 'QUALITY', 'QUARTO VALLEY RECORDS', 'QUARTO VALLEY', 'QUESTAR', 'R SQUARED FILMS', 'R SQUARED', 'RAPID EYE MOVIES', 'RAPID EYE', 'RARO VIDEO', 'RARO', 'RAROVIDEO U.S.', 'RAROVIDEO US', 'RARO VIDEO US', 'RARO VIDEO U.S.', 'RARO U.S.', 'RARO US', 'RAVEN BANNER RELEASING', 'RAVEN BANNER', 'RAVEN', 'RAZOR DIGITAL ENTERTAINMENT', 'RAZOR DIGITAL', 'RCA', 'RCO LIVE', 'RCO', 'RCV', 'REAL GONE MUSIC', 'REAL GONE', 'REANIMEDIA', 'REANI MEDIA', 'REDEMPTION', 'REEL', 'RELIANCE HOME VIDEO & GAMES', 'RELIANCE HOME VIDEO AND GAMES', 'RELIANCE HOME VIDEO', 'RELIANCE VIDEO', 'RELIANCE HOME', 'RELIANCE', 'REM CULTURE', 'REMAIN IN LIGHT', 'REPRISE', 'RESEN', 'RETROMEDIA', 'REVELATION FILMS LTD.', 'REVELATION FILMS LTD', 'REVELATION FILMS', 'REVELATION LTD.', 'REVELATION LTD', 'REVELATION', 'REVOLVER ENTERTAINMENT', 'REVOLVER', 'RHINO MUSIC', 'RHINO', 'RHV', 'RIGHT STUF', 'RIMINI EDITIONS', 'RISING SUN MEDIA', 'RLJ ENTERTAINMENT', 'RLJ', 'ROADRUNNER RECORDS', 'ROADSHOW ENTERTAINMENT', 'ROADSHOW', 'RONE', 'RONIN FLIX', 'ROTANA HOME ENTERTAINMENT', 'ROTANA ENTERTAINMENT', 'ROTANA HOME', 'ROTANA', 'ROUGH TRADE', + 'ROUNDER', 'SAFFRON HILL FILMS', 'SAFFRON HILL', 'SAFFRON', 'SAMUEL GOLDWYN FILMS', 'SAMUEL GOLDWYN', 'SAN FRANCISCO SYMPHONY', 'SANDREW METRONOME', 'SAPHRANE', 'SAVOR', 'SCANBOX ENTERTAINMENT', 'SCANBOX', 'SCENIC LABS', 'SCHRƖDERMEDIA', 'SCHRODERMEDIA', 'SCHRODER MEDIA', 'SCORPION RELEASING', 'SCORPION', 'SCREAM TEAM RELEASING', 'SCREAM TEAM', 'SCREEN MEDIA', 'SCREEN', 'SCREENBOUND PICTURES', 'SCREENBOUND', 'SCREENWAVE MEDIA', 'SCREENWAVE', 'SECOND RUN', 'SECOND SIGHT', 'SEEDSMAN GROUP', 'SELECT VIDEO', 'SELECTA VISION', 'SENATOR', 'SENTAI FILMWORKS', 'SENTAI', 'SEVEN7', 'SEVERIN FILMS', 'SEVERIN', 'SEVILLE', 'SEYONS ENTERTAINMENT', 'SEYONS', 'SF STUDIOS', 'SGL ENTERTAINMENT', 'SGL', 'SHAMELESS', 'SHAMROCK MEDIA', 'SHAMROCK', 'SHANGHAI EPIC MUSIC ENTERTAINMENT', 'SHANGHAI EPIC ENTERTAINMENT', 'SHANGHAI EPIC MUSIC', 'SHANGHAI MUSIC ENTERTAINMENT', 'SHANGHAI ENTERTAINMENT', 'SHANGHAI MUSIC', 'SHANGHAI', 'SHEMAROO', 'SHOCHIKU', 'SHOCK', 'SHOGAKU KAN', 'SHOUT FACTORY', 'SHOUT! 
FACTORY', 'SHOUT', 'SHOUT!', 'SHOWBOX', 'SHOWTIME ENTERTAINMENT', 'SHOWTIME', 'SHRIEK SHOW', 'SHUDDER', 'SIDONIS', 'SIDONIS CALYSTA', 'SIGNAL ONE ENTERTAINMENT', 'SIGNAL ONE', 'SIGNATURE ENTERTAINMENT', 'SIGNATURE', 'SILVER VISION', 'SINISTER FILM', 'SINISTER', 'SIREN VISUAL ENTERTAINMENT', 'SIREN VISUAL', 'SIREN ENTERTAINMENT', 'SIREN', 'SKANI', 'SKY DIGI', + 'SLASHER // VIDEO', 'SLASHER / VIDEO', 'SLASHER VIDEO', 'SLASHER', 'SLOVAK FILM INSTITUTE', 'SLOVAK FILM', 'SFI', 'SM LIFE DESIGN GROUP', 'SMOOTH PICTURES', 'SMOOTH', 'SNAPPER MUSIC', 'SNAPPER', 'SODA PICTURES', 'SODA', 'SONO LUMINUS', 'SONY MUSIC', 'SONY PICTURES', 'SONY', 'SONY PICTURES CLASSICS', 'SONY CLASSICS', 'SOUL MEDIA', 'SOUL', 'SOULFOOD MUSIC DISTRIBUTION', 'SOULFOOD DISTRIBUTION', 'SOULFOOD MUSIC', 'SOULFOOD', 'SOYUZ', 'SPECTRUM', 'SPENTZOS FILM', 'SPENTZOS', 'SPIRIT ENTERTAINMENT', 'SPIRIT', 'SPIRIT MEDIA GMBH', 'SPIRIT MEDIA', 'SPLENDID ENTERTAINMENT', 'SPLENDID FILM', 'SPO', 'SQUARE ENIX', 'SRI BALAJI VIDEO', 'SRI BALAJI', 'SRI', 'SRI VIDEO', 'SRS CINEMA', 'SRS', 'SSO RECORDINGS', 'SSO', 'ST2 MUSIC', 'ST2', 'STAR MEDIA ENTERTAINMENT', 'STAR ENTERTAINMENT', 'STAR MEDIA', 'STAR', 'STARLIGHT', 'STARZ / ANCHOR BAY', 'STARZ ANCHOR BAY', 'STARZ', 'ANCHOR BAY', 'STER KINEKOR', 'STERLING ENTERTAINMENT', 'STERLING', 'STINGRAY', 'STOCKFISCH RECORDS', 'STOCKFISCH', 'STRAND RELEASING', 'STRAND', 'STUDIO 4K', 'STUDIO CANAL', 'STUDIO GHIBLI', 'GHIBLI', 'STUDIO HAMBURG ENTERPRISES', 'HAMBURG ENTERPRISES', 'STUDIO HAMBURG', 'HAMBURG', 'STUDIO S', 'SUBKULTUR ENTERTAINMENT', 'SUBKULTUR', 'SUEVIA FILMS', 'SUEVIA', 'SUMMIT ENTERTAINMENT', 'SUMMIT', 'SUNFILM ENTERTAINMENT', 'SUNFILM', 'SURROUND RECORDS', 'SURROUND', 'SVENSK FILMINDUSTRI', 'SVENSK', 'SWEN FILMES', 'SWEN FILMS', 'SWEN', 'SYNAPSE FILMS', 'SYNAPSE', 'SYNDICADO', 'SYNERGETIC', 'T- SERIES', 'T-SERIES', 'T SERIES', 'TSERIES', 'T.V.P.', 'TVP', 'TACET RECORDS', 'TACET', 'TAI SENG', 'TAI SHENG', 'TAKEONE', 'TAKESHOBO', 'TAMASA DIFFUSION', 'TC ENTERTAINMENT', 'TC', 'TDK', 'TEAM MARKETING', 'TEATRO REAL', 'TEMA DISTRIBUCIONES', 'TEMPE DIGITAL', 'TF1 VIDƉO', 'TF1 VIDEO', 'TF1', 'THE BLU', 'BLU', 'THE ECSTASY OF FILMS', 'THE FILM DETECTIVE', 'FILM DETECTIVE', 'THE JOKERS', 'JOKERS', 'THE ON', 'ON', 'THIMFILM', 'THIM FILM', 'THIM', 'THIRD WINDOW FILMS', 'THIRD WINDOW', '3RD WINDOW FILMS', '3RD WINDOW', 'THUNDERBEAN ANIMATION', 'THUNDERBEAN', 'THUNDERBIRD RELEASING', 'THUNDERBIRD', 'TIBERIUS FILM', 'TIME LIFE', 'TIMELESS MEDIA GROUP', 'TIMELESS MEDIA', 'TIMELESS GROUP', 'TIMELESS', 'TLA RELEASING', 'TLA', 'TOBIS FILM', 'TOBIS', 'TOEI', 'TOHO', 'TOKYO SHOCK', 'TOKYO', 'TONPOOL MEDIEN GMBH', 'TONPOOL MEDIEN', 'TOPICS ENTERTAINMENT', 'TOPICS', 'TOUCHSTONE PICTURES', 'TOUCHSTONE', 'TRANSMISSION FILMS', 'TRANSMISSION', 'TRAVEL VIDEO STORE', 'TRIART', 'TRIGON FILM', 'TRIGON', 'TRINITY HOME ENTERTAINMENT', 'TRINITY ENTERTAINMENT', 'TRINITY HOME', 'TRINITY', 'TRIPICTURES', 'TRI-PICTURES', 'TRI PICTURES', 'TROMA', 'TURBINE MEDIEN', 'TURTLE RECORDS', 'TURTLE', 'TVA FILMS', 'TVA', 'TWILIGHT TIME', 'TWILIGHT', 'TT', 'TWIN CO., LTD.', 'TWIN CO, LTD.', 'TWIN CO., LTD', 'TWIN CO, LTD', 'TWIN CO LTD', 'TWIN LTD', 'TWIN CO.', 'TWIN CO', 'TWIN', 'UCA', 'UDR', 'UEK', 'UFA/DVD', 'UFA DVD', 'UFADVD', 'UGC PH', 'ULTIMATE3DHEAVEN', 'ULTRA', 'UMBRELLA ENTERTAINMENT', 'UMBRELLA', 'UMC', "UNCORK'D ENTERTAINMENT", 'UNCORKD ENTERTAINMENT', 'UNCORK D ENTERTAINMENT', "UNCORK'D", 'UNCORK D', 'UNCORKD', 'UNEARTHED FILMS', 'UNEARTHED', 'UNI DISC', 'UNIMUNDOS', 'UNITEL', 'UNIVERSAL MUSIC', 'UNIVERSAL SONY PICTURES HOME 
ENTERTAINMENT', 'UNIVERSAL SONY PICTURES ENTERTAINMENT', 'UNIVERSAL SONY PICTURES HOME', 'UNIVERSAL SONY PICTURES', 'UNIVERSAL HOME ENTERTAINMENT', 'UNIVERSAL ENTERTAINMENT', + 'UNIVERSAL HOME', 'UNIVERSAL STUDIOS', 'UNIVERSAL', 'UNIVERSE LASER & VIDEO CO.', 'UNIVERSE LASER AND VIDEO CO.', 'UNIVERSE LASER & VIDEO CO', 'UNIVERSE LASER AND VIDEO CO', 'UNIVERSE LASER CO.', 'UNIVERSE LASER CO', 'UNIVERSE LASER', 'UNIVERSUM FILM', 'UNIVERSUM', 'UTV', 'VAP', 'VCI', 'VENDETTA FILMS', 'VENDETTA', 'VERSƁTIL HOME VIDEO', 'VERSƁTIL VIDEO', 'VERSƁTIL HOME', 'VERSƁTIL', 'VERSATIL HOME VIDEO', 'VERSATIL VIDEO', 'VERSATIL HOME', 'VERSATIL', 'VERTICAL ENTERTAINMENT', 'VERTICAL', 'VƉRTICE 360Āŗ', 'VƉRTICE 360', 'VERTICE 360o', 'VERTICE 360', 'VERTIGO BERLIN', 'VƉRTIGO FILMS', 'VƉRTIGO', 'VERTIGO FILMS', 'VERTIGO', 'VERVE PICTURES', 'VIA VISION ENTERTAINMENT', 'VIA VISION', 'VICOL ENTERTAINMENT', 'VICOL', 'VICOM', 'VICTOR ENTERTAINMENT', 'VICTOR', 'VIDEA CDE', 'VIDEO FILM EXPRESS', 'VIDEO FILM', 'VIDEO EXPRESS', 'VIDEO MUSIC, INC.', 'VIDEO MUSIC, INC', 'VIDEO MUSIC INC.', 'VIDEO MUSIC INC', 'VIDEO MUSIC', 'VIDEO SERVICE CORP.', 'VIDEO SERVICE CORP', 'VIDEO SERVICE', 'VIDEO TRAVEL', 'VIDEOMAX', 'VIDEO MAX', 'VII PILLARS ENTERTAINMENT', 'VII PILLARS', 'VILLAGE FILMS', 'VINEGAR SYNDROME', 'VINEGAR', 'VS', 'VINNY MOVIES', 'VINNY', 'VIRGIL FILMS & ENTERTAINMENT', 'VIRGIL FILMS AND ENTERTAINMENT', 'VIRGIL ENTERTAINMENT', 'VIRGIL FILMS', 'VIRGIL', 'VIRGIN RECORDS', 'VIRGIN', 'VISION FILMS', 'VISION', 'VISUAL ENTERTAINMENT GROUP', 'VISUAL GROUP', 'VISUAL ENTERTAINMENT', 'VISUAL', 'VIVENDI VISUAL ENTERTAINMENT', 'VIVENDI VISUAL', 'VIVENDI', 'VIZ PICTURES', 'VIZ', 'VLMEDIA', 'VL MEDIA', 'VL', 'VOLGA', 'VVS FILMS', 'VVS', 'VZ HANDELS GMBH', 'VZ HANDELS', 'WARD RECORDS', 'WARD', 'WARNER BROS.', 'WARNER BROS', 'WARNER ARCHIVE', 'WARNER ARCHIVE COLLECTION', 'WAC', 'WARNER', 'WARNER MUSIC', 'WEA', 'WEINSTEIN COMPANY', 'WEINSTEIN', 'WELL GO USA', 'WELL GO', 'WELTKINO FILMVERLEIH', 'WEST VIDEO', 'WEST', 'WHITE PEARL MOVIES', 'WHITE PEARL', 'WICKED-VISION MEDIA', 'WICKED VISION MEDIA', 'WICKEDVISION MEDIA', 'WICKED-VISION', 'WICKED VISION', 'WICKEDVISION', 'WIENERWORLD', 'WILD BUNCH', 'WILD EYE RELEASING', 'WILD EYE', 'WILD SIDE VIDEO', 'WILD SIDE', 'WME', 'WOLFE VIDEO', 'WOLFE', 'WORD ON FIRE', 'WORKS FILM GROUP', 'WORLD WRESTLING', 'WVG MEDIEN', 'WWE STUDIOS', 'WWE', 'X RATED KULT', 'X-RATED KULT', 'X RATED CULT', 'X-RATED CULT', 'X RATED', 'X-RATED', 'XCESS', 'XLRATOR', 'XT VIDEO', 'XT', 'YAMATO VIDEO', 'YAMATO', 'YASH RAJ FILMS', 'YASH RAJS', 'ZEITGEIST FILMS', 'ZEITGEIST', 'ZENITH PICTURES', 'ZENITH', 'ZIMA', 'ZYLO', 'ZYX MUSIC', 'ZYX', 'MASTERS OF CINEMA', 'MOC' ] @@ -1808,13 +3003,12 @@ def get_distributor(self, distributor_in): distributor_out = each return distributor_out - def get_video_codec(self, bdinfo): codecs = { - "MPEG-2 Video" : "MPEG-2", - "MPEG-4 AVC Video" : "AVC", - "MPEG-H HEVC Video" : "HEVC", - "VC-1 Video" : "VC-1" + "MPEG-2 Video": "MPEG-2", + "MPEG-4 AVC Video": "AVC", + "MPEG-H HEVC Video": "HEVC", + "VC-1 Video": "VC-1" } codec = codecs.get(bdinfo['video'][0]['codec'], "") return codec @@ -1830,21 +3024,25 @@ def get_video_encode(self, mi, type, bdinfo): if mi['media']['track'][1].get('Encoded_Library_Settings', None): has_encode_settings = True bit_depth = mi['media']['track'][1].get('BitDepth', '0') - except: + except Exception: format = bdinfo['video'][0]['codec'] format_profile = bdinfo['video'][0]['profile'] - if type in ("ENCODE", "WEBRIP"): #ENCODE or WEBRIP + if type in ("ENCODE", 
"WEBRIP", "DVDRIP"): # ENCODE or WEBRIP or DVDRIP if format == 'AVC': codec = 'x264' elif format == 'HEVC': codec = 'x265' - elif type in ('WEBDL', 'HDTV'): #WEB-DL + elif format == 'AV1': + codec = 'AV1' + elif type in ('WEBDL', 'HDTV'): # WEB-DL if format == 'AVC': codec = 'H.264' elif format == 'HEVC': codec = 'H.265' - - if type == 'HDTV' and has_encode_settings == True: + elif format == 'AV1': + codec = 'AV1' + + if type == 'HDTV' and has_encode_settings is True: codec = codec.replace('H.', 'x') elif format == "VP9": codec = "VP9" @@ -1860,157 +3058,258 @@ def get_video_encode(self, mi, type, bdinfo): video_codec = f"MPEG-{mi['media']['track'][1].get('Format_Version')}" return video_encode, video_codec, has_encode_settings, bit_depth - def get_edition(self, video, bdinfo, filelist, manual_edition): if video.lower().startswith('dc'): video = video.replace('dc', '', 1) + guess = guessit(video) tag = guess.get('release_group', 'NOGROUP') repack = "" edition = "" - if bdinfo != None: + + if bdinfo is not None: try: edition = guessit(bdinfo['label'])['edition'] - except: + except Exception as e: + print(f"BDInfo Edition Guess Error: {e}") edition = "" else: try: - edition = guess['edition'] - except: + edition = guess.get('edition', "") + except Exception as e: + print(f"Video Edition Guess Error: {e}") edition = "" + if isinstance(edition, list): - # time.sleep(2) edition = " ".join(edition) + if len(filelist) == 1: video = os.path.basename(video) - video = video.upper().replace('.', ' ').replace(tag, '').replace('-', '') + video = video.upper().replace('.', ' ').replace(tag.upper(), '').replace('-', '') if "OPEN MATTE" in video: - edition = edition + "Open Matte" + edition = edition + " Open Matte" - if manual_edition != None: + if manual_edition: if isinstance(manual_edition, list): manual_edition = " ".join(manual_edition) edition = str(manual_edition) - - if " REPACK " in (video or edition) or "V2" in video: + edition = edition.replace(",", " ") + + # print(f"Edition After Manual Edition: {edition}") + + if "REPACK" in (video or edition.upper()) or "V2" in video: repack = "REPACK" - if " REPACK2 " in (video or edition) or "V3" in video: + if "REPACK2" in (video or edition.upper()) or "V3" in video: repack = "REPACK2" - if " REPACK3 " in (video or edition) or "V4" in video: + if "REPACK3" in (video or edition.upper()) or "V4" in video: repack = "REPACK3" - if " PROPER " in (video or edition): + if "PROPER" in (video or edition.upper()): repack = "PROPER" - if " RERIP " in (video.upper() or edition): + if "RERIP" in (video or edition.upper()): repack = "RERIP" - # if "HYBRID" in video.upper() and "HYBRID" not in title.upper(): - # edition = "Hybrid " + edition - edition = re.sub("(REPACK\d?)?(RERIP)?(PROPER)?", "", edition, flags=re.IGNORECASE).strip() + + # print(f"Repack after Checks: {repack}") + + # Only remove REPACK, RERIP, or PROPER from edition if they're not part of manual_edition + if not manual_edition or all(tag.lower() not in ['repack', 'repack2', 'repack3', 'proper', 'rerip'] for tag in manual_edition.strip().lower().split()): + edition = re.sub(r"(\bREPACK\d?\b|\bRERIP\b|\bPROPER\b)", "", edition, flags=re.IGNORECASE).strip() + print(f"Final Edition: {edition}") bad = ['internal', 'limited', 'retail'] if edition.lower() in bad: - edition = "" - # try: - # other = guess['other'] - # except: - # other = "" - # if " 3D " in other: - # edition = edition + " 3D " - # if edition == None or edition == None: - # edition = "" + edition = re.sub(r'\b(?:' + '|'.join(bad) + r')\b', 
'', edition, flags=re.IGNORECASE).strip() + return edition, repack + """ + Create Torrent + """ + class CustomTorrent(torf.Torrent): + # Default piece size limits + torf.Torrent.piece_size_min = 16384 # 16 KiB + torf.Torrent.piece_size_max = 268435456 # 256 MiB + def __init__(self, meta, *args, **kwargs): + super().__init__(*args, **kwargs) + # Override piece_size_max if meta['max_piece_size'] is specified + if 'max_piece_size' in meta and meta['max_piece_size']: + try: + max_piece_size_mib = int(meta['max_piece_size']) * 1024 * 1024 # Convert MiB to bytes + self.piece_size_max = min(max_piece_size_mib, torf.Torrent.piece_size_max) + except ValueError: + self.piece_size_max = torf.Torrent.piece_size_max # Fallback to default if conversion fails + else: + self.piece_size_max = torf.Torrent.piece_size_max + + # Calculate and set the piece size + # total_size = self._calculate_total_size() + # piece_size = self.calculate_piece_size(total_size, self.piece_size_min, self.piece_size_max, self.files) + self.metainfo['info']['piece length'] = self._piece_size + + @property + def piece_size(self): + return self._piece_size + + @piece_size.setter + def piece_size(self, value): + if value is None: + total_size = self._calculate_total_size() + value = self.calculate_piece_size(total_size, self.piece_size_min, self.piece_size_max, self.files) + self._piece_size = value + self.metainfo['info']['piece length'] = value # Ensure 'piece length' is set + + @classmethod + def calculate_piece_size(cls, total_size, min_size, max_size, files): + file_count = len(files) + # console.print(f"[red]Calculating piece size for {file_count} files") + + our_min_size = 16384 + our_max_size = max_size if max_size else 268435456 # Default to 256 MiB if max_size is None + piece_size = 4194304 # Start with 4 MiB + + num_pieces = math.ceil(total_size / piece_size) + + # Initial torrent_file_size calculation based on file_count + # More paths = greater error in pathname_bytes, roughly recalibrate + if file_count > 1000: + torrent_file_size = 20 + (num_pieces * 20) + int(cls._calculate_pathname_bytes(files) * 71 / 100) + elif file_count > 500: + torrent_file_size = 20 + (num_pieces * 20) + int(cls._calculate_pathname_bytes(files) * 4 / 5) + else: + torrent_file_size = 20 + (num_pieces * 20) + cls._calculate_pathname_bytes(files) + + # iteration = 0 # Track the number of iterations + # print(f"Initial piece size: {piece_size} bytes") + # print(f"Initial num_pieces: {num_pieces}, Initial torrent_file_size: {torrent_file_size} bytes") + + # Adjust the piece size to fit within the constraints + while not ((750 <= num_pieces <= 2200 or num_pieces < 750 and 40960 <= torrent_file_size <= 250000) and torrent_file_size <= 250000): + # iteration += 1 + # print(f"\nIteration {iteration}:") + # print(f"Current piece_size: {piece_size} bytes") + # print(f"Current num_pieces: {num_pieces}, Current torrent_file_size: {torrent_file_size} bytes") + if num_pieces > 1000 and num_pieces < 2000 and torrent_file_size < 250000: + break + elif num_pieces < 1500 and torrent_file_size >= 250000: + piece_size *= 2 + # print(f"Doubled piece_size to {piece_size} bytes (num_pieces < 1500 and torrent_file_size >= 250 KiB)") + if piece_size > our_max_size: + piece_size = our_max_size + # print(f"piece_size exceeded max_size, set to our_max_size: {our_max_size} bytes") + break + elif num_pieces < 750: + piece_size //= 2 + # print(f"Halved piece_size to {piece_size} bytes (num_pieces < 750)") + if piece_size < our_min_size: + piece_size = our_min_size + # 
print(f"piece_size went below min_size, set to our_min_size: {our_min_size} bytes") + break + elif 40960 < torrent_file_size < 250000: + # print(f"torrent_file_size is between 40 KiB and 250 KiB, exiting loop.") + break + elif num_pieces > 2200: + piece_size *= 2 + # print(f"Doubled piece_size to {piece_size} bytes (num_pieces > 2500)") + if piece_size > our_max_size: + piece_size = our_max_size + # print(f"piece_size exceeded max_size, set to our_max_size: {our_max_size} bytes") + break + elif torrent_file_size < 2048: + # print(f"torrent_file_size is less than 2 KiB, exiting loop.") + break + elif torrent_file_size > 250000: + piece_size *= 2 + # print(f"Doubled piece_size to {piece_size} bytes (torrent_file_size > 250 KiB)") + if piece_size > our_max_size: + piece_size = our_max_size + # print(f"piece_size exceeded max_size, set to our_max_size: {our_max_size} bytes") + cli_ui.warning('WARNING: .torrent size will exceed 250 KiB!') + break + # Update num_pieces + num_pieces = math.ceil(total_size / piece_size) - """ - Create Torrent - """ - def create_torrent(self, meta, path, output_filename, piece_size_max): - piece_size_max = int(piece_size_max) if piece_size_max is not None else 0 - if meta['isdir'] == True: - os.chdir(path) - globs = glob.glob1(path, "*.mkv") + glob.glob1(path, "*.mp4") + glob.glob1(path, "*.ts") - no_sample_globs = [] - for file in globs: - if not file.lower().endswith('sample.mkv') or "!sample" in file.lower(): - no_sample_globs.append(os.path.abspath(f"{path}{os.sep}{file}")) - if len(no_sample_globs) == 1: - path = meta['filelist'][0] + # Recalculate torrent_file_size based on file_count in each iteration + if file_count > 1000: + torrent_file_size = 20 + (num_pieces * 20) + int(cls._calculate_pathname_bytes(files) * 71 / 100) + elif file_count > 500: + torrent_file_size = 20 + (num_pieces * 20) + int(cls._calculate_pathname_bytes(files) * 4 / 5) + else: + torrent_file_size = 20 + (num_pieces * 20) + cls._calculate_pathname_bytes(files) + + # print(f"\nFinal piece_size: {piece_size} bytes after {iteration} iterations.") + # print(f"Final num_pieces: {num_pieces}, Final torrent_file_size: {torrent_file_size} bytes") + return piece_size + + def _calculate_total_size(self): + total_size = sum(file.size for file in self.files) + return total_size + + @classmethod + def _calculate_pathname_bytes(cls, files): + total_pathname_bytes = sum(len(str(file).encode('utf-8')) for file in files) + return total_pathname_bytes + + def validate_piece_size(self): + if not hasattr(self, '_piece_size') or self._piece_size is None: + self.piece_size = self.calculate_piece_size(self._calculate_total_size(), self.piece_size_min, self.piece_size_max, self.files) + self.metainfo['info']['piece length'] = self.piece_size # Ensure 'piece length' is set + + def create_torrent(self, meta, path, output_filename): + # Handle directories and file inclusion logic + if meta['isdir']: + if meta['keep_folder']: + cli_ui.info('--keep-folder was specified. 
Using complete folder for torrent creation.')
+                path = path
+            else:
+                os.chdir(path)
+                globs = glob.glob1(path, "*.mkv") + glob.glob1(path, "*.mp4") + glob.glob1(path, "*.ts")
+                no_sample_globs = []
+                for file in globs:
+                    if not file.lower().endswith('sample.mkv') or "!sample" in file.lower():
+                        no_sample_globs.append(os.path.abspath(f"{path}{os.sep}{file}"))
+                if len(no_sample_globs) == 1:
+                    path = meta['filelist'][0]
         if meta['is_disc']:
             include, exclude = "", ""
         else:
-            exclude = ["*.*", "*sample.mkv", "!sample*.*"]
+            exclude = ["*.*", "*sample.mkv", "!sample*.*"]
             include = ["*.mkv", "*.mp4", "*.ts"]
-        torrent = Torrent(path,
-            trackers = ["https://fake.tracker"],
-            source = "L4G",
-            private = True,
-            exclude_globs = exclude or [],
-            include_globs = include or [],
-            creation_date = datetime.now(),
-            comment = "Created by L4G's Upload Assistant",
-            created_by = "L4G's Upload Assistant")
-        file_size = torrent.size
-        if file_size < 268435456: # 256 MiB File / 256 KiB Piece Size
-            piece_size = 18
-            piece_size_text = "256KiB"
-        elif file_size < 1073741824: # 1 GiB File/512 KiB Piece Size
-            piece_size = 19
-            piece_size_text = "512KiB"
-        elif file_size < 2147483648 or piece_size_max == 1: # 2 GiB File/1 MiB Piece Size
-            piece_size = 20
-            piece_size_text = "1MiB"
-        elif file_size < 4294967296 or piece_size_max == 2: # 4 GiB File/2 MiB Piece Size
-            piece_size = 21
-            piece_size_text = "2MiB"
-        elif file_size < 8589934592 or piece_size_max == 4: # 8 GiB File/4 MiB Piece Size
-            piece_size = 22
-            piece_size_text = "4MiB"
-        elif file_size < 17179869184 or piece_size_max == 8: # 16 GiB File/8 MiB Piece Size
-            piece_size = 23
-            piece_size_text = "8MiB"
-        else: # 16MiB Piece Size
-            piece_size = 24
-            piece_size_text = "16MiB"
-        console.print(f"[bold yellow]Creating .torrent with a piece size of {piece_size_text}... 
(No valid --torrenthash was provided to reuse)") - if meta.get('torrent_creation') != None: - torrent_creation = meta['torrent_creation'] - else: - torrent_creation = self.config['DEFAULT'].get('torrent_creation', 'torf') - if torrent_creation == 'torrenttools': - args = ['torrenttools', 'create', '-a', 'https://fake.tracker', '--private', 'on', '--piece-size', str(2**piece_size), '--created-by', "L4G's Upload Assistant", '--no-cross-seed','-o', f"{meta['base_dir']}/tmp/{meta['uuid']}/{output_filename}.torrent"] - if not meta['is_disc']: - args.extend(['--include', '^.*\.(mkv|mp4|ts)$']) - args.append(path) - err = subprocess.call(args) - if err != 0: - args[3] = "OMITTED" - console.print(f"[bold red]Process execution {args} returned with error code {err}.") - elif torrent_creation == 'mktorrent': - args = ['mktorrent', '-a', 'https://fake.tracker', '-p', f'-l {piece_size}', '-o', f"{meta['base_dir']}/tmp/{meta['uuid']}/{output_filename}.torrent", path] - err = subprocess.call(args) - if err != 0: - args[2] = "OMITTED" - console.print(f"[bold red]Process execution {args} returned with error code {err}.") - else: - torrent.piece_size = 2**piece_size - torrent.piece_size_max = 16777216 - torrent.generate(callback=self.torf_cb, interval=5) - torrent.write(f"{meta['base_dir']}/tmp/{meta['uuid']}/{output_filename}.torrent", overwrite=True) - torrent.verify_filesize(path) + + # Create and write the new torrent using the CustomTorrent class + torrent = self.CustomTorrent( + meta=meta, + path=path, + trackers=["https://fake.tracker"], + source="L4G", + private=True, + exclude_globs=exclude or [], + include_globs=include or [], + creation_date=datetime.now(), + comment="Created by L4G's Upload Assistant", + created_by="L4G's Upload Assistant" + ) + + # Ensure piece size is validated before writing + torrent.validate_piece_size() + + # Generate and write the new torrent + torrent.generate(callback=self.torf_cb, interval=5) + torrent.write(f"{meta['base_dir']}/tmp/{meta['uuid']}/{output_filename}.torrent", overwrite=True) + torrent.verify_filesize(path) + console.print("[bold green].torrent created", end="\r") return torrent - def torf_cb(self, torrent, filepath, pieces_done, pieces_total): # print(f'{pieces_done/pieces_total*100:3.0f} % done') cli_ui.info_progress("Hashing...", pieces_done, pieces_total) def create_random_torrents(self, base_dir, uuid, num, path): - manual_name = re.sub("[^0-9a-zA-Z\[\]\'\-]+", ".", os.path.basename(path)) + manual_name = re.sub(r"[^0-9a-zA-Z\[\]\'\-]+", ".", os.path.basename(path)) base_torrent = Torrent.read(f"{base_dir}/tmp/{uuid}/BASE.torrent") for i in range(1, int(num) + 1): new_torrent = base_torrent @@ -2020,11 +3319,10 @@ def create_random_torrents(self, base_dir, uuid, num, path): def create_base_from_existing_torrent(self, torrentpath, base_dir, uuid): if os.path.exists(torrentpath): base_torrent = Torrent.read(torrentpath) - base_torrent.creation_date = datetime.now() base_torrent.trackers = ['https://fake.tracker'] base_torrent.comment = "Created by L4G's Upload Assistant" base_torrent.created_by = "Created by L4G's Upload Assistant" - #Remove Un-whitelisted info from torrent + # Remove Un-whitelisted info from torrent for each in list(base_torrent.metainfo['info']): if each not in ('files', 'length', 'name', 'piece length', 'pieces', 'private', 'source'): base_torrent.metainfo['info'].pop(each, None) @@ -2035,193 +3333,381 @@ def create_base_from_existing_torrent(self, torrentpath, base_dir, uuid): base_torrent.private = True 
Torrent.copy(base_torrent).write(f"{base_dir}/tmp/{uuid}/BASE.torrent", overwrite=True) - - - """ Upload Screenshots """ - def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_img_list, return_dict): - # if int(total_screens) != 0 or len(meta.get('image_list', [])) > total_screens: - # if custom_img_list == []: - # console.print('[yellow]Uploading Screens') + def upload_image_task(self, args): + image, img_host, config, meta = args + try: + timeout = 60 # Default timeout + img_url, raw_url, web_url = None, None, None + + if img_host == "imgbox": + try: + # Call the asynchronous imgbox_upload function + loop = asyncio.get_event_loop() + image_list = loop.run_until_complete( + self.imgbox_upload(os.getcwd(), [image], meta, return_dict={}) + ) + if image_list and all( + 'img_url' in img and 'raw_url' in img and 'web_url' in img for img in image_list + ): + img_url = image_list[0]['img_url'] + raw_url = image_list[0]['raw_url'] + web_url = image_list[0]['web_url'] + else: + return { + 'status': 'failed', + 'reason': "Imgbox upload failed. No valid URLs returned." + } + except Exception as e: + return { + 'status': 'failed', + 'reason': f"Error during Imgbox upload: {str(e)}" + } + + elif img_host == "ptpimg": + payload = { + 'format': 'json', + 'api_key': config['DEFAULT']['ptpimg_api'] + } + files = [('file-upload[0]', open(image, 'rb'))] + headers = {'referer': 'https://ptpimg.me/index.php'} + response = requests.post( + "https://ptpimg.me/upload.php", headers=headers, data=payload, files=files, timeout=timeout + ) + response_data = response.json() + if response_data: + code = response_data[0]['code'] + ext = response_data[0]['ext'] + img_url = f"https://ptpimg.me/{code}.{ext}" + raw_url = img_url + web_url = img_url + + elif img_host == "imgbb": + url = "https://api.imgbb.com/1/upload" + try: + with open(image, "rb") as img_file: + encoded_image = base64.b64encode(img_file.read()).decode('utf8') + + data = { + 'key': config['DEFAULT']['imgbb_api'], + 'image': encoded_image, + } + + response = requests.post(url, data=data, timeout=timeout) + + if meta['debug']: + console.print(f"[yellow]Response status code: {response.status_code}") + console.print(f"[yellow]Response content: {response.content.decode('utf-8')}") + + response_data = response.json() + if response.status_code != 200 or not response_data.get('success'): + console.print("[yellow]imgbb failed, trying next image host") + return {'status': 'failed', 'reason': 'imgbb upload failed'} + + img_url = response_data['data'].get('medium', {}).get('url') or response_data['data']['thumb']['url'] + raw_url = response_data['data']['image']['url'] + web_url = response_data['data']['url_viewer'] + + if meta['debug']: + console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}") + + return {'status': 'success', 'img_url': img_url, 'raw_url': raw_url, 'web_url': web_url} + + except requests.exceptions.Timeout: + console.print("[red]Request timed out. 
The server took too long to respond.")
+                    return {'status': 'failed', 'reason': 'Request timed out'}
+
+                except ValueError as e: # JSON decoding error
+                    console.print(f"[red]Invalid JSON response: {e}")
+                    return {'status': 'failed', 'reason': 'Invalid JSON response'}
+
+                except requests.exceptions.RequestException as e:
+                    console.print(f"[red]Request failed with error: {e}")
+                    return {'status': 'failed', 'reason': str(e)}
+
+            elif img_host == "ptscreens":
+                url = "https://ptscreens.com/api/1/upload"
+                try:
+                    files = {
+                        'source': ('file-upload[0]', open(image, 'rb')),
+                    }
+                    headers = {
+                        'X-API-Key': config['DEFAULT']['ptscreens_api']
+                    }
+                    response = requests.post(url, headers=headers, files=files, timeout=timeout)
+                    if meta['debug']:
+                        console.print(f"[yellow]Response status code: {response.status_code}")
+                        console.print(f"[yellow]Response content: {response.content.decode('utf-8')}")
+
+                    response_data = response.json()
+                    if response_data.get('status_code') != 200:
+                        console.print("[yellow]ptscreens failed, trying next image host")
+                        return {'status': 'failed', 'reason': 'ptscreens upload failed'}
+
+                    img_url = response_data['image']['medium']['url']
+                    raw_url = response_data['image']['url']
+                    web_url = response_data['image']['url_viewer']
+                    if meta['debug']:
+                        console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}")
+
+                except requests.exceptions.Timeout:
+                    console.print("[red]Request timed out. The server took too long to respond.")
+                    return {'status': 'failed', 'reason': 'Request timed out'}
+                except requests.exceptions.RequestException as e:
+                    console.print(f"[red]Request failed with error: {e}")
+                    return {'status': 'failed', 'reason': str(e)}
+
+            elif img_host == "oeimg":
+                url = "https://imgoe.download/api/1/upload"
+                try:
+                    data = {
+                        'image': base64.b64encode(open(image, "rb").read()).decode('utf8')
+                    }
+                    headers = {
+                        'X-API-Key': self.config['DEFAULT']['oeimg_api'],
+                    }
+                    response = requests.post(url, data=data, headers=headers, timeout=timeout)
+                    if meta['debug']:
+                        console.print(f"[yellow]Response status code: {response.status_code}")
+                        console.print(f"[yellow]Response content: {response.content.decode('utf-8')}")
+
+                    response_data = response.json()
+                    if response.status_code != 200 or not response_data.get('success'):
+                        console.print("[yellow]OEimg failed, trying next image host")
+                        return {'status': 'failed', 'reason': 'OEimg upload failed'}
+
+                    img_url = response_data['data']['image']['url']
+                    raw_url = response_data['data']['image']['url']
+                    web_url = response_data['data']['url_viewer']
+                    if meta['debug']:
+                        console.print(f"[green]Image URLs: img_url={img_url}, raw_url={raw_url}, web_url={web_url}")
+
+                except requests.exceptions.Timeout:
+                    console.print("[red]Request timed out. 
The server took too long to respond.") + return {'status': 'failed', 'reason': 'Request timed out'} + except requests.exceptions.RequestException as e: + console.print(f"[red]Request failed with error: {e}") + return {'status': 'failed', 'reason': str(e)} + + elif img_host == "pixhost": + url = "https://api.pixhost.to/images" + data = { + 'content_type': '0', + 'max_th_size': 350 + } + files = { + 'img': ('file-upload[0]', open(image, 'rb')) + } + response = requests.post(url, data=data, files=files, timeout=timeout) + response_data = response.json() + if response.status_code == 200: + raw_url = response_data['th_url'].replace('https://t', 'https://img').replace('/thumbs/', '/images/') + img_url = response_data['th_url'] + web_url = response_data['show_url'] + + elif img_host == "lensdump": + url = "https://lensdump.com/api/1/upload" + data = { + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + headers = { + 'X-API-Key': config['DEFAULT']['lensdump_api'] + } + response = requests.post(url, data=data, headers=headers, timeout=timeout) + response_data = response.json() + if response_data.get('status_code') == 200: + img_url = response_data['data']['image']['url'] + raw_url = response_data['data']['image']['url'] + web_url = response_data['data']['url_viewer'] + + if img_url and raw_url and web_url: + return { + 'status': 'success', + 'img_url': img_url, + 'raw_url': raw_url, + 'web_url': web_url, + 'local_file_path': image + } + else: + return { + 'status': 'failed', + 'reason': f"Failed to upload image to {img_host}. No URLs received." + } + + except Exception as e: + return { + 'status': 'failed', + 'reason': str(e) + } + + def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=False, max_retries=3): + def use_tqdm(): + """Check if the environment supports TTY (interactive progress bar).""" + return sys.stdout.isatty() + + if meta['debug']: + upload_start_time = time.time() + + import nest_asyncio + nest_asyncio.apply() os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") - img_host = self.config['DEFAULT'][f'img_host_{img_host_num}'] - if img_host != self.img_host and meta.get('imghost', None) == None: - img_host = self.img_host - i -= 1 - elif img_host_num == 1 and meta.get('imghost') != img_host: - img_host = meta.get('imghost') - img_host_num = 0 - image_list = [] - newhost_list = [] - if custom_img_list != []: + initial_img_host = self.config['DEFAULT'][f'img_host_{img_host_num}'] + img_host = meta['imghost'] + using_custom_img_list = isinstance(custom_img_list, list) and bool(custom_img_list) + + if 'image_sizes' not in meta: + meta['image_sizes'] = {} + + if using_custom_img_list: image_glob = custom_img_list existing_images = [] + existing_count = 0 else: image_glob = glob.glob("*.png") if 'POSTER.png' in image_glob: image_glob.remove('POSTER.png') - existing_images = meta.get('image_list', []) - if len(existing_images) < total_screens: - if img_host == 'imgbox': - nest_asyncio.apply() - console.print("[green]Uploading Screens to Imgbox...") - image_list = asyncio.run(self.imgbox_upload(f"{meta['base_dir']}/tmp/{meta['uuid']}", image_glob)) - if image_list == []: - if img_host_num == 0: - img_host_num = 1 - console.print("[yellow]Imgbox failed, trying next image host") - image_list, i = self.upload_screens(meta, screens - i , img_host_num + 1, i, total_screens, [], return_dict) - else: - with Progress( - TextColumn("[bold green]Uploading Screens..."), - BarColumn(), - 
"[cyan]{task.completed}/{task.total}", - TimeRemainingColumn() - ) as progress: - upload_task = progress.add_task(f"[green]Uploading Screens to {img_host}...", total = len(image_glob[-screens:])) - timeout=60 - for image in image_glob[-screens:]: - if img_host == "imgbb": - url = "https://api.imgbb.com/1/upload" - data = { - 'key': self.config['DEFAULT']['imgbb_api'], - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - try: - response = requests.post(url, data = data,timeout=timeout) - response = response.json() - if response.get('success') != True: - progress.console.print(response) - img_url = response['data'].get('medium', response['data']['image'])['url'] - web_url = response['data']['url_viewer'] - raw_url = response['data']['image']['url'] - except Exception: - progress.console.print("[yellow]imgbb failed, trying next image host") - progress.stop() - newhost_list, i = self.upload_screens(meta, screens - i , img_host_num + 1, i, total_screens, [], return_dict) - elif img_host == "freeimage.host": - progress.console.print("[red]Support for freeimage.host has been removed. Please remove from your config") - progress.console.print("continuing in 15 seconds") - time.sleep(15) - progress.stop() - newhost_list, i = self.upload_screens(meta, screens - i, img_host_num + 1, i, total_screens, [], return_dict) - elif img_host == "pixhost": - url = "https://api.pixhost.to/images" - data = { - 'content_type': '0', - 'max_th_size': 350, - } - files = { - 'img': ('file-upload[0]', open(image, 'rb')), - } - try: - response = requests.post(url, data=data, files=files,timeout=timeout) - if response.status_code != 200: - progress.console.print(response) - response = response.json() - raw_url = response['th_url'].replace('https://t', 'https://img').replace('/thumbs/', '/images/') - img_url = response['th_url'] - web_url = response['show_url'] - except Exception: - progress.console.print("[yellow]pixhost failed, trying next image host") - progress.stop() - newhost_list, i = self.upload_screens(meta, screens - i , img_host_num + 1, i, total_screens, [], return_dict) - elif img_host == "ptpimg": - payload = { - 'format' : 'json', - 'api_key' : self.config['DEFAULT']['ptpimg_api'] # API key is obtained from inspecting element on the upload page. 
- } - files = [('file-upload[0]', open(image, 'rb'))] - headers = { 'referer': 'https://ptpimg.me/index.php'} - url = "https://ptpimg.me/upload.php" - - # tasks.append(asyncio.ensure_future(self.upload_image(session, url, data, headers, files=None))) - try: - response = requests.post("https://ptpimg.me/upload.php", headers=headers, data=payload, files=files) - response = response.json() - ptpimg_code = response[0]['code'] - ptpimg_ext = response[0]['ext'] - img_url = f"https://ptpimg.me/{ptpimg_code}.{ptpimg_ext}" - web_url = f"https://ptpimg.me/{ptpimg_code}.{ptpimg_ext}" - raw_url = f"https://ptpimg.me/{ptpimg_code}.{ptpimg_ext}" - except: - progress.console.print("[yellow]ptpimg failed, trying next image host") - progress.stop() - newhost_list, i = self.upload_screens(meta, screens - i, img_host_num + 1, i, total_screens, [], return_dict) - elif img_host == "lensdump": - url = "https://lensdump.com/api/1/upload" - data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - headers = { - 'X-API-Key': self.config['DEFAULT']['lensdump_api'], - } - try: - response = requests.post(url, data=data, headers=headers, timeout=timeout) - response = response.json() - if response.get('status_code') != 200: - progress.console.print(response) - img_url = response['data'].get('medium', response['data']['image'])['url'] - web_url = response['data']['url_viewer'] - raw_url = response['data']['image']['url'] - except Exception: - progress.console.print("[yellow]lensdump failed, trying next image host") - progress.stop() - newhost_list, i = self.upload_screens(meta, screens - i , img_host_num + 1, i, total_screens, [], return_dict) - else: - console.print("[bold red]Please choose a supported image host in your config") - exit() + image_glob = list(set(image_glob)) + if meta['debug']: + console.print("image globs:", image_glob) + existing_images = [img for img in meta.get('image_list', []) if img.get('img_url') and img.get('web_url')] + existing_count = len(existing_images) - - if len(newhost_list) >=1: - image_list.extend(newhost_list) - else: - image_dict = {} - image_dict['web_url'] = web_url - image_dict['img_url'] = img_url - image_dict['raw_url'] = raw_url - image_list.append(image_dict) - # cli_ui.info_count(i, total_screens, "Uploaded") - progress.advance(upload_task) - i += 1 - time.sleep(0.5) - if i >= total_screens: - break - return_dict['image_list'] = image_list - return image_list, i + if not retry_mode: + images_needed = max(0, total_screens - existing_count) else: - return meta.get('image_list', []), total_screens - - async def imgbox_upload(self, chdir, image_glob): - os.chdir(chdir) - image_list = [] - # image_glob = glob.glob("*.png") - async with pyimgbox.Gallery(thumb_width=350, square_thumbs=False) as gallery: - async for submission in gallery.add(image_glob): - if not submission['success']: - console.print(f"[red]There was an error uploading to imgbox: [yellow]{submission['error']}[/yellow][/red]") - return [] + images_needed = total_screens + + if existing_count >= total_screens and not retry_mode and img_host == initial_img_host and not using_custom_img_list: + console.print(f"[yellow]Skipping upload because enough images are already uploaded to {img_host}. Existing images: {existing_count}, Required: {total_screens}") + return meta['image_list'], total_screens +
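+ # Pool sizing (sketch of the logic below): hosts with strict rate limits are + # capped via host_limits (ptscreens and lensdump are effectively serialized, + # oeimg allows up to 6 workers); everything else defaults to meta['task_limit'] + # or os.cpu_count(), and the pool never spawns more workers than there are images.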
+ upload_tasks = [(image, img_host, self.config, meta) for image in image_glob[:images_needed]] + + host_limits = { + "oeimg": 6, + "ptscreens": 1, + "lensdump": 1, + } + default_pool_size = int(meta.get('task_limit', os.cpu_count())) + pool_size = host_limits.get(img_host, default_pool_size) + + try: + with get_context("spawn").Pool(processes=max(1, min(len(upload_tasks), pool_size))) as pool: + if use_tqdm(): + try: + results = list( + tqdm( + pool.imap_unordered(self.upload_image_task, upload_tasks), + total=len(upload_tasks), + desc=f"Uploading Images to {img_host}", + ascii=True, + dynamic_ncols=False + ) + ) + finally: + pool.close() + pool.join() else: - image_dict = {} - image_dict['web_url'] = submission['web_url'] - image_dict['img_url'] = submission['thumbnail_url'] - image_dict['raw_url'] = submission['image_url'] - image_list.append(image_dict) - return image_list + console.print(f"[blue]Non-TTY environment detected. Progress bar disabled. Uploading images to {img_host}.") + results = [] + for i, result in enumerate(pool.imap_unordered(self.upload_image_task, upload_tasks), 1): + results.append(result) + console.print(f"Uploaded {i}/{len(upload_tasks)} images to {img_host}") + except KeyboardInterrupt: + console.print("[red]Upload process interrupted by user. Exiting...") + pool.terminate() + pool.join() + return meta['image_list'], len(meta['image_list']) + + successfully_uploaded = [] + for result in results: + if result['status'] == 'success': + successfully_uploaded.append(result) + else: + console.print(f"[yellow]Failed to upload: {result.get('reason', 'Unknown error')}") + + if len(successfully_uploaded) < meta.get('cutoff', 1) and not retry_mode and img_host == initial_img_host and not using_custom_img_list: + img_host_num += 1 + if f'img_host_{img_host_num}' in self.config['DEFAULT']: + meta['imghost'] = self.config['DEFAULT'][f'img_host_{img_host_num}'] + console.print(f"[cyan]Switching to the next image host: {meta['imghost']}") + return self.upload_screens(meta, screens, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=True) + else: + console.print("[red]No more image hosts available. Aborting upload process.") + return meta['image_list'], len(meta['image_list']) +
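+ # Results bookkeeping: successful uploads are folded back into meta['image_list'], + # de-duplicated by raw_url, and each file's on-disk size is cached in + # meta['image_sizes'] keyed by raw_url.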
+ new_images = [] + for upload in successfully_uploaded: + raw_url = upload['raw_url'] + new_image = { + 'img_url': upload['img_url'], + 'raw_url': raw_url, + 'web_url': upload['web_url'] + } + new_images.append(new_image) + if not using_custom_img_list and raw_url not in {img['raw_url'] for img in meta['image_list']}: + if meta['debug']: + console.print(f"[blue]Adding {raw_url} to image_list") + meta['image_list'].append(new_image) + local_file_path = upload.get('local_file_path') + if local_file_path: + image_size = os.path.getsize(local_file_path) + meta['image_sizes'][raw_url] = image_size + + console.print(f"[green]Successfully uploaded {len(new_images)} images.") + if meta['debug']: + upload_finish_time = time.time() + print(f"Screenshot uploads processed in {upload_finish_time - upload_start_time:.4f} seconds") + + if using_custom_img_list: + return new_images, len(new_images) + return meta['image_list'], len(successfully_uploaded) + async def imgbox_upload(self, chdir, image_glob, meta, return_dict): + try: + os.chdir(chdir) + image_list = [] + async with pyimgbox.Gallery(thumb_width=350, square_thumbs=False) as gallery: + for image in image_glob: + try: + async for submission in gallery.add([image]): + if not submission['success']: + console.print(f"[red]Error uploading to imgbox: [yellow]{submission['error']}[/yellow][/red]") + else: + web_url = submission.get('web_url') + img_url = submission.get('thumbnail_url') + raw_url = submission.get('image_url') + if web_url and img_url and raw_url: + image_dict = { + 'web_url': web_url, + 'img_url': img_url, + 'raw_url': raw_url + } + image_list.append(image_dict) + else: + console.print(f"[red]Incomplete URLs received for image: {image}") + except Exception as e: + console.print(f"[red]Error during upload for {image}: {str(e)}") + return_dict['image_list'] = image_list + return image_list + except Exception as e: + console.print(f"[red]An error occurred while uploading images to imgbox: {str(e)}") + return [] async def get_name(self, meta): - type = meta.get('type', "") - title = meta.get('title',"") + type = meta.get('type', "").upper() + title = meta.get('title', "") alt_title = meta.get('aka', "") year = meta.get('year', "") + if (meta.get('manual_year') or 0) > 0: + year = meta.get('manual_year') resolution = meta.get('resolution', "") if resolution == "OTHER": resolution = "" @@ -2236,8 +3722,11 @@ async def get_name(self, meta): source = meta.get('source', "") uhd = meta.get('uhd', "") hdr = meta.get('hdr', "") - episode_title = meta.get('episode_title', '') - if meta.get('is_disc', "") == "BDMV": #Disk + if meta.get('manual_episode_title'): + episode_title = meta.get('manual_episode_title') + else: + episode_title = meta.get('episode_title', '') + if meta.get('is_disc', "") == "BDMV": # Disk video_codec = meta.get('video_codec', "") region = meta.get('region', "") elif meta.get('is_disc', "") == "DVD": @@ -2253,51 +3742,58 @@ async def get_name(self, meta): year = meta['year'] else: year = "" - if meta.get('no_season', False) == True: + if meta.get('manual_date'): + # Ignore season and year for --daily flagged shows, just use manual date stored in episode_name + season = '' + episode = '' + if meta.get('no_season', False) is True: season = '' - if meta.get('no_year', False) == True: + if meta.get('no_year', False) is True: year = '' - if meta.get('no_aka', False) == True: + if meta.get('no_aka', False) is True: alt_title = '' if meta['debug']: 
console.log("[cyan]get_name cat/type") console.log(f"CATEGORY: {meta['category']}") console.log(f"TYPE: {meta['type']}") console.log("[cyan]get_name meta:") - console.log(meta) + # console.log(meta) - #YAY NAMING FUN - if meta['category'] == "MOVIE": #MOVIE SPECIFIC - if type == "DISC": #Disk + # YAY NAMING FUN + if meta['category'] == "MOVIE": # MOVIE SPECIFIC + if type == "DISC": # Disk if meta['is_disc'] == 'BDMV': name = f"{title} {alt_title} {year} {three_d} {edition} {repack} {resolution} {region} {uhd} {source} {hdr} {video_codec} {audio}" potential_missing = ['edition', 'region', 'distributor'] - elif meta['is_disc'] == 'DVD': + elif meta['is_disc'] == 'DVD': name = f"{title} {alt_title} {year} {edition} {repack} {source} {dvd_size} {audio}" potential_missing = ['edition', 'distributor'] elif meta['is_disc'] == 'HDDVD': name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {source} {video_codec} {audio}" potential_missing = ['edition', 'region', 'distributor'] - elif type == "REMUX" and source in ("BluRay", "HDDVD"): #BluRay/HDDVD Remux - name = f"{title} {alt_title} {year} {three_d} {edition} {repack} {resolution} {uhd} {source} REMUX {hdr} {video_codec} {audio}" + elif type == "REMUX" and source in ("BluRay", "HDDVD"): # BluRay/HDDVD Remux + name = f"{title} {alt_title} {year} {three_d} {edition} {repack} {resolution} {uhd} {source} REMUX {hdr} {video_codec} {audio}" potential_missing = ['edition', 'description'] - elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD"): #DVD Remux - name = f"{title} {alt_title} {year} {edition} {repack} {source} REMUX {audio}" + elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD"): # DVD Remux + name = f"{title} {alt_title} {year} {edition} {repack} {source} REMUX {audio}" potential_missing = ['edition', 'description'] - elif type == "ENCODE": #Encode - name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {uhd} {source} {audio} {hdr} {video_encode}" + elif type == "ENCODE": # Encode + name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {uhd} {source} {audio} {hdr} {video_encode}" potential_missing = ['edition', 'description'] - elif type == "WEBDL": #WEB-DL + elif type == "WEBDL": # WEB-DL name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {uhd} {service} WEB-DL {audio} {hdr} {video_encode}" potential_missing = ['edition', 'service'] - elif type == "WEBRIP": #WEBRip + elif type == "WEBRIP": # WEBRip name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {uhd} {service} WEBRip {audio} {hdr} {video_encode}" potential_missing = ['edition', 'service'] - elif type == "HDTV": #HDTV + elif type == "HDTV": # HDTV name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {source} {audio} {video_encode}" potential_missing = [] - elif meta['category'] == "TV": #TV SPECIFIC - if type == "DISC": #Disk + elif type == "DVDRIP": + name = f"{title} {alt_title} {year} {source} {video_encode} DVDRip {audio}" + potential_missing = [] + elif meta['category'] == "TV": # TV SPECIFIC + if type == "DISC": # Disk if meta['is_disc'] == 'BDMV': name = f"{title} {year} {alt_title} {season}{episode} {three_d} {edition} {repack} {resolution} {region} {uhd} {source} {hdr} {video_codec} {audio}" potential_missing = ['edition', 'region', 'distributor'] @@ -2307,33 +3803,36 @@ async def get_name(self, meta): elif meta['is_disc'] == 'HDDVD': name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {source} {video_codec} {audio}" potential_missing = ['edition', 
'region', 'distributor'] - elif type == "REMUX" and source in ("BluRay", "HDDVD"): #BluRay Remux - name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {three_d} {edition} {repack} {resolution} {uhd} {source} REMUX {hdr} {video_codec} {audio}" #SOURCE + elif type == "REMUX" and source in ("BluRay", "HDDVD"): # BluRay Remux + name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {three_d} {edition} {repack} {resolution} {uhd} {source} REMUX {hdr} {video_codec} {audio}" # SOURCE potential_missing = ['edition', 'description'] - elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD"): #DVD Remux - name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {source} REMUX {audio}" #SOURCE + elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD"): # DVD Remux + name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {source} REMUX {audio}" # SOURCE potential_missing = ['edition', 'description'] - elif type == "ENCODE": #Encode - name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {resolution} {uhd} {source} {audio} {hdr} {video_encode}" #SOURCE + elif type == "ENCODE": # Encode + name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {resolution} {uhd} {source} {audio} {hdr} {video_encode}" # SOURCE potential_missing = ['edition', 'description'] - elif type == "WEBDL": #WEB-DL + elif type == "WEBDL": # WEB-DL name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {resolution} {uhd} {service} WEB-DL {audio} {hdr} {video_encode}" potential_missing = ['edition', 'service'] - elif type == "WEBRIP": #WEBRip + elif type == "WEBRIP": # WEBRip name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {resolution} {uhd} {service} WEBRip {audio} {hdr} {video_encode}" potential_missing = ['edition', 'service'] - elif type == "HDTV": #HDTV + elif type == "HDTV": # HDTV name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {resolution} {source} {audio} {video_encode}" potential_missing = [] + elif type == "DVDRIP": + name = f"{title} {alt_title} {season} {source} DVDRip {video_encode}" + potential_missing = [] - - try: + try: name = ' '.join(name.split()) - except: + except Exception: console.print("[bold red]Unable to generate name. 
Please re-run and correct any of the following args if needed.") console.print(f"--category [yellow]{meta['category']}") console.print(f"--type [yellow]{meta['type']}") console.print(f"--source [yellow]{meta['source']}") + console.print("[bold green]If you specified type, try also specifying source") exit() name_notag = name @@ -2341,52 +3840,57 @@ async def get_name(self, meta): clean_name = self.clean_filename(name) return name_notag, name, clean_name, potential_missing - - - async def get_season_episode(self, video, meta): if meta['category'] == 'TV': filelist = meta['filelist'] meta['tv_pack'] = 0 is_daily = False - if meta['anime'] == False: + if meta['anime'] is False: try: - if meta.get('manual_date'): - raise ManualDateException - try: - guess_year = guessit(video)['year'] - except Exception: - guess_year = "" - if guessit(video)["season"] == guess_year: - if f"s{guessit(video)['season']}" in video.lower(): - season_int = str(guessit(video)["season"]) - season = "S" + season_int.zfill(2) - else: - season_int = "1" - season = "S01" + daily_match = re.search(r"\d{4}[-\.]\d{2}[-\.]\d{2}", video) + if meta.get('manual_date') or daily_match: + # Handle daily episodes + # The user either provided the --daily argument or a date was found in the filename + + if meta.get('manual_date') is None and daily_match is not None: + meta['manual_date'] = daily_match.group().replace('.', '-') + is_daily = True + guess_date = meta.get('manual_date', guessit(video).get('date')) if meta.get('manual_date') else guessit(video).get('date') + season_int, episode_int = self.daily_to_tmdb_season_episode(meta.get('tmdb'), guess_date) + + season = f"S{str(season_int).zfill(2)}" + episode = f"E{str(episode_int).zfill(2)}" + # For daily shows, pass the supplied date as the episode title + # Season and episode will be stripped later to conform with standard daily episode naming format + meta['episode_title'] = meta.get('manual_date') + else: - season_int = str(guessit(video)["season"]) - season = "S" + season_int.zfill(2) + try: + guess_year = guessit(video)['year'] + except Exception: + guess_year = "" + if guessit(video)["season"] == guess_year: + if f"s{guessit(video)['season']}" in video.lower(): + season_int = str(guessit(video)["season"]) + season = "S" + season_int.zfill(2) + else: + season_int = "1" + season = "S01" + else: + season_int = str(guessit(video)["season"]) + season = "S" + season_int.zfill(2) except Exception: - try: - guess_date = meta.get('manual_date', guessit(video)['date']) if meta.get('manual_date') else guessit(video)['date'] - season_int, episode_int = self.daily_to_tmdb_season_episode(meta.get('tmdb'), guess_date) - # season = f"S{season_int.zfill(2)}" - # episode = f"E{episode_int.zfill(2)}" - season = str(guess_date) - episode = "" - is_daily = True - except Exception: - console.print_exception() - season_int = "1" - season = "S01" + console.print_exception() + season_int = "1" + season = "S01" + try: - if is_daily != True: + if is_daily is not True: episodes = "" if len(filelist) == 1: episodes = guessit(video)['episode'] - if type(episodes) == list: + if isinstance(episodes, list): episode = "" for item in guessit(video)["episode"]: ep = (str(item).zfill(2)) @@ -2403,26 +3907,22 @@ async def get_season_episode(self, video, meta): episode = "" episode_int = "0" meta['tv_pack'] = 1 + else: - #If Anime + # If Anime parsed = anitopy.parse(Path(video).name) - # romaji, mal_id, eng_title, seasonYear, anilist_episodes = self.get_romaji(guessit(parsed['anime_title'], {"excludes" : 
["country", "language"]})['title']) romaji, mal_id, eng_title, seasonYear, anilist_episodes = self.get_romaji(parsed['anime_title'], meta.get('mal', None)) if mal_id: meta['mal_id'] = mal_id - if meta.get('tmdb_manual', None) == None: + if meta.get('mal') is not None: + mal_id = meta.get('mal') + if meta.get('tmdb_manual', None) is None: year = parsed.get('anime_year', str(seasonYear)) - meta = await self.get_tmdb_id(guessit(parsed['anime_title'], {"excludes" : ["country", "language"]})['title'], year, meta, meta['category']) + meta = await self.get_tmdb_id(guessit(parsed['anime_title'], {"excludes": ["country", "language"]})['title'], year, meta, meta['category']) meta = await self.tmdb_other_meta(meta) if meta['category'] != "TV": return meta - # meta['title'] = eng_title - # difference = SequenceMatcher(None, eng_title, romaji.lower()).ratio() - # if difference >= 0.8: - # meta['aka'] = "" - # else: - # meta['aka'] = f" AKA {romaji}" tag = parsed.get('release_group', "") if tag != "": meta['tag'] = f"-{tag}" @@ -2431,56 +3931,51 @@ async def get_season_episode(self, video, meta): episodes = parsed.get('episode_number', guessit(video).get('episode', '1')) if not isinstance(episodes, list) and not episodes.isnumeric(): episodes = guessit(video)['episode'] - if type(episodes) == list: - episode = "" - for item in episodes: - ep = (str(item).zfill(2)) - episode += f"E{ep}" - episode_int = episodes[0] + if isinstance(episodes, list): + episode_int = int(episodes[0]) # Always convert to integer + episode = "".join([f"E{str(int(item)).zfill(2)}" for item in episodes]) else: - episode_int = str(int(episodes)) - episode = f"E{str(int(episodes)).zfill(2)}" + episode_int = int(episodes) # Convert to integer + episode = f"E{str(episode_int).zfill(2)}" except Exception: episode = "E01" - episode_int = "1" + episode_int = 1 # Ensure it's an integer console.print('[bold yellow]There was an error guessing the episode number. Guessing E01. 
Use [bold green]--episode #[/bold green] to correct if needed') await asyncio.sleep(1.5) else: episode = "" - episode_int = "0" + episode_int = 0 # Ensure it's an integer meta['tv_pack'] = 1 - + try: if meta.get('season_int'): - season = meta.get('season_int') + season_int = int(meta.get('season_int')) # Convert to integer else: - season = parsed.get('anime_season', guessit(video)['season']) - season_int = season - season = f"S{season.zfill(2)}" + season = parsed.get('anime_season', guessit(video).get('season', '1')) + season_int = int(season) # Convert to integer + season = f"S{str(season_int).zfill(2)}" except Exception: try: - if int(episode_int) >= anilist_episodes: + if episode_int >= anilist_episodes: params = { - 'id' : str(meta['tvdb_id']), - 'origin' : 'tvdb', - 'absolute' : str(episode_int), - # 'destination' : 'tvdb' + 'id': str(meta['tvdb_id']), + 'origin': 'tvdb', + 'absolute': str(episode_int), + } url = "https://thexem.info/map/single" response = requests.post(url, params=params).json() if response['result'] == "failure": - raise XEMNotFound + raise XEMNotFound # noqa: F405 if meta['debug']: console.log(f"[cyan]TheXEM Absolute -> Standard[/cyan]\n{response}") - season_int = str(response['data']['scene']['season']) - season = f"S{str(response['data']['scene']['season']).zfill(2)}" + season_int = int(response['data']['scene']['season']) # Convert to integer + season = f"S{str(season_int).zfill(2)}" if len(filelist) == 1: - episode_int = str(response['data']['scene']['episode']) - episode = f"E{str(response['data']['scene']['episode']).zfill(2)}" + episode_int = int(response['data']['scene']['episode']) # Convert to integer + episode = f"E{str(episode_int).zfill(2)}" else: - #Get season from xem name map + season_int = 1 # Default to 1 if error occurs season = "S01" - season_int = "1" names_url = f"https://thexem.info/map/names?origin=tvdb&id={str(meta['tvdb_id'])}" names_response = requests.get(names_url).json() if meta['debug']: @@ -2491,79 +3986,63 @@ async def get_season_episode(self, video, meta): for lang, names in values.items(): if lang == "jp": for name in names: - romaji_check = re.sub("[^0-9a-zA-Z\[\]]+", "", romaji.lower().replace(' ', '')) - name_check = re.sub("[^0-9a-zA-Z\[\]]+", "", name.lower().replace(' ', '')) + romaji_check = re.sub(r"[^0-9a-zA-Z\[\]]+", "", romaji.lower().replace(' ', '')) + name_check = re.sub(r"[^0-9a-zA-Z\[\]]+", "", name.lower().replace(' ', '')) diff = SequenceMatcher(None, romaji_check, name_check).ratio() - if romaji_check in name_check: - if diff >= difference: - if season_num != "all": - season_int = season_num - season = f"S{season_num.zfill(2)}" - else: - season_int = "1" - season = "S01" - difference = diff + if romaji_check in name_check and diff >= difference: + season_int = int(season_num) if season_num != "all" else 1 # Convert to integer + season = f"S{str(season_int).zfill(2)}" + difference = diff
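+ # The same fuzzy match is repeated below against TheXEM's English ('us') titles; + # SequenceMatcher keeps the best-scoring season_num seen so far ('all' maps to season 1).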
if lang == "us": for name in names: - eng_check = re.sub("[^0-9a-zA-Z\[\]]+", "", eng_title.lower().replace(' ', '')) - name_check = re.sub("[^0-9a-zA-Z\[\]]+", "", name.lower().replace(' ', '')) + eng_check = re.sub(r"[^0-9a-zA-Z\[\]]+", "", eng_title.lower().replace(' ', '')) + name_check = re.sub(r"[^0-9a-zA-Z\[\]]+", "", name.lower().replace(' ', '')) diff = SequenceMatcher(None, eng_check, name_check).ratio() - if eng_check in name_check: - if diff >= difference: - if season_num != "all": - season_int = season_num - season = f"S{season_num.zfill(2)}" - else: - season_int = "1" - season = "S01" - difference = diff + if eng_check in name_check and diff >= difference: + season_int = int(season_num) if season_num != "all" else 1 # Convert to integer + season = f"S{str(season_int).zfill(2)}" + difference = diff else: - raise XEMNotFound + raise XEMNotFound # noqa: F405 except Exception: if meta['debug']: console.print_exception() try: - season = guessit(video)['season'] - season_int = season + season = guessit(video).get('season', '1') + season_int = int(season) # Convert to integer except Exception: - season_int = "1" + season_int = 1 # Default to 1 if error occurs season = "S01" console.print(f"[bold yellow]{meta['title']} does not exist on thexem, guessing {season}") console.print(f"[bold yellow]If [green]{season}[/green] is incorrect, use --season to correct") await asyncio.sleep(3) - # try: - # version = parsed['release_version'] - # if int(version) == 2: - # meta['repack'] = "REPACK" - # elif int(version) > 2: - # meta['repack'] = f"REPACK{int(version) - 1}" - # # version = f"v{version}" - # except Exception: - # # version = "" - # pass - - if meta.get('manual_season', None) == None: + + if meta.get('manual_season', None) is None: meta['season'] = season else: season_int = meta['manual_season'].lower().replace('s', '') meta['season'] = f"S{meta['manual_season'].lower().replace('s', '').zfill(2)}" - if meta.get('manual_episode', None) == None: + if meta.get('manual_episode', None) is None: meta['episode'] = episode else: episode_int = meta['manual_episode'].lower().replace('e', '') meta['episode'] = f"E{meta['manual_episode'].lower().replace('e', '').zfill(2)}" meta['tv_pack'] = 0 - + # if " COMPLETE " in Path(video).name.replace('.', ' '): # meta['season'] = "COMPLETE" meta['season_int'] = season_int meta['episode_int'] = episode_int - - meta['episode_title_storage'] = guessit(video,{"excludes" : "part"}).get('episode_title', '') + # Manual episode title + if 'manual_episode_title' in meta and meta['manual_episode_title'] == "": + meta['episode_title_storage'] = meta.get('manual_episode_title') + else: + meta['episode_title_storage'] = guessit(video, {"excludes": "part"}).get('episode_title', '') + if meta['season'] == "S00" or meta['episode'] == "E00": meta['episode_title'] = meta['episode_title_storage'] - + # Guess the part of the episode (if available) meta['part'] = "" if meta['tv_pack'] == 1: @@ -2572,62 +4051,68 @@ async def get_season_episode(self, video, meta): return meta - - def get_service(self, video, tag, audio, guess_title): - service = guessit(video).get('streaming_service', "") + def get_service(self, video=None, tag=None, audio=None, guess_title=None, get_services_only=False): services = { - '9NOW': '9NOW', '9Now': '9NOW', 'AE': 'AE', 'A&E': 'AE', 'AJAZ': 'AJAZ', 'Al Jazeera English': 'AJAZ', - 'ALL4': 'ALL4', 'Channel 4': 'ALL4', 'AMBC': 'AMBC', 'ABC': 'AMBC', 'AMC': 'AMC', 'AMZN': 'AMZN', - 'Amazon Prime': 'AMZN', 'ANLB': 'ANLB', 'AnimeLab': 'ANLB', 'ANPL': 'ANPL', 'Animal Planet': 'ANPL', - 'AOL': 'AOL', 'ARD': 'ARD', 'AS': 'AS', 'Adult Swim': 'AS', 'ATK': 'ATK', "America's Test Kitchen": 'ATK', - 'ATVP': 'ATVP', 'AppleTV': 'ATVP', 'AUBC': 'AUBC', 'ABC Australia': 'AUBC', 'BCORE': 'BCORE', 'BKPL': 'BKPL', - 'Blackpills': 'BKPL', 'BluTV': 'BLU', 'Binge': 'BNGE', 'BOOM': 'BOOM', 'Boomerang': 'BOOM', 'BRAV': 'BRAV', - 'BravoTV': 'BRAV', 'CBC': 'CBC', 'CBS': 'CBS', 'CC': 'CC', 'Comedy Central': 'CC', 'CCGC': 'CCGC', - 'Comedians in Cars Getting Coffee': 'CCGC', 'CHGD': 'CHGD', 'CHRGD': 'CHGD', 'CMAX': 'CMAX', 'Cinemax': 'CMAX', - 'CMOR': 'CMOR', 'CMT': 'CMT', 'Country Music 
Television': 'CMT', 'CN': 'CN', 'Cartoon Network': 'CN', 'CNBC': 'CNBC', - 'CNLP': 'CNLP', 'Canal+': 'CNLP', 'COOK': 'COOK', 'CORE': 'CORE', 'CR': 'CR', 'Crunchy Roll': 'CR', 'Crave': 'CRAV', - 'CRIT': 'CRIT', 'Criterion' : 'CRIT', 'CRKL': 'CRKL', 'Crackle': 'CRKL', 'CSPN': 'CSPN', 'CSpan': 'CSPN', 'CTV': 'CTV', 'CUR': 'CUR', - 'CuriosityStream': 'CUR', 'CW': 'CW', 'The CW': 'CW', 'CWS': 'CWS', 'CWSeed': 'CWS', 'DAZN': 'DAZN', 'DCU': 'DCU', - 'DC Universe': 'DCU', 'DDY': 'DDY', 'Digiturk Diledigin Yerde': 'DDY', 'DEST': 'DEST', 'DramaFever': 'DF', 'DHF': 'DHF', - 'Deadhouse Films': 'DHF', 'DISC': 'DISC', 'Discovery': 'DISC', 'DIY': 'DIY', 'DIY Network': 'DIY', 'DOCC': 'DOCC', - 'Doc Club': 'DOCC', 'DPLY': 'DPLY', 'DPlay': 'DPLY', 'DRPO': 'DRPO', 'Discovery Plus': 'DSCP', 'DSKI': 'DSKI', - 'Daisuki': 'DSKI', 'DSNP': 'DSNP', 'Disney+': 'DSNP', 'DSNY': 'DSNY', 'Disney': 'DSNY', 'DTV': 'DTV', - 'EPIX': 'EPIX', 'ePix': 'EPIX', 'ESPN': 'ESPN', 'ESQ': 'ESQ', 'Esquire': 'ESQ', 'ETTV': 'ETTV', 'El Trece': 'ETTV', - 'ETV': 'ETV', 'E!': 'ETV', 'FAM': 'FAM', 'Fandor': 'FANDOR', 'Facebook Watch': 'FBWatch', 'FJR': 'FJR', - 'Family Jr': 'FJR', 'FOOD': 'FOOD', 'Food Network': 'FOOD', 'FOX': 'FOX', 'Fox': 'FOX', 'Fox Premium': 'FOXP', - 'UFC Fight Pass': 'FP', 'FPT': 'FPT', 'FREE': 'FREE', 'Freeform': 'FREE', 'FTV': 'FTV', 'FUNI': 'FUNI', 'FUNi' : 'FUNI', - 'Foxtel': 'FXTL', 'FYI': 'FYI', 'FYI Network': 'FYI', 'GC': 'GC', 'NHL GameCenter': 'GC', 'GLBL': 'GLBL', - 'Global': 'GLBL', 'GLOB': 'GLOB', 'GloboSat Play': 'GLOB', 'GO90': 'GO90', 'GagaOOLala': 'Gaga', 'HBO': 'HBO', - 'HBO Go': 'HBO', 'HGTV': 'HGTV', 'HIDI': 'HIDI', 'HIST': 'HIST', 'History': 'HIST', 'HLMK': 'HLMK', 'Hallmark': 'HLMK', - 'HMAX': 'HMAX', 'HBO Max': 'HMAX', 'HS': 'HTSR', 'HTSR' : 'HTSR', 'HSTR': 'Hotstar', 'HULU': 'HULU', 'Hulu': 'HULU', 'hoichoi': 'HoiChoi', 'ID': 'ID', - 'Investigation Discovery': 'ID', 'IFC': 'IFC', 'iflix': 'IFX', 'National Audiovisual Institute': 'INA', 'ITV': 'ITV', - 'KAYO': 'KAYO', 'KNOW': 'KNOW', 'Knowledge Network': 'KNOW', 'KNPY': 'KNPY', 'Kanopy' : 'KNPY', 'LIFE': 'LIFE', 'Lifetime': 'LIFE', 'LN': 'LN', - 'MA' : 'MA', 'Movies Anywhere' : 'MA', 'MAX' : 'MAX', 'MBC': 'MBC', 'MNBC': 'MNBC', 'MSNBC': 'MNBC', 'MTOD': 'MTOD', 'Motor Trend OnDemand': 'MTOD', 'MTV': 'MTV', 'MUBI': 'MUBI', - 'NATG': 'NATG', 'National Geographic': 'NATG', 'NBA': 'NBA', 'NBA TV': 'NBA', 'NBC': 'NBC', 'NF': 'NF', 'Netflix': 'NF', - 'National Film Board': 'NFB', 'NFL': 'NFL', 'NFLN': 'NFLN', 'NFL Now': 'NFLN', 'NICK': 'NICK', 'Nickelodeon': 'NICK', 'NRK': 'NRK', - 'Norsk Rikskringkasting': 'NRK', 'OnDemandKorea': 'ODK', 'Opto': 'OPTO', 'Oprah Winfrey Network': 'OWN', 'PA': 'PA', 'PBS': 'PBS', - 'PBSK': 'PBSK', 'PBS Kids': 'PBSK', 'PCOK': 'PCOK', 'Peacock': 'PCOK', 'PLAY': 'PLAY', 'PLUZ': 'PLUZ', 'Pluzz': 'PLUZ', 'PMNP': 'PMNP', - 'PMNT': 'PMNT', 'PMTP' : 'PMTP', 'POGO': 'POGO', 'PokerGO': 'POGO', 'PSN': 'PSN', 'Playstation Network': 'PSN', 'PUHU': 'PUHU', 'QIBI': 'QIBI', - 'RED': 'RED', 'YouTube Red': 'RED', 'RKTN': 'RKTN', 'Rakuten TV': 'RKTN', 'The Roku Channel': 'ROKU', 'RSTR': 'RSTR', 'RTE': 'RTE', - 'RTE One': 'RTE', 'RUUTU': 'RUUTU', 'SBS': 'SBS', 'Science Channel': 'SCI', 'SESO': 'SESO', 'SeeSo': 'SESO', 'SHMI': 'SHMI', 'Shomi': 'SHMI', 'SKST' : 'SKST', 'SkyShowtime': 'SKST', - 'SHO': 'SHO', 'Showtime': 'SHO', 'SNET': 'SNET', 'Sportsnet': 'SNET', 'Sony': 'SONY', 'SPIK': 'SPIK', 'Spike': 'SPIK', 'Spike TV': 'SPKE', - 'SPRT': 'SPRT', 'Sprout': 'SPRT', 'STAN': 'STAN', 'Stan': 'STAN', 'STARZ': 'STARZ', 'STRP': 'STRP', 'Star+' : 
'STRP', 'STZ': 'STZ', 'Starz': 'STZ', 'SVT': 'SVT', - 'Sveriges Television': 'SVT', 'SWER': 'SWER', 'SwearNet': 'SWER', 'SYFY': 'SYFY', 'Syfy': 'SYFY', 'TBS': 'TBS', 'TEN': 'TEN', - 'TFOU': 'TFOU', 'TFou': 'TFOU', 'TIMV': 'TIMV', 'TLC': 'TLC', 'TOU': 'TOU', 'TRVL': 'TRVL', 'TUBI': 'TUBI', 'TubiTV': 'TUBI', - 'TV3': 'TV3', 'TV3 Ireland': 'TV3', 'TV4': 'TV4', 'TV4 Sweeden': 'TV4', 'TVING': 'TVING', 'TVL': 'TVL', 'TV Land': 'TVL', - 'TVNZ': 'TVNZ', 'UFC': 'UFC', 'UKTV': 'UKTV', 'UNIV': 'UNIV', 'Univision': 'UNIV', 'USAN': 'USAN', 'USA Network': 'USAN', - 'VH1': 'VH1', 'VIAP': 'VIAP', 'VICE': 'VICE', 'Viceland': 'VICE', 'Viki': 'VIKI', 'VIMEO': 'VIMEO', 'VLCT': 'VLCT', - 'Velocity': 'VLCT', 'VMEO': 'VMEO', 'Vimeo': 'VMEO', 'VRV': 'VRV', 'VUDU': 'VUDU', 'WME': 'WME', 'WatchMe': 'WME', 'WNET': 'WNET', - 'W Network': 'WNET', 'WWEN': 'WWEN', 'WWE Network': 'WWEN', 'XBOX': 'XBOX', 'Xbox Video': 'XBOX', 'YHOO': 'YHOO', 'Yahoo': 'YHOO', + '9NOW': '9NOW', '9Now': '9NOW', 'AE': 'AE', 'A&E': 'AE', 'AJAZ': 'AJAZ', 'Al Jazeera English': 'AJAZ', + 'ALL4': 'ALL4', 'Channel 4': 'ALL4', 'AMBC': 'AMBC', 'ABC': 'AMBC', 'AMC': 'AMC', 'AMZN': 'AMZN', + 'Amazon Prime': 'AMZN', 'ANLB': 'ANLB', 'AnimeLab': 'ANLB', 'ANPL': 'ANPL', 'Animal Planet': 'ANPL', + 'AOL': 'AOL', 'ARD': 'ARD', 'AS': 'AS', 'Adult Swim': 'AS', 'ATK': 'ATK', "America's Test Kitchen": 'ATK', + 'ATVP': 'ATVP', 'AppleTV': 'ATVP', 'AUBC': 'AUBC', 'ABC Australia': 'AUBC', 'BCORE': 'BCORE', 'BKPL': 'BKPL', + 'Blackpills': 'BKPL', 'BluTV': 'BLU', 'Binge': 'BNGE', 'BOOM': 'BOOM', 'Boomerang': 'BOOM', 'BRAV': 'BRAV', + 'BravoTV': 'BRAV', 'CBC': 'CBC', 'CBS': 'CBS', 'CC': 'CC', 'Comedy Central': 'CC', 'CCGC': 'CCGC', + 'Comedians in Cars Getting Coffee': 'CCGC', 'CHGD': 'CHGD', 'CHRGD': 'CHGD', 'CMAX': 'CMAX', 'Cinemax': 'CMAX', + 'CMOR': 'CMOR', 'CMT': 'CMT', 'Country Music Television': 'CMT', 'CN': 'CN', 'Cartoon Network': 'CN', 'CNBC': 'CNBC', + 'CNLP': 'CNLP', 'Canal+': 'CNLP', 'CNGO': 'CNGO', 'Cinego': 'CNGO', 'COOK': 'COOK', 'CORE': 'CORE', 'CR': 'CR', + 'Crunchy Roll': 'CR', 'Crave': 'CRAV', 'CRIT': 'CRIT', 'Criterion': 'CRIT', 'CRKL': 'CRKL', 'Crackle': 'CRKL', + 'CSPN': 'CSPN', 'CSpan': 'CSPN', 'CTV': 'CTV', 'CUR': 'CUR', 'CuriosityStream': 'CUR', 'CW': 'CW', 'The CW': 'CW', + 'CWS': 'CWS', 'CWSeed': 'CWS', 'DAZN': 'DAZN', 'DCU': 'DCU', 'DC Universe': 'DCU', 'DDY': 'DDY', + 'Digiturk Diledigin Yerde': 'DDY', 'DEST': 'DEST', 'DramaFever': 'DF', 'DHF': 'DHF', 'Deadhouse Films': 'DHF', + 'DISC': 'DISC', 'Discovery': 'DISC', 'DIY': 'DIY', 'DIY Network': 'DIY', 'DOCC': 'DOCC', 'Doc Club': 'DOCC', + 'DPLY': 'DPLY', 'DPlay': 'DPLY', 'DRPO': 'DRPO', 'Discovery Plus': 'DSCP', 'DSKI': 'DSKI', 'Daisuki': 'DSKI', + 'DSNP': 'DSNP', 'Disney+': 'DSNP', 'DSNY': 'DSNY', 'Disney': 'DSNY', 'DTV': 'DTV', 'EPIX': 'EPIX', 'ePix': 'EPIX', + 'ESPN': 'ESPN', 'ESQ': 'ESQ', 'Esquire': 'ESQ', 'ETTV': 'ETTV', 'El Trece': 'ETTV', 'ETV': 'ETV', 'E!': 'ETV', + 'FAM': 'FAM', 'Fandor': 'FANDOR', 'Facebook Watch': 'FBWatch', 'FJR': 'FJR', 'Family Jr': 'FJR', 'FMIO': 'FMIO', + 'Filmio': 'FMIO', 'FOOD': 'FOOD', 'Food Network': 'FOOD', 'FOX': 'FOX', 'Fox': 'FOX', 'Fox Premium': 'FOXP', + 'UFC Fight Pass': 'FP', 'FPT': 'FPT', 'FREE': 'FREE', 'Freeform': 'FREE', 'FTV': 'FTV', 'FUNI': 'FUNI', 'FUNi': 'FUNI', + 'Foxtel': 'FXTL', 'FYI': 'FYI', 'FYI Network': 'FYI', 'GC': 'GC', 'NHL GameCenter': 'GC', 'GLBL': 'GLBL', + 'Global': 'GLBL', 'GLOB': 'GLOB', 'GloboSat Play': 'GLOB', 'GO90': 'GO90', 'GagaOOLala': 'Gaga', 'HBO': 'HBO', + 'HBO Go': 'HBO', 'HGTV': 'HGTV', 'HIDI': 'HIDI', 'HIST': 
'HIST', 'History': 'HIST', 'HLMK': 'HLMK', 'Hallmark': 'HLMK', + 'HMAX': 'HMAX', 'HBO Max': 'HMAX', 'HS': 'HTSR', 'HTSR': 'HTSR', 'HSTR': 'Hotstar', 'HULU': 'HULU', 'Hulu': 'HULU', + 'hoichoi': 'HoiChoi', 'ID': 'ID', 'Investigation Discovery': 'ID', 'IFC': 'IFC', 'iflix': 'IFX', + 'National Audiovisual Institute': 'INA', 'ITV': 'ITV', 'JOYN': 'JOYN', 'KAYO': 'KAYO', 'KNOW': 'KNOW', 'Knowledge Network': 'KNOW', + 'KNPY': 'KNPY', 'Kanopy': 'KNPY', 'LIFE': 'LIFE', 'Lifetime': 'LIFE', 'LN': 'LN', 'MA': 'MA', 'Movies Anywhere': 'MA', + 'MAX': 'MAX', 'MBC': 'MBC', 'MNBC': 'MNBC', 'MSNBC': 'MNBC', 'MTOD': 'MTOD', 'Motor Trend OnDemand': 'MTOD', 'MTV': 'MTV', + 'MUBI': 'MUBI', 'NATG': 'NATG', 'National Geographic': 'NATG', 'NBA': 'NBA', 'NBA TV': 'NBA', 'NBC': 'NBC', 'NF': 'NF', + 'Netflix': 'NF', 'National Film Board': 'NFB', 'NFL': 'NFL', 'NFLN': 'NFLN', 'NFL Now': 'NFLN', 'NICK': 'NICK', + 'Nickelodeon': 'NICK', 'NOW': 'NOW', 'NRK': 'NRK', 'Norsk Rikskringkasting': 'NRK', 'OnDemandKorea': 'ODK', 'Opto': 'OPTO', + 'ORF': 'ORF', 'ORF ON': 'ORF', 'Oprah Winfrey Network': 'OWN', 'PA': 'PA', 'PBS': 'PBS', 'PBSK': 'PBSK', 'PBS Kids': 'PBSK', + 'PCOK': 'PCOK', 'Peacock': 'PCOK', 'PLAY': 'PLAY', 'PLUZ': 'PLUZ', 'Pluzz': 'PLUZ', 'PMNP': 'PMNP', 'PMNT': 'PMNT', + 'PMTP': 'PMTP', 'POGO': 'POGO', 'PokerGO': 'POGO', 'PSN': 'PSN', 'Playstation Network': 'PSN', 'PUHU': 'PUHU', 'QIBI': 'QIBI', + 'RED': 'RED', 'YouTube Red': 'RED', 'RKTN': 'RKTN', 'Rakuten TV': 'RKTN', 'The Roku Channel': 'ROKU', 'RNET': 'RNET', + 'OBB Railnet': 'RNET', 'RSTR': 'RSTR', 'RTE': 'RTE', 'RTE One': 'RTE', 'RTLP': 'RTLP', 'RTL+': 'RTLP', 'RUUTU': 'RUUTU', + 'SBS': 'SBS', 'Science Channel': 'SCI', 'SESO': 'SESO', 'SeeSo': 'SESO', 'SHMI': 'SHMI', 'Shomi': 'SHMI', 'SKST': 'SKST', + 'SkyShowtime': 'SKST', 'SHO': 'SHO', 'Showtime': 'SHO', 'SNET': 'SNET', 'Sportsnet': 'SNET', 'Sony': 'SONY', 'SPIK': 'SPIK', + 'Spike': 'SPIK', 'Spike TV': 'SPKE', 'SPRT': 'SPRT', 'Sprout': 'SPRT', 'STAN': 'STAN', 'Stan': 'STAN', 'STARZ': 'STARZ', + 'STRP': 'STRP', 'Star+': 'STRP', 'STZ': 'STZ', 'Starz': 'STZ', 'SVT': 'SVT', 'Sveriges Television': 'SVT', 'SWER': 'SWER', + 'SwearNet': 'SWER', 'SYFY': 'SYFY', 'Syfy': 'SYFY', 'TBS': 'TBS', 'TEN': 'TEN', 'TIMV': 'TIMV', 'TIMvision': 'TIMV', + 'TFOU': 'TFOU', 'TFou': 'TFOU', 'TLC': 'TLC', 'TOU': 'TOU', 'TRVL': 'TRVL', 'TUBI': 'TUBI', 'TubiTV': 'TUBI', + 'TV3': 'TV3', 'TV3 Ireland': 'TV3', 'TV4': 'TV4', 'TV4 Sweeden': 'TV4', 'TVING': 'TVING', 'TVL': 'TVL', 'TV Land': 'TVL', + 'TVNZ': 'TVNZ', 'UFC': 'UFC', 'UKTV': 'UKTV', 'UNIV': 'UNIV', 'Univision': 'UNIV', 'USAN': 'USAN', 'USA Network': 'USAN', + 'VH1': 'VH1', 'VIAP': 'VIAP', 'VICE': 'VICE', 'Viceland': 'VICE', 'Viki': 'VIKI', 'VIMEO': 'VIMEO', 'VLCT': 'VLCT', + 'Velocity': 'VLCT', 'VMEO': 'VMEO', 'Vimeo': 'VMEO', 'VRV': 'VRV', 'VUDU': 'VUDU', 'WME': 'WME', 'WatchMe': 'WME', 'WNET': 'WNET', + 'W Network': 'WNET', 'WWEN': 'WWEN', 'WWE Network': 'WWEN', 'XBOX': 'XBOX', 'Xbox Video': 'XBOX', 'YHOO': 'YHOO', 'Yahoo': 'YHOO', 'YT': 'YT', 'ZDF': 'ZDF', 'iP': 'iP', 'BBC iPlayer': 'iP', 'iQIYI': 'iQIYI', 'iT': 'iT', 'iTunes': 'iT' - } - - - video_name = re.sub("[.()]", " ", video.replace(tag, '').replace(guess_title, '')) + } + + if get_services_only: + return services
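+ # Usage sketch (hypothetical filename): for 'Show S01E01 1080p AMZN WEB-DL H 264-GRP' + # the token scan below sets service='AMZN', and the block further down expands + # recognised abbreviations (e.g. AMZN -> service_longname 'Amazon').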
+ service = guessit(video).get('streaming_service', "") + + video_name = re.sub(r"[.()]", " ", video.replace(tag, '').replace(guess_title, '')) if "DTS-HD MA" in audio: video_name = video_name.replace("DTS-HD.MA.", "").replace("DTS-HD MA ", "") for key, value in services.items(): - if (' ' + key + ' ') in video_name and key not in guessit(video, {"excludes" : ["country", "language"]}).get('title', ''): + if (' ' + key + ' ') in video_name and key not in guessit(video, {"excludes": ["country", "language"]}).get('title', ''): service = value elif key == service: service = value @@ -2639,10 +4124,8 @@ def get_service(self, video, tag, audio, guess_title): service_longname = "Amazon" return service, service_longname - - def stream_optimized(self, stream_opt): - if stream_opt == True: + if stream_opt is True: stream = 1 else: stream = 0 @@ -2653,22 +4136,22 @@ def is_anon(self, anon_in): if anon.lower() == "true": console.print("[bold red]Global ANON has been removed in favor of per-tracker settings. Please update your config accordingly.") time.sleep(10) - if anon_in == True: + if anon_in is True: anon_out = 1 else: anon_out = 0 return anon_out async def upload_image(self, session, url, data, headers, files): - if headers == None and files == None: + if headers is None and files is None: async with session.post(url=url, data=data) as resp: response = await resp.json() return response - elif headers == None and files != None: + elif headers is None and files is not None: async with session.post(url=url, data=data, files=files) as resp: response = await resp.json() return response - elif headers != None and files == None: + elif headers is not None and files is None: async with session.post(url=url, data=data, headers=headers) as resp: response = await resp.json() return response @@ -2676,90 +4159,124 @@ async def upload_image(self, session, url, data, headers, files): async with session.post(url=url, data=data, headers=headers, files=files) as resp: response = await resp.json() return response - - + def clean_filename(self, name): - invalid = '<>:"/\|?*' + invalid = '<>:"/\\|?*' for char in invalid: name = name.replace(char, '-') return name - async def gen_desc(self, meta): - desclink = meta.get('desclink', None) - descfile = meta.get('descfile', None) - ptp_desc = blu_desc = "" - desc_source = [] + def clean_text(text): + return text.replace('\r\n', '').replace('\n', '').strip() + + desclink = meta.get('desclink') + descfile = meta.get('descfile') + scene_nfo = False + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: description.seek(0) - if (desclink, descfile, meta['desc']) == (None, None, None): - if meta.get('ptp_manual') != None: - desc_source.append('PTP') - if meta.get('blu_manual') != None: - desc_source.append('BLU') - if len(desc_source) != 1: - desc_source = None - else: - desc_source = desc_source[0] - - if meta.get('ptp', None) != None and str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true" and desc_source in ['PTP', None]: - ptp = PTP(config=self.config) - ptp_desc = await ptp.get_ptp_description(meta['ptp'], meta['is_disc']) - if ptp_desc.replace('\r\n', '').replace('\n', '').strip() != "": - description.write(ptp_desc) - description.write("\n") - meta['description'] = 'PTP' - - if ptp_desc == "" and meta.get('blu_desc', '').rstrip() not in [None, ''] and desc_source in ['BLU', None]: - if meta.get('blu_desc', '').strip().replace('\r\n', '').replace('\n', '') != '': - description.write(meta['blu_desc']) - meta['description'] = 'BLU' - - if meta.get('desc_template', None) != None: + content_written = False + + if meta.get('desc_template'): from jinja2 import Template - with open(f"{meta['base_dir']}/data/templates/{meta['desc_template']}.txt", 'r') as f: - desc_templater = 
Template(f.read()) - template_desc = desc_templater.render(meta) - if template_desc.strip() != "": - description.write(template_desc) - description.write("\n") - - if meta['nfo'] != False: - description.write("[code]") - nfo = glob.glob("*.nfo")[0] - description.write(open(nfo, 'r', encoding="utf-8").read()) - description.write("[/code]") - description.write("\n") - meta['description'] = "CUSTOM" - if desclink != None: - parsed = urllib.parse.urlparse(desclink.replace('/raw/', '/')) - split = os.path.split(parsed.path) - if split[0] != '/': - raw = parsed._replace(path=f"{split[0]}/raw/{split[1]}") + try: + with open(f"{meta['base_dir']}/data/templates/{meta['desc_template']}.txt", 'r') as f: + template = Template(f.read()) + template_desc = template.render(meta) + if clean_text(template_desc): + description.write(template_desc + "\n") + content_written = True + except FileNotFoundError: + console.print(f"[ERROR] Template '{meta['desc_template']}' not found.") + + base_dir = meta['base_dir'] + uuid = meta['uuid'] + current_dir_path = "*.nfo" + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + if meta['debug']: + console.print(f"specified_dir_path: {specified_dir_path}") + if meta.get('nfo') and not content_written: + if meta['auto_nfo'] is True: + nfo_files = glob.glob(specified_dir_path) + scene_nfo = True else: - raw = parsed._replace(path=f"/raw{parsed.path}") - raw = urllib.parse.urlunparse(raw) - description.write(requests.get(raw).text) - description.write("\n") - meta['description'] = "CUSTOM" - - if descfile != None: - if os.path.isfile(descfile) == True: - text = open(descfile, 'r').read() - description.write(text) - meta['description'] = "CUSTOM" - if meta['desc'] != None: - description.write(meta['desc']) - description.write("\n") + nfo_files = glob.glob(current_dir_path) + if meta['debug']: + console.print(f"Glob current_dir_path matches: {glob.glob(current_dir_path)}") + console.print(f"Glob specified_dir_path matches: {glob.glob(specified_dir_path)}") + if not nfo_files: + console.print("NFO was set but no nfo file was found") + description.write("\n") + return meta + + if nfo_files: + nfo = nfo_files[0] + try: + with open(nfo, 'r', encoding="utf-8") as nfo_file: + nfo_content = nfo_file.read() + if meta['debug']: + console.print("NFO content read with utf-8 encoding.") + except UnicodeDecodeError: + if meta['debug']: + console.print("utf-8 decoding failed, trying latin1.") + with open(nfo, 'r', encoding="latin1") as nfo_file: + nfo_content = nfo_file.read() + + if scene_nfo is True: + description.write(f"[center][spoiler=Scene NFO:][code]{nfo_content}[/code][/spoiler][/center]\n") + else: + description.write(f"[code]{nfo_content}[/code]\n") + meta['description'] = "CUSTOM" + content_written = True + + if desclink and not content_written: + try: + parsed = urllib.parse.urlparse(desclink.replace('/raw/', '/')) + split = os.path.split(parsed.path) + raw = parsed._replace(path=f"{split[0]}/raw/{split[1]}" if split[0] != '/' else f"/raw{parsed.path}") + raw_url = urllib.parse.urlunparse(raw) + desclink_content = requests.get(raw_url).text + if clean_text(desclink_content): + description.write(desclink_content + "\n") + meta['description'] = "CUSTOM" + content_written = True + except Exception as e: + console.print(f"[ERROR] Failed to fetch description from link: {e}") + + if descfile and os.path.isfile(descfile) and not content_written: + with open(descfile, 'r') as f: + file_content = f.read() + if clean_text(file_content): + description.write(file_content) + 
meta['description'] = "CUSTOM" + content_written = True + + if meta.get('desc') and not content_written: + description.write(meta['desc'] + "\n") meta['description'] = "CUSTOM" + content_written = True + + if not content_written: + description_text = meta.get('description', '') or '' + description.write(description_text + "\n") + description.write("\n") - return meta - + return meta + + # Fallback if no description is provided + if not meta.get('skip_gen_desc', False): + description_text = meta['description'] if meta['description'] else "" + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: + description.write(description_text + "\n") + + return meta + async def tag_override(self, meta): with open(f"{meta['base_dir']}/data/tags.json", 'r', encoding="utf-8") as f: tags = json.load(f) f.close() - + for tag in tags: value = tags.get(tag) if value.get('in_name', "") == tag and tag in meta['path']: @@ -2772,13 +4289,12 @@ async def tag_override(self, meta): else: pass elif key == 'personalrelease': - meta[key] = bool(distutils.util.strtobool(str(value.get(key, 'False')))) + meta[key] = bool(str2bool(str(value.get(key, 'False')))) elif key == 'template': meta['desc_template'] = value.get(key) else: meta[key] = value.get(key) return meta - async def package(self, meta): if meta['tag'] == "": @@ -2800,9 +4316,11 @@ async def package(self, meta): generic.write(f"IMDb: https://www.imdb.com/title/tt{meta['imdb_id']}\n") if meta['tvdb_id'] != "0": generic.write(f"TVDB: https://www.thetvdb.com/?id={meta['tvdb_id']}&tab=series\n") + if "tvmaze_id" in meta and meta['tvmaze_id'] != "0": + generic.write(f"TVMaze: https://www.tvmaze.com/shows/{meta['tvmaze_id']}\n") poster_img = f"{meta['base_dir']}/tmp/{meta['uuid']}/POSTER.png" if meta.get('poster', None) not in ['', None] and not os.path.exists(poster_img): - if meta.get('rehosted_poster', None) == None: + if meta.get('rehosted_poster', None) is None: r = requests.get(meta['poster'], stream=True) if r.status_code == 200: console.print("[bold yellow]Rehosting Poster") @@ -2813,23 +4331,23 @@ async def package(self, meta): poster = poster[0] generic.write(f"TMDB Poster: {poster.get('raw_url', poster.get('img_url'))}\n") meta['rehosted_poster'] = poster.get('raw_url', poster.get('img_url')) - with open (f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as metafile: + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as metafile: json.dump(meta, metafile, indent=4) metafile.close() else: console.print("[bold yellow]Poster could not be retrieved") - elif os.path.exists(poster_img) and meta.get('rehosted_poster') != None: + elif os.path.exists(poster_img) and meta.get('rehosted_poster') is not None: generic.write(f"TMDB Poster: {meta.get('rehosted_poster')}\n") if len(meta['image_list']) > 0: - generic.write(f"\nImage Webpage:\n") + generic.write("\nImage Webpage:\n") for each in meta['image_list']: generic.write(f"{each['web_url']}\n") - generic.write(f"\nThumbnail Image:\n") + generic.write("\nThumbnail Image:\n") for each in meta['image_list']: generic.write(f"{each['img_url']}\n") - title = re.sub("[^0-9a-zA-Z\[\]]+", "", meta['title']) + title = re.sub(r"[^0-9a-zA-Z\[\]]+", "", meta['title']) archive = f"{meta['base_dir']}/tmp/{meta['uuid']}/{title}" - torrent_files = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}","*.torrent") + torrent_files = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", "*.torrent")
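+ # Packaging flow (sketch): package() tars the whole tmp/{uuid} working dir and + # either links it through the configured filebrowser or uploads the tar to + # uguu.se, returning a shareable URL (False on failure).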
+ if isinstance(torrent_files, list) and len(torrent_files) > 1: + for each in torrent_files: + if not each.startswith(('BASE', '[RAND')): @@ -2837,17 +4355,17 @@ try: if os.path.exists(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent"): base_torrent = Torrent.read(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") - manual_name = re.sub("[^0-9a-zA-Z\[\]\'\-]+", ".", os.path.basename(meta['path'])) + manual_name = re.sub(r"[^0-9a-zA-Z\[\]\'\-]+", ".", os.path.basename(meta['path'])) Torrent.copy(base_torrent).write(f"{meta['base_dir']}/tmp/{meta['uuid']}/{manual_name}.torrent", overwrite=True) # shutil.copy(os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent"), os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['name'].replace(' ', '.')}.torrent").replace(' ', '.')) filebrowser = self.config['TRACKERS'].get('MANUAL', {}).get('filebrowser', None) shutil.make_archive(archive, 'tar', f"{meta['base_dir']}/tmp/{meta['uuid']}") - if filebrowser != None: + if filebrowser is not None: url = '/'.join(s.strip('/') for s in (filebrowser, f"/tmp/{meta['uuid']}")) url = urllib.parse.quote(url, safe="https://") else: files = { - "files[]" : (f"{meta['title']}.tar", open(f"{archive}.tar", 'rb')) + "files[]": (f"{meta['title']}.tar", open(f"{archive}.tar", 'rb')) } response = requests.post("https://uguu.se/upload.php", files=files).json() if meta['debug']: @@ -2856,14 +4374,69 @@ return url except Exception: return False - return + return + + async def get_imdb_aka_api(self, imdb_id, meta): + if imdb_id == "0": + return "", None + if not imdb_id.startswith("tt"): + imdb_id = f"tt{imdb_id}" + url = "https://api.graphql.imdb.com/" + query = { + "query": f""" + query {{ + title(id: "{imdb_id}") {{ + id + titleText {{ + text + isOriginalTitle + }} + originalTitleText {{ + text + }} + countriesOfOrigin {{ + countries {{ + id + }} + }} + }} + }} + """ + } + + headers = { + "Content-Type": "application/json", + } + + response = requests.post(url, headers=headers, json=query) + data = response.json() + + # Check if `data` and `title` exist + title_data = data.get("data", {}).get("title") + if title_data is None: + console.print("Title data is missing from response") + return "", None + + # Extract relevant fields from the response + aka = (title_data.get("originalTitleText") or {}).get("text", "") + is_original = (title_data.get("titleText") or {}).get("isOriginalTitle", False) + if meta.get('manual_language'): + original_language = meta.get('manual_language') + else: + original_language = None + + if not is_original and aka: + aka = f" AKA {aka}" + + return aka, original_language async def get_imdb_aka(self, imdb_id): if imdb_id == "0": return "", None + if not imdb_id.startswith("tt"): + imdb_id = f"tt{imdb_id}" ia = Cinemagoer() result = ia.get_movie(imdb_id.replace('tt', '')) original_language = result.get('language codes') if isinstance(original_language, list): if len(original_language) > 1: @@ -2875,7 +4448,7 @@ async def get_imdb_aka(self, imdb_id): aka = f" AKA {aka}" return aka, original_language - async def get_dvd_size(self, discs): + async def get_dvd_size(self, discs, manual_dvds): sizes = [] dvd_sizes = [] for each in discs: @@ -2888,8 +4461,11 @@ async def get_dvd_size(self, discs): dvd_sizes.append(each[0]) dvd_sizes.sort() compact = " ".join(dvd_sizes) + + if manual_dvds: + compact = str(manual_dvds) + return compact - def get_tmdb_imdb_from_mediainfo(self, mediainfo, category, is_disc, tmdbid, imdbid): if not is_disc: @@ -2898,7 +4474,7 @@ def 
get_tmdb_imdb_from_mediainfo(self, mediainfo, category, is_disc, tmdbid, imd for each in extra: if each.lower().startswith('tmdb'): parser = Args(config=self.config) - category, tmdbid = parser.parse_tmdb_id(id = extra[each], category=category) + category, tmdbid = parser.parse_tmdb_id(id=extra[each], category=category) if each.lower().startswith('imdb'): try: imdbid = str(int(extra[each].replace('tt', ''))).zfill(7) @@ -2906,28 +4482,162 @@ def get_tmdb_imdb_from_mediainfo(self, mediainfo, category, is_disc, tmdbid, imd pass return category, tmdbid, imdbid - def daily_to_tmdb_season_episode(self, tmdbid, date): show = tmdb.TV(tmdbid) seasons = show.info().get('seasons') - season = '1' - episode = '1' + season = 1 + episode = 1 date = datetime.fromisoformat(str(date)) for each in seasons: air_date = datetime.fromisoformat(each['air_date']) if air_date <= date: - season = str(each['season_number']) + season = int(each['season_number']) season_info = tmdb.TV_Seasons(tmdbid, season).info().get('episodes') for each in season_info: - if str(each['air_date']) == str(date): - episode = str(each['episode_number']) + if str(each['air_date']) == str(date.date()): + episode = int(each['episode_number']) break else: console.print(f"[yellow]Unable to map the date ([bold yellow]{str(date)}[/bold yellow]) to a Season/Episode number") return season, episode + def safe_get(self, data, path, default=None): + for key in path: + if isinstance(data, dict): + data = data.get(key, default) + else: + return default + return data + + async def get_imdb_info_api(self, imdbID, meta): + imdb_info = { + 'title': meta['title'], + 'year': meta['year'], + 'aka': '', + 'type': None, + 'runtime': meta.get('runtime', '60'), + 'cover': meta.get('poster'), + } + if len(meta.get('tmdb_directors', [])) >= 1: + imdb_info['directors'] = meta['tmdb_directors'] + + if imdbID == "0": + return imdb_info + else: + try: + if not imdbID.startswith("tt"): + imdbIDtt = f"tt{imdbID}" + else: + imdbIDtt = imdbID + except Exception: + return imdb_info + query = { + "query": f""" + query GetTitleInfo {{ + title(id: "{imdbIDtt}") {{ + id + titleText {{ + text + isOriginalTitle + }} + originalTitleText {{ + text + }} + releaseYear {{ + year + }} + titleType {{ + id + }} + plot {{ + plotText {{ + plainText + }} + }} + ratingsSummary {{ + aggregateRating + voteCount + }} + primaryImage {{ + url + }} + runtime {{ + displayableProperty {{ + value {{ + plainText + }} + }} + seconds + }} + titleGenres {{ + genres {{ + genre {{ + text + }} + }} + }} + principalCredits {{ + category {{ + text + id + }} + credits {{ + name {{ + id + nameText {{ + text + }} + }} + }} + }} + }} + }} + """ + } + url = "https://api.graphql.imdb.com/" + headers = {"Content-Type": "application/json"} + + response = requests.post(url, json=query, headers=headers) + data = response.json() + + if response.status_code != 200: + return imdb_info + + title_data = self.safe_get(data, ["data", "title"], {}) + if not data or "data" not in data or "title" not in data["data"]: + return imdb_info + + imdb_info['imdbID'] = imdbID + imdb_info['title'] = self.safe_get(title_data, ['titleText', 'text'], meta['title']) + imdb_info['year'] = self.safe_get(title_data, ['releaseYear', 'year'], meta['year']) + original_title = self.safe_get(title_data, ['originalTitleText', 'text'], '') + imdb_info['aka'] = original_title if original_title and original_title != imdb_info['title'] else imdb_info['title'] + imdb_info['type'] = self.safe_get(title_data, ['titleType', 'id'], None) + runtime_seconds = 
self.safe_get(title_data, ['runtime', 'seconds'], 0)
+        imdb_info['runtime'] = str(runtime_seconds // 60 if runtime_seconds else 60)
+        imdb_info['cover'] = self.safe_get(title_data, ['primaryImage', 'url'], meta.get('poster', ''))
+        imdb_info['plot'] = self.safe_get(title_data, ['plot', 'plotText', 'plainText'], 'No plot available')
+        genres = self.safe_get(title_data, ['titleGenres', 'genres'], [])
+        genre_list = [self.safe_get(g, ['genre', 'text'], '') for g in genres]
+        imdb_info['genres'] = ', '.join(filter(None, genre_list))
+        imdb_info['rating'] = self.safe_get(title_data, ['ratingsSummary', 'aggregateRating'], 'N/A')
+        imdb_info['directors'] = []
+        principal_credits = self.safe_get(title_data, ['principalCredits'], [])
+        if isinstance(principal_credits, list):
+            for pc in principal_credits:
+                category_text = self.safe_get(pc, ['category', 'text'], '')
+                if 'Direct' in category_text:
+                    credits = self.safe_get(pc, ['credits'], [])
+                    for c in credits:
+                        name_id = self.safe_get(c, ['name', 'id'], '')
+                        if name_id.startswith('nm'):
+                            imdb_info['directors'].append(name_id)
+                    break
+        if meta.get('manual_language'):
+            imdb_info['original_language'] = meta.get('manual_language')
+        return imdb_info

     async def get_imdb_info(self, imdbID, meta):
         imdb_info = {}
@@ -2943,6 +4653,7 @@ async def get_imdb_info(self, imdbID, meta):
         imdb_info['cover'] = info.get('full-size cover url', '').replace(".jpg", "._V1_FMjpg_UX750_.jpg")
         imdb_info['plot'] = info.get('plot', [''])[0]
         imdb_info['genres'] = ', '.join(info.get('genres', ''))
+        imdb_info['rating'] = info.get('rating', 'N/A')
         imdb_info['original_language'] = info.get('language codes')
         if isinstance(imdb_info['original_language'], list):
             if len(imdb_info['original_language']) > 1:
@@ -2957,18 +4668,16 @@ async def get_imdb_info(self, imdbID, meta):
                     imdb_info['directors'].append(f"nm{director.getID()}")
         else:
             imdb_info = {
-                'title' : meta['title'],
-                'year' : meta['year'],
-                'aka' : '',
-                'type' : None,
-                'runtime' : meta.get('runtime', '60'),
-                'cover' : meta.get('poster'),
+                'title': meta['title'],
+                'year': meta['year'],
+                'aka': '',
+                'type': None,
+                'runtime': meta.get('runtime', '60'),
+                'cover': meta.get('poster'),
             }
             if len(meta.get('tmdb_directors', [])) >= 1:
                 imdb_info['directors'] = meta['tmdb_directors']
-
         return imdb_info
-
     async def search_imdb(self, filename, search_year):
         imdbID = '0'
@@ -2980,15 +4689,15 @@ async def search_imdb(self, filename, search_year):
                 imdbID = str(movie.movieID).replace('tt', '')
         return imdbID
-
     async def imdb_other_meta(self, meta):
-        imdb_info = meta['imdb_info'] = await self.get_imdb_info(meta['imdb_id'], meta)
+        imdb_info = meta['imdb_info'] = await self.get_imdb_info_api(meta['imdb_id'], meta)
         meta['title'] = imdb_info['title']
         meta['year'] = imdb_info['year']
         meta['aka'] = imdb_info['aka']
         meta['poster'] = imdb_info['cover']
         meta['original_language'] = imdb_info['original_language']
         meta['overview'] = imdb_info['plot']
+        meta['imdb_rating'] = imdb_info['rating']
         difference = SequenceMatcher(None, meta['title'].lower(), meta['aka'][5:].lower()).ratio()
         if difference >= 0.9 or meta['aka'][5:].strip() == "" or meta['aka'][5:].strip().lower() in meta['title'].lower():
@@ -2997,54 +4706,115 @@ async def imdb_other_meta(self, meta):
             meta['aka'] = meta['aka'].replace(f"({meta['year']})", "").strip()
         return meta

-    async def search_tvmaze(self, filename, year, imdbID, tvdbID):
-        tvdbID = int(tvdbID)
-        tvmazeID = 0
-        lookup = False
-        show = None
-        if imdbID == None:
-            imdbID = '0'
-        if tvdbID == None:
+    async def 
search_tvmaze(self, filename, year, imdbID, tvdbID, meta): + try: + tvdbID = int(tvdbID) if tvdbID is not None else 0 + except ValueError: + print(f"Error: tvdbID is not a valid integer. Received: {tvdbID}") tvdbID = 0 - if int(tvdbID) != 0: - params = { - "thetvdb" : tvdbID - } - url = "https://api.tvmaze.com/lookup/shows" - lookup = True - elif int(imdbID) != 0: - params = { - "imdb" : f"tt{imdbID}" - } - url = "https://api.tvmaze.com/lookup/shows" - lookup = True + + if meta.get('tvmaze_manual'): + tvmazeID = int(meta['tvmaze_manual']) + return tvmazeID, imdbID, tvdbID else: - params = { - "q" : filename - } - url = f"https://api.tvmaze.com/search/shows" - resp = requests.get(url=url, params=params) - if resp.ok: - resp = resp.json() - if resp == None: + tvmazeID = 0 + results = [] + + if imdbID is None: + imdbID = '0' + + if meta['manual_date'] is None: + if int(tvdbID) != 0: + tvdb_resp = self._make_tvmaze_request("https://api.tvmaze.com/lookup/shows", {"thetvdb": tvdbID}, meta) + if tvdb_resp: + results.append(tvdb_resp) + else: + if int(imdbID) != 0: + imdb_resp = self._make_tvmaze_request("https://api.tvmaze.com/lookup/shows", {"imdb": f"tt{imdbID}"}, meta) + if imdb_resp: + results.append(imdb_resp) + else: + search_resp = self._make_tvmaze_request("https://api.tvmaze.com/search/shows", {"q": filename}, meta) + if search_resp: + if isinstance(search_resp, list): + results.extend([each['show'] for each in search_resp if 'show' in each]) + else: + results.append(search_resp) + else: + if int(tvdbID) != 0: + tvdb_resp = self._make_tvmaze_request("https://api.tvmaze.com/lookup/shows", {"thetvdb": tvdbID}, meta) + if tvdb_resp: + results.append(tvdb_resp) + if int(imdbID) != 0: + imdb_resp = self._make_tvmaze_request("https://api.tvmaze.com/lookup/shows", {"imdb": f"tt{imdbID}"}, meta) + if imdb_resp: + results.append(imdb_resp) + search_resp = self._make_tvmaze_request("https://api.tvmaze.com/search/shows", {"q": filename}, meta) + if search_resp: + if isinstance(search_resp, list): + results.extend([each['show'] for each in search_resp if 'show' in each]) + else: + results.append(search_resp) + + if year not in (None, ''): + results = [show for show in results if str(show.get('premiered', '')).startswith(str(year))] + + seen = set() + unique_results = [] + for show in results: + if show['id'] not in seen: + seen.add(show['id']) + unique_results.append(show) + results = unique_results + + if not results: + if meta['debug']: + print("No results found.") return tvmazeID, imdbID, tvdbID - if lookup == True: - show = resp + + if meta['manual_date'] is not None: + print("Search results:") + for idx, show in enumerate(results): + console.print(f"[bold red]{idx + 1}[/bold red]. [green]{show.get('name', 'Unknown')} (TVmaze ID:[/green] [bold red]{show['id']}[/bold red])") + console.print(f"[yellow] Premiered: {show.get('premiered', 'Unknown')}[/yellow]") + console.print(f" Externals: {json.dumps(show.get('externals', {}), indent=2)}") + + while True: + try: + choice = int(input(f"Enter the number of the correct show (1-{len(results)}) or 0 to skip: ")) + if choice == 0: + print("Skipping selection.") + break + if 1 <= choice <= len(results): + selected_show = results[choice - 1] + tvmazeID = selected_show['id'] + print(f"Selected show: {selected_show.get('name')} (TVmaze ID: {tvmazeID})") + break + else: + print(f"Invalid choice. Please choose a number between 1 and {len(results)}, or 0 to skip.") + except ValueError: + print("Invalid input. 
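# --- Illustrative sketch, not part of the patch: how get_imdb_info_api() earlier in
# this hunk walks the nested GraphQL payload with safe_get(). The endpoint is the one
# named in the diff; the sample payload and fetch_title() helper are invented here.
import requests

def safe_get(data, path, default=None):
    # Walk a chain of dict keys, returning `default` as soon as a level is missing.
    for key in path:
        if isinstance(data, dict):
            data = data.get(key, default)
        else:
            return default
    return data

def fetch_title(imdb_tt):
    query = {"query": f'query {{ title(id: "{imdb_tt}") {{ titleText {{ text }} runtime {{ seconds }} }} }}'}
    resp = requests.post("https://api.graphql.imdb.com/", json=query, headers={"Content-Type": "application/json"})
    # Guarding before .json() keeps a non-JSON error body from raising mid-lookup.
    return resp.json() if resp.ok else {}

sample = {"data": {"title": {"titleText": {"text": "Example"}, "runtime": {"seconds": 8820}}}}
title_data = safe_get(sample, ["data", "title"], {})
runtime_seconds = safe_get(title_data, ["runtime", "seconds"], 0)
print(safe_get(title_data, ["titleText", "text"], "Unknown"))   # -> Example
print(runtime_seconds // 60 if runtime_seconds else 60)         # -> 147 (minutes)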
Please enter a number.") + else: + selected_show = results[0] + tvmazeID = selected_show['id'] + if meta['debug']: + print(f"Automatically selected show: {selected_show.get('name')} (TVmaze ID: {tvmazeID})") + + if meta['debug']: + print(f"Returning results - TVmaze ID: {tvmazeID}, IMDb ID: {imdbID}, TVDB ID: {tvdbID}") + return tvmazeID, imdbID, tvdbID + + def _make_tvmaze_request(self, url, params, meta): + if meta['debug']: + print(f"Requesting TVmaze API: {url} with params: {params}") + try: + resp = requests.get(url, params=params) + if resp.ok: + return resp.json() else: - if year not in (None, ''): - for each in resp: - premier_date = each['show'].get('premiered', '') - if premier_date != None: - if premier_date.startswith(str(year)): - show = each['show'] - elif len(resp) >= 1: - show = resp[0]['show'] - if show != None: - tvmazeID = show.get('id') - if int(imdbID) == 0: - if show.get('externals', {}).get('imdb', '0') != None: - imdbID = str(show.get('externals', {}).get('imdb', '0')).replace('tt', '') - if int(tvdbID) == 0: - if show.get('externals', {}).get('tvdb', '0') != None: - tvdbID = show.get('externals', {}).get('tvdb', '0') - return tvmazeID, imdbID, tvdbID + if meta['debug']: + print(f"HTTP Request failed with status code: {resp.status_code}, response: {resp.text}") + return None + except Exception as e: + print(f"Error making TVmaze request: {e}") + return None diff --git a/src/search.py b/src/search.py index 8e782ee7e..d658e175c 100644 --- a/src/search.py +++ b/src/search.py @@ -1,8 +1,8 @@ import platform -import asyncio import os from src.console import console + class Search(): """ Logic for searching @@ -11,16 +11,16 @@ def __init__(self, config): self.config = config pass - async def searchFile(self, filename): - os_info = platform.platform() + os_info = platform.platform() # noqa F841 filename = filename.lower() files_total = [] if filename == "": console.print("nothing entered") return - file_found = False + file_found = False # noqa F841 words = filename.split() + async def search_file(search_dir): files_total_search = [] console.print(f"Searching {search_dir}") @@ -30,11 +30,11 @@ async def search_file(search_dir): l_name = name.lower() os_info = platform.platform() if await self.file_search(l_name, words): - file_found = True - if('Windows' in os_info): - files_total_search.append(root+'\\'+name) + file_found = True # noqa F841 + if ('Windows' in os_info): + files_total_search.append(root + '\\' + name) else: - files_total_search.append(root+'/'+name) + files_total_search.append(root + '/' + name) return files_total_search config_dir = self.config['DISCORD']['search_dir'] if isinstance(config_dir, list): @@ -46,14 +46,15 @@ async def search_file(search_dir): return files_total async def searchFolder(self, foldername): - os_info = platform.platform() + os_info = platform.platform() # noqa F841 foldername = foldername.lower() folders_total = [] if foldername == "": console.print("nothing entered") return - folders_found = False + folders_found = False # noqa F841 words = foldername.split() + async def search_dir(search_dir): console.print(f"Searching {search_dir}") folders_total_search = [] @@ -65,28 +66,29 @@ async def search_dir(search_dir): os_info = platform.platform() if await self.file_search(l_name, words): - folder_found = True - if('Windows' in os_info): - folders_total_search.append(root+'\\'+name) + folder_found = True # noqa F841 + if ('Windows' in os_info): + folders_total_search.append(root + '\\' + name) else: - 
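# --- Minimal sketch of the TVmaze calls behind search_tvmaze()/_make_tvmaze_request()
# above. Both endpoints are the public, keyless ones named in the diff; the
# premiere-year filter and ID dedup mirror the logic in the patch.
import requests

def tvmaze_lookup(thetvdb_id):
    # /lookup/shows returns a single show object (or a 404) for an external ID.
    resp = requests.get("https://api.tvmaze.com/lookup/shows", params={"thetvdb": thetvdb_id})
    return resp.json() if resp.ok else None

def filter_and_dedup(shows, year):
    # Keep shows whose 'premiered' date starts with the wanted year,
    # then drop duplicate TVmaze IDs while preserving order.
    shows = [s for s in shows if str(s.get("premiered", "")).startswith(str(year))]
    seen, unique = set(), []
    for s in shows:
        if s["id"] not in seen:
            seen.add(s["id"])
            unique.append(s)
    return unique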
folders_total_search.append(root+'/'+name) - + folders_total_search.append(root + '/' + name) + return folders_total_search config_dir = self.config['DISCORD']['search_dir'] if isinstance(config_dir, list): for each in config_dir: folders = await search_dir(each) - + folders_total = folders_total + folders else: folders_total = await search_dir(config_dir) return folders_total return folders_total + async def file_search(self, name, name_words): check = True for word in name_words: if word not in name: check = False break - return check \ No newline at end of file + return check diff --git a/src/trackers/ACM.py b/src/trackers/ACM.py index 270fd25b3..76fd3b9f4 100644 --- a/src/trackers/ACM.py +++ b/src/trackers/ACM.py @@ -2,12 +2,12 @@ # import discord import asyncio import requests -import distutils.util import os import platform +from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console - +import bencodepy class ACM(): @@ -19,30 +19,24 @@ class ACM(): Upload """ - ############################################################### - ######## EDIT ME ######## - ############################################################### - - # ALSO EDIT CLASS NAME ABOVE - def __init__(self, config): self.config = config self.tracker = 'ACM' self.source_flag = 'AsianCinema' - self.upload_url = 'https://asiancinema.me/api/torrents/upload' - self.search_url = 'https://asiancinema.me/api/torrents/filter' + self.upload_url = 'https://eiga.moi/api/torrents/upload' + self.search_url = 'https://eiga.moi/api/torrents/filter' self.signature = None self.banned_groups = [""] pass - + async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '0') return category_id - async def get_type (self, meta): + async def get_type(self, meta): if meta['is_disc'] == "BDMV": bdinfo = meta['bdinfo'] bd_sizes = [25, 50, 66, 100] @@ -60,7 +54,7 @@ async def get_type (self, meta): if "DVD5" in meta['dvd_size']: type_string = "DVD 5" elif "DVD9" in meta['dvd_size']: - type_string = "DVD 9" + type_string = "DVD 9" else: if meta['type'] == "REMUX": if meta['source'] == "BluRay": @@ -73,91 +67,91 @@ async def get_type (self, meta): # acceptable_res = ["2160p", "1080p", "1080i", "720p", "576p", "576i", "540p", "480p", "Other"] # if meta['resolution'] in acceptable_res: # type_id = meta['resolution'] - # else: + # else: # type_id = "Other" return type_string async def get_type_id(self, type): type_id = { - 'UHD 100': '1', + 'UHD 100': '1', 'UHD 66': '2', 'UHD 50': '3', 'UHD REMUX': '12', 'BD 50': '4', - 'BD 25': '5', + 'BD 25': '5', 'DVD 5': '14', 'REMUX': '7', 'WEBDL': '9', 'SDTV': '13', 'DVD 9': '16', 'HDTV': '17' - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { - '2160p': '1', + '2160p': '1', '1080p': '2', - '1080i':'2', - '720p': '3', - '576p': '4', + '1080i': '2', + '720p': '3', + '576p': '4', '576i': '4', - '480p': '5', + '480p': '5', '480i': '5' - }.get(resolution, '10') - return resolution_id + }.get(resolution, '10') + return resolution_id - #ACM rejects uploads with more that 4 keywords + # ACM rejects uploads with more that 4 keywords async def get_keywords(self, keywords): - if keywords !='': - keywords_list = keywords.split(',') + if keywords != '': + keywords_list = keywords.split(',') keywords_list = [keyword for keyword in keywords_list if " " not in keyword][:4] - keywords = ', '.join( keywords_list) + keywords = 
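# --- Sketch of the distutils.util.strtobool -> str2bool migration applied across the
# tracker modules above; distutils is gone in Python 3.12, which the new Dockerfile
# targets. The config dict below is a stand-in for the real TRACKERS config.
from str2bool import str2bool

config = {'TRACKERS': {'ACM': {'anon': 'False'}}}
anon_cfg = bool(str2bool(str(config['TRACKERS']['ACM'].get('anon', 'False'))))
anon = 0 if anon_cfg is False else 1
print(anon)  # -> 0, same result the old strtobool() path produced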
', '.join(keywords_list) return keywords def get_subtitles(self, meta): sub_lang_map = { - ("Arabic", "ara", "ar") : 'Ara', - ("Brazilian Portuguese", "Brazilian", "Portuguese-BR", 'pt-br') : 'Por-BR', - ("Bulgarian", "bul", "bg") : 'Bul', - ("Chinese", "chi", "zh", "Chinese (Simplified)", "Chinese (Traditional)") : 'Chi', - ("Croatian", "hrv", "hr", "scr") : 'Cro', - ("Czech", "cze", "cz", "cs") : 'Cze', - ("Danish", "dan", "da") : 'Dan', - ("Dutch", "dut", "nl") : 'Dut', - ("English", "eng", "en", "English (CC)", "English - SDH") : 'Eng', - ("English - Forced", "English (Forced)", "en (Forced)") : 'Eng', - ("English Intertitles", "English (Intertitles)", "English - Intertitles", "en (Intertitles)") : 'Eng', - ("Estonian", "est", "et") : 'Est', - ("Finnish", "fin", "fi") : 'Fin', - ("French", "fre", "fr") : 'Fre', - ("German", "ger", "de") : 'Ger', - ("Greek", "gre", "el") : 'Gre', - ("Hebrew", "heb", "he") : 'Heb', - ("Hindi" "hin", "hi") : 'Hin', - ("Hungarian", "hun", "hu") : 'Hun', - ("Icelandic", "ice", "is") : 'Ice', - ("Indonesian", "ind", "id") : 'Ind', - ("Italian", "ita", "it") : 'Ita', - ("Japanese", "jpn", "ja") : 'Jpn', - ("Korean", "kor", "ko") : 'Kor', - ("Latvian", "lav", "lv") : 'Lav', - ("Lithuanian", "lit", "lt") : 'Lit', - ("Norwegian", "nor", "no") : 'Nor', - ("Persian", "fa", "far") : 'Per', - ("Polish", "pol", "pl") : 'Pol', - ("Portuguese", "por", "pt") : 'Por', - ("Romanian", "rum", "ro") : 'Rom', - ("Russian", "rus", "ru") : 'Rus', - ("Serbian", "srp", "sr", "scc") : 'Ser', - ("Slovak", "slo", "sk") : 'Slo', - ("Slovenian", "slv", "sl") : 'Slv', - ("Spanish", "spa", "es") : 'Spa', - ("Swedish", "swe", "sv") : 'Swe', - ("Thai", "tha", "th") : 'Tha', - ("Turkish", "tur", "tr") : 'Tur', - ("Ukrainian", "ukr", "uk") : 'Ukr', - ("Vietnamese", "vie", "vi") : 'Vie', + ("Arabic", "ara", "ar"): 'Ara', + ("Brazilian Portuguese", "Brazilian", "Portuguese-BR", 'pt-br'): 'Por-BR', + ("Bulgarian", "bul", "bg"): 'Bul', + ("Chinese", "chi", "zh", "Chinese (Simplified)", "Chinese (Traditional)"): 'Chi', + ("Croatian", "hrv", "hr", "scr"): 'Cro', + ("Czech", "cze", "cz", "cs"): 'Cze', + ("Danish", "dan", "da"): 'Dan', + ("Dutch", "dut", "nl"): 'Dut', + ("English", "eng", "en", "English (CC)", "English - SDH"): 'Eng', + ("English - Forced", "English (Forced)", "en (Forced)"): 'Eng', + ("English Intertitles", "English (Intertitles)", "English - Intertitles", "en (Intertitles)"): 'Eng', + ("Estonian", "est", "et"): 'Est', + ("Finnish", "fin", "fi"): 'Fin', + ("French", "fre", "fr"): 'Fre', + ("German", "ger", "de"): 'Ger', + ("Greek", "gre", "el"): 'Gre', + ("Hebrew", "heb", "he"): 'Heb', + ("Hindi" "hin", "hi"): 'Hin', + ("Hungarian", "hun", "hu"): 'Hun', + ("Icelandic", "ice", "is"): 'Ice', + ("Indonesian", "ind", "id"): 'Ind', + ("Italian", "ita", "it"): 'Ita', + ("Japanese", "jpn", "ja"): 'Jpn', + ("Korean", "kor", "ko"): 'Kor', + ("Latvian", "lav", "lv"): 'Lav', + ("Lithuanian", "lit", "lt"): 'Lit', + ("Norwegian", "nor", "no"): 'Nor', + ("Persian", "fa", "far"): 'Per', + ("Polish", "pol", "pl"): 'Pol', + ("Portuguese", "por", "pt"): 'Por', + ("Romanian", "rum", "ro"): 'Rom', + ("Russian", "rus", "ru"): 'Rus', + ("Serbian", "srp", "sr", "scc"): 'Ser', + ("Slovak", "slo", "sk"): 'Slo', + ("Slovenian", "slv", "sl"): 'Slv', + ("Spanish", "spa", "es"): 'Spa', + ("Swedish", "swe", "sv"): 'Swe', + ("Thai", "tha", "th"): 'Tha', + ("Turkish", "tur", "tr"): 'Tur', + ("Ukrainian", "ukr", "uk"): 'Ukr', + ("Vietnamese", "vie", "vi"): 'Vie', } sub_langs = [] @@ -179,12 +173,12 @@ def 
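# --- Behavior sketch for get_keywords() above: multi-word keywords are dropped and at
# most four single-word keywords survive, since ACM rejects uploads with more than
# four. The sample keyword string is invented.
keywords = "action,hong kong cinema,thriller,crime,heist,noir"
keywords_list = [k for k in keywords.split(',') if " " not in k][:4]
print(', '.join(keywords_list))  # -> action, thriller, crime, heist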
get_subtitles(self, meta): for lang, subID in sub_lang_map.items(): if language in lang and subID not in sub_langs: sub_langs.append(subID) - - # if sub_langs == []: + + # if sub_langs == []: # sub_langs = [44] # No Subtitle return sub_langs - def get_subs_tag(self, subs): + def get_subs_tag(self, subs): if subs == []: return ' [No subs]' elif 'Eng' in subs: @@ -193,11 +187,7 @@ def get_subs_tag(self, subs): return ' [No Eng subs]' return f" [{subs[0]} subs only]" - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### - - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta['category']) @@ -207,48 +197,48 @@ async def upload(self, meta): region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) acm_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: # bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() mi_dump = None bd_dump = "" for each in meta['discs']: bd_dump = bd_dump + each['summary'].strip() + "\n\n" - else: + else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : acm_name, - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : await self.get_keywords(meta['keywords']), - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': acm_name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': await self.get_keywords(meta['keywords']), + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in 
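# --- Lookup sketch for the sub_lang_map table above. One pitfall carried over from
# the table as written: Python concatenates adjacent string literals, so the key
# written as ("Hindi" "hin", "hi") is really ("Hindihin", "hi") and a plain "Hindi"
# track label falls through to no match.
sub_lang_map = {
    ("English", "eng", "en"): 'Eng',
    ("Hindi" "hin", "hi"): 'Hin',   # == ("Hindihin", "hi")
}

def to_sub_id(language):
    for lang_tuple, sub_id in sub_lang_map.items():
        if language in lang_tuple:
            return sub_id
    return None

print(to_sub_id("eng"))    # -> Eng
print(to_sub_id("hi"))     # -> Hin
print(to_sub_id("Hindi"))  # -> None, because of the missing comma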
self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 if region_id != 0: @@ -259,36 +249,32 @@ async def upload(self, meta): data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - - if meta['debug'] == False: + + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on ACM...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdb' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(await self.get_type(meta)), + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdb': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(await self.get_type(meta)), # A majority of the ACM library doesn't contain resolution information # 'resolutions[]' : await self.get_res_id(meta['resolution']), # 'name' : "" @@ -302,7 +288,7 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') await asyncio.sleep(5) @@ -319,7 +305,7 @@ async def edit_name(self, meta): name = meta.get('name') aka = meta.get('aka') original_title = meta.get('original_title') - year = str(meta.get('year')) + year = str(meta.get('year')) # noqa F841 audio = meta.get('audio') source = meta.get('source') is_disc = meta.get('is_disc') @@ -328,7 +314,7 @@ async def edit_name(self, meta): if aka != '': # ugly fix to remove the extra space in the title aka = aka + ' ' - name = name.replace (aka, f' / {original_title} {chr(int("202A", 16))}') + name = name.replace(aka, f' / {original_title} {chr(int("202A", 16))}') elif aka == '': if meta.get('title') != original_title: # name = f'{name[:name.find(year)]}/ {original_title} {chr(int("202A", 16))}{name[name.find(year):]}' @@ -336,23 +322,21 @@ async def edit_name(self, meta): if 'AAC' in audio: name = name.replace(audio.strip().replace(" ", " "), audio.replace("AAC ", "AAC")) name = name.replace("DD+ ", "DD+") - name = name.replace ("UHD BluRay REMUX", "Remux") - name = name.replace ("BluRay REMUX", "Remux") - name = name.replace ("H.265", "HEVC") + name = name.replace("UHD BluRay REMUX", "Remux") + name = name.replace("BluRay REMUX", "Remux") + name = name.replace("H.265", "HEVC") if is_disc == 'DVD': - name = name.replace (f'{source} DVD5', f'{resolution} DVD {source}') - name = name.replace (f'{source} DVD9', f'{resolution} DVD {source}') + name = name.replace(f'{source} DVD5', f'{resolution} DVD {source}') + name = name.replace(f'{source} DVD9', f'{resolution} DVD {source}') if audio == meta.get('channels'): - name = name.replace (f'{audio}', f'MPEG {audio}') + name = name.replace(f'{audio}', f'MPEG {audio}') name = name + self.get_subs_tag(subs) return name - - async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as descfile: + base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as descfile: from src.bbcode import BBCODE # Add This line for all web-dls if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '': @@ -380,14 +364,53 @@ async def edit_desc(self, meta): desc = desc.replace('[img]', '[img=300]') descfile.write(desc) images = meta['image_list'] - if len(images) > 0: + if len(images) > 0: descfile.write("[center]") for each in range(len(images[:int(meta['screens'])])): web_url = images[each]['web_url'] img_url = images[each]['img_url'] descfile.write(f"[url={web_url}][img=350]{img_url}[/img][/url]") descfile.write("[/center]") - if self.signature != None: + if self.signature is not None: descfile.write(self.signature) descfile.close() return + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with 
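# --- Sketch of the comment-injection step search_torrent_page() above performs after a
# successful upload: decode the .torrent, point its top-level 'comment' field at the
# tracker details link, and re-encode. bencodepy round-trips keys as bytes. The path
# and URL below are placeholders.
import bencodepy

def set_torrent_comment(torrent_path, details_link):
    with open(torrent_path, 'rb') as f:
        torrent = bencodepy.decode(f.read())
    torrent[b'comment'] = details_link.encode('utf-8')
    with open(torrent_path, 'wb') as f:
        f.write(bencodepy.encode(torrent))

# set_torrent_comment("/tmp/example.torrent", "https://example-tracker/torrents/123")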
open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 89777724a..2ab28bbee 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -2,15 +2,17 @@ # import discord import asyncio import requests -from difflib import SequenceMatcher -import distutils.util -import json -import os +from str2bool import str2bool import platform +import re +import bencodepy +import os +import glob from src.trackers.COMMON import COMMON from src.console import console + class AITHER(): """ Edit for Tracker: @@ -25,165 +27,220 @@ def __init__(self, config): self.source_flag = 'Aither' self.search_url = 'https://aither.cc/api/torrents/filter' self.upload_url = 'https://aither.cc/api/torrents/upload' - self.signature = f"\n[center][url=https://aither.cc/forums/topics/1349]Created by L4G's Upload Assistant[/url][/center]" - self.banned_groups = ['4K4U', 'AROMA', 'EMBER', 'FGT', 'Hi10', 'ION10', 'Judas', 'LAMA', 'MeGusta', 'QxR', 'RARBG', 'SPDVD', 'STUTTERSHIT', 'SWTYBLZ', 'Sicario', 'TAoE', 'TGx', 'TSP', 'TSPxL', 'Tigole', 'Weasley[HONE]', 'Will1869', 'YIFY', 'd3g', 'nikt0', 'x0r'] + self.torrent_url = 'https://aither.cc/api/torrents/' + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" + self.banned_groups = [ + '4K4U', 'afm72', 'AROMA', 'Bandi', 'BiTOR', 'Bluespots', 'Chivaman', 'd3g', 'edge2020', 'EMBER', 'EVO', 'FGT', 'FreetheFish', 'Garshasp', 'Ghost', 'Grym', 'Hi10', 'HiQVE', 'ImE', 'ION10', + 'iVy', 'Judas', 'LAMA', 'Langbard', 'LION', 'MeGusta', 'MONOLITH', 'Natty', 'nikt0', 'noxxus', 'OEPlus', 'OFT', 'OsC', 'Panda', 'PYC', 'QxR', 'r00t', 'Ralphy', 'RARBG', 'RCVR', 'RetroPeeps', + 'RZeroX', 'SAMPA', 'Sicario', 'Silence', 'SkipTT', 'SM737', 'SPDVD', 'STUTTERSHIT', 'SWTYBLZ', 't3nzin', 'TAoE', 'Telly', 'TGx', 'Tigole', 'TSP', 'TSPxL', 'VXT', 'Vyndros', 'Weasley[HONE]', + 'Will1869', 'x0r', 'YIFY'] pass - - async def upload(self, meta): + + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True) cat_id = await self.get_cat_id(meta['category']) type_id = await self.get_type_id(meta['type']) resolution_id = await self.get_res_id(meta['resolution']) + modq = await self.get_flag(meta, 'modq') name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + region_id = await common.unit3d_region_ids(meta.get('region')) + distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() 
else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") data = { - 'name' : name, - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, + 'mod_queue_opt_in': modq, } headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - + if region_id != 0: + data['region_id'] = region_id + if distributor_id != 0: + data['distributor_id'] = distributor_id if meta.get('category') == "TV": data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() + async def get_flag(self, meta, flag_name): + config_flag = self.config['TRACKERS'][self.tracker].get(flag_name) + if config_flag is not None: + return 1 if config_flag else 0 + return 1 if meta.get(flag_name, False) else 0 async def edit_name(self, meta): aither_name = meta['name'] - has_eng_audio = False - if meta['is_disc'] != "BDMV": - with 
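# --- Sketch of the optional-NFO attachment pattern the upload() methods above now
# share: the first *.nfo found under tmp/<uuid>/ is added to the multipart form.
# The directory layout matches the diff; everything else is illustrative.
import glob
import os

def attach_nfo(files, base_dir, uuid):
    nfo_files = glob.glob(os.path.join(base_dir, "tmp", uuid, "*.nfo"))
    if nfo_files:
        # requests accepts a (filename, fileobj, content_type) tuple per form field.
        files['nfo'] = ("nfo_file.nfo", open(nfo_files[0], 'rb'), "text/plain")
    return files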
open(f"{meta.get('base_dir')}/tmp/{meta.get('uuid')}/MediaInfo.json", 'r', encoding='utf-8') as f: - mi = json.load(f) - - for track in mi['media']['track']: - if track['@type'] == "Audio": - if track.get('Language', 'None').startswith('en'): - has_eng_audio = True - if not has_eng_audio: - audio_lang = mi['media']['track'][2].get('Language_String', "").upper() - if audio_lang != "": - aither_name = aither_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) - else: - for audio in meta['bdinfo']['audio']: - if audio['language'] == 'English': - has_eng_audio = True - if not has_eng_audio: - audio_lang = meta['bdinfo']['audio'][0]['language'].upper() - if audio_lang != "": - aither_name = aither_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) + media_info_tracks = meta.get('media_info_tracks', []) # noqa #F841 + resolution = meta.get('resolution') + video_codec = meta.get('video_codec') + video_encode = meta.get('video_encode') + name_type = meta.get('type', "") + source = meta.get('source', "") + + if name_type == "DVDRIP": + if meta.get('category') == "MOVIE": + aither_name = aither_name.replace(f"{meta['source']}{meta['video_encode']}", f"{resolution}", 1) + aither_name = aither_name.replace((meta['audio']), f"{meta['audio']} {video_encode}", 1) + else: + aither_name = aither_name.replace(f"{meta['source']}", f"{resolution}", 1) + aither_name = aither_name.replace(f"{meta['video_codec']}", f"{meta['audio']} {meta['video_codec']}", 1) + + if not meta['is_disc']: + + def has_english_audio(tracks=None, media_info_text=None): + if media_info_text: + audio_section = re.findall(r'Audio[\s\S]+?Language\s+:\s+(\w+)', media_info_text) + for i, language in enumerate(audio_section): + language = language.lower().strip() + if language.lower().startswith('en'): # Check if it's English + return True + return False + + def get_audio_lang(tracks=None, is_bdmv=False, media_info_text=None): + if media_info_text: + match = re.search(r'Audio[\s\S]+?Language\s+:\s+(\w+)', media_info_text) + if match: + return match.group(1).upper() + return "" + + try: + media_info_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt" + with open(media_info_path, 'r', encoding='utf-8') as f: + media_info_text = f.read() + + if not has_english_audio(media_info_text=media_info_text): + audio_lang = get_audio_lang(media_info_text=media_info_text) + if audio_lang: + if (name_type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD")): + aither_name = aither_name.replace(str(meta['year']), f"{meta['year']} {audio_lang}", 1) + else: + aither_name = aither_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) + except (FileNotFoundError, KeyError) as e: + print(f"Error processing MEDIAINFO.txt: {e}") + + if meta['is_disc'] == "DVD" or (name_type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD")): + aither_name = aither_name.replace((meta['source']), f"{resolution} {meta['source']}", 1) + aither_name = aither_name.replace((meta['audio']), f"{video_codec} {meta['audio']}", 1) + if meta['category'] == "TV" and meta.get('tv_pack', 0) == 0 and meta.get('episode_title_storage', '').strip() != '' and meta['episode'].strip() != '': aither_name = aither_name.replace(meta['episode'], f"{meta['episode']} {meta['episode_title_storage']}", 1) + return aither_name async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '0') return category_id async def 
get_type_id(self, type): type_id = { - 'DISC': '1', + 'DISC': '1', 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', + 'WEBDL': '4', + 'WEBRIP': '5', 'HDTV': '6', - 'ENCODE': '3' - }.get(type, '0') + 'ENCODE': '3', + 'DVDRIP': '3' + }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', - '4320p': '1', - '2160p': '2', - '1440p' : '3', + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', '1080p': '3', - '1080i':'4', - '720p': '5', - '576p': '6', + '1080i': '4', + '720p': '5', + '576p': '6', '576i': '7', - '480p': '8', + '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id - - - - - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on Aither...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta['category'] == 'TV': params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" if meta.get('edition', "") != "": params['name'] = params['name'] + f" {meta['edition']}" - + try: response = requests.get(url=self.search_url, params=params) response = response.json() @@ -192,8 +249,47 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes \ No newline at end of file + return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/AL.py b/src/trackers/AL.py new file mode 100644 index 000000000..eea9eeded --- /dev/null +++ b/src/trackers/AL.py @@ -0,0 +1,232 @@ +# -*- coding: utf-8 -*- +# import discord +import asyncio +import requests +import platform +from str2bool import str2bool +import bencodepy +import os +import glob + +from src.trackers.COMMON import COMMON +from src.console import console + + +class AL(): + """ + Edit for Tracker: + Edit BASE.torrent with announce and source + Check for duplicates + Set type/category IDs + Upload + """ + + def __init__(self, config): + self.config = config + self.tracker = 'AL' + self.source_flag = 'AnimeLovers' + self.upload_url = 'https://animelovers.club/api/torrents/upload' + self.search_url = 'https://animelovers.club/api/torrents/filter' + self.signature = None + self.banned_groups = [""] + pass + + async def get_cat_id(self, category_name): + category_id = { + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '1') + return category_id + + async def get_type_id(self, type): + type_id = { + 'BDMV': '1', + 'DISC': '1', + 'REMUX': '2', + 'ENCODE': '3', + 'WEBDL': '4', + 'WEBRIP': '5', + 'HDTV': '6', + 'DVDISO': '7', + 'DVDRIP': '8', + 'RAW': '9', + 'BDRIP': '10', + 'COLOR': '11', + 'MONO': '12' + }.get(type, '1') + return type_id + + async def get_res_id(self, resolution): + resolution_id = { + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', + '1080p': '3', + '1080i': '4', + '720p': '5', + '576p': '6', + '576i': '7', + '480p': '8', + '480i': '9' + }.get(resolution, '10') + return resolution_id + + async def upload(self, meta, disctype): + common = COMMON(config=self.config) + await common.edit_torrent(meta, self.tracker, self.source_flag) + await common.unit3d_edit_desc(meta, self.tracker, self.signature) + cat_id = await self.get_cat_id(meta['category']) + type_id = await self.get_type_id(meta['type']) + resolution_id = await self.get_res_id(meta['resolution']) + region_id = await common.unit3d_region_ids(meta.get('region')) + distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + name = await self.edit_name(meta) + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', 
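# --- Generic sketch of the /api/torrents/filter dupe check the UNIT3D trackers in
# this patch share. Endpoint shape and parameter names come from the diff; the base
# URL and token are placeholders.
import requests

def search_existing(base_url, api_token, tmdb_id, cat_id, type_id, res_id):
    params = {
        'api_token': api_token,
        'tmdbId': tmdb_id,
        'categories[]': cat_id,
        'types[]': type_id,
        'resolutions[]': res_id,
        'name': "",
    }
    resp = requests.get(f"{base_url}/api/torrents/filter", params=params)
    # Each hit's display name is what gets surfaced to the user as a potential dupe.
    return [each['attributes']['name'] for each in resp.json().get('data', [])]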
"False")))) is False: + anon = 0 + else: + anon = 1 + + if meta['bdinfo'] is not None: + mi_dump = None + bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + else: + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + bd_dump = None + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') + files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") + data = { + 'name': name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, + } + # Internal + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: + if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + data['internal'] = 1 + + if region_id != 0: + data['region_id'] = region_id + if distributor_id != 0: + data['distributor_id'] = distributor_id + if meta.get('category') == "TV": + data['season_number'] = meta.get('season_int', '0') + data['episode_number'] = meta.get('episode_int', '0') + headers = { + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' + } + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + } + + if meta['debug'] is False: + response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + try: + console.print(response.json()) + except Exception: + console.print("It may have uploaded, go check") + return + else: + console.print("[cyan]Request Data:") + console.print(data) + open_torrent.close() + + async def search_existing(self, meta, disctype): + dupes = [] + console.print("[yellow]Searching for existing torrents on AL...") + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" + } + if meta.get('edition', "") != "": + params['name'] = params['name'] + f" {meta['edition']}" + try: + response = requests.get(url=self.search_url, params=params) + response = response.json() + for each in response['data']: + result = [each][0]['attributes']['name'] + # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() + # if difference >= 0.05: + dupes.append(result) + except Exception: + console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') + await asyncio.sleep(5) + + return dupes + + # Got this from CBR and changed the encoding rename + async def edit_name(self, meta): + name = meta['uuid'].replace('.mkv', '').replace('.mp4', '').replace(".", " ").replace("DDP2 0", "DDP2.0").replace("DDP5 1", "DDP5.1").replace("H 264", "x264").replace("H 265", "x265").replace("DD+7 1", "DDP7.1").replace("AAC2 0", "AAC2.0").replace('DD5 1', 'DD5.1').replace('DD2 0', 'DD2.0').replace('TrueHD 7 1', 'TrueHD 7.1').replace('DTS-HD MA 7 1', 'DTS-HD MA 7.1').replace('DTS-HD MA 5 1', 'DTS-HD MA 5.1').replace("TrueHD 5 1", "TrueHD 5.1").replace("DTS-X 7 1", "DTS-X 7.1").replace("DTS-X 5 1", "DTS-X 5.1").replace("FLAC 2 0", "FLAC 2.0").replace("FLAC 2 0", "FLAC 2.0").replace("FLAC 5 1", "FLAC 5.1").replace("DD1 0", "DD1.0").replace("DTS ES 5 1", "DTS ES 5.1").replace("DTS5 1", "DTS 5.1").replace("AAC1 0", "AAC1.0").replace("DD+5 1", "DDP5.1").replace("DD+2 0", "DDP2.0").replace("DD+1 0", "DDP1.0") + return name + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index 0bd5c40b8..f88ae3796 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -3,10 +3,10 @@ import os import asyncio import requests -import distutils.util import platform +from str2bool import str2bool from pymediainfo import MediaInfo - +from pathlib import Path from src.trackers.COMMON import COMMON from src.console import console @@ -20,21 +20,21 @@ class ANT(): Upload """ - ############################################################### - # ####### EDIT ME ##### # - ############################################################### - - # ALSO EDIT CLASS NAME ABOVE - def __init__(self, config): self.config = config self.tracker = 'ANT' self.source_flag = 'ANT' self.search_url = 'https://anthelion.me/api.php' self.upload_url = 'https://anthelion.me/api.php' - self.banned_groups = ['Ozlem', 'RARBG', 'FGT', 'STUTTERSHIT', 'LiGaS', 'DDR', 'Zeus', 'TBS', 'aXXo', 'CrEwSaDe', 'DNL', 'EVO', - 'FaNGDiNG0', 'HD2DVD', 'HDTime', 'iPlanet', 'KiNGDOM', 'NhaNc3', 'PRoDJi', 'SANTi', 'ViSiON', 'WAF', 'YIFY', - 'YTS', 'MkvCage', 'mSD'] + self.banned_groups = [ + '3LTON', '4yEo', 'ADE', 'AFG', 'AniHLS', 'AnimeRG', 'AniURL', 'AROMA', 'aXXo', 'Brrip', 'CHD', 'CM8', + 'CrEwSaDe', 'd3g', 'DDR', 'DNL', 'DeadFish', 'ELiTE', 'eSc', 'FaNGDiNG0', 'FGT', 'Flights', 'FRDS', + 'FUM', 'HAiKU', 
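# --- Illustrative refactor sketch, not the patch's code: the long chain of
# str.replace() calls in AL.edit_name() above can be expressed as a mapping, so each
# audio/codec fixup is a one-line addition. Note the original replaces "." with " "
# first, which is why the fixup keys contain spaces.
AUDIO_FIXUPS = {
    "DDP2 0": "DDP2.0", "DDP5 1": "DDP5.1", "DD+7 1": "DDP7.1",
    "AAC2 0": "AAC2.0", "DD5 1": "DD5.1", "TrueHD 7 1": "TrueHD 7.1",
    "H 264": "x264", "H 265": "x265",
}

def normalize_name(uuid):
    name = uuid.replace('.mkv', '').replace('.mp4', '').replace('.', ' ')
    for bad, good in AUDIO_FIXUPS.items():
        name = name.replace(bad, good)
    return name

print(normalize_name("Show S01E01 1080p WEB-DL DDP5 1 H 264-GRP.mkv"))
# -> Show S01E01 1080p WEB-DL DDP5.1 x264-GRP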
'HD2DVD', 'HDS', 'HDTime', 'Hi10', 'ION10', 'iPlanet', 'JIVE', 'KiNGDOM', 'Leffe', + 'LiGaS', 'LOAD', 'MeGusta', 'MkvCage', 'mHD', 'mSD', 'NhaNc3', 'nHD', 'NOIVTC', 'nSD', 'Oj', 'Ozlem', + 'PiRaTeS', 'PRoDJi', 'RAPiDCOWS', 'RARBG', 'RetroPeeps', 'RDN', 'REsuRRecTioN', 'RMTeam', 'SANTi', + 'SicFoI', 'SPASM', 'SPDVD', 'STUTTERSHIT', 'TBS', 'Telly', 'TM', 'UPiNSMOKE', 'URANiME', 'WAF', 'xRed', + 'XS', 'YIFY', 'YTS', 'Zeus', 'ZKBL', 'ZmN', 'ZMNT' + ] self.signature = None pass @@ -60,15 +60,25 @@ async def get_flags(self, meta): flags.append('Remux') return flags - ############################################################### - # #### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ### # - ############################################################### - - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) + torrent_filename = "BASE" + torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent" + torrent_file_size_kib = os.path.getsize(torrent_path) / 1024 + + # Trigger regeneration automatically if size constraints aren't met + if torrent_file_size_kib > 250: # 250 KiB + console.print("[yellow]Existing .torrent exceeds 250 KiB and will be regenerated to fit constraints.") + + from src.prep import Prep + prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) + meta['max_piece_size'] = '256' # 256 MiB + prep.create_torrent(meta, Path(meta['path']), "ANT") + torrent_filename = "ANT" + + await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename) flags = await self.get_flags(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 @@ -77,7 +87,12 @@ async def upload(self, meta): bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() bd_dump = f'[spoiler=BDInfo][pre]{bd_dump}[/pre][/spoiler]' path = os.path.join(meta['bdinfo']['path'], 'STREAM') - m2ts = os.path.join(path, meta['bdinfo']['files'][0]['file']) + longest_file = max( + meta['bdinfo']['files'], + key=lambda x: x.get('length', 0) + ) + file_name = longest_file['file'].lower() + m2ts = os.path.join(path, file_name) media_info_output = str(MediaInfo.parse(m2ts, output="text", full=False)) mi_dump = media_info_output.replace('\r\n', '\n') else: @@ -98,33 +113,41 @@ async def upload(self, meta): 'media': 'Blu-ray', 'releasegroup': str(meta['tag'])[1:], 'release_desc': bd_dump, - 'flagchangereason': "BDMV Uploaded with L4G's Upload Assistant"}) + 'flagchangereason': "BDMV Uploaded with Upload Assistant"}) if meta['scene']: # ID of "Scene?" 
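# --- Back-of-the-envelope sketch for the 250 KiB .torrent cap in ANT.upload() above:
# the bulk of a metafile is the 'pieces' string (20 SHA-1 bytes per piece), so a
# too-large .torrent is shrunk by raising the piece size. Sizes are illustrative.
import math

def hash_bytes_kib(content_bytes, piece_size_bytes):
    pieces = math.ceil(content_bytes / piece_size_bytes)
    return pieces * 20 / 1024  # hash payload only; bencode framing adds a little more

content = 120 * 1024**3  # a 120 GiB release
for mib in (4, 8, 16):
    print(mib, "MiB pieces ->", round(hash_bytes_kib(content, mib * 1024**2)), "KiB of hashes")
# 4 MiB -> 600 KiB, 8 MiB -> 300 KiB, 16 MiB -> 150 KiB: only the larger piece
# sizes bring the regenerated .torrent under the 250 KiB limit.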
checkbox on upload form is actually "censored" data['censored'] = 1 headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } - if meta['debug'] is False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers) - if response.status_code in [200, 201]: - response = response.json() - try: - console.print(response) - except Exception: - console.print("It may have uploaded, go check") - return - else: - console.print("[cyan]Request Data:") - console.print(data) - open_torrent.close() + + try: + if not meta['debug']: + response = requests.post(url=self.upload_url, files=files, data=data, headers=headers) + if response.status_code in [200, 201]: + response_data = response.json() + else: + response_data = { + "error": f"Unexpected status code: {response.status_code}", + "response_content": response.text # or use response.json() if JSON is expected + } + console.print(response_data) + else: + console.print("[cyan]Request Data:") + console.print(data) + finally: + open_torrent.close() async def edit_desc(self, meta): return - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): + if meta.get('category') == "TV": + console.print('[bold red]This site only ALLOWS Movies.') + meta['skipping'] = "ANT" + return dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on ANT...") params = { 'apikey': self.config['TRACKERS'][self.tracker]['api_key'].strip(), 't': 'search', diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index d6ce9bca1..d3a2361a2 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -3,14 +3,19 @@ import asyncio import requests from difflib import SequenceMatcher -import distutils.util -import urllib +from str2bool import str2bool import os import platform +import hashlib +import bencodepy +import glob +import multiprocessing +from urllib.parse import urlparse from src.trackers.COMMON import COMMON from src.console import console + class BHD(): """ Edit for Tracker: @@ -24,11 +29,72 @@ def __init__(self, config): self.tracker = 'BHD' self.source_flag = 'BHD' self.upload_url = 'https://beyond-hd.me/api/upload/' - self.signature = f"\n[center][url=https://beyond-hd.me/forums/topic/toolpython-l4gs-upload-assistant.5456]Created by L4G's Upload Assistant[/url][/center]" - self.banned_groups = ['Sicario', 'TOMMY', 'x0r', 'nikt0', 'FGT', 'd3g', 'MeGusta', 'YIFY', 'tigole', 'TEKNO3D', 'C4K', 'RARBG', '4K4U', 'EASports', 'ReaLHD'] + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" + self.banned_groups = ['Sicario', 'TOMMY', 'x0r', 'nikt0', 'FGT', 'd3g', 'MeGusta', 'YIFY', 'tigole', 'TEKNO3D', 'C4K', 'RARBG', '4K4U', 'EASports', 'ReaLHD', 'Telly', 'AOC', 'WKS', 'SasukeducK'] pass - - async def upload(self, meta): + + def match_host(self, hostname, approved_hosts): + for approved_host in approved_hosts: + if hostname == approved_host or hostname.endswith(f".{approved_host}"): + return approved_host + return hostname + + async def upload(self, meta, disctype): + common = COMMON(config=self.config) + await self.upload_with_retry(meta, common) + + async def upload_with_retry(self, meta, common, img_host_index=1): + url_host_mapping = { + "ibb.co": "imgbb", + "ptpimg.me": "ptpimg", + "pixhost.to": "pixhost", + "imgbox.com": "imgbox", + 
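# --- Behavior sketch for BHD.match_host() above: exact hostnames and any subdomain
# both resolve to the approved host key before the mapping lookup. URLs are invented.
from urllib.parse import urlparse

def match_host(hostname, approved_hosts):
    for approved in approved_hosts:
        if hostname == approved or hostname.endswith(f".{approved}"):
            return approved
    return hostname

url_host_mapping = {"ibb.co": "imgbb", "ptpimg.me": "ptpimg", "pixhost.to": "pixhost"}
for raw_url in ("https://i.ibb.co/abc/shot.png", "https://ptpimg.me/xyz.png"):
    hostname = urlparse(raw_url).netloc           # e.g. "i.ibb.co"
    mapped = match_host(hostname, url_host_mapping.keys())
    print(url_host_mapping.get(mapped, mapped))   # -> imgbb, then ptpimg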
"beyondhd.co": "bhd", + "imagebam.com": "bam", + } + + approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb', 'pixhost', 'bhd', 'bam'] + + for image in meta['image_list']: + raw_url = image['raw_url'] + parsed_url = urlparse(raw_url) + hostname = parsed_url.netloc + mapped_host = self.match_host(hostname, url_host_mapping.keys()) + mapped_host = url_host_mapping.get(mapped_host, mapped_host) + if meta['debug']: + if mapped_host in approved_image_hosts: + console.print(f"[green]URL '{raw_url}' is correctly matched to approved host '{mapped_host}'.") + else: + console.print(f"[red]URL '{raw_url}' is not recognized as part of an approved host.") + + if all( + url_host_mapping.get( + self.match_host(urlparse(image['raw_url']).netloc, url_host_mapping.keys()), + self.match_host(urlparse(image['raw_url']).netloc, url_host_mapping.keys()), + ) in approved_image_hosts + for image in meta['image_list'] + ): + console.print("[green]Images are already hosted on an approved image host. Skipping re-upload.") + image_list = meta['image_list'] + else: + images_reuploaded = False + while img_host_index <= len(approved_image_hosts): + image_list, retry_mode, images_reuploaded = await self.handle_image_upload(meta, img_host_index, approved_image_hosts) + + if retry_mode: + console.print(f"[yellow]Switching to the next image host. Current index: {img_host_index}") + img_host_index += 1 + continue + + new_images_key = 'bhd_images_key' + if image_list is not None: + image_list = meta[new_images_key] + break + + if image_list is None: + console.print("[red]All image hosts failed. Please check your configuration.") + return + common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta['category']) @@ -39,37 +105,37 @@ async def upload(self, meta): tags = await self.get_tags(meta) custom, edition = await self.get_edition(meta, tags) bhd_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - - if meta['bdinfo'] != None: + + if meta['bdinfo'] is not None: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8') else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8') - - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() torrent_file = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" files = { - 'mediainfo' : mi_dump, - } + 'mediainfo': mi_dump, + } if os.path.exists(torrent_file): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files['file'] = open_torrent.read() open_torrent.close() - + data = { - 'name' : bhd_name, - 'category_id' : cat_id, - 'type' : type_id, + 'name': bhd_name, + 'category_id': cat_id, + 'type': type_id, 'source': source_id, - 'imdb_id' : meta['imdb_id'].replace('tt', ''), - 'tmdb_id' : meta['tmdb'], - 'description' : desc, - 'anon' : anon, - 'sd' : meta.get('sd', 0), - 'live' : draft + 'imdb_id': meta['imdb_id'].replace('tt', ''), + 'tmdb_id': meta['tmdb'], + 'description': desc, + 'anon': anon, + 'sd': meta.get('sd', 0), + 
'live': draft # 'internal' : 0, # 'featured' : 0, # 'free' : 0, @@ -77,28 +143,28 @@ async def upload(self, meta): # 'sticky' : 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - + if meta.get('tv_pack', 0) == 1: data['pack'] = 1 if meta.get('season', None) == "S00": data['special'] = 1 if meta.get('region', "") != "": data['region'] = meta['region'] - if custom == True: + if custom is True: data['custom_edition'] = edition elif edition != "": data['edition'] = edition if len(tags) > 0: data['tags'] = ','.join(tags) headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } - + url = self.upload_url + self.config['TRACKERS'][self.tracker]['api_key'].strip() - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=url, files=files, data=data, headers=headers) try: response = response.json() @@ -109,42 +175,167 @@ async def upload(self, meta): data['imdb_id'] = 1 response = requests.post(url=url, files=files, data=data, headers=headers) response = response.json() - elif response['satus_message'].startswith('Invalid name value'): + elif response['status_message'].startswith('Invalid name value'): console.print(f"[bold yellow]Submitted Name: {bhd_name}") console.print(response) - except: + except Exception: console.print("It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) - - + async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts=None, file=None): + if approved_image_hosts is None: + approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb', 'pixhost'] + + url_host_mapping = { + "ibb.co": "imgbb", + "ptpimg.me": "ptpimg", + "pixhost.to": "pixhost", + "imgbox.com": "imgbox", + } + + retry_mode = False + images_reuploaded = False + new_images_key = 'bhd_images_key' + discs = meta.get('discs', []) # noqa F841 + filelist = meta.get('video', []) + filename = meta['filename'] + path = meta['path'] + + if isinstance(filelist, str): + filelist = [filelist] + + multi_screens = int(self.config['DEFAULT'].get('screens', 6)) + base_dir = meta['base_dir'] + folder_id = meta['uuid'] + from src.prep import Prep + prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) + meta[new_images_key] = [] + + screenshots_dir = os.path.join(base_dir, 'tmp', folder_id) + all_screenshots = [] + + for i, file in enumerate(filelist): + filename_pattern = f"{filename}*.png" + + if meta['is_disc'] == "DVD": + existing_screens = glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][0]['name']}-*.png") + else: + existing_screens = glob.glob(os.path.join(screenshots_dir, filename_pattern)) + + if len(existing_screens) < multi_screens: + if meta.get('debug'): + console.print("[yellow]The image host of existing images is not supported.") + console.print(f"[yellow]Insufficient screenshots found: generating {multi_screens} screenshots.") + if meta['is_disc'] == "BDMV": + s = multiprocessing.Process( + target=prep.disc_screenshots, + args=(f"FILE_{img_host_index}", meta['bdinfo'], folder_id, base_dir, + meta.get('vapoursynth', False), [], meta.get('ffdebug', False), 
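# [Editor's note — illustrative sketch, not part of the diff] The upload call
# above resubmits once when BHD rejects the IMDb id; the reassignment to 1
# suggests the site accepts 1 as an "unknown" placeholder. Condensed:
import requests

def post_with_imdb_fallback(url, files, data, headers):
    response = requests.post(url=url, files=files, data=data, headers=headers).json()
    if str(response.get('status_message', '')).startswith('Invalid imdb_id'):
        data['imdb_id'] = 1
        # Caution: if `files` holds open file handles, their read position is
        # at EOF after the first post; re-read them into bytes before retrying.
        response = requests.post(url=url, files=files, data=data, headers=headers).json()
    return response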
img_host_index) + ) + elif meta['is_disc'] == "DVD": + s = multiprocessing.Process( + target=prep.dvd_screenshots, + args=(meta, 0, None, True) + ) + else: + s = multiprocessing.Process( + target=prep.screenshots, + args=(path, f"{filename}", meta['uuid'], base_dir, + meta, multi_screens + 1, True, None) + ) + + s.start() + while s.is_alive(): + await asyncio.sleep(1) + + if meta['is_disc'] == "DVD": + existing_screens = glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][0]['name']}-*.png") + else: + existing_screens = glob.glob(os.path.join(screenshots_dir, filename_pattern)) + + all_screenshots.extend(existing_screens) + + if not all_screenshots: + console.print("[red]No screenshots were generated or found. Please check the screenshot generation process.") + return [], True, images_reuploaded + + uploaded_images = [] + while True: + current_img_host_key = f'img_host_{img_host_index}' + current_img_host = self.config.get('DEFAULT', {}).get(current_img_host_key) + + if not current_img_host: + console.print("[red]No more image hosts left to try.") + return + if current_img_host not in approved_image_hosts: + console.print(f"[red]Your preferred image host '{current_img_host}' is not supported at BHD, trying next host.") + retry_mode = True + images_reuploaded = True + img_host_index += 1 + continue + else: + meta['imghost'] = current_img_host + console.print(f"[green]Uploading to approved host '{current_img_host}'.") + break + + uploaded_images, _ = prep.upload_screens( + meta, multi_screens, img_host_index, 0, multi_screens, + all_screenshots, {new_images_key: meta[new_images_key]}, retry_mode + ) + + if uploaded_images: + meta[new_images_key] = uploaded_images + + if meta['debug']: + for image in uploaded_images: + console.print(f"[debug] Response in upload_image_task: {image['img_url']}, {image['raw_url']}, {image['web_url']}") + for image in meta.get(new_images_key, []): + raw_url = image['raw_url'] + parsed_url = urlparse(raw_url) + hostname = parsed_url.netloc + mapped_host = self.match_host(hostname, url_host_mapping.keys()) + mapped_host = url_host_mapping.get(mapped_host, mapped_host) + + if mapped_host not in approved_image_hosts: + console.print(f"[red]Unsupported image host detected in URL '{raw_url}'. 
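# [Editor's note — illustrative sketch, not part of the diff] Screenshot
# generation above runs in a separate multiprocessing.Process that is polled
# with `await asyncio.sleep(1)` rather than join(), so the event loop keeps
# servicing other coroutines while the CPU-bound work runs. The bare pattern:
import asyncio
import multiprocessing
import time

def blocking_job(seconds):  # stand-in for prep.screenshots / disc_screenshots
    time.sleep(seconds)

async def run_in_process(target, *args):
    proc = multiprocessing.Process(target=target, args=args)
    proc.start()
    while proc.is_alive():          # poll instead of a blocking join()
        await asyncio.sleep(1)
    return proc.exitcode

# asyncio.run(run_in_process(blocking_job, 2))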
Please use one of the approved image hosts.") + return meta[new_images_key], True, images_reuploaded # Trigger retry_mode if switching hosts + + if all( + url_host_mapping.get( + self.match_host(urlparse(image['raw_url']).netloc, url_host_mapping.keys()), + self.match_host(urlparse(image['raw_url']).netloc, url_host_mapping.keys()), + ) in approved_image_hosts + for image in meta[new_images_key] + ): + + return meta[new_images_key], False, images_reuploaded async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '1') + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '1') return category_id async def get_source(self, source): sources = { - "Blu-ray" : "Blu-ray", - "BluRay" : "Blu-ray", - "HDDVD" : "HD-DVD", - "HD DVD" : "HD-DVD", - "Web" : "WEB", - "HDTV" : "HDTV", - "UHDTV" : "HDTV", - "NTSC" : "DVD", "NTSC DVD" : "DVD", - "PAL" : "DVD", "PAL DVD": "DVD", + "Blu-ray": "Blu-ray", + "BluRay": "Blu-ray", + "HDDVD": "HD-DVD", + "HD DVD": "HD-DVD", + "Web": "WEB", + "HDTV": "HDTV", + "UHDTV": "HDTV", + "NTSC": "DVD", "NTSC DVD": "DVD", + "PAL": "DVD", "PAL DVD": "DVD", } - + source_id = sources.get(source) return source_id @@ -166,7 +357,7 @@ async def get_type(self, meta): if "DVD5" in meta['dvd_size']: type_id = "DVD 5" elif "DVD9" in meta['dvd_size']: - type_id = "DVD 9" + type_id = "DVD 9" else: if meta['type'] == "REMUX": if meta['source'] == "BluRay": @@ -185,11 +376,9 @@ async def get_type(self, meta): type_id = "Other" return type_id - - async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as desc: + base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as desc: if meta.get('discs', []) != []: discs = meta['discs'] if discs[0]['type'] == "DVD": @@ -209,30 +398,50 @@ async def edit_desc(self, meta): desc.write(f"[spoiler={os.path.basename(each['largest_evo'])}][code][{each['evo_mi']}[/code][/spoiler]\n") desc.write("\n") desc.write(base.replace("[img]", "[img width=300]")) - images = meta['image_list'] - if len(images) > 0: - desc.write("[center]") + if 'bhd_images_key' in meta: + images = meta['bhd_images_key'] + else: + images = meta['image_list'] + if len(images) > 0: + desc.write("[align=center]") for each in range(len(images[:int(meta['screens'])])): web_url = images[each]['web_url'] img_url = images[each]['img_url'] - desc.write(f"[url={web_url}][img width=350]{img_url}[/img][/url]") - desc.write("[/center]") + if (each == len(images) - 1): + desc.write(f"[url={web_url}][img width=350]{img_url}[/img][/url]") + elif (each + 1) % 2 == 0: + desc.write(f"[url={web_url}][img width=350]{img_url}[/img][/url]\n") + desc.write("\n") + else: + desc.write(f"[url={web_url}][img width=350]{img_url}[/img][/url] ") + desc.write("[/align]") desc.write(self.signature) desc.close() return - - - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): + bhd_name = await self.edit_name(meta) + if any(phrase in bhd_name.lower() for phrase in ("-framestor", "-bhdstudio", "-bmf", "-decibel", "-d-zone", "-hifi", "-ncmt", "-tdd", "-flux", "-crfw", "-sonny", "-zr-", "-mkvultra", "-rpg", "-w4nk3r", "-irobot", "-beyondhd")): + console.print("[bold red]This is an internal BHD release, skipping upload[/bold red]") + 
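# [Editor's note — illustrative sketch, not part of the diff] The nested
# all(...) approval expression above appears verbatim in both upload() and
# handle_image_upload(); a small helper with the same semantics would keep
# the two call sites in sync (match_host is passed in to stay self-contained):
from urllib.parse import urlparse

def all_hosts_approved(image_list, url_host_mapping, approved_hosts, match_host):
    def mapped(raw_url):
        host = match_host(urlparse(raw_url).netloc, url_host_mapping.keys())
        return url_host_mapping.get(host, host)
    return all(mapped(img['raw_url']) in approved_hosts for img in image_list)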
meta['skipping'] = "BHD" + return + if meta['type'] == "DVDRIP": + console.print("[bold red]No DVDRIP at BHD, skipping upload[/bold red]") + meta['skipping'] = "BHD" + return dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on BHD...") category = meta['category'] if category == 'MOVIE': + tmdbID = "movie" category = "Movies" + if category == "TV": + tmdbID = "tv" data = { - 'tmdb_id' : meta['tmdb'], - 'categories' : category, - 'types' : await self.get_type(meta), + 'action': 'search', + 'tmdb_id': f"{tmdbID}/{meta['tmdb']}", + 'categories': category, + 'types': await self.get_type(meta), } # Search all releases if SD if meta['sd'] == 1: @@ -242,7 +451,7 @@ async def search_existing(self, meta): if meta.get('tv_pack', 0) == 1: data['pack'] = 1 data['search'] = f"{meta.get('season', '')}{meta.get('episode', '')}" - url = f"https://beyond-hd.me/api/torrents/{self.config['TRACKERS']['BHD']['api_key'].strip()}?action=search" + url = f"https://beyond-hd.me/api/torrents/{self.config['TRACKERS']['BHD']['api_key'].strip()}" try: response = requests.post(url=url, data=data) response = response.json() @@ -254,16 +463,16 @@ async def search_existing(self, meta): dupes.append(result) else: console.print(f"[yellow]{response.get('status_message')}") - await asyncio.sleep(5) - except: + await asyncio.sleep(5) + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Most likely the site is down.') await asyncio.sleep(5) return dupes - async def get_live(self, meta): + async def get_live(self, meta): draft = self.config['TRACKERS'][self.tracker]['draft_default'].strip() - draft = bool(distutils.util.strtobool(str(draft))) #0 for send to draft, 1 for live + draft = bool(str2bool(str(draft))) # 0 for send to draft, 1 for live if draft: draft_int = 0 else: @@ -284,7 +493,7 @@ async def get_edition(self, meta, tags): elif edition == "": edition = "" else: - custom = True + custom = True return custom, edition async def get_tags(self, meta): @@ -301,13 +510,13 @@ async def get_tags(self, meta): tags.append('EnglishDub') if "Open Matte" in meta.get('edition', ""): tags.append("OpenMatte") - if meta.get('scene', False) == True: + if meta.get('scene', False) is True: tags.append("Scene") - if meta.get('personalrelease', False) == True: + if meta.get('personalrelease', False) is True: tags.append('Personal') if "hybrid" in meta.get('edition', "").lower(): tags.append('Hybrid') - if meta.get('has_commentary', False) == True: + if meta.get('has_commentary', False) is True: tags.append('Commentary') if "DV" in meta.get('hdr', ''): tags.append('DV') @@ -331,4 +540,49 @@ async def edit_name(self, meta): # name = name.replace('H.264', 'x264') if meta['category'] == "TV" and meta.get('tv_pack', 0) == 0 and meta.get('episode_title_storage', '').strip() != '' and meta['episode'].strip() != '': name = name.replace(meta['episode'], f"{meta['episode']} {meta['episode_title_storage']}", 1) - return name \ No newline at end of file + return name + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + torrent = bencodepy.decode(torrent_data) + info_dict = torrent[b'info'] + bencoded_info = bencodepy.encode(info_dict) + info_hash = hashlib.sha1(bencoded_info).hexdigest() + # console.print(f"Info Hash: 
{info_hash}") + + params = { + 'action': 'search', + 'info_hash': info_hash + } + url = f"https://beyond-hd.me/api/torrents/{self.config['TRACKERS']['BHD']['api_key'].strip()}" + try: + response = requests.post(url=url, json=params) + response_data = response.json() + # console.print(f"[yellow]Response Data: {response_data}") + + if response_data.get('total_results') == 1: + for each in response_data['results']: + details_link = f"https://beyond-hd.me/details/{each['id']}" + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/BHDTV.py b/src/trackers/BHDTV.py index 97d0e1c8e..7dd05ed7d 100644 --- a/src/trackers/BHDTV.py +++ b/src/trackers/BHDTV.py @@ -1,10 +1,8 @@ # -*- coding: utf-8 -*- # import discord -import asyncio -from torf import Torrent import requests from src.console import console -import distutils.util +from str2bool import str2bool from pprint import pprint import os import traceback @@ -12,8 +10,6 @@ from pymediainfo import MediaInfo -# from pprint import pprint - class BHDTV(): """ Edit for Tracker: @@ -27,14 +23,14 @@ def __init__(self, config): self.config = config self.tracker = 'BHDTV' self.source_flag = 'BIT-HDTV' - #search not implemented - #self.search_url = 'https://api.bit-hdtv.com/torrent/search/advanced' + # search not implemented + # self.search_url = 'https://api.bit-hdtv.com/torrent/search/advanced' self.upload_url = 'https://www.bit-hdtv.com/takeupload.php' - #self.forum_link = 'https://www.bit-hdtv.com/rules.php' + # self.forum_link = 'https://www.bit-hdtv.com/rules.php' self.banned_groups = [] pass - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await self.edit_desc(meta) @@ -48,24 +44,22 @@ async def upload(self, meta): # must be TV pack sub_cat_id = await self.get_type_tv_pack_id(meta['type']) - - resolution_id = await self.get_res_id(meta['resolution']) # region_id = await common.unit3d_region_ids(meta.get('region')) # distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) if meta['anon'] == 0 and bool( - distutils.util.strtobool(self.config['TRACKERS'][self.tracker].get('anon', "False"))) == False: + str2bool(self.config['TRACKERS'][self.tracker].get('anon', "False"))) is False: anon = 0 else: - anon = 1 + anon = 1 # noqa F841 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'file': 
open_torrent} @@ -80,32 +74,31 @@ async def upload(self, meta): data = { 'api_key': self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'name': meta['name'].replace(' ', '.').replace(':.', '.').replace(':', '.').replace('DD+', 'DDP'), - 'mediainfo': mi_dump if bd_dump == None else bd_dump, + 'mediainfo': mi_dump if bd_dump is None else bd_dump, 'cat': cat_id, 'subcat': sub_cat_id, 'resolution': resolution_id, - #'anon': anon, + # 'anon': anon, # admins asked to remove short description. 'sdescr': " ", - 'descr': media_info if bd_dump == None else "Disc so Check Mediainfo dump ", + 'descr': media_info if bd_dump is None else "Disc so Check Mediainfo dump ", 'screen': desc, 'url': f"https://www.tvmaze.com/shows/{meta['tvmaze_id']}" if meta['category'] == 'TV' else f"https://www.imdb.com/title/tt{meta['imdb_id']}", 'format': 'json' } - - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, data=data, files=files) try: # pprint(data) console.print(response.json()) - except: - console.print(f"[cyan]It may have uploaded, go check") + except Exception: + console.print("[cyan]It may have uploaded, go check") # cprint(f"Request Data:", 'cyan') pprint(data) console.print(traceback.print_exc()) else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") pprint(data) # # adding my anounce url to torrent. if 'view' in response.json()['data']: @@ -116,7 +109,6 @@ async def upload(self, meta): "Torrent Did not upload") open_torrent.close() - async def get_cat_id(self, meta): category_id = '0' if meta['category'] == 'MOVIE': @@ -128,17 +120,16 @@ async def get_cat_id(self, meta): category_id = '10' return category_id - async def get_type_movie_id(self, meta): type_id = '0' - test = meta['type'] + test = meta['type'] # noqa F841 if meta['type'] == 'DISC': if meta['3D']: type_id = '46' else: type_id = '2' elif meta['type'] == 'REMUX': - if str(meta['name']).__contains__('265') : + if str(meta['name']).__contains__('265'): type_id = '48' elif meta['3D']: type_id = '45' @@ -147,58 +138,55 @@ async def get_type_movie_id(self, meta): elif meta['type'] == 'HDTV': type_id = '6' elif meta['type'] == 'ENCODE': - if str(meta['name']).__contains__('265') : + if str(meta['name']).__contains__('265'): type_id = '43' elif meta['3D']: type_id = '44' else: type_id = '1' elif meta['type'] == 'WEBDL' or meta['type'] == 'WEBRIP': - type_id = '5' + type_id = '5' return type_id - async def get_type_tv_id(self, type): type_id = { 'HDTV': '7', 'WEBDL': '8', 'WEBRIP': '8', - #'WEBRIP': '55', - #'SD': '59', + # 'WEBRIP': '55', + # 'SD': '59', 'ENCODE': '10', 'REMUX': '11', 'DISC': '12', }.get(type, '0') return type_id - async def get_type_tv_pack_id(self, type): type_id = { 'HDTV': '13', 'WEBDL': '14', 'WEBRIP': '8', - #'WEBRIP': '55', - #'SD': '59', + # 'WEBRIP': '55', + # 'SD': '59', 'ENCODE': '16', 'REMUX': '17', 'DISC': '18', }.get(type, '0') return type_id - async def get_res_id(self, resolution): resolution_id = { '2160p': '4', '1080p': '3', - '1080i':'2', + '1080i': '2', '720p': '1' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as desc: + base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() + with 
open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as desc: desc.write(base.replace("[img=250]", "[img=250x250]")) images = meta['image_list'] if len(images) > 0: @@ -210,7 +198,7 @@ async def edit_desc(self, meta): desc.close() return - async def search_existing(self, meta): - console.print(f"[red]Dupes must be checked Manually") + async def search_existing(self, meta, disctype): + console.print("[red]Dupes must be checked Manually") return ['Dupes must be checked Manually'] - ### hopefully someone else has the time to implement this. + # hopefully someone else has the time to implement this. diff --git a/src/trackers/BLU.py b/src/trackers/BLU.py index 33e03975b..23908ab7e 100644 --- a/src/trackers/BLU.py +++ b/src/trackers/BLU.py @@ -2,13 +2,16 @@ # import discord import asyncio import requests -import distutils.util -import os import platform +from str2bool import str2bool +import bencodepy +import os +import glob from src.trackers.COMMON import COMMON from src.console import console + class BLU(): """ Edit for Tracker: @@ -23,21 +26,20 @@ def __init__(self, config): self.source_flag = 'BLU' self.search_url = 'https://blutopia.cc/api/torrents/filter' self.torrent_url = 'https://blutopia.cc/api/torrents/' - self.upload_url = 'https://blutopia.cc/api/torrents/upload' - self.signature = f"\n[center][url=https://blutopia.cc/forums/topics/3087]Created by L4G's Upload Assistant[/url][/center]" + self.upload_url = 'https://blutopia.cc/api/torrents/upload' + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" self.banned_groups = [ - '[Oj]', '3LTON', '4yEo', 'AFG', 'AniHLS', 'AnimeRG', 'AniURL', 'AROMA', 'aXXo', 'Brrip', 'CM8', 'CrEwSaDe', 'd3g', 'DeadFish', 'DNL', 'ELiTE', 'eSc', 'FaNGDiNG0', 'FGT', 'Flights', - 'FRDS', 'FUM', 'HAiKU', 'HD2DVD', 'HDS', 'HDTime', 'Hi10', 'ION10', 'iPlanet', 'JIVE', 'KiNGDOM', 'Leffe', 'LEGi0N', 'LOAD', 'MeGusta', 'mHD', 'mSD', 'NhaNc3', 'nHD', 'nikt0', 'NOIVTC', - 'nSD', 'PiRaTeS', 'playBD', 'PlaySD', 'playXD', 'PRODJi', 'RAPiDCOWS', 'RARBG', 'RDN', 'REsuRRecTioN', 'RMTeam', 'SANTi', 'SicFoI', 'SPASM', 'STUTTERSHIT', 'Telly', 'TM', 'TRiToN', 'UPiNSMOKE', - 'URANiME', 'WAF', 'x0r', 'xRed', 'XS', 'YIFY', 'ZKBL', 'ZmN', 'ZMNT', + '[Oj]', '3LTON', '4yEo', 'ADE', 'AFG', 'AniHLS', 'AnimeRG', 'AniURL', 'AROMA', 'aXXo', 'Brrip', 'CHD', 'CM8', 'CrEwSaDe', 'd3g', 'DeadFish', 'DNL', 'ELiTE', 'eSc', 'FaNGDiNG0', 'FGT', 'Flights', + 'FRDS', 'FUM', 'HAiKU', 'HD2DVD', 'HDS', 'HDTime', 'Hi10', 'ION10', 'iPlanet', 'JIVE', 'KiNGDOM', 'Leffe', 'LEGi0N', 'LOAD', 'MeGusta', 'mHD', 'mSD', 'NhaNc3', 'nHD', 'nikt0', 'NOIVTC', 'OFT', + 'nSD', 'PiRaTeS', 'playBD', 'PlaySD', 'playXD', 'PRODJi', 'RAPiDCOWS', 'RARBG', 'RetroPeeps', 'RDN', 'REsuRRecTioN', 'RMTeam', 'SANTi', 'SicFoI', 'SPASM', 'SPDVD', 'STUTTERSHIT', 'Telly', 'TM', + 'TRiToN', 'UPiNSMOKE', 'URANiME', 'WAF', 'x0r', 'xRed', 'XS', 'YIFY', 'ZKBL', 'ZmN', 'ZMNT', 'AOC', ['EVO', 'Raw Content Only'], ['TERMiNAL', 'Raw Content Only'], ['ViSION', 'Note the capitalization and characters used'], ['CMRG', 'Raw Content Only'] ] - + pass - - async def upload(self, meta): + + async def upload(self, meta, disctype): common = COMMON(config=self.config) - blu_name = meta['name'] desc_header = "" if meta.get('webdv', False): @@ -47,51 +49,66 @@ async def upload(self, meta): cat_id = await self.get_cat_id(meta['category'], meta.get('edition', '')) type_id = await self.get_type_id(meta['type']) resolution_id = await 
self.get_res_id(meta['resolution']) + modq = await self.get_flag(meta, 'modq') region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[BLU]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[BLU]DESCRIPTION.txt", 'r', encoding='utf-8').read() + + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[BLU]{meta['clean_name']}.torrent", 'rb') files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")} + + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") + data = { - 'name' : blu_name, - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': blu_name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, + 'mod_queue_opt_in': modq, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - + if region_id != 0: data['region_id'] = region_id if distributor_id != 0: @@ -100,64 +117,67 @@ async def upload(self, meta): data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - - if meta['debug'] == False: + + if meta['debug'] is False: response = requests.post(url=self.upload_url, 
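# [Editor's note — illustrative sketch, not part of the diff] The NFO
# handling above is optional-attach: grab the first *.nfo in the upload's
# tmp directory, if any, and add it to the multipart payload.
import glob
import os

def attach_nfo(files, base_dir, uuid):
    nfo_files = glob.glob(os.path.join(base_dir, "tmp", uuid, "*.nfo"))
    if nfo_files:
        files['nfo'] = ("nfo_file.nfo", open(nfo_files[0], 'rb'), "text/plain")
    return files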
files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") - - return + + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() + async def get_flag(self, meta, flag_name): + config_flag = self.config['TRACKERS'][self.tracker].get(flag_name) + if config_flag is not None: + return 1 if config_flag else 0 - - + return 1 if meta.get(flag_name, False) else 0 async def get_cat_id(self, category_name, edition): category_id = { - 'MOVIE': '1', - 'TV': '2', + 'MOVIE': '1', + 'TV': '2', 'FANRES': '3' - }.get(category_name, '0') + }.get(category_name, '0') if category_name == 'MOVIE' and 'FANRES' in edition: category_id = '3' return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', + 'DISC': '1', 'REMUX': '3', - 'WEBDL': '4', - 'WEBRIP': '5', + 'WEBDL': '4', + 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '12' - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', - '4320p': '11', - '2160p': '1', - '1440p' : '2', + '8640p': '10', + '4320p': '11', + '2160p': '1', + '1440p': '2', '1080p': '2', - '1080i':'3', - '720p': '5', - '576p': '6', + '1080i': '3', + '720p': '5', + '576p': '6', '576i': '7', - '480p': '8', + '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id async def derived_dv_layer(self, meta): @@ -173,12 +193,12 @@ async def derived_dv_layer(self, meta): if cli_ui.ask_yes_no("Is the DV Layer sourced from the same service as the video?"): ask_comp = False desc_header = "[code]This release contains a derived Dolby Vision profile 8 layer. Comparisons not required as DV and HDR are from same provider.[/code]" - + if ask_comp: while desc_header == "": desc_input = cli_ui.ask_string("Please provide comparisons between HDR masters. (link or bbcode)", default="") desc_header = f"[code]This release contains a derived Dolby Vision profile 8 layer. 
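# [Editor's note — illustrative sketch, not part of the diff] get_flag()
# above gives the tracker's config entry priority over the per-run meta
# flag; reduced to a pure function, the precedence is:
def get_flag(config_value, meta_value):
    if config_value is not None:        # config wins whenever it is set
        return 1 if config_value else 0
    return 1 if meta_value else 0       # otherwise fall back to meta

assert get_flag(True, False) == 1
assert get_flag(False, True) == 0
assert get_flag(None, True) == 1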
Comparisons between HDR masters: {desc_input}[/code]" - + if "hybrid" not in name.lower(): if "REPACK" in name: name = name.replace('REPACK', 'Hybrid REPACK') @@ -186,17 +206,16 @@ async def derived_dv_layer(self, meta): name = name.replace(meta['resolution'], f"Hybrid {meta['resolution']}") return name, desc_header - - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on BLU...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category'], meta.get('edition', '')), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category'], meta.get('edition', '')), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta['category'] == 'TV': params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" @@ -210,8 +229,47 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes \ No newline at end of file + return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/CBR.py b/src/trackers/CBR.py new file mode 100644 index 000000000..f6813c057 --- /dev/null +++ b/src/trackers/CBR.py @@ -0,0 +1,230 @@ +# -*- coding: utf-8 -*- +# import discord +import asyncio +import requests +from str2bool import str2bool +import platform +import bencodepy +import os +import glob + +from src.trackers.COMMON import COMMON +from src.console import console + + +class CBR(): + """ + Edit for Tracker: + Edit BASE.torrent with announce and source + Check for duplicates + Set type/category IDs + Upload + """ + def __init__(self, config): + self.config = config + self.tracker = 'CBR' + 
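# [Editor's note — illustrative sketch, not part of the diff] The renaming
# in derived_dv_layer() above, as a pure function: inject "Hybrid" after
# REPACK when present, otherwise in front of the resolution token.
def hybrid_name(name, resolution):
    if "hybrid" in name.lower():
        return name
    if "REPACK" in name:
        return name.replace('REPACK', 'Hybrid REPACK')
    return name.replace(resolution, f"Hybrid {resolution}")

assert hybrid_name("Film 2020 REPACK 2160p", "2160p") == "Film 2020 Hybrid REPACK 2160p"
assert hybrid_name("Film 2020 2160p", "2160p") == "Film 2020 Hybrid 2160p"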
self.source_flag = 'CapybaraBR' + self.search_url = 'https://capybarabr.com/api/torrents/filter' + self.torrent_url = 'https://capybarabr.com/api/torrents/' + self.upload_url = 'https://capybarabr.com/api/torrents/upload' + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" + self.banned_groups = [""] + pass + + async def upload(self, meta, disctype): + common = COMMON(config=self.config) + await common.edit_torrent(meta, self.tracker, self.source_flag) + await common.unit3d_edit_desc(meta, self.tracker, self.signature) + cat_id = await self.get_cat_id(meta['category'], meta.get('edition', ''), meta) + type_id = await self.get_type_id(meta['type']) + resolution_id = await self.get_res_id(meta['resolution']) + region_id = await common.unit3d_region_ids(meta.get('region')) + distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + name = await self.edit_name(meta) + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + anon = 0 + else: + anon = 1 + + if meta['bdinfo'] is not None: + mi_dump = None + bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + else: + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + bd_dump = None + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[CBR]DESCRIPTION.txt", 'r', encoding='utf-8').read() + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[CBR]{meta['clean_name']}.torrent", 'rb') + files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") + data = { + 'name': name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, + } + # Internal + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: + if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + data['internal'] = 1 + + if region_id != 0: + data['region_id'] = region_id + if distributor_id != 0: + data['distributor_id'] = distributor_id + if meta.get('category') == "TV": + data['season_number'] = meta.get('season_int', '0') + data['episode_number'] = meta.get('episode_int', '0') + headers = { + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' + } + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + } + + if meta['debug'] is False: + response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + try: + console.print(response.json()) + except Exception: + console.print("It may have uploaded, go check") + return + else: + 
console.print("[cyan]Request Data:") + console.print(data) + open_torrent.close() + + async def get_cat_id(self, category_name, edition, meta): + category_id = { + 'MOVIE': '1', + 'TV': '2', + 'ANIMES': '4' + }.get(category_name, '0') + if meta['anime'] is True and category_id == '2': + category_id = '4' + return category_id + + async def get_type_id(self, type): + type_id = { + 'DISC': '1', + 'REMUX': '2', + 'ENCODE': '3', + 'WEBDL': '4', + 'WEBRIP': '5', + 'HDTV': '6' + }.get(type, '0') + return type_id + + async def get_res_id(self, resolution): + resolution_id = { + '4320p': '1', + '2160p': '2', + '1080p': '3', + '1080i': '4', + '720p': '5', + '576p': '6', + '576i': '7', + '480p': '8', + '480i': '9', + 'Other': '10', + }.get(resolution, '10') + return resolution_id + + async def search_existing(self, meta, disctype): + dupes = [] + console.print("[yellow]Buscando por duplicatas no tracker...") + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category'], meta.get('edition', ''), meta), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" + } + if meta['category'] == 'TV': + params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" + if meta.get('edition', "") != "": + params['name'] = params['name'] + f" {meta['edition']}" + try: + response = requests.get(url=self.search_url, params=params) + response = response.json() + for each in response['data']: + result = [each][0]['attributes']['name'] + # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() + # if difference >= 0.05: + dupes.append(result) + except Exception: + console.print('[bold red]NĆ£o foi possivel buscar no tracker torrents duplicados. 
O tracker estĆ” offline ou sua api estĆ” incorreta') + await asyncio.sleep(5) + + return dupes + + async def edit_name(self, meta): + + name = meta['uuid'].replace('.mkv', '').replace('.mp4', '').replace(".", " ").replace("DDP2 0", "DDP2.0").replace("DDP5 1", "DDP5.1").replace("H 264", "H.264").replace("H 265", "H.265").replace("DD+7 1", "DDP7.1").replace("AAC2 0", "AAC2.0").replace('DD5 1', 'DD5.1').replace('DD2 0', 'DD2.0').replace('TrueHD 7 1', 'TrueHD 7.1').replace('DTS-HD MA 7 1', 'DTS-HD MA 7.1').replace('DTS-HD MA 5 1', 'DTS-HD MA 5.1').replace("TrueHD 5 1", "TrueHD 5.1").replace("DTS-X 7 1", "DTS-X 7.1").replace("DTS-X 5 1", "DTS-X 5.1").replace("FLAC 2 0", "FLAC 2.0").replace("FLAC 5 1", "FLAC 5.1").replace("DD1 0", "DD1.0").replace("DTS ES 5 1", "DTS ES 5.1").replace("DTS5 1", "DTS 5.1").replace("AAC1 0", "AAC1.0").replace("DD+5 1", "DDP5.1").replace("DD+2 0", "DDP2.0").replace("DD+1 0", "DDP1.0") + + return name + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index 87fc0ccfb..5d2ed2532 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -1,16 +1,23 @@ from torf import Torrent import os -import traceback import requests import re import json +import click +import sys +import glob +from pymediainfo import MediaInfo +import multiprocessing +import asyncio from src.bbcode import BBCODE from src.console import console + class COMMON(): def __init__(self, config): self.config = config + self.parser = self.MediaInfoParser() pass async def edit_torrent(self, meta, tracker, source_flag, torrent_filename="BASE"): @@ -31,153 +38,529 @@ async def add_tracker_torrent(self, meta, tracker, source_flag, new_tracker, com new_torrent.metainfo['comment'] = comment new_torrent.metainfo['info']['source'] = source_flag Torrent.copy(new_torrent).write(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]{meta['clean_name']}.torrent", overwrite=True) - - + async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, desc_header=""): + from src.prep import Prep + prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf8').read() + multi_screens = int(self.config['DEFAULT'].get('multiScreens', 2)) + char_limit = int(self.config['DEFAULT'].get('charLimit', 14000)) + file_limit = 
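# [Editor's note — illustrative sketch, not part of the diff] The long
# .replace() chain in CBR's edit_name() above is behavior-equivalent to
# iterating a fixup table, which is easier to audit and extend (the pairs
# shown are a subset of those in the chain):
AUDIO_FIXUPS = [
    ("DDP2 0", "DDP2.0"), ("DDP5 1", "DDP5.1"), ("DD+7 1", "DDP7.1"),
    ("AAC2 0", "AAC2.0"), ("DD5 1", "DD5.1"), ("TrueHD 7 1", "TrueHD 7.1"),
    ("DTS-HD MA 5 1", "DTS-HD MA 5.1"), ("H 264", "H.264"), ("H 265", "H.265"),
]

def edit_name(uuid):
    name = uuid.replace('.mkv', '').replace('.mp4', '').replace(".", " ")
    for src, dst in AUDIO_FIXUPS:
        name = name.replace(src, dst)
    return name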
int(self.config['DEFAULT'].get('fileLimit', 5)) + thumb_size = int(self.config['DEFAULT'].get('pack_thumb_size', '300')) + process_limit = int(self.config['DEFAULT'].get('processLimit', 10)) + try: + screenheader = self.config['DEFAULT']['screenshot_header'] + except Exception: + screenheader = None with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]DESCRIPTION.txt", 'w', encoding='utf8') as descfile: - if desc_header != "": + if desc_header: descfile.write(desc_header) - + bbcode = BBCODE() - if meta.get('discs', []) != []: - discs = meta['discs'] - if discs[0]['type'] == "DVD": - descfile.write(f"[spoiler=VOB MediaInfo][code]{discs[0]['vob_mi']}[/code][/spoiler]\n") - descfile.write("\n") - if len(discs) >= 2: - for each in discs[1:]: - if each['type'] == "BDMV": - descfile.write(f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n") - descfile.write("\n") - elif each['type'] == "DVD": - descfile.write(f"{each['name']}:\n") - descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code][{each['vob_mi']}[/code][/spoiler] [spoiler={os.path.basename(each['ifo'])}][code][{each['ifo_mi']}[/code][/spoiler]\n") - descfile.write("\n") - elif each['type'] == "HDDVD": - descfile.write(f"{each['name']}:\n") - descfile.write(f"[spoiler={os.path.basename(each['largest_evo'])}][code][{each['evo_mi']}[/code][/spoiler]\n") - descfile.write("\n") + discs = meta.get('discs', []) + filelist = meta.get('filelist', []) desc = base + desc = re.sub(r'\[center\]\[spoiler=Scene NFO:\].*?\[/center\]', '', desc, flags=re.DOTALL) desc = bbcode.convert_pre_to_code(desc) desc = bbcode.convert_hide_to_spoiler(desc) - if comparison == False: + if comparison is False: desc = bbcode.convert_comparison_to_collapse(desc, 1000) - desc = desc.replace('[img]', '[img=300]') descfile.write(desc) - images = meta['image_list'] - if len(images) > 0: + # Handle single disc case + if len(discs) == 1: + each = discs[0] + if each['type'] == "DVD": + descfile.write("[center]") + descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code]{each['vob_mi']}[/code][/spoiler]\n\n") + descfile.write("[/center]") + images = meta['image_list'] + if screenheader is not None: + descfile.write(screenheader + '\n') + descfile.write("[center]") + for img_index in range(len(images[:int(meta['screens'])])): + web_url = images[img_index]['web_url'] + raw_url = images[img_index]['raw_url'] + descfile.write(f"[url={web_url}][img={self.config['DEFAULT'].get('thumbnail_size', '350')}]{raw_url}[/img][/url]") + descfile.write("[/center]") + + # Handle multiple discs case + elif len(discs) > 1: + # Initialize retry_count if not already set + if 'retry_count' not in meta: + meta['retry_count'] = 0 + + for i, each in enumerate(discs): + # Set a unique key per disc for managing images + new_images_key = f'new_images_disc_{i}' + + if i == 0: + descfile.write("[center]") + if each['type'] == "BDMV": + descfile.write(f"{each.get('name', 'BDINFO')}\n\n") + elif each['type'] == "DVD": + descfile.write(f"{each['name']}:\n") + descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code]{each['vob_mi']}[/code][/spoiler]") + descfile.write(f"[spoiler={os.path.basename(each['ifo'])}][code]{each['ifo_mi']}[/code][/spoiler]\n\n") + # For the first disc, use images from `meta['image_list']` + if meta['debug']: + console.print("[yellow]Using original uploaded images for first disc") + images = meta['image_list'] + for img_index in range(len(images[:int(meta['screens'])])): + web_url = images[img_index]['web_url'] + raw_url = 
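# [Editor's note — illustrative sketch, not part of the diff] For reference,
# unit3d_edit_desc() above is driven by these DEFAULT-section knobs; the
# values shown are the fallbacks used in the code:
DEFAULT = {
    'multiScreens': 2,          # screenshots per additional file/disc
    'charLimit': 14000,         # max characters written to the description
    'fileLimit': 5,             # files past this index collapse into one spoiler
    'pack_thumb_size': '300',   # [img=N] thumbnail width for pack screenshots
    'processLimit': 10,         # hard cap on files processed for screenshots
}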
images[img_index]['raw_url'] + image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" + descfile.write(image_str) + descfile.write("[/center]\n\n") + else: + # Check if screenshots exist for the current disc key + if new_images_key in meta and meta[new_images_key]: + if meta['debug']: + console.print(f"[yellow]Found needed image URLs for {new_images_key}") + descfile.write("[center]") + if each['type'] == "BDMV": + descfile.write(f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n\n") + elif each['type'] == "DVD": + descfile.write(f"{each['name']}:\n") + descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code]{each['vob_mi']}[/code][/spoiler] ") + descfile.write(f"[spoiler={os.path.basename(each['ifo'])}][code]{each['ifo_mi']}[/code][/spoiler]\n\n") + descfile.write("[/center]\n\n") + # Use existing URLs from meta to write to descfile + descfile.write("[center]") + for img in meta[new_images_key]: + web_url = img['web_url'] + raw_url = img['raw_url'] + image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" + descfile.write(image_str) + descfile.write("[/center]\n\n") + else: + # Increment retry_count for tracking but use unique disc keys for each disc + meta['retry_count'] += 1 + meta[new_images_key] = [] + descfile.write("[center]") + if each['type'] == "BDMV": + descfile.write(f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n\n") + elif each['type'] == "DVD": + descfile.write(f"{each['name']}:\n") + descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code]{each['vob_mi']}[/code][/spoiler] ") + descfile.write(f"[spoiler={os.path.basename(each['ifo'])}][code]{each['ifo_mi']}[/code][/spoiler]\n\n") + descfile.write("[/center]\n\n") + # Check if new screenshots already exist before running prep.screenshots + if each['type'] == "BDMV": + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + elif each['type'] == "DVD": + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['discs'][i]['name']}-*.png") + if not new_screens: + if meta['debug']: + console.print(f"[yellow]No new screens for {new_images_key}; creating new screenshots") + # Run prep.screenshots if no screenshots are present + if each['type'] == "BDMV": + use_vs = meta.get('vapoursynth', False) + s = multiprocessing.Process(target=prep.disc_screenshots, args=(meta, f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), multi_screens, True)) + s.start() + while s.is_alive(): + await asyncio.sleep(1) + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + elif each['type'] == "DVD": + s = multiprocessing.Process(target=prep.dvd_screenshots, args=(meta, i, multi_screens, True)) + s.start() + while s.is_alive() is True: + await asyncio.sleep(1) + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['discs'][i]['name']}-*.png") + + if new_screens: + uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]}) + + # Append each uploaded image's data to `meta[new_images_key]` + for img in uploaded_images: + meta[new_images_key].append({ + 'img_url': img['img_url'], + 'raw_url': img['raw_url'], + 'web_url': img['web_url'] + }) + + # Write new URLs to descfile + descfile.write("[center]") + for img in uploaded_images: + web_url = img['web_url'] + raw_url = img['raw_url'] + image_str = 
f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" + descfile.write(image_str) + descfile.write("[/center]\n\n") + + # Save the updated meta to `meta.json` after upload + meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json" + with open(meta_filename, 'w') as f: + json.dump(meta, f, indent=4) + + # Handle single file case + if len(filelist) == 1: + if meta['debug']: + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read() + if mi_dump: + parsed_mediainfo = self.parser.parse_mediainfo(mi_dump) + formatted_bbcode = self.parser.format_bbcode(parsed_mediainfo) + for i, file in enumerate(filelist): + if i == 0: + filename = os.path.splitext(os.path.basename(file.strip()))[0] + descfile.write(f"[center][spoiler={filename}]{formatted_bbcode}[/spoiler]\n") + images = meta['image_list'] + if screenheader is not None: + descfile.write(screenheader + '\n') descfile.write("[center]") - for each in range(len(images[:int(meta['screens'])])): - web_url = images[each]['web_url'] - raw_url = images[each]['raw_url'] - descfile.write(f"[url={web_url}][img=350]{raw_url}[/img][/url]") + for img_index in range(len(images[:int(meta['screens'])])): + web_url = images[img_index]['web_url'] + raw_url = images[img_index]['raw_url'] + descfile.write(f"[url={web_url}][img={self.config['DEFAULT'].get('thumbnail_size', '350')}]{raw_url}[/img][/url]") descfile.write("[/center]") - if signature != None: + # Handle multiple files case + # Initialize character counter + char_count = 0 + max_char_limit = char_limit # Character limit + other_files_spoiler_open = False # Track if "Other files" spoiler has been opened + + # First Pass: Create and Upload Images for Each File + for i, file in enumerate(filelist): + if i >= process_limit: + # console.print("[yellow]Skipping processing more files as they exceed the process limit.") + continue + if multi_screens != 0: + if i > 0: + new_images_key = f'new_images_file_{i}' + if new_images_key not in meta or not meta[new_images_key]: + # Proceed with image generation if not already present + meta[new_images_key] = [] + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + + # If no screenshots exist, create them + if not new_screens: + if meta['debug']: + console.print(f"[yellow]No existing screenshots for {new_images_key}; generating new ones.") + s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, multi_screens, True, None)) + s.start() + while s.is_alive(): + await asyncio.sleep(1) + + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + + # Upload generated screenshots + if new_screens: + uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]}) + meta[new_images_key] = [] + for img in uploaded_images: + meta[new_images_key].append({ + 'img_url': img['img_url'], + 'raw_url': img['raw_url'], + 'web_url': img['web_url'] + }) + + # Save updated meta + meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json" + with open(meta_filename, 'w') as f: + json.dump(meta, f, indent=4) + + # Second Pass: Process MediaInfo and Write Descriptions + if len(filelist) > 1: + for i, file in enumerate(filelist): + if i >= process_limit: + continue + # Extract filename directly from the file path + filename = os.path.splitext(os.path.basename(file.strip()))[0] + + # If we are beyond the file limit, add all further files in 
a spoiler + if multi_screens != 0: + if i >= file_limit: + if not other_files_spoiler_open: + descfile.write("[center][spoiler=Other files]\n") + char_count += len("[center][spoiler=Other files]\n") + other_files_spoiler_open = True + + # Write filename in BBCode format with MediaInfo in spoiler if not the first file + if multi_screens != 0: + if i > 0 and char_count < max_char_limit: + mi_dump = MediaInfo.parse(file, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) + parsed_mediainfo = self.parser.parse_mediainfo(mi_dump) + formatted_bbcode = self.parser.format_bbcode(parsed_mediainfo) + descfile.write(f"[center][spoiler={filename}]{formatted_bbcode}[/spoiler][/center]\n") + char_count += len(f"[center][spoiler={filename}]{formatted_bbcode}[/spoiler][/center]\n") + else: + descfile.write(f"[center]{filename}\n[/center]\n") + char_count += len(f"[center]{filename}\n[/center]\n") + + # Write images if they exist + new_images_key = f'new_images_file_{i}' + if i == 0: # For the first file, use 'image_list' key + images = meta['image_list'] + if images: + descfile.write("[center]") + char_count += len("[center]") + for img_index in range(len(images)): + web_url = images[img_index]['web_url'] + raw_url = images[img_index]['raw_url'] + image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" + descfile.write(image_str) + char_count += len(image_str) + descfile.write("[/center]\n\n") + char_count += len("[/center]\n\n") + elif multi_screens != 0: + if new_images_key in meta and meta[new_images_key]: + descfile.write("[center]") + char_count += len("[center]") + for img in meta[new_images_key]: + web_url = img['web_url'] + raw_url = img['raw_url'] + image_str = f"[url={web_url}][img={thumb_size}]{raw_url}[/img][/url]" + descfile.write(image_str) + char_count += len(image_str) + descfile.write("[/center]\n\n") + char_count += len("[/center]\n\n") + + if other_files_spoiler_open: + descfile.write("[/spoiler][/center]\n") + char_count += len("[/spoiler][/center]\n") + + console.print(f"[yellow]Total characters written to description: {char_count}") + + # Append signature if provided + if signature: descfile.write(signature) descfile.close() - return - + return - - async def unit3d_region_ids(self, region): region_id = { - 'AFG': 1, 'AIA': 2, 'ALA': 3, 'ALG': 4, 'AND': 5, 'ANG': 6, 'ARG': 7, 'ARM': 8, 'ARU': 9, - 'ASA': 10, 'ATA': 11, 'ATF': 12, 'ATG': 13, 'AUS': 14, 'AUT': 15, 'AZE': 16, 'BAH': 17, - 'BAN': 18, 'BDI': 19, 'BEL': 20, 'BEN': 21, 'BER': 22, 'BES': 23, 'BFA': 24, 'BHR': 25, - 'BHU': 26, 'BIH': 27, 'BLM': 28, 'BLR': 29, 'BLZ': 30, 'BOL': 31, 'BOT': 32, 'BRA': 33, - 'BRB': 34, 'BRU': 35, 'BVT': 36, 'CAM': 37, 'CAN': 38, 'CAY': 39, 'CCK': 40, 'CEE': 41, - 'CGO': 42, 'CHA': 43, 'CHI': 44, 'CHN': 45, 'CIV': 46, 'CMR': 47, 'COD': 48, 'COK': 49, - 'COL': 50, 'COM': 51, 'CPV': 52, 'CRC': 53, 'CRO': 54, 'CTA': 55, 'CUB': 56, 'CUW': 57, - 'CXR': 58, 'CYP': 59, 'DJI': 60, 'DMA': 61, 'DOM': 62, 'ECU': 63, 'EGY': 64, 'ENG': 65, - 'EQG': 66, 'ERI': 67, 'ESH': 68, 'ESP': 69, 'ETH': 70, 'FIJ': 71, 'FLK': 72, 'FRA': 73, - 'FRO': 74, 'FSM': 75, 'GAB': 76, 'GAM': 77, 'GBR': 78, 'GEO': 79, 'GER': 80, 'GGY': 81, - 'GHA': 82, 'GIB': 83, 'GLP': 84, 'GNB': 85, 'GRE': 86, 'GRL': 87, 'GRN': 88, 'GUA': 89, - 'GUF': 90, 'GUI': 91, 'GUM': 92, 'GUY': 93, 'HAI': 94, 'HKG': 95, 'HMD': 96, 'HON': 97, - 'HUN': 98, 'IDN': 99, 'IMN': 100, 'IND': 101, 'IOT': 102, 'IRL': 103, 'IRN': 104, 'IRQ': 105, - 'ISL': 106, 'ISR': 107, 'ITA': 108, 'JAM': 109, 'JEY': 110, 'JOR': 111, 'JPN': 112, 
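# [Editor's note — illustrative sketch, not part of the diff] The char_count
# bookkeeping above implements a simple description budget: every write is
# counted, and per-file MediaInfo spoilers are only emitted while the running
# total stays under charLimit. The same idea as a tiny wrapper:
class BudgetedWriter:
    def __init__(self, fh, limit):
        self.fh, self.limit, self.used = fh, limit, 0

    def write(self, text):
        self.fh.write(text)
        self.used += len(text)

    def under_budget(self):
        return self.used < self.limit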
'KAZ': 113, - 'KEN': 114, 'KGZ': 115, 'KIR': 116, 'KNA': 117, 'KOR': 118, 'KSA': 119, 'KUW': 120, 'KVX': 121, - 'LAO': 122, 'LBN': 123, 'LBR': 124, 'LBY': 125, 'LCA': 126, 'LES': 127, 'LIE': 128, 'LKA': 129, - 'LUX': 130, 'MAC': 131, 'MAD': 132, 'MAF': 133, 'MAR': 134, 'MAS': 135, 'MDA': 136, 'MDV': 137, - 'MEX': 138, 'MHL': 139, 'MKD': 140, 'MLI': 141, 'MLT': 142, 'MNG': 143, 'MNP': 144, 'MON': 145, - 'MOZ': 146, 'MRI': 147, 'MSR': 148, 'MTN': 149, 'MTQ': 150, 'MWI': 151, 'MYA': 152, 'MYT': 153, - 'NAM': 154, 'NCA': 155, 'NCL': 156, 'NEP': 157, 'NFK': 158, 'NIG': 159, 'NIR': 160, 'NIU': 161, - 'NLD': 162, 'NOR': 163, 'NRU': 164, 'NZL': 165, 'OMA': 166, 'PAK': 167, 'PAN': 168, 'PAR': 169, - 'PCN': 170, 'PER': 171, 'PHI': 172, 'PLE': 173, 'PLW': 174, 'PNG': 175, 'POL': 176, 'POR': 177, - 'PRK': 178, 'PUR': 179, 'QAT': 180, 'REU': 181, 'ROU': 182, 'RSA': 183, 'RUS': 184, 'RWA': 185, - 'SAM': 186, 'SCO': 187, 'SDN': 188, 'SEN': 189, 'SEY': 190, 'SGS': 191, 'SHN': 192, 'SIN': 193, - 'SJM': 194, 'SLE': 195, 'SLV': 196, 'SMR': 197, 'SOL': 198, 'SOM': 199, 'SPM': 200, 'SRB': 201, - 'SSD': 202, 'STP': 203, 'SUI': 204, 'SUR': 205, 'SWZ': 206, 'SXM': 207, 'SYR': 208, 'TAH': 209, - 'TAN': 210, 'TCA': 211, 'TGA': 212, 'THA': 213, 'TJK': 214, 'TKL': 215, 'TKM': 216, 'TLS': 217, - 'TOG': 218, 'TRI': 219, 'TUN': 220, 'TUR': 221, 'TUV': 222, 'TWN': 223, 'UAE': 224, 'UGA': 225, - 'UKR': 226, 'UMI': 227, 'URU': 228, 'USA': 229, 'UZB': 230, 'VAN': 231, 'VAT': 232, 'VEN': 233, - 'VGB': 234, 'VIE': 235, 'VIN': 236, 'VIR': 237, 'WAL': 238, 'WLF': 239, 'YEM': 240, 'ZAM': 241, - 'ZIM': 242, 'EUR' : 243 + 'AFG': 1, 'AIA': 2, 'ALA': 3, 'ALG': 4, 'AND': 5, 'ANG': 6, 'ARG': 7, 'ARM': 8, 'ARU': 9, + 'ASA': 10, 'ATA': 11, 'ATF': 12, 'ATG': 13, 'AUS': 14, 'AUT': 15, 'AZE': 16, 'BAH': 17, + 'BAN': 18, 'BDI': 19, 'BEL': 20, 'BEN': 21, 'BER': 22, 'BES': 23, 'BFA': 24, 'BHR': 25, + 'BHU': 26, 'BIH': 27, 'BLM': 28, 'BLR': 29, 'BLZ': 30, 'BOL': 31, 'BOT': 32, 'BRA': 33, + 'BRB': 34, 'BRU': 35, 'BVT': 36, 'CAM': 37, 'CAN': 38, 'CAY': 39, 'CCK': 40, 'CEE': 41, + 'CGO': 42, 'CHA': 43, 'CHI': 44, 'CHN': 45, 'CIV': 46, 'CMR': 47, 'COD': 48, 'COK': 49, + 'COL': 50, 'COM': 51, 'CPV': 52, 'CRC': 53, 'CRO': 54, 'CTA': 55, 'CUB': 56, 'CUW': 57, + 'CXR': 58, 'CYP': 59, 'DJI': 60, 'DMA': 61, 'DOM': 62, 'ECU': 63, 'EGY': 64, 'ENG': 65, + 'EQG': 66, 'ERI': 67, 'ESH': 68, 'ESP': 69, 'ETH': 70, 'FIJ': 71, 'FLK': 72, 'FRA': 73, + 'FRO': 74, 'FSM': 75, 'GAB': 76, 'GAM': 77, 'GBR': 78, 'GEO': 79, 'GER': 80, 'GGY': 81, + 'GHA': 82, 'GIB': 83, 'GLP': 84, 'GNB': 85, 'GRE': 86, 'GRL': 87, 'GRN': 88, 'GUA': 89, + 'GUF': 90, 'GUI': 91, 'GUM': 92, 'GUY': 93, 'HAI': 94, 'HKG': 95, 'HMD': 96, 'HON': 97, + 'HUN': 98, 'IDN': 99, 'IMN': 100, 'IND': 101, 'IOT': 102, 'IRL': 103, 'IRN': 104, 'IRQ': 105, + 'ISL': 106, 'ISR': 107, 'ITA': 108, 'JAM': 109, 'JEY': 110, 'JOR': 111, 'JPN': 112, 'KAZ': 113, + 'KEN': 114, 'KGZ': 115, 'KIR': 116, 'KNA': 117, 'KOR': 118, 'KSA': 119, 'KUW': 120, 'KVX': 121, + 'LAO': 122, 'LBN': 123, 'LBR': 124, 'LBY': 125, 'LCA': 126, 'LES': 127, 'LIE': 128, 'LKA': 129, + 'LUX': 130, 'MAC': 131, 'MAD': 132, 'MAF': 133, 'MAR': 134, 'MAS': 135, 'MDA': 136, 'MDV': 137, + 'MEX': 138, 'MHL': 139, 'MKD': 140, 'MLI': 141, 'MLT': 142, 'MNG': 143, 'MNP': 144, 'MON': 145, + 'MOZ': 146, 'MRI': 147, 'MSR': 148, 'MTN': 149, 'MTQ': 150, 'MWI': 151, 'MYA': 152, 'MYT': 153, + 'NAM': 154, 'NCA': 155, 'NCL': 156, 'NEP': 157, 'NFK': 158, 'NIG': 159, 'NIR': 160, 'NIU': 161, + 'NLD': 162, 'NOR': 163, 'NRU': 164, 'NZL': 165, 'OMA': 166, 'PAK': 167, 'PAN': 
168, 'PAR': 169,
+            'PCN': 170, 'PER': 171, 'PHI': 172, 'PLE': 173, 'PLW': 174, 'PNG': 175, 'POL': 176, 'POR': 177,
+            'PRK': 178, 'PUR': 179, 'QAT': 180, 'REU': 181, 'ROU': 182, 'RSA': 183, 'RUS': 184, 'RWA': 185,
+            'SAM': 186, 'SCO': 187, 'SDN': 188, 'SEN': 189, 'SEY': 190, 'SGS': 191, 'SHN': 192, 'SIN': 193,
+            'SJM': 194, 'SLE': 195, 'SLV': 196, 'SMR': 197, 'SOL': 198, 'SOM': 199, 'SPM': 200, 'SRB': 201,
+            'SSD': 202, 'STP': 203, 'SUI': 204, 'SUR': 205, 'SWZ': 206, 'SXM': 207, 'SYR': 208, 'TAH': 209,
+            'TAN': 210, 'TCA': 211, 'TGA': 212, 'THA': 213, 'TJK': 214, 'TKL': 215, 'TKM': 216, 'TLS': 217,
+            'TOG': 218, 'TRI': 219, 'TUN': 220, 'TUR': 221, 'TUV': 222, 'TWN': 223, 'UAE': 224, 'UGA': 225,
+            'UKR': 226, 'UMI': 227, 'URU': 228, 'USA': 229, 'UZB': 230, 'VAN': 231, 'VAT': 232, 'VEN': 233,
+            'VGB': 234, 'VIE': 235, 'VIN': 236, 'VIR': 237, 'WAL': 238, 'WLF': 239, 'YEM': 240, 'ZAM': 241,
+            'ZIM': 242, 'EUR': 243
        }.get(region, 0)
        return region_id

    async def unit3d_distributor_ids(self, distributor):
        distributor_id = {
-            '01 DISTRIBUTION': 1, '100 DESTINATIONS TRAVEL FILM': 2, '101 FILMS': 3, '1FILMS': 4, '2 ENTERTAIN VIDEO': 5, '20TH CENTURY FOX': 6, '2L': 7, '3D CONTENT HUB': 8, '3D MEDIA': 9, '3L FILM': 10, '4DIGITAL': 11, '4DVD': 12, '4K ULTRA HD MOVIES': 13, '4K UHD': 13, '8-FILMS': 14, '84 ENTERTAINMENT': 15, '88 FILMS': 16, '@ANIME': 17, 'ANIME': 17, 'A CONTRACORRIENTE': 18, 'A CONTRACORRIENTE FILMS': 19, 'A&E HOME VIDEO': 20, 'A&E': 20, 'A&M RECORDS': 21, 'A+E NETWORKS': 22, 'A+R': 23, 'A-FILM': 24, 'AAA': 25, 'AB VIDƉO': 26, 'AB VIDEO': 26, 'ABC - (AUSTRALIAN BROADCASTING CORPORATION)': 27, 'ABC': 27, 'ABKCO': 28, 'ABSOLUT MEDIEN': 29, 'ABSOLUTE': 30, 'ACCENT FILM ENTERTAINMENT': 31, 'ACCENTUS': 32, 'ACORN MEDIA': 33, 'AD VITAM': 34, 'ADA': 35, 'ADITYA VIDEOS': 36, 'ADSO FILMS': 37, 'AFM RECORDS': 38, 'AGFA': 39, 'AIX RECORDS': 40, 'ALAMODE FILM': 41, 'ALBA RECORDS': 42, 'ALBANY RECORDS': 43, 'ALBATROS': 44, 'ALCHEMY': 45, 'ALIVE': 46, 'ALL ANIME': 47, 'ALL INTERACTIVE ENTERTAINMENT': 48, 'ALLEGRO': 49, 'ALLIANCE': 50, 'ALPHA MUSIC': 51, 'ALTERDYSTRYBUCJA': 52, 'ALTERED INNOCENCE': 53, 'ALTITUDE FILM DISTRIBUTION': 54, 'ALUCARD RECORDS': 55, 'AMAZING D.C.': 56, 'AMAZING DC': 56, 'AMMO CONTENT': 57, 'AMUSE SOFT ENTERTAINMENT': 58, 'ANCONNECT': 59, 'ANEC': 60, 'ANIMATSU': 61, 'ANIME HOUSE': 62, 'ANIME LTD': 63, 'ANIME WORKS': 64, 'ANIMEIGO': 65, 'ANIPLEX': 66, 'ANOLIS ENTERTAINMENT': 67, 'ANOTHER WORLD ENTERTAINMENT': 68, 'AP INTERNATIONAL': 69, 'APPLE': 70, 'ARA MEDIA': 71, 'ARBELOS': 72, 'ARC ENTERTAINMENT': 73, 'ARP SƉLECTION': 74, 'ARP SELECTION': 74, 'ARROW': 75, 'ART SERVICE': 76, 'ART VISION': 77, 'ARTE ƉDITIONS': 78, 'ARTE EDITIONS': 78, 'ARTE VIDƉO': 79, 'ARTE VIDEO': 79, 'ARTHAUS MUSIK': 80, 'ARTIFICIAL EYE': 81, 'ARTSPLOITATION FILMS': 82, 'ARTUS FILMS': 83, 'ASCOT ELITE HOME ENTERTAINMENT': 84, 'ASIA VIDEO': 85, 'ASMIK ACE': 86, 'ASTRO RECORDS & FILMWORKS': 87, 'ASYLUM': 88, 'ATLANTIC FILM': 89, 'ATLANTIC RECORDS': 90, 'ATLAS FILM': 91, 'AUDIO VISUAL ENTERTAINMENT': 92, 'AURO-3D CREATIVE LABEL': 93, 'AURUM': 94, 'AV VISIONEN': 95, 'AV-JET': 96, 'AVALON': 97, 'AVENTI': 98, 'AVEX TRAX': 99, 'AXIOM': 100, 'AXIS RECORDS': 101, 'AYNGARAN': 102, 'BAC FILMS': 103, 'BACH FILMS': 104, 'BANDAI VISUAL': 105, 'BARCLAY': 106, 'BBC': 107, 'BRITISH BROADCASTING CORPORATION': 107, 'BBI FILMS': 108, 'BBI': 108, 'BCI HOME ENTERTAINMENT': 109, 'BEGGARS BANQUET': 110, 'BEL AIR CLASSIQUES': 111, 'BELGA FILMS': 112, 'BELVEDERE': 113, 'BENELUX FILM DISTRIBUTORS': 114, 'BENNETT-WATT MEDIA': 115,
'BERLIN CLASSICS': 116, 'BERLINER PHILHARMONIKER RECORDINGS': 117, 'BEST ENTERTAINMENT': 118, 'BEYOND HOME ENTERTAINMENT': 119, 'BFI VIDEO': 120, 'BFI': 120, 'BRITISH FILM INSTITUTE': 120, 'BFS ENTERTAINMENT': 121, 'BFS': 121, 'BHAVANI': 122, 'BIBER RECORDS': 123, 'BIG HOME VIDEO': 124, 'BILDSTƖRUNG': 125, 'BILDSTORUNG': 125, 'BILL ZEBUB': 126, 'BIRNENBLATT': 127, 'BIT WEL': 128, 'BLACK BOX': 129, 'BLACK HILL PICTURES': 130, 'BLACK HILL': 130, 'BLACK HOLE RECORDINGS': 131, 'BLACK HOLE': 131, 'BLAQOUT': 132, 'BLAUFIELD MUSIC': 133, 'BLAUFIELD': 133, 'BLOCKBUSTER ENTERTAINMENT': 134, 'BLOCKBUSTER': 134, 'BLU PHASE MEDIA': 135, 'BLU-RAY ONLY': 136, 'BLU-RAY': 136, 'BLURAY ONLY': 136, 'BLURAY': 136, 'BLUE GENTIAN RECORDS': 137, 'BLUE KINO': 138, 'BLUE UNDERGROUND': 139, 'BMG/ARISTA': 140, 'BMG': 140, 'BMGARISTA': 140, 'BMG ARISTA': 140, 'ARISTA': - 140, 'ARISTA/BMG': 140, 'ARISTABMG': 140, 'ARISTA BMG': 140, 'BONTON FILM': 141, 'BONTON': 141, 'BOOMERANG PICTURES': 142, 'BOOMERANG': 142, 'BQHL ƉDITIONS': 143, 'BQHL EDITIONS': 143, 'BQHL': 143, 'BREAKING GLASS': 144, 'BRIDGESTONE': 145, 'BRINK': 146, 'BROAD GREEN PICTURES': 147, 'BROAD GREEN': 147, 'BUSCH MEDIA GROUP': 148, 'BUSCH': 148, 'C MAJOR': 149, 'C.B.S.': 150, 'CAICHANG': 151, 'CALIFƓRNIA FILMES': 152, 'CALIFORNIA FILMES': 152, 'CALIFORNIA': 152, 'CAMEO': 153, 'CAMERA OBSCURA': 154, 'CAMERATA': 155, 'CAMP MOTION PICTURES': 156, 'CAMP MOTION': 156, 'CAPELIGHT PICTURES': 157, 'CAPELIGHT': 157, 'CAPITOL': 159, 'CAPITOL RECORDS': 159, 'CAPRICCI': 160, 'CARGO RECORDS': 161, 'CARLOTTA FILMS': 162, 'CARLOTTA': 162, 'CARLOTA': 162, 'CARMEN FILM': 163, 'CASCADE': 164, 'CATCHPLAY': 165, 'CAULDRON FILMS': 166, 'CAULDRON': 166, 'CBS TELEVISION STUDIOS': 167, 'CBS': 167, 'CCTV': 168, 'CCV ENTERTAINMENT': 169, 'CCV': 169, 'CD BABY': 170, 'CD LAND': 171, 'CECCHI GORI': 172, 'CENTURY MEDIA': 173, 'CHUAN XUN SHI DAI MULTIMEDIA': 174, 'CINE-ASIA': 175, 'CINƉART': 176, 'CINEART': 176, 'CINEDIGM': 177, 'CINEFIL IMAGICA': 178, 'CINEMA EPOCH': 179, 'CINEMA GUILD': 180, 'CINEMA LIBRE STUDIOS': 181, 'CINEMA MONDO': 182, 'CINEMATIC VISION': 183, 'CINEPLOIT RECORDS': 184, 'CINESTRANGE EXTREME': 185, 'CITEL VIDEO': 186, 'CITEL': 186, 'CJ ENTERTAINMENT': 187, 'CJ': 187, 'CLASSIC MEDIA': 188, 'CLASSICFLIX': 189, 'CLASSICLINE': 190, 'CLAUDIO RECORDS': 191, 'CLEAR VISION': 192, 'CLEOPATRA': 193, 'CLOSE UP': 194, 'CMS MEDIA LIMITED': 195, 'CMV LASERVISION': 196, 'CN ENTERTAINMENT': 197, 'CODE RED': 198, 'COHEN MEDIA GROUP': 199, 'COHEN': 199, 'COIN DE MIRE CINƉMA': 200, 'COIN DE MIRE CINEMA': 200, 'COLOSSEO FILM': 201, 'COLUMBIA': 203, 'COLUMBIA PICTURES': 203, 'COLUMBIA/TRI-STAR': 204, 'TRI-STAR': 204, 'COMMERCIAL MARKETING': 205, 'CONCORD MUSIC GROUP': 206, 'CONCORDE VIDEO': 207, 'CONDOR': 208, 'CONSTANTIN FILM': 209, 'CONSTANTIN': 209, 'CONSTANTINO FILMES': 210, 'CONSTANTINO': 210, 'CONSTRUCTIVE MEDIA SERVICE': 211, 'CONSTRUCTIVE': 211, 'CONTENT ZONE': 212, 'CONTENTS GATE': 213, 'COQUEIRO VERDE': 214, 'CORNERSTONE MEDIA': 215, 'CORNERSTONE': 215, 'CP DIGITAL': 216, 'CREST MOVIES': 217, 'CRITERION': 218, 'CRITERION COLLECTION': - 218, 'CC': 218, 'CRYSTAL CLASSICS': 219, 'CULT EPICS': 220, 'CULT FILMS': 221, 'CULT VIDEO': 222, 'CURZON FILM WORLD': 223, 'D FILMS': 224, "D'AILLY COMPANY": 225, 'DAILLY COMPANY': 225, 'D AILLY COMPANY': 225, "D'AILLY": 225, 'DAILLY': 225, 'D AILLY': 225, 'DA CAPO': 226, 'DA MUSIC': 227, "DALL'ANGELO PICTURES": 228, 'DALLANGELO PICTURES': 228, "DALL'ANGELO": 228, 'DALL ANGELO PICTURES': 228, 'DALL ANGELO': 228, 'DAREDO': 229, 'DARK FORCE 
ENTERTAINMENT': 230, 'DARK FORCE': 230, 'DARK SIDE RELEASING': 231, 'DARK SIDE': 231, 'DAZZLER MEDIA': 232, 'DAZZLER': 232, 'DCM PICTURES': 233, 'DCM': 233, 'DEAPLANETA': 234, 'DECCA': 235, 'DEEPJOY': 236, 'DEFIANT SCREEN ENTERTAINMENT': 237, 'DEFIANT SCREEN': 237, 'DEFIANT': 237, 'DELOS': 238, 'DELPHIAN RECORDS': 239, 'DELPHIAN': 239, 'DELTA MUSIC & ENTERTAINMENT': 240, 'DELTA MUSIC AND ENTERTAINMENT': 240, 'DELTA MUSIC ENTERTAINMENT': 240, 'DELTA MUSIC': 240, 'DELTAMAC CO. LTD.': 241, 'DELTAMAC CO LTD': 241, 'DELTAMAC CO': 241, 'DELTAMAC': 241, 'DEMAND MEDIA': 242, 'DEMAND': 242, 'DEP': 243, 'DEUTSCHE GRAMMOPHON': 244, 'DFW': 245, 'DGM': 246, 'DIAPHANA': 247, 'DIGIDREAMS STUDIOS': 248, 'DIGIDREAMS': 248, 'DIGITAL ENVIRONMENTS': 249, 'DIGITAL': 249, 'DISCOTEK MEDIA': 250, 'DISCOVERY CHANNEL': 251, 'DISCOVERY': 251, 'DISK KINO': 252, 'DISNEY / BUENA VISTA': 253, 'DISNEY': 253, 'BUENA VISTA': 253, 'DISNEY BUENA VISTA': 253, 'DISTRIBUTION SELECT': 254, 'DIVISA': 255, 'DNC ENTERTAINMENT': 256, 'DNC': 256, 'DOGWOOF': 257, 'DOLMEN HOME VIDEO': 258, 'DOLMEN': 258, 'DONAU FILM': 259, 'DONAU': 259, 'DORADO FILMS': 260, 'DORADO': 260, 'DRAFTHOUSE FILMS': 261, 'DRAFTHOUSE': 261, 'DRAGON FILM ENTERTAINMENT': 262, 'DRAGON ENTERTAINMENT': 262, 'DRAGON FILM': 262, 'DRAGON': 262, 'DREAMWORKS': 263, 'DRIVE ON RECORDS': 264, 'DRIVE ON': 264, 'DRIVE-ON': 264, 'DRIVEON': 264, 'DS MEDIA': 265, 'DTP ENTERTAINMENT AG': 266, 'DTP ENTERTAINMENT': 266, 'DTP AG': 266, 'DTP': 266, 'DTS ENTERTAINMENT': 267, 'DTS': 267, 'DUKE MARKETING': 268, 'DUKE VIDEO DISTRIBUTION': 269, 'DUKE': 269, 'DUTCH FILMWORKS': 270, 'DUTCH': 270, 'DVD INTERNATIONAL': 271, 'DVD': 271, 'DYBEX': 272, 'DYNAMIC': 273, 'DYNIT': 274, 'E1 ENTERTAINMENT': 275, 'E1': 275, 'EAGLE ENTERTAINMENT': 276, 'EAGLE HOME ENTERTAINMENT PVT.LTD.': - 277, 'EAGLE HOME ENTERTAINMENT PVTLTD': 277, 'EAGLE HOME ENTERTAINMENT PVT LTD': 277, 'EAGLE HOME ENTERTAINMENT': 277, 'EAGLE PICTURES': 278, 'EAGLE ROCK ENTERTAINMENT': 279, 'EAGLE ROCK': 279, 'EAGLE VISION MEDIA': 280, 'EAGLE VISION': 280, 'EARMUSIC': 281, 'EARTH ENTERTAINMENT': 282, 'EARTH': 282, 'ECHO BRIDGE ENTERTAINMENT': 283, 'ECHO BRIDGE': 283, 'EDEL GERMANY GMBH': 284, 'EDEL GERMANY': 284, 'EDEL RECORDS': 285, 'EDITION TONFILM': 286, 'EDITIONS MONTPARNASSE': 287, 'EDKO FILMS LTD.': 288, 'EDKO FILMS LTD': 288, 'EDKO FILMS': 288, 'EDKO': 288, "EIN'S M&M CO": 289, 'EINS M&M CO': 289, "EIN'S M&M": 289, 'EINS M&M': 289, 'ELEA-MEDIA': 290, 'ELEA MEDIA': 290, 'ELEA': 290, 'ELECTRIC PICTURE': 291, 'ELECTRIC': 291, 'ELEPHANT FILMS': 292, 'ELEPHANT': 292, 'ELEVATION': 293, 'EMI': 294, 'EMON': 295, 'EMS': 296, 'EMYLIA': 297, 'ENE MEDIA': 298, 'ENE': 298, 'ENTERTAINMENT IN VIDEO': 299, 'ENTERTAINMENT IN': 299, 'ENTERTAINMENT ONE': 300, 'ENTERTAINMENT ONE FILMS CANADA INC.': 301, 'ENTERTAINMENT ONE FILMS CANADA INC': 301, 'ENTERTAINMENT ONE FILMS CANADA': 301, 'ENTERTAINMENT ONE CANADA INC': 301, - 'ENTERTAINMENT ONE CANADA': 301, 'ENTERTAINMENTONE': 302, 'EONE': 303, 'EOS': 304, 'EPIC PICTURES': 305, 'EPIC': 305, 'EPIC RECORDS': 306, 'ERATO': 307, 'EROS': 308, 'ESC EDITIONS': 309, 'ESCAPI MEDIA BV': 310, 'ESOTERIC RECORDINGS': 311, 'ESPN FILMS': 312, 'EUREKA ENTERTAINMENT': 313, 'EUREKA': 313, 'EURO PICTURES': 314, 'EURO VIDEO': 315, 'EUROARTS': 316, 'EUROPA FILMES': 317, 'EUROPA': 317, 'EUROPACORP': 318, 'EUROZOOM': 319, 'EXCEL': 320, 'EXPLOSIVE MEDIA': 321, 'EXPLOSIVE': 321, 'EXTRALUCID FILMS': 322, 'EXTRALUCID': 322, 'EYE SEE MOVIES': 323, 'EYE SEE': 323, 'EYK MEDIA': 324, 'EYK': 324, 'FABULOUS FILMS': 325, 
'FABULOUS': 325, 'FACTORIS FILMS': 326, 'FACTORIS': 326, 'FARAO RECORDS': 327, 'FARBFILM HOME ENTERTAINMENT': 328, 'FARBFILM ENTERTAINMENT': 328, 'FARBFILM HOME': 328, 'FARBFILM': 328, 'FEELGOOD ENTERTAINMENT': 329, 'FEELGOOD': 329, 'FERNSEHJUWELEN': 330, 'FILM CHEST': 331, 'FILM MEDIA': 332, 'FILM MOVEMENT': 333, 'FILM4': 334, 'FILMART': 335, 'FILMAURO': 336, 'FILMAX': 337, 'FILMCONFECT HOME ENTERTAINMENT': 338, 'FILMCONFECT ENTERTAINMENT': 338, 'FILMCONFECT HOME': 338, 'FILMCONFECT': 338, 'FILMEDIA': 339, 'FILMJUWELEN': 340, 'FILMOTEKA NARODAWA': 341, 'FILMRISE': 342, 'FINAL CUT ENTERTAINMENT': 343, 'FINAL CUT': 343, 'FIREHOUSE 12 RECORDS': 344, 'FIREHOUSE 12': 344, 'FIRST INTERNATIONAL PRODUCTION': 345, 'FIRST INTERNATIONAL': 345, 'FIRST LOOK STUDIOS': 346, 'FIRST LOOK': 346, 'FLAGMAN TRADE': 347, 'FLASHSTAR FILMES': 348, 'FLASHSTAR': 348, 'FLICKER ALLEY': 349, 'FNC ADD CULTURE': 350, 'FOCUS FILMES': 351, 'FOCUS': 351, 'FOKUS MEDIA': 352, 'FOKUSA': 352, 'FOX PATHE EUROPA': 353, 'FOX PATHE': 353, 'FOX EUROPA': 353, 'FOX/MGM': 354, 'FOX MGM': 354, 'MGM': 354, 'MGM/FOX': 354, 'FOX': 354, 'FPE': 355, 'FRANCE TƉLƉVISIONS DISTRIBUTION': 356, 'FRANCE TELEVISIONS DISTRIBUTION': 356, 'FRANCE TELEVISIONS': 356, 'FRANCE': 356, 'FREE DOLPHIN ENTERTAINMENT': 357, 'FREE DOLPHIN': 357, 'FREESTYLE DIGITAL MEDIA': 358, 'FREESTYLE DIGITAL': 358, 'FREESTYLE': 358, 'FREMANTLE HOME ENTERTAINMENT': 359, 'FREMANTLE ENTERTAINMENT': 359, 'FREMANTLE HOME': 359, 'FREMANTL': 359, 'FRENETIC FILMS': 360, 'FRENETIC': 360, 'FRONTIER WORKS': 361, 'FRONTIER': 361, 'FRONTIERS MUSIC': 362, 'FRONTIERS RECORDS': 363, 'FS FILM OY': 364, 'FS FILM': - 364, 'FULL MOON FEATURES': 365, 'FULL MOON': 365, 'FUN CITY EDITIONS': 366, 'FUN CITY': 366, 'FUNIMATION ENTERTAINMENT': 367, 'FUNIMATION': 367, 'FUSION': 368, 'FUTUREFILM': 369, 'G2 PICTURES': 370, 'G2': 370, 'GAGA COMMUNICATIONS': 371, 'GAGA': 371, 'GAIAM': 372, 'GALAPAGOS': 373, 'GAMMA HOME ENTERTAINMENT': 374, 'GAMMA ENTERTAINMENT': 374, 'GAMMA HOME': 374, 'GAMMA': 374, 'GARAGEHOUSE PICTURES': 375, 'GARAGEHOUSE': 375, 'GARAGEPLAY (車åŗ«å؛ę؂)': 376, '車åŗ«å؛ę؂': 376, 'GARAGEPLAY (Che Ku Yu Le )': 376, 'GARAGEPLAY': 376, 'Che Ku Yu Le': 376, 'GAUMONT': 377, 'GEFFEN': 378, 'GENEON ENTERTAINMENT': 379, 'GENEON': 379, 'GENEON UNIVERSAL ENTERTAINMENT': 380, 'GENERAL VIDEO RECORDING': 381, 'GLASS DOLL FILMS': 382, 'GLASS DOLL': 382, 'GLOBE MUSIC MEDIA': 383, 'GLOBE MUSIC': 383, 'GLOBE MEDIA': 383, 'GLOBE': 383, 'GO ENTERTAIN': 384, 'GO': 384, 'GOLDEN HARVEST': 385, 'GOOD!MOVIES': 386, - 'GOOD! 
MOVIES': 386, 'GOOD MOVIES': 386, 'GRAPEVINE VIDEO': 387, 'GRAPEVINE': 387, 'GRASSHOPPER FILM': 388, 'GRASSHOPPER FILMS': 388, 'GRASSHOPPER': 388, 'GRAVITAS VENTURES': 389, 'GRAVITAS': 389, 'GREAT MOVIES': 390, 'GREAT': 390, - 'GREEN APPLE ENTERTAINMENT': 391, 'GREEN ENTERTAINMENT': 391, 'GREEN APPLE': 391, 'GREEN': 391, 'GREENNARAE MEDIA': 392, 'GREENNARAE': 392, 'GRINDHOUSE RELEASING': 393, 'GRINDHOUSE': 393, 'GRIND HOUSE': 393, 'GRYPHON ENTERTAINMENT': 394, 'GRYPHON': 394, 'GUNPOWDER & SKY': 395, 'GUNPOWDER AND SKY': 395, 'GUNPOWDER SKY': 395, 'GUNPOWDER + SKY': 395, 'GUNPOWDER': 395, 'HANABEE ENTERTAINMENT': 396, 'HANABEE': 396, 'HANNOVER HOUSE': 397, 'HANNOVER': 397, 'HANSESOUND': 398, 'HANSE SOUND': 398, 'HANSE': 398, 'HAPPINET': 399, 'HARMONIA MUNDI': 400, 'HARMONIA': 400, 'HBO': 401, 'HDC': 402, 'HEC': 403, 'HELL & BACK RECORDINGS': 404, 'HELL AND BACK RECORDINGS': 404, 'HELL & BACK': 404, 'HELL AND BACK': 404, "HEN'S TOOTH VIDEO": 405, 'HENS TOOTH VIDEO': 405, "HEN'S TOOTH": 405, 'HENS TOOTH': 405, 'HIGH FLIERS': 406, 'HIGHLIGHT': 407, 'HILLSONG': 408, 'HISTORY CHANNEL': 409, 'HISTORY': 409, 'HK VIDƉO': 410, 'HK VIDEO': 410, 'HK': 410, 'HMH HAMBURGER MEDIEN HAUS': 411, 'HAMBURGER MEDIEN HAUS': 411, 'HMH HAMBURGER MEDIEN': 411, 'HMH HAMBURGER': 411, 'HMH': 411, 'HOLLYWOOD CLASSIC ENTERTAINMENT': 412, 'HOLLYWOOD CLASSIC': 412, 'HOLLYWOOD PICTURES': 413, 'HOLLYWOOD': 413, 'HOPSCOTCH ENTERTAINMENT': 414, 'HOPSCOTCH': 414, 'HPM': 415, 'HƄNNSLER CLASSIC': 416, 'HANNSLER CLASSIC': 416, 'HANNSLER': 416, 'I-CATCHER': 417, 'I CATCHER': 417, 'ICATCHER': 417, 'I-ON NEW MEDIA': 418, 'I ON NEW MEDIA': 418, 'ION NEW MEDIA': 418, 'ION MEDIA': 418, 'I-ON': 418, 'ION': 418, 'IAN PRODUCTIONS': 419, 'IAN': 419, 'ICESTORM': 420, 'ICON FILM DISTRIBUTION': 421, 'ICON DISTRIBUTION': 421, 'ICON FILM': 421, 'ICON': 421, 'IDEALE AUDIENCE': 422, 'IDEALE': 422, 'IFC FILMS': 423, 'IFC': 423, 'IFILM': 424, 'ILLUSIONS UNLTD.': 425, 'ILLUSIONS UNLTD': 425, 'ILLUSIONS': 425, 'IMAGE ENTERTAINMENT': 426, 'IMAGE': 426, - 'IMAGEM FILMES': 427, 'IMAGEM': 427, 'IMOVISION': 428, 'IMPERIAL CINEPIX': 429, 'IMPRINT': 430, 'IMPULS HOME ENTERTAINMENT': 431, 'IMPULS ENTERTAINMENT': 431, 'IMPULS HOME': 431, 'IMPULS': 431, 'IN-AKUSTIK': 432, 'IN AKUSTIK': 432, 'INAKUSTIK': 432, 'INCEPTION MEDIA GROUP': 433, 'INCEPTION MEDIA': 433, 'INCEPTION GROUP': 433, 'INCEPTION': 433, 'INDEPENDENT': 434, 'INDICAN': 435, 'INDIE RIGHTS': 436, 'INDIE': 436, 'INDIGO': 437, 'INFO': 438, 'INJOINGAN': 439, 'INKED PICTURES': 440, 'INKED': 440, 'INSIDE OUT MUSIC': 441, 'INSIDE MUSIC': 441, 'INSIDE OUT': 441, 'INSIDE': 441, 'INTERCOM': 442, 'INTERCONTINENTAL VIDEO': 443, 'INTERCONTINENTAL': 443, 'INTERGROOVE': 444, - 'INTERSCOPE': 445, 'INVINCIBLE PICTURES': 446, 'INVINCIBLE': 446, 'ISLAND/MERCURY': 447, 'ISLAND MERCURY': 447, 'ISLANDMERCURY': 447, 'ISLAND & MERCURY': 447, 'ISLAND AND MERCURY': 447, 'ISLAND': 447, 'ITN': 448, 'ITV DVD': 449, 'ITV': 449, 'IVC': 450, 'IVE ENTERTAINMENT': 451, 'IVE': 451, 'J&R ADVENTURES': 452, 'J&R': 452, 'JR': 452, 'JAKOB': 453, 'JONU MEDIA': 454, 'JONU': 454, 'JRB PRODUCTIONS': 455, 'JRB': 455, 'JUST BRIDGE ENTERTAINMENT': 456, 'JUST BRIDGE': 456, 'JUST ENTERTAINMENT': 456, 'JUST': 456, 'KABOOM ENTERTAINMENT': 457, 'KABOOM': 457, 'KADOKAWA ENTERTAINMENT': 458, 'KADOKAWA': 458, 'KAIROS': 459, 'KALEIDOSCOPE ENTERTAINMENT': 460, 'KALEIDOSCOPE': 460, 'KAM & RONSON ENTERPRISES': 461, 'KAM & RONSON': 461, 'KAM&RONSON ENTERPRISES': 461, 'KAM&RONSON': 461, 'KAM AND RONSON ENTERPRISES': 461, 'KAM AND RONSON': 461, 'KANA HOME 
VIDEO': 462, 'KARMA FILMS': 463, 'KARMA': 463, 'KATZENBERGER': 464, 'KAZE': 465, 'KBS MEDIA': 466, 'KBS': 466, 'KD MEDIA': 467, 'KD': 467, 'KING MEDIA': 468, 'KING': 468, 'KING RECORDS': 469, 'KINO LORBER': 470, 'KINO': 470, 'KINO SWIAT': 471, 'KINOKUNIYA': 472, 'KINOWELT HOME ENTERTAINMENT/DVD': 473, 'KINOWELT HOME ENTERTAINMENT': 473, 'KINOWELT ENTERTAINMENT': 473, 'KINOWELT HOME DVD': 473, 'KINOWELT ENTERTAINMENT/DVD': 473, 'KINOWELT DVD': 473, 'KINOWELT': 473, 'KIT PARKER FILMS': 474, 'KIT PARKER': 474, 'KITTY MEDIA': 475, 'KNM HOME ENTERTAINMENT': 476, 'KNM ENTERTAINMENT': 476, 'KNM HOME': 476, 'KNM': 476, 'KOBA FILMS': 477, 'KOBA': 477, 'KOCH ENTERTAINMENT': 478, 'KOCH MEDIA': 479, 'KOCH': 479, 'KRAKEN RELEASING': 480, 'KRAKEN': 480, 'KSCOPE': 481, 'KSM': 482, 'KULTUR': 483, "L'ATELIER D'IMAGES": 484, "LATELIER D'IMAGES": 484, "L'ATELIER DIMAGES": 484, 'LATELIER DIMAGES': 484, "L ATELIER D'IMAGES": 484, "L'ATELIER D IMAGES": 484, - 'L ATELIER D IMAGES': 484, "L'ATELIER": 484, 'L ATELIER': 484, 'LATELIER': 484, 'LA AVENTURA AUDIOVISUAL': 485, 'LA AVENTURA': 485, 'LACE GROUP': 486, 'LACE': 486, 'LASER PARADISE': 487, 'LAYONS': 488, 'LCJ EDITIONS': 489, 'LCJ': 489, 'LE CHAT QUI FUME': 490, 'LE PACTE': 491, 'LEDICK FILMHANDEL': 492, 'LEGEND': 493, 'LEOMARK STUDIOS': 494, 'LEOMARK': 494, 'LEONINE FILMS': 495, 'LEONINE': 495, 'LICHTUNG MEDIA LTD': 496, 'LICHTUNG LTD': 496, 'LICHTUNG MEDIA LTD.': 496, 'LICHTUNG LTD.': 496, 'LICHTUNG MEDIA': 496, 'LICHTUNG': 496, 'LIGHTHOUSE HOME ENTERTAINMENT': 497, 'LIGHTHOUSE ENTERTAINMENT': 497, 'LIGHTHOUSE HOME': 497, 'LIGHTHOUSE': 497, 'LIGHTYEAR': 498, 'LIONSGATE FILMS': 499, 'LIONSGATE': 499, 'LIZARD CINEMA TRADE': 500, 'LLAMENTOL': 501, 'LOBSTER FILMS': 502, 'LOBSTER': 502, 'LOGON': 503, 'LORBER FILMS': 504, 'LORBER': 504, 'LOS BANDITOS FILMS': 505, 'LOS BANDITOS': 505, 'LOUD & PROUD RECORDS': 506, 'LOUD AND PROUD RECORDS': 506, 'LOUD & PROUD': 506, 'LOUD AND PROUD': 506, 'LSO LIVE': 507, 'LUCASFILM': 508, 'LUCKY RED': 509, 'LUMIƈRE HOME ENTERTAINMENT': 510, 'LUMIERE HOME ENTERTAINMENT': 510, 'LUMIERE ENTERTAINMENT': 510, 'LUMIERE HOME': 510, 'LUMIERE': 510, 'M6 VIDEO': 511, 'M6': 511, 'MAD DIMENSION': 512, 'MADMAN ENTERTAINMENT': 513, 'MADMAN': 513, 'MAGIC BOX': 514, 'MAGIC PLAY': 515, 'MAGNA HOME ENTERTAINMENT': 516, 'MAGNA ENTERTAINMENT': 516, 'MAGNA HOME': 516, 'MAGNA': 516, 'MAGNOLIA PICTURES': 517, 'MAGNOLIA': 517, 'MAIDEN JAPAN': 518, 'MAIDEN': 518, 'MAJENG MEDIA': 519, 'MAJENG': 519, 'MAJESTIC HOME ENTERTAINMENT': 520, 'MAJESTIC ENTERTAINMENT': 520, 'MAJESTIC HOME': 520, 'MAJESTIC': 520, 'MANGA HOME ENTERTAINMENT': 521, 'MANGA ENTERTAINMENT': 521, 'MANGA HOME': 521, 'MANGA': 521, 'MANTA LAB': 522, 'MAPLE STUDIOS': 523, 'MAPLE': 523, 'MARCO POLO PRODUCTION': - 524, 'MARCO POLO': 524, 'MARIINSKY': 525, 'MARVEL STUDIOS': 526, 'MARVEL': 526, 'MASCOT RECORDS': 527, 'MASCOT': 527, 'MASSACRE VIDEO': 528, 'MASSACRE': 528, 'MATCHBOX': 529, 'MATRIX D': 530, 'MAXAM': 531, 'MAYA HOME ENTERTAINMENT': 532, 'MAYA ENTERTAINMENT': 532, 'MAYA HOME': 532, 'MAYAT': 532, 'MDG': 533, 'MEDIA BLASTERS': 534, 'MEDIA FACTORY': 535, 'MEDIA TARGET DISTRIBUTION': 536, 'MEDIA TARGET': 536, 'MEDIAINVISION': 537, 'MEDIATOON': 538, 'MEDIATRES ESTUDIO': 539, 'MEDIATRES STUDIO': 539, 'MEDIATRES': 539, 'MEDICI ARTS': 540, 'MEDICI CLASSICS': 541, 'MEDIUMRARE ENTERTAINMENT': 542, 'MEDIUMRARE': 542, 'MEDUSA': 543, 'MEGASTAR': 544, 'MEI AH': 545, 'MELI MƉDIAS': 546, 'MELI MEDIAS': 546, 'MEMENTO FILMS': 547, 'MEMENTO': 547, 'MENEMSHA FILMS': 548, 'MENEMSHA': 548, 'MERCURY': 549, 
'MERCURY STUDIOS': 550, 'MERGE SOFT PRODUCTIONS': 551, 'MERGE PRODUCTIONS': 551, 'MERGE SOFT': 551, 'MERGE': 551, 'METAL BLADE RECORDS': 552, 'METAL BLADE': 552, 'METEOR': 553, 'METRO-GOLDWYN-MAYER': 554, 'METRO GOLDWYN MAYER': 554, 'METROGOLDWYNMAYER': 554, 'METRODOME VIDEO': 555, 'METRODOME': 555, 'METROPOLITAN': 556, 'MFA+': - 557, 'MFA': 557, 'MIG FILMGROUP': 558, 'MIG': 558, 'MILESTONE': 559, 'MILL CREEK ENTERTAINMENT': 560, 'MILL CREEK': 560, 'MILLENNIUM MEDIA': 561, 'MILLENNIUM': 561, 'MIRAGE ENTERTAINMENT': 562, 'MIRAGE': 562, 'MIRAMAX': 563, - 'MISTERIYA ZVUKA': 564, 'MK2': 565, 'MODE RECORDS': 566, 'MODE': 566, 'MOMENTUM PICTURES': 567, 'MONDO HOME ENTERTAINMENT': 568, 'MONDO ENTERTAINMENT': 568, 'MONDO HOME': 568, 'MONDO MACABRO': 569, 'MONGREL MEDIA': 570, 'MONOLIT': 571, 'MONOLITH VIDEO': 572, 'MONOLITH': 572, 'MONSTER PICTURES': 573, 'MONSTER': 573, 'MONTEREY VIDEO': 574, 'MONTEREY': 574, 'MONUMENT RELEASING': 575, 'MONUMENT': 575, 'MORNINGSTAR': 576, 'MORNING STAR': 576, 'MOSERBAER': 577, 'MOVIEMAX': 578, 'MOVINSIDE': 579, 'MPI MEDIA GROUP': 580, 'MPI MEDIA': 580, 'MPI': 580, 'MR. BONGO FILMS': 581, 'MR BONGO FILMS': 581, 'MR BONGO': 581, 'MRG (MERIDIAN)': 582, 'MRG MERIDIAN': 582, 'MRG': 582, 'MERIDIAN': 582, 'MUBI': 583, 'MUG SHOT PRODUCTIONS': 584, 'MUG SHOT': 584, 'MULTIMUSIC': 585, 'MULTI-MUSIC': 585, 'MULTI MUSIC': 585, 'MUSE': 586, 'MUSIC BOX FILMS': 587, 'MUSIC BOX': 587, 'MUSICBOX': 587, 'MUSIC BROKERS': 588, 'MUSIC THEORIES': 589, 'MUSIC VIDEO DISTRIBUTORS': 590, 'MUSIC VIDEO': 590, 'MUSTANG ENTERTAINMENT': 591, 'MUSTANG': 591, 'MVD VISUAL': 592, 'MVD': 592, 'MVD/VSC': 593, 'MVL': 594, 'MVM ENTERTAINMENT': 595, 'MVM': 595, 'MYNDFORM': 596, 'MYSTIC NIGHT PICTURES': 597, 'MYSTIC NIGHT': 597, 'NAMELESS MEDIA': 598, 'NAMELESS': 598, 'NAPALM RECORDS': 599, 'NAPALM': 599, 'NATIONAL ENTERTAINMENT MEDIA': 600, 'NATIONAL ENTERTAINMENT': 600, 'NATIONAL MEDIA': 600, 'NATIONAL FILM ARCHIVE': 601, 'NATIONAL ARCHIVE': 601, 'NATIONAL FILM': 601, 'NATIONAL GEOGRAPHIC': 602, 'NAT GEO TV': 602, 'NAT GEO': 602, 'NGO': 602, 'NAXOS': 603, 'NBCUNIVERSAL ENTERTAINMENT JAPAN': 604, 'NBC UNIVERSAL ENTERTAINMENT JAPAN': 604, 'NBCUNIVERSAL JAPAN': 604, 'NBC UNIVERSAL JAPAN': 604, 'NBC JAPAN': 604, 'NBO ENTERTAINMENT': 605, 'NBO': 605, 'NEOS': 606, 'NETFLIX': 607, 'NETWORK': 608, 'NEW BLOOD': 609, 'NEW DISC': 610, 'NEW KSM': 611, 'NEW LINE CINEMA': 612, 'NEW LINE': 612, 'NEW MOVIE TRADING CO. 
LTD': 613, 'NEW MOVIE TRADING CO LTD': 613, 'NEW MOVIE TRADING CO': 613, 'NEW MOVIE TRADING': 613, 'NEW WAVE FILMS': 614, 'NEW WAVE': 614, 'NFI': 615, - 'NHK': 616, 'NIPPONART': 617, 'NIS AMERICA': 618, 'NJUTAFILMS': 619, 'NOBLE ENTERTAINMENT': 620, 'NOBLE': 620, 'NORDISK FILM': 621, 'NORDISK': 621, 'NORSK FILM': 622, 'NORSK': 622, 'NORTH AMERICAN MOTION PICTURES': 623, 'NOS AUDIOVISUAIS': 624, 'NOTORIOUS PICTURES': 625, 'NOTORIOUS': 625, 'NOVA MEDIA': 626, 'NOVA': 626, 'NOVA SALES AND DISTRIBUTION': 627, 'NOVA SALES & DISTRIBUTION': 627, 'NSM': 628, 'NSM RECORDS': 629, 'NUCLEAR BLAST': 630, 'NUCLEUS FILMS': 631, 'NUCLEUS': 631, 'OBERLIN MUSIC': 632, 'OBERLIN': 632, 'OBRAS-PRIMAS DO CINEMA': 633, 'OBRAS PRIMAS DO CINEMA': 633, 'OBRASPRIMAS DO CINEMA': 633, 'OBRAS-PRIMAS CINEMA': 633, 'OBRAS PRIMAS CINEMA': 633, 'OBRASPRIMAS CINEMA': 633, 'OBRAS-PRIMAS': 633, 'OBRAS PRIMAS': 633, 'OBRASPRIMAS': 633, 'ODEON': 634, 'OFDB FILMWORKS': 635, 'OFDB': 635, 'OLIVE FILMS': 636, 'OLIVE': 636, 'ONDINE': 637, 'ONSCREEN FILMS': 638, 'ONSCREEN': 638, 'OPENING DISTRIBUTION': 639, 'OPERA AUSTRALIA': 640, 'OPTIMUM HOME ENTERTAINMENT': 641, 'OPTIMUM ENTERTAINMENT': 641, 'OPTIMUM HOME': 641, 'OPTIMUM': 641, 'OPUS ARTE': 642, 'ORANGE STUDIO': 643, 'ORANGE': 643, 'ORLANDO EASTWOOD FILMS': 644, 'ORLANDO FILMS': 644, 'ORLANDO EASTWOOD': 644, 'ORLANDO': 644, 'ORUSTAK PICTURES': 645, 'ORUSTAK': 645, 'OSCILLOSCOPE PICTURES': 646, 'OSCILLOSCOPE': 646, 'OUTPLAY': 647, 'PALISADES TARTAN': 648, 'PAN VISION': 649, 'PANVISION': 649, 'PANAMINT CINEMA': 650, 'PANAMINT': 650, 'PANDASTORM ENTERTAINMENT': 651, 'PANDA STORM ENTERTAINMENT': 651, 'PANDASTORM': 651, 'PANDA STORM': 651, 'PANDORA FILM': 652, 'PANDORA': 652, 'PANEGYRIC': 653, 'PANORAMA': 654, 'PARADE DECK FILMS': 655, 'PARADE DECK': 655, 'PARADISE': 656, 'PARADISO FILMS': 657, 'PARADOX': 658, 'PARAMOUNT PICTURES': 659, 'PARAMOUNT': 659, 'PARIS FILMES': 660, 'PARIS FILMS': 660, 'PARIS': 660, 'PARK CIRCUS': 661, 'PARLOPHONE': 662, 'PASSION RIVER': 663, 'PATHE DISTRIBUTION': 664, 'PATHE': 664, 'PBS': 665, 'PEACE ARCH TRINITY': 666, 'PECCADILLO PICTURES': 667, 'PEPPERMINT': 668, 'PHASE 4 FILMS': 669, 'PHASE 4': 669, 'PHILHARMONIA BAROQUE': 670, 'PICTURE HOUSE ENTERTAINMENT': 671, 'PICTURE ENTERTAINMENT': 671, 'PICTURE HOUSE': 671, 'PICTURE': 671, 'PIDAX': 672, 'PINK FLOYD RECORDS': 673, 'PINK FLOYD': 673, 'PINNACLE FILMS': 674, 'PINNACLE': 674, 'PLAIN': 675, 'PLATFORM ENTERTAINMENT LIMITED': 676, 'PLATFORM ENTERTAINMENT LTD': 676, 'PLATFORM ENTERTAINMENT LTD.': 676, 'PLATFORM ENTERTAINMENT': 676, 'PLATFORM': 676, 'PLAYARTE': 677, 'PLG UK CLASSICS': 678, 'PLG UK': - 678, 'PLG': 678, 'POLYBAND & TOPPIC VIDEO/WVG': 679, 'POLYBAND AND TOPPIC VIDEO/WVG': 679, 'POLYBAND & TOPPIC VIDEO WVG': 679, 'POLYBAND & TOPPIC VIDEO AND WVG': 679, 'POLYBAND & TOPPIC VIDEO & WVG': 679, 'POLYBAND AND TOPPIC VIDEO WVG': 679, 'POLYBAND AND TOPPIC VIDEO AND WVG': 679, 'POLYBAND AND TOPPIC VIDEO & WVG': 679, 'POLYBAND & TOPPIC VIDEO': 679, 'POLYBAND AND TOPPIC VIDEO': 679, 'POLYBAND & TOPPIC': 679, 'POLYBAND AND TOPPIC': 679, 'POLYBAND': 679, 'WVG': 679, 'POLYDOR': 680, 'PONY': 681, 'PONY CANYON': 682, 'POTEMKINE': 683, 'POWERHOUSE FILMS': 684, 'POWERHOUSE': 684, 'POWERSTATIOM': 685, 'PRIDE & JOY': 686, 'PRIDE AND JOY': 686, 'PRINZ MEDIA': 687, 'PRINZ': 687, 'PRIS AUDIOVISUAIS': 688, 'PRO VIDEO': 689, 'PRO-VIDEO': 689, 'PRO-MOTION': 690, 'PRO MOTION': 690, 'PROD. 
JRB': 691, 'PROD JRB': 691, 'PRODISC': 692, 'PROKINO': 693, 'PROVOGUE RECORDS': 694, 'PROVOGUE': 694, 'PROWARE': 695, 'PULP VIDEO': 696, 'PULP': 696, 'PULSE VIDEO': 697, 'PULSE': 697, 'PURE AUDIO RECORDINGS': 698, 'PURE AUDIO': 698, 'PURE FLIX ENTERTAINMENT': 699, 'PURE FLIX': 699, 'PURE ENTERTAINMENT': 699, 'PYRAMIDE VIDEO': 700, 'PYRAMIDE': 700, 'QUALITY FILMS': 701, 'QUALITY': 701, 'QUARTO VALLEY RECORDS': 702, 'QUARTO VALLEY': 702, 'QUESTAR': 703, 'R SQUARED FILMS': 704, 'R SQUARED': 704, 'RAPID EYE MOVIES': 705, 'RAPID EYE': 705, 'RARO VIDEO': 706, 'RARO': 706, 'RAROVIDEO U.S.': 707, 'RAROVIDEO US': 707, 'RARO VIDEO US': 707, 'RARO VIDEO U.S.': 707, 'RARO U.S.': 707, 'RARO US': 707, 'RAVEN BANNER RELEASING': 708, 'RAVEN BANNER': 708, 'RAVEN': 708, 'RAZOR DIGITAL ENTERTAINMENT': 709, 'RAZOR DIGITAL': 709, 'RCA': 710, 'RCO LIVE': 711, 'RCO': 711, 'RCV': 712, 'REAL GONE MUSIC': 713, 'REAL GONE': 713, 'REANIMEDIA': 714, 'REANI MEDIA': 714, 'REDEMPTION': 715, 'REEL': 716, 'RELIANCE HOME VIDEO & GAMES': 717, 'RELIANCE HOME VIDEO AND GAMES': 717, 'RELIANCE HOME VIDEO': 717, 'RELIANCE VIDEO': 717, 'RELIANCE HOME': 717, 'RELIANCE': 717, 'REM CULTURE': 718, 'REMAIN IN LIGHT': 719, 'REPRISE': 720, 'RESEN': 721, 'RETROMEDIA': 722, 'REVELATION FILMS LTD.': 723, 'REVELATION FILMS LTD': 723, 'REVELATION FILMS': 723, 'REVELATION LTD.': 723, 'REVELATION LTD': 723, 'REVELATION': 723, 'REVOLVER ENTERTAINMENT': 724, 'REVOLVER': 724, 'RHINO MUSIC': 725, 'RHINO': 725, 'RHV': 726, 'RIGHT STUF': 727, 'RIMINI EDITIONS': 728, 'RISING SUN MEDIA': 729, 'RLJ ENTERTAINMENT': 730, 'RLJ': 730, 'ROADRUNNER RECORDS': 731, 'ROADSHOW ENTERTAINMENT': 732, 'ROADSHOW': 732, 'RONE': 733, 'RONIN FLIX': 734, 'ROTANA HOME ENTERTAINMENT': 735, 'ROTANA ENTERTAINMENT': 735, 'ROTANA HOME': 735, 'ROTANA': 735, 'ROUGH TRADE': 736, 'ROUNDER': 737, 'SAFFRON HILL FILMS': 738, 'SAFFRON HILL': 738, 'SAFFRON': 738, 'SAMUEL GOLDWYN FILMS': 739, 'SAMUEL GOLDWYN': 739, 'SAN FRANCISCO SYMPHONY': 740, 'SANDREW METRONOME': 741, 'SAPHRANE': 742, 'SAVOR': 743, 'SCANBOX ENTERTAINMENT': 744, 'SCANBOX': 744, 'SCENIC LABS': 745, 'SCHRƖDERMEDIA': 746, 'SCHRODERMEDIA': 746, 'SCHRODER MEDIA': 746, 'SCORPION RELEASING': 747, 'SCORPION': 747, 'SCREAM TEAM RELEASING': 748, 'SCREAM TEAM': 748, 'SCREEN MEDIA': 749, 'SCREEN': 749, 'SCREENBOUND PICTURES': 750, 'SCREENBOUND': 750, 'SCREENWAVE MEDIA': 751, 'SCREENWAVE': 751, 'SECOND RUN': 752, 'SECOND SIGHT': 753, 'SEEDSMAN GROUP': 754, 'SELECT VIDEO': 755, 'SELECTA VISION': 756, 'SENATOR': 757, 'SENTAI FILMWORKS': 758, 'SENTAI': 758, 'SEVEN7': 759, 'SEVERIN FILMS': 760, 'SEVERIN': 760, 'SEVILLE': 761, 'SEYONS ENTERTAINMENT': 762, 'SEYONS': 762, 'SF STUDIOS': 763, 'SGL ENTERTAINMENT': 764, 'SGL': 764, 'SHAMELESS': 765, 'SHAMROCK MEDIA': 766, 'SHAMROCK': 766, 'SHANGHAI EPIC MUSIC ENTERTAINMENT': 767, 'SHANGHAI EPIC ENTERTAINMENT': 767, 'SHANGHAI EPIC MUSIC': 767, 'SHANGHAI MUSIC ENTERTAINMENT': 767, 'SHANGHAI ENTERTAINMENT': 767, 'SHANGHAI MUSIC': 767, 'SHANGHAI': 767, 'SHEMAROO': 768, 'SHOCHIKU': 769, 'SHOCK': 770, 'SHOGAKU KAN': 771, 'SHOUT FACTORY': 772, 'SHOUT! 
FACTORY': 772, 'SHOUT': 772, 'SHOUT!': 772, 'SHOWBOX': 773, 'SHOWTIME ENTERTAINMENT': 774, 'SHOWTIME': 774, 'SHRIEK SHOW': 775, 'SHUDDER': 776, 'SIDONIS': 777, 'SIDONIS CALYSTA': 778, 'SIGNAL ONE ENTERTAINMENT': 779, 'SIGNAL ONE': 779, 'SIGNATURE ENTERTAINMENT': 780, 'SIGNATURE': 780, 'SILVER VISION': 781, 'SINISTER FILM': 782, 'SINISTER': 782, 'SIREN VISUAL ENTERTAINMENT': 783, 'SIREN VISUAL': 783, 'SIREN ENTERTAINMENT': 783, 'SIREN': 783, 'SKANI': 784, 'SKY DIGI': 785, 'SLASHER // VIDEO': 786, 'SLASHER / VIDEO': 786, 'SLASHER VIDEO': 786, 'SLASHER': 786, 'SLOVAK FILM INSTITUTE': 787, 'SLOVAK FILM': 787, - 'SFI': 787, 'SM LIFE DESIGN GROUP': 788, 'SMOOTH PICTURES': 789, 'SMOOTH': 789, 'SNAPPER MUSIC': 790, 'SNAPPER': 790, 'SODA PICTURES': 791, 'SODA': 791, 'SONO LUMINUS': 792, 'SONY MUSIC': 793, 'SONY PICTURES': 794, 'SONY': 794, 'SONY PICTURES CLASSICS': 795, 'SONY CLASSICS': 795, 'SOUL MEDIA': 796, 'SOUL': 796, 'SOULFOOD MUSIC DISTRIBUTION': 797, 'SOULFOOD DISTRIBUTION': 797, 'SOULFOOD MUSIC': 797, 'SOULFOOD': 797, 'SOYUZ': 798, 'SPECTRUM': 799, - 'SPENTZOS FILM': 800, 'SPENTZOS': 800, 'SPIRIT ENTERTAINMENT': 801, 'SPIRIT': 801, 'SPIRIT MEDIA GMBH': 802, 'SPIRIT MEDIA': 802, 'SPLENDID ENTERTAINMENT': 803, 'SPLENDID FILM': 804, 'SPO': 805, 'SQUARE ENIX': 806, 'SRI BALAJI VIDEO': 807, 'SRI BALAJI': 807, 'SRI': 807, 'SRI VIDEO': 807, 'SRS CINEMA': 808, 'SRS': 808, 'SSO RECORDINGS': 809, 'SSO': 809, 'ST2 MUSIC': 810, 'ST2': 810, 'STAR MEDIA ENTERTAINMENT': 811, 'STAR ENTERTAINMENT': 811, 'STAR MEDIA': 811, 'STAR': 811, 'STARLIGHT': 812, 'STARZ / ANCHOR BAY': 813, 'STARZ ANCHOR BAY': 813, 'STARZ': 813, 'ANCHOR BAY': 813, 'STER KINEKOR': 814, 'STERLING ENTERTAINMENT': 815, 'STERLING': 815, 'STINGRAY': 816, 'STOCKFISCH RECORDS': 817, 'STOCKFISCH': 817, 'STRAND RELEASING': 818, 'STRAND': 818, 'STUDIO 4K': 819, 'STUDIO CANAL': 820, 'STUDIO GHIBLI': 821, 'GHIBLI': 821, 'STUDIO HAMBURG ENTERPRISES': 822, 'HAMBURG ENTERPRISES': 822, 'STUDIO HAMBURG': 822, 'HAMBURG': 822, 'STUDIO S': 823, 'SUBKULTUR ENTERTAINMENT': 824, 'SUBKULTUR': 824, 'SUEVIA FILMS': 825, 'SUEVIA': 825, 'SUMMIT ENTERTAINMENT': 826, 'SUMMIT': 826, 'SUNFILM ENTERTAINMENT': 827, 'SUNFILM': 827, 'SURROUND RECORDS': 828, 'SURROUND': 828, 'SVENSK FILMINDUSTRI': 829, 'SVENSK': 829, 'SWEN FILMES': 830, 'SWEN FILMS': 830, 'SWEN': 830, 'SYNAPSE FILMS': 831, 'SYNAPSE': 831, 'SYNDICADO': 832, 'SYNERGETIC': 833, 'T- SERIES': 834, 'T-SERIES': 834, 'T SERIES': 834, 'TSERIES': 834, 'T.V.P.': 835, 'TVP': 835, 'TACET RECORDS': 836, 'TACET': 836, 'TAI SENG': 837, 'TAI SHENG': 838, 'TAKEONE': 839, 'TAKESHOBO': 840, 'TAMASA DIFFUSION': 841, 'TC ENTERTAINMENT': 842, 'TC': 842, 'TDK': 843, 'TEAM MARKETING': 844, 'TEATRO REAL': 845, 'TEMA DISTRIBUCIONES': 846, 'TEMPE DIGITAL': 847, 'TF1 VIDƉO': 848, 'TF1 VIDEO': 848, 'TF1': 848, 'THE BLU': 849, 'BLU': 849, 'THE ECSTASY OF FILMS': 850, 'THE FILM DETECTIVE': 851, 'FILM DETECTIVE': 851, 'THE JOKERS': 852, 'JOKERS': 852, 'THE ON': 853, 'ON': 853, 'THIMFILM': 854, 'THIM FILM': 854, 'THIM': 854, 'THIRD WINDOW FILMS': 855, 'THIRD WINDOW': 855, '3RD WINDOW FILMS': 855, '3RD WINDOW': 855, 'THUNDERBEAN ANIMATION': 856, 'THUNDERBEAN': 856, 'THUNDERBIRD RELEASING': 857, 'THUNDERBIRD': 857, 'TIBERIUS FILM': 858, 'TIME LIFE': 859, 'TIMELESS MEDIA GROUP': 860, 'TIMELESS MEDIA': 860, 'TIMELESS GROUP': 860, 'TIMELESS': 860, 'TLA RELEASING': 861, 'TLA': 861, 'TOBIS FILM': 862, 'TOBIS': 862, 'TOEI': 863, 'TOHO': 864, 'TOKYO SHOCK': 865, 'TOKYO': 865, 'TONPOOL MEDIEN GMBH': 866, 'TONPOOL MEDIEN': 866, 'TOPICS 
ENTERTAINMENT': 867, 'TOPICS': 867, 'TOUCHSTONE PICTURES': 868, 'TOUCHSTONE': 868, 'TRANSMISSION FILMS': 869, 'TRANSMISSION': 869, 'TRAVEL VIDEO STORE': 870, 'TRIART': 871, 'TRIGON FILM': 872, 'TRIGON': 872, 'TRINITY HOME ENTERTAINMENT': 873, 'TRINITY ENTERTAINMENT': 873, 'TRINITY HOME': 873, 'TRINITY': 873, 'TRIPICTURES': 874, 'TRI-PICTURES': 874, 'TRI PICTURES': 874, 'TROMA': 875, 'TURBINE MEDIEN': 876, 'TURTLE RECORDS': 877, 'TURTLE': 877, 'TVA FILMS': 878, 'TVA': 878, 'TWILIGHT TIME': 879, 'TWILIGHT': 879, 'TT': 879, 'TWIN CO., LTD.': 880, 'TWIN CO, LTD.': 880, 'TWIN CO., LTD': 880, 'TWIN CO, LTD': 880, 'TWIN CO LTD': 880, 'TWIN LTD': 880, 'TWIN CO.': 880, 'TWIN CO': 880, 'TWIN': 880, 'UCA': 881, 'UDR': 882, 'UEK': 883, 'UFA/DVD': 884, 'UFA DVD': 884, 'UFADVD': 884, 'UGC PH': 885, 'ULTIMATE3DHEAVEN': 886, 'ULTRA': 887, 'UMBRELLA ENTERTAINMENT': 888, 'UMBRELLA': 888, 'UMC': 889, "UNCORK'D ENTERTAINMENT": 890, 'UNCORKD ENTERTAINMENT': 890, 'UNCORK D ENTERTAINMENT': 890, "UNCORK'D": 890, 'UNCORK D': 890, 'UNCORKD': 890, 'UNEARTHED FILMS': 891, 'UNEARTHED': 891, 'UNI DISC': 892, 'UNIMUNDOS': 893, 'UNITEL': 894, 'UNIVERSAL MUSIC': 895, 'UNIVERSAL SONY PICTURES HOME ENTERTAINMENT': 896, 'UNIVERSAL SONY PICTURES ENTERTAINMENT': 896, 'UNIVERSAL SONY PICTURES HOME': 896, 'UNIVERSAL SONY PICTURES': 896, 'UNIVERSAL HOME ENTERTAINMENT': - 896, 'UNIVERSAL ENTERTAINMENT': 896, 'UNIVERSAL HOME': 896, 'UNIVERSAL STUDIOS': 897, 'UNIVERSAL': 897, 'UNIVERSE LASER & VIDEO CO.': 898, 'UNIVERSE LASER AND VIDEO CO.': 898, 'UNIVERSE LASER & VIDEO CO': 898, 'UNIVERSE LASER AND VIDEO CO': 898, 'UNIVERSE LASER CO.': 898, 'UNIVERSE LASER CO': 898, 'UNIVERSE LASER': 898, 'UNIVERSUM FILM': 899, 'UNIVERSUM': 899, 'UTV': 900, 'VAP': 901, 'VCI': 902, 'VENDETTA FILMS': 903, 'VENDETTA': 903, 'VERSƁTIL HOME VIDEO': 904, 'VERSƁTIL VIDEO': 904, 'VERSƁTIL HOME': 904, 'VERSƁTIL': 904, 'VERSATIL HOME VIDEO': 904, 'VERSATIL VIDEO': 904, 'VERSATIL HOME': 904, 'VERSATIL': 904, 'VERTICAL ENTERTAINMENT': 905, 'VERTICAL': 905, 'VƉRTICE 360Āŗ': 906, 'VƉRTICE 360': 906, 'VERTICE 360o': 906, 'VERTICE 360': 906, 'VERTIGO BERLIN': 907, 'VƉRTIGO FILMS': 908, 'VƉRTIGO': 908, 'VERTIGO FILMS': 908, 'VERTIGO': 908, 'VERVE PICTURES': 909, 'VIA VISION ENTERTAINMENT': 910, 'VIA VISION': 910, 'VICOL ENTERTAINMENT': 911, 'VICOL': 911, 'VICOM': 912, 'VICTOR ENTERTAINMENT': 913, 'VICTOR': 913, 'VIDEA CDE': 914, 'VIDEO FILM EXPRESS': 915, 'VIDEO FILM': 915, 'VIDEO EXPRESS': 915, 'VIDEO MUSIC, INC.': 916, 'VIDEO MUSIC, INC': 916, 'VIDEO MUSIC INC.': 916, 'VIDEO MUSIC INC': 916, 'VIDEO MUSIC': 916, 'VIDEO SERVICE CORP.': 917, 'VIDEO SERVICE CORP': 917, 'VIDEO SERVICE': 917, 'VIDEO TRAVEL': 918, 'VIDEOMAX': 919, 'VIDEO MAX': 919, 'VII PILLARS ENTERTAINMENT': 920, 'VII PILLARS': 920, 'VILLAGE FILMS': 921, 'VINEGAR SYNDROME': 922, 'VINEGAR': 922, 'VS': 922, 'VINNY MOVIES': 923, 'VINNY': 923, 'VIRGIL FILMS & ENTERTAINMENT': 924, 'VIRGIL FILMS AND ENTERTAINMENT': 924, 'VIRGIL ENTERTAINMENT': 924, 'VIRGIL FILMS': 924, 'VIRGIL': 924, 'VIRGIN RECORDS': 925, 'VIRGIN': 925, 'VISION FILMS': 926, 'VISION': 926, 'VISUAL ENTERTAINMENT GROUP': 927, 'VISUAL GROUP': 927, 'VISUAL ENTERTAINMENT': 927, 'VISUAL': 927, 'VIVENDI VISUAL ENTERTAINMENT': 928, 'VIVENDI VISUAL': 928, 'VIVENDI': 928, 'VIZ PICTURES': 929, 'VIZ': 929, 'VLMEDIA': 930, 'VL MEDIA': 930, 'VL': 930, 'VOLGA': 931, 'VVS FILMS': 932, - 'VVS': 932, 'VZ HANDELS GMBH': 933, 'VZ HANDELS': 933, 'WARD RECORDS': 934, 'WARD': 934, 'WARNER BROS.': 935, 'WARNER BROS': 935, 'WARNER ARCHIVE': 935, 'WARNER ARCHIVE 
COLLECTION': 935, 'WAC': 935, 'WARNER': 935, 'WARNER MUSIC': 936, 'WEA': 937, 'WEINSTEIN COMPANY': 938, 'WEINSTEIN': 938, 'WELL GO USA': 939, 'WELL GO': 939, 'WELTKINO FILMVERLEIH': 940, 'WEST VIDEO': 941, 'WEST': 941, 'WHITE PEARL MOVIES': 942, 'WHITE PEARL': 942, + '01 DISTRIBUTION': 1, '100 DESTINATIONS TRAVEL FILM': 2, '101 FILMS': 3, '1FILMS': 4, '2 ENTERTAIN VIDEO': 5, '20TH CENTURY FOX': 6, '2L': 7, '3D CONTENT HUB': 8, '3D MEDIA': 9, '3L FILM': 10, '4DIGITAL': 11, '4DVD': 12, '4K ULTRA HD MOVIES': 13, '4K UHD': 13, '8-FILMS': 14, '84 ENTERTAINMENT': 15, '88 FILMS': 16, '@ANIME': 17, 'ANIME': 17, 'A CONTRACORRIENTE': 18, 'A CONTRACORRIENTE FILMS': 19, 'A&E HOME VIDEO': 20, 'A&E': 20, 'A&M RECORDS': 21, 'A+E NETWORKS': 22, 'A+R': 23, 'A-FILM': 24, 'AAA': 25, 'AB VIDƉO': 26, 'AB VIDEO': 26, 'ABC - (AUSTRALIAN BROADCASTING CORPORATION)': 27, 'ABC': 27, 'ABKCO': 28, 'ABSOLUT MEDIEN': 29, 'ABSOLUTE': 30, 'ACCENT FILM ENTERTAINMENT': 31, 'ACCENTUS': 32, 'ACORN MEDIA': 33, 'AD VITAM': 34, 'ADA': 35, 'ADITYA VIDEOS': 36, 'ADSO FILMS': 37, 'AFM RECORDS': 38, 'AGFA': 39, 'AIX RECORDS': 40, 'ALAMODE FILM': 41, 'ALBA RECORDS': 42, 'ALBANY RECORDS': 43, 'ALBATROS': 44, 'ALCHEMY': 45, 'ALIVE': 46, 'ALL ANIME': 47, 'ALL INTERACTIVE ENTERTAINMENT': 48, 'ALLEGRO': 49, 'ALLIANCE': 50, 'ALPHA MUSIC': 51, 'ALTERDYSTRYBUCJA': 52, 'ALTERED INNOCENCE': 53, 'ALTITUDE FILM DISTRIBUTION': 54, 'ALUCARD RECORDS': 55, 'AMAZING D.C.': 56, 'AMAZING DC': 56, 'AMMO CONTENT': 57, 'AMUSE SOFT ENTERTAINMENT': 58, 'ANCONNECT': 59, 'ANEC': 60, 'ANIMATSU': 61, 'ANIME HOUSE': 62, 'ANIME LTD': 63, 'ANIME WORKS': 64, 'ANIMEIGO': 65, 'ANIPLEX': 66, 'ANOLIS ENTERTAINMENT': 67, 'ANOTHER WORLD ENTERTAINMENT': 68, 'AP INTERNATIONAL': 69, 'APPLE': 70, 'ARA MEDIA': 71, 'ARBELOS': 72, 'ARC ENTERTAINMENT': 73, 'ARP SƉLECTION': 74, 'ARP SELECTION': 74, 'ARROW': 75, 'ART SERVICE': 76, 'ART VISION': 77, 'ARTE ƉDITIONS': 78, 'ARTE EDITIONS': 78, 'ARTE VIDƉO': 79, 'ARTE VIDEO': 79, 'ARTHAUS MUSIK': 80, 'ARTIFICIAL EYE': 81, 'ARTSPLOITATION FILMS': 82, 'ARTUS FILMS': 83, 'ASCOT ELITE HOME ENTERTAINMENT': 84, 'ASIA VIDEO': 85, 'ASMIK ACE': 86, 'ASTRO RECORDS & FILMWORKS': 87, 'ASYLUM': 88, 'ATLANTIC FILM': 89, 'ATLANTIC RECORDS': 90, 'ATLAS FILM': 91, 'AUDIO VISUAL ENTERTAINMENT': 92, 'AURO-3D CREATIVE LABEL': 93, 'AURUM': 94, 'AV VISIONEN': 95, 'AV-JET': 96, 'AVALON': 97, 'AVENTI': 98, 'AVEX TRAX': 99, 'AXIOM': 100, 'AXIS RECORDS': 101, 'AYNGARAN': 102, 'BAC FILMS': 103, 'BACH FILMS': 104, 'BANDAI VISUAL': 105, 'BARCLAY': 106, 'BBC': 107, 'BRITISH BROADCASTING CORPORATION': 107, 'BBI FILMS': 108, 'BBI': 108, 'BCI HOME ENTERTAINMENT': 109, 'BEGGARS BANQUET': 110, 'BEL AIR CLASSIQUES': 111, 'BELGA FILMS': 112, 'BELVEDERE': 113, 'BENELUX FILM DISTRIBUTORS': 114, 'BENNETT-WATT MEDIA': 115, 'BERLIN CLASSICS': 116, 'BERLINER PHILHARMONIKER RECORDINGS': 117, 'BEST ENTERTAINMENT': 118, 'BEYOND HOME ENTERTAINMENT': 119, 'BFI VIDEO': 120, 'BFI': 120, 'BRITISH FILM INSTITUTE': 120, 'BFS ENTERTAINMENT': 121, 'BFS': 121, 'BHAVANI': 122, 'BIBER RECORDS': 123, 'BIG HOME VIDEO': 124, 'BILDSTƖRUNG': 125, 'BILDSTORUNG': 125, 'BILL ZEBUB': 126, 'BIRNENBLATT': 127, 'BIT WEL': 128, 'BLACK BOX': 129, 'BLACK HILL PICTURES': 130, 'BLACK HILL': 130, 'BLACK HOLE RECORDINGS': 131, 'BLACK HOLE': 131, 'BLAQOUT': 132, 'BLAUFIELD MUSIC': 133, 'BLAUFIELD': 133, 'BLOCKBUSTER ENTERTAINMENT': 134, 'BLOCKBUSTER': 134, 'BLU PHASE MEDIA': 135, 'BLU-RAY ONLY': 136, 'BLU-RAY': 136, 'BLURAY ONLY': 136, 'BLURAY': 136, 'BLUE GENTIAN RECORDS': 137, 'BLUE KINO': 138, 'BLUE 
UNDERGROUND': 139, 'BMG/ARISTA': 140, 'BMG': 140, 'BMGARISTA': 140, 'BMG ARISTA': 140, 'ARISTA': + 140, 'ARISTA/BMG': 140, 'ARISTABMG': 140, 'ARISTA BMG': 140, 'BONTON FILM': 141, 'BONTON': 141, 'BOOMERANG PICTURES': 142, 'BOOMERANG': 142, 'BQHL ƉDITIONS': 143, 'BQHL EDITIONS': 143, 'BQHL': 143, 'BREAKING GLASS': 144, 'BRIDGESTONE': 145, 'BRINK': 146, 'BROAD GREEN PICTURES': 147, 'BROAD GREEN': 147, 'BUSCH MEDIA GROUP': 148, 'BUSCH': 148, 'C MAJOR': 149, 'C.B.S.': 150, 'CAICHANG': 151, 'CALIFƓRNIA FILMES': 152, 'CALIFORNIA FILMES': 152, 'CALIFORNIA': 152, 'CAMEO': 153, 'CAMERA OBSCURA': 154, 'CAMERATA': 155, 'CAMP MOTION PICTURES': 156, 'CAMP MOTION': 156, 'CAPELIGHT PICTURES': 157, 'CAPELIGHT': 157, 'CAPITOL': 159, 'CAPITOL RECORDS': 159, 'CAPRICCI': 160, 'CARGO RECORDS': 161, 'CARLOTTA FILMS': 162, 'CARLOTTA': 162, 'CARLOTA': 162, 'CARMEN FILM': 163, 'CASCADE': 164, 'CATCHPLAY': 165, 'CAULDRON FILMS': 166, 'CAULDRON': 166, 'CBS TELEVISION STUDIOS': 167, 'CBS': 167, 'CCTV': 168, 'CCV ENTERTAINMENT': 169, 'CCV': 169, 'CD BABY': 170, 'CD LAND': 171, 'CECCHI GORI': 172, 'CENTURY MEDIA': 173, 'CHUAN XUN SHI DAI MULTIMEDIA': 174, 'CINE-ASIA': 175, 'CINƉART': 176, 'CINEART': 176, 'CINEDIGM': 177, 'CINEFIL IMAGICA': 178, 'CINEMA EPOCH': 179, 'CINEMA GUILD': 180, 'CINEMA LIBRE STUDIOS': 181, 'CINEMA MONDO': 182, 'CINEMATIC VISION': 183, 'CINEPLOIT RECORDS': 184, 'CINESTRANGE EXTREME': 185, 'CITEL VIDEO': 186, 'CITEL': 186, 'CJ ENTERTAINMENT': 187, 'CJ': 187, 'CLASSIC MEDIA': 188, 'CLASSICFLIX': 189, 'CLASSICLINE': 190, 'CLAUDIO RECORDS': 191, 'CLEAR VISION': 192, 'CLEOPATRA': 193, 'CLOSE UP': 194, 'CMS MEDIA LIMITED': 195, 'CMV LASERVISION': 196, 'CN ENTERTAINMENT': 197, 'CODE RED': 198, 'COHEN MEDIA GROUP': 199, 'COHEN': 199, 'COIN DE MIRE CINƉMA': 200, 'COIN DE MIRE CINEMA': 200, 'COLOSSEO FILM': 201, 'COLUMBIA': 203, 'COLUMBIA PICTURES': 203, 'COLUMBIA/TRI-STAR': 204, 'TRI-STAR': 204, 'COMMERCIAL MARKETING': 205, 'CONCORD MUSIC GROUP': 206, 'CONCORDE VIDEO': 207, 'CONDOR': 208, 'CONSTANTIN FILM': 209, 'CONSTANTIN': 209, 'CONSTANTINO FILMES': 210, 'CONSTANTINO': 210, 'CONSTRUCTIVE MEDIA SERVICE': 211, 'CONSTRUCTIVE': 211, 'CONTENT ZONE': 212, 'CONTENTS GATE': 213, 'COQUEIRO VERDE': 214, 'CORNERSTONE MEDIA': 215, 'CORNERSTONE': 215, 'CP DIGITAL': 216, 'CREST MOVIES': 217, 'CRITERION': 218, 'CRITERION COLLECTION': + 218, 'CC': 218, 'CRYSTAL CLASSICS': 219, 'CULT EPICS': 220, 'CULT FILMS': 221, 'CULT VIDEO': 222, 'CURZON FILM WORLD': 223, 'D FILMS': 224, "D'AILLY COMPANY": 225, 'DAILLY COMPANY': 225, 'D AILLY COMPANY': 225, "D'AILLY": 225, 'DAILLY': 225, 'D AILLY': 225, 'DA CAPO': 226, 'DA MUSIC': 227, "DALL'ANGELO PICTURES": 228, 'DALLANGELO PICTURES': 228, "DALL'ANGELO": 228, 'DALL ANGELO PICTURES': 228, 'DALL ANGELO': 228, 'DAREDO': 229, 'DARK FORCE ENTERTAINMENT': 230, 'DARK FORCE': 230, 'DARK SIDE RELEASING': 231, 'DARK SIDE': 231, 'DAZZLER MEDIA': 232, 'DAZZLER': 232, 'DCM PICTURES': 233, 'DCM': 233, 'DEAPLANETA': 234, 'DECCA': 235, 'DEEPJOY': 236, 'DEFIANT SCREEN ENTERTAINMENT': 237, 'DEFIANT SCREEN': 237, 'DEFIANT': 237, 'DELOS': 238, 'DELPHIAN RECORDS': 239, 'DELPHIAN': 239, 'DELTA MUSIC & ENTERTAINMENT': 240, 'DELTA MUSIC AND ENTERTAINMENT': 240, 'DELTA MUSIC ENTERTAINMENT': 240, 'DELTA MUSIC': 240, 'DELTAMAC CO. 
LTD.': 241, 'DELTAMAC CO LTD': 241, 'DELTAMAC CO': 241, 'DELTAMAC': 241, 'DEMAND MEDIA': 242, 'DEMAND': 242, 'DEP': 243, 'DEUTSCHE GRAMMOPHON': 244, 'DFW': 245, 'DGM': 246, 'DIAPHANA': 247, 'DIGIDREAMS STUDIOS': 248, 'DIGIDREAMS': 248, 'DIGITAL ENVIRONMENTS': 249, 'DIGITAL': 249, 'DISCOTEK MEDIA': 250, 'DISCOVERY CHANNEL': 251, 'DISCOVERY': 251, 'DISK KINO': 252, 'DISNEY / BUENA VISTA': 253, 'DISNEY': 253, 'BUENA VISTA': 253, 'DISNEY BUENA VISTA': 253, 'DISTRIBUTION SELECT': 254, 'DIVISA': 255, 'DNC ENTERTAINMENT': 256, 'DNC': 256, 'DOGWOOF': 257, 'DOLMEN HOME VIDEO': 258, 'DOLMEN': 258, 'DONAU FILM': 259, 'DONAU': 259, 'DORADO FILMS': 260, 'DORADO': 260, 'DRAFTHOUSE FILMS': 261, 'DRAFTHOUSE': 261, 'DRAGON FILM ENTERTAINMENT': 262, 'DRAGON ENTERTAINMENT': 262, 'DRAGON FILM': 262, 'DRAGON': 262, 'DREAMWORKS': 263, 'DRIVE ON RECORDS': 264, 'DRIVE ON': 264, 'DRIVE-ON': 264, 'DRIVEON': 264, 'DS MEDIA': 265, 'DTP ENTERTAINMENT AG': 266, 'DTP ENTERTAINMENT': 266, 'DTP AG': 266, 'DTP': 266, 'DTS ENTERTAINMENT': 267, 'DTS': 267, 'DUKE MARKETING': 268, 'DUKE VIDEO DISTRIBUTION': 269, 'DUKE': 269, 'DUTCH FILMWORKS': 270, 'DUTCH': 270, 'DVD INTERNATIONAL': 271, 'DVD': 271, 'DYBEX': 272, 'DYNAMIC': 273, 'DYNIT': 274, 'E1 ENTERTAINMENT': 275, 'E1': 275, 'EAGLE ENTERTAINMENT': 276, 'EAGLE HOME ENTERTAINMENT PVT.LTD.': + 277, 'EAGLE HOME ENTERTAINMENT PVTLTD': 277, 'EAGLE HOME ENTERTAINMENT PVT LTD': 277, 'EAGLE HOME ENTERTAINMENT': 277, 'EAGLE PICTURES': 278, 'EAGLE ROCK ENTERTAINMENT': 279, 'EAGLE ROCK': 279, 'EAGLE VISION MEDIA': 280, 'EAGLE VISION': 280, 'EARMUSIC': 281, 'EARTH ENTERTAINMENT': 282, 'EARTH': 282, 'ECHO BRIDGE ENTERTAINMENT': 283, 'ECHO BRIDGE': 283, 'EDEL GERMANY GMBH': 284, 'EDEL GERMANY': 284, 'EDEL RECORDS': 285, 'EDITION TONFILM': 286, 'EDITIONS MONTPARNASSE': 287, 'EDKO FILMS LTD.': 288, 'EDKO FILMS LTD': 288, 'EDKO FILMS': 288, 'EDKO': 288, "EIN'S M&M CO": 289, 'EINS M&M CO': 289, "EIN'S M&M": 289, 'EINS M&M': 289, 'ELEA-MEDIA': 290, 'ELEA MEDIA': 290, 'ELEA': 290, 'ELECTRIC PICTURE': 291, 'ELECTRIC': 291, 'ELEPHANT FILMS': 292, 'ELEPHANT': 292, 'ELEVATION': 293, 'EMI': 294, 'EMON': 295, 'EMS': 296, 'EMYLIA': 297, 'ENE MEDIA': 298, 'ENE': 298, 'ENTERTAINMENT IN VIDEO': 299, 'ENTERTAINMENT IN': 299, 'ENTERTAINMENT ONE': 300, 'ENTERTAINMENT ONE FILMS CANADA INC.': 301, 'ENTERTAINMENT ONE FILMS CANADA INC': 301, 'ENTERTAINMENT ONE FILMS CANADA': 301, 'ENTERTAINMENT ONE CANADA INC': 301, + 'ENTERTAINMENT ONE CANADA': 301, 'ENTERTAINMENTONE': 302, 'EONE': 303, 'EOS': 304, 'EPIC PICTURES': 305, 'EPIC': 305, 'EPIC RECORDS': 306, 'ERATO': 307, 'EROS': 308, 'ESC EDITIONS': 309, 'ESCAPI MEDIA BV': 310, 'ESOTERIC RECORDINGS': 311, 'ESPN FILMS': 312, 'EUREKA ENTERTAINMENT': 313, 'EUREKA': 313, 'EURO PICTURES': 314, 'EURO VIDEO': 315, 'EUROARTS': 316, 'EUROPA FILMES': 317, 'EUROPA': 317, 'EUROPACORP': 318, 'EUROZOOM': 319, 'EXCEL': 320, 'EXPLOSIVE MEDIA': 321, 'EXPLOSIVE': 321, 'EXTRALUCID FILMS': 322, 'EXTRALUCID': 322, 'EYE SEE MOVIES': 323, 'EYE SEE': 323, 'EYK MEDIA': 324, 'EYK': 324, 'FABULOUS FILMS': 325, 'FABULOUS': 325, 'FACTORIS FILMS': 326, 'FACTORIS': 326, 'FARAO RECORDS': 327, 'FARBFILM HOME ENTERTAINMENT': 328, 'FARBFILM ENTERTAINMENT': 328, 'FARBFILM HOME': 328, 'FARBFILM': 328, 'FEELGOOD ENTERTAINMENT': 329, 'FEELGOOD': 329, 'FERNSEHJUWELEN': 330, 'FILM CHEST': 331, 'FILM MEDIA': 332, 'FILM MOVEMENT': 333, 'FILM4': 334, 'FILMART': 335, 'FILMAURO': 336, 'FILMAX': 337, 'FILMCONFECT HOME ENTERTAINMENT': 338, 'FILMCONFECT ENTERTAINMENT': 338, 'FILMCONFECT HOME': 338, 
'FILMCONFECT': 338, 'FILMEDIA': 339, 'FILMJUWELEN': 340, 'FILMOTEKA NARODAWA': 341, 'FILMRISE': 342, 'FINAL CUT ENTERTAINMENT': 343, 'FINAL CUT': 343, 'FIREHOUSE 12 RECORDS': 344, 'FIREHOUSE 12': 344, 'FIRST INTERNATIONAL PRODUCTION': 345, 'FIRST INTERNATIONAL': 345, 'FIRST LOOK STUDIOS': 346, 'FIRST LOOK': 346, 'FLAGMAN TRADE': 347, 'FLASHSTAR FILMES': 348, 'FLASHSTAR': 348, 'FLICKER ALLEY': 349, 'FNC ADD CULTURE': 350, 'FOCUS FILMES': 351, 'FOCUS': 351, 'FOKUS MEDIA': 352, 'FOKUSA': 352, 'FOX PATHE EUROPA': 353, 'FOX PATHE': 353, 'FOX EUROPA': 353, 'FOX/MGM': 354, 'FOX MGM': 354, 'MGM': 354, 'MGM/FOX': 354, 'FOX': 354, 'FPE': 355, 'FRANCE TƉLƉVISIONS DISTRIBUTION': 356, 'FRANCE TELEVISIONS DISTRIBUTION': 356, 'FRANCE TELEVISIONS': 356, 'FRANCE': 356, 'FREE DOLPHIN ENTERTAINMENT': 357, 'FREE DOLPHIN': 357, 'FREESTYLE DIGITAL MEDIA': 358, 'FREESTYLE DIGITAL': 358, 'FREESTYLE': 358, 'FREMANTLE HOME ENTERTAINMENT': 359, 'FREMANTLE ENTERTAINMENT': 359, 'FREMANTLE HOME': 359, 'FREMANTL': 359, 'FRENETIC FILMS': 360, 'FRENETIC': 360, 'FRONTIER WORKS': 361, 'FRONTIER': 361, 'FRONTIERS MUSIC': 362, 'FRONTIERS RECORDS': 363, 'FS FILM OY': 364, 'FS FILM': + 364, 'FULL MOON FEATURES': 365, 'FULL MOON': 365, 'FUN CITY EDITIONS': 366, 'FUN CITY': 366, 'FUNIMATION ENTERTAINMENT': 367, 'FUNIMATION': 367, 'FUSION': 368, 'FUTUREFILM': 369, 'G2 PICTURES': 370, 'G2': 370, 'GAGA COMMUNICATIONS': 371, 'GAGA': 371, 'GAIAM': 372, 'GALAPAGOS': 373, 'GAMMA HOME ENTERTAINMENT': 374, 'GAMMA ENTERTAINMENT': 374, 'GAMMA HOME': 374, 'GAMMA': 374, 'GARAGEHOUSE PICTURES': 375, 'GARAGEHOUSE': 375, 'GARAGEPLAY (車åŗ«å؛ę؂)': 376, '車åŗ«å؛ę؂': 376, 'GARAGEPLAY (Che Ku Yu Le )': 376, 'GARAGEPLAY': 376, 'Che Ku Yu Le': 376, 'GAUMONT': 377, 'GEFFEN': 378, 'GENEON ENTERTAINMENT': 379, 'GENEON': 379, 'GENEON UNIVERSAL ENTERTAINMENT': 380, 'GENERAL VIDEO RECORDING': 381, 'GLASS DOLL FILMS': 382, 'GLASS DOLL': 382, 'GLOBE MUSIC MEDIA': 383, 'GLOBE MUSIC': 383, 'GLOBE MEDIA': 383, 'GLOBE': 383, 'GO ENTERTAIN': 384, 'GO': 384, 'GOLDEN HARVEST': 385, 'GOOD!MOVIES': 386, + 'GOOD! 
MOVIES': 386, 'GOOD MOVIES': 386, 'GRAPEVINE VIDEO': 387, 'GRAPEVINE': 387, 'GRASSHOPPER FILM': 388, 'GRASSHOPPER FILMS': 388, 'GRASSHOPPER': 388, 'GRAVITAS VENTURES': 389, 'GRAVITAS': 389, 'GREAT MOVIES': 390, 'GREAT': 390, + 'GREEN APPLE ENTERTAINMENT': 391, 'GREEN ENTERTAINMENT': 391, 'GREEN APPLE': 391, 'GREEN': 391, 'GREENNARAE MEDIA': 392, 'GREENNARAE': 392, 'GRINDHOUSE RELEASING': 393, 'GRINDHOUSE': 393, 'GRIND HOUSE': 393, 'GRYPHON ENTERTAINMENT': 394, 'GRYPHON': 394, 'GUNPOWDER & SKY': 395, 'GUNPOWDER AND SKY': 395, 'GUNPOWDER SKY': 395, 'GUNPOWDER + SKY': 395, 'GUNPOWDER': 395, 'HANABEE ENTERTAINMENT': 396, 'HANABEE': 396, 'HANNOVER HOUSE': 397, 'HANNOVER': 397, 'HANSESOUND': 398, 'HANSE SOUND': 398, 'HANSE': 398, 'HAPPINET': 399, 'HARMONIA MUNDI': 400, 'HARMONIA': 400, 'HBO': 401, 'HDC': 402, 'HEC': 403, 'HELL & BACK RECORDINGS': 404, 'HELL AND BACK RECORDINGS': 404, 'HELL & BACK': 404, 'HELL AND BACK': 404, "HEN'S TOOTH VIDEO": 405, 'HENS TOOTH VIDEO': 405, "HEN'S TOOTH": 405, 'HENS TOOTH': 405, 'HIGH FLIERS': 406, 'HIGHLIGHT': 407, 'HILLSONG': 408, 'HISTORY CHANNEL': 409, 'HISTORY': 409, 'HK VIDƉO': 410, 'HK VIDEO': 410, 'HK': 410, 'HMH HAMBURGER MEDIEN HAUS': 411, 'HAMBURGER MEDIEN HAUS': 411, 'HMH HAMBURGER MEDIEN': 411, 'HMH HAMBURGER': 411, 'HMH': 411, 'HOLLYWOOD CLASSIC ENTERTAINMENT': 412, 'HOLLYWOOD CLASSIC': 412, 'HOLLYWOOD PICTURES': 413, 'HOLLYWOOD': 413, 'HOPSCOTCH ENTERTAINMENT': 414, 'HOPSCOTCH': 414, 'HPM': 415, 'HƄNNSLER CLASSIC': 416, 'HANNSLER CLASSIC': 416, 'HANNSLER': 416, 'I-CATCHER': 417, 'I CATCHER': 417, 'ICATCHER': 417, 'I-ON NEW MEDIA': 418, 'I ON NEW MEDIA': 418, 'ION NEW MEDIA': 418, 'ION MEDIA': 418, 'I-ON': 418, 'ION': 418, 'IAN PRODUCTIONS': 419, 'IAN': 419, 'ICESTORM': 420, 'ICON FILM DISTRIBUTION': 421, 'ICON DISTRIBUTION': 421, 'ICON FILM': 421, 'ICON': 421, 'IDEALE AUDIENCE': 422, 'IDEALE': 422, 'IFC FILMS': 423, 'IFC': 423, 'IFILM': 424, 'ILLUSIONS UNLTD.': 425, 'ILLUSIONS UNLTD': 425, 'ILLUSIONS': 425, 'IMAGE ENTERTAINMENT': 426, 'IMAGE': 426, + 'IMAGEM FILMES': 427, 'IMAGEM': 427, 'IMOVISION': 428, 'IMPERIAL CINEPIX': 429, 'IMPRINT': 430, 'IMPULS HOME ENTERTAINMENT': 431, 'IMPULS ENTERTAINMENT': 431, 'IMPULS HOME': 431, 'IMPULS': 431, 'IN-AKUSTIK': 432, 'IN AKUSTIK': 432, 'INAKUSTIK': 432, 'INCEPTION MEDIA GROUP': 433, 'INCEPTION MEDIA': 433, 'INCEPTION GROUP': 433, 'INCEPTION': 433, 'INDEPENDENT': 434, 'INDICAN': 435, 'INDIE RIGHTS': 436, 'INDIE': 436, 'INDIGO': 437, 'INFO': 438, 'INJOINGAN': 439, 'INKED PICTURES': 440, 'INKED': 440, 'INSIDE OUT MUSIC': 441, 'INSIDE MUSIC': 441, 'INSIDE OUT': 441, 'INSIDE': 441, 'INTERCOM': 442, 'INTERCONTINENTAL VIDEO': 443, 'INTERCONTINENTAL': 443, 'INTERGROOVE': 444, + 'INTERSCOPE': 445, 'INVINCIBLE PICTURES': 446, 'INVINCIBLE': 446, 'ISLAND/MERCURY': 447, 'ISLAND MERCURY': 447, 'ISLANDMERCURY': 447, 'ISLAND & MERCURY': 447, 'ISLAND AND MERCURY': 447, 'ISLAND': 447, 'ITN': 448, 'ITV DVD': 449, 'ITV': 449, 'IVC': 450, 'IVE ENTERTAINMENT': 451, 'IVE': 451, 'J&R ADVENTURES': 452, 'J&R': 452, 'JR': 452, 'JAKOB': 453, 'JONU MEDIA': 454, 'JONU': 454, 'JRB PRODUCTIONS': 455, 'JRB': 455, 'JUST BRIDGE ENTERTAINMENT': 456, 'JUST BRIDGE': 456, 'JUST ENTERTAINMENT': 456, 'JUST': 456, 'KABOOM ENTERTAINMENT': 457, 'KABOOM': 457, 'KADOKAWA ENTERTAINMENT': 458, 'KADOKAWA': 458, 'KAIROS': 459, 'KALEIDOSCOPE ENTERTAINMENT': 460, 'KALEIDOSCOPE': 460, 'KAM & RONSON ENTERPRISES': 461, 'KAM & RONSON': 461, 'KAM&RONSON ENTERPRISES': 461, 'KAM&RONSON': 461, 'KAM AND RONSON ENTERPRISES': 461, 'KAM AND RONSON': 461, 'KANA HOME 
VIDEO': 462, 'KARMA FILMS': 463, 'KARMA': 463, 'KATZENBERGER': 464, 'KAZE': 465, 'KBS MEDIA': 466, 'KBS': 466, 'KD MEDIA': 467, 'KD': 467, 'KING MEDIA': 468, 'KING': 468, 'KING RECORDS': 469, 'KINO LORBER': 470, 'KINO': 470, 'KINO SWIAT': 471, 'KINOKUNIYA': 472, 'KINOWELT HOME ENTERTAINMENT/DVD': 473, 'KINOWELT HOME ENTERTAINMENT': 473, 'KINOWELT ENTERTAINMENT': 473, 'KINOWELT HOME DVD': 473, 'KINOWELT ENTERTAINMENT/DVD': 473, 'KINOWELT DVD': 473, 'KINOWELT': 473, 'KIT PARKER FILMS': 474, 'KIT PARKER': 474, 'KITTY MEDIA': 475, 'KNM HOME ENTERTAINMENT': 476, 'KNM ENTERTAINMENT': 476, 'KNM HOME': 476, 'KNM': 476, 'KOBA FILMS': 477, 'KOBA': 477, 'KOCH ENTERTAINMENT': 478, 'KOCH MEDIA': 479, 'KOCH': 479, 'KRAKEN RELEASING': 480, 'KRAKEN': 480, 'KSCOPE': 481, 'KSM': 482, 'KULTUR': 483, "L'ATELIER D'IMAGES": 484, "LATELIER D'IMAGES": 484, "L'ATELIER DIMAGES": 484, 'LATELIER DIMAGES': 484, "L ATELIER D'IMAGES": 484, "L'ATELIER D IMAGES": 484, + 'L ATELIER D IMAGES': 484, "L'ATELIER": 484, 'L ATELIER': 484, 'LATELIER': 484, 'LA AVENTURA AUDIOVISUAL': 485, 'LA AVENTURA': 485, 'LACE GROUP': 486, 'LACE': 486, 'LASER PARADISE': 487, 'LAYONS': 488, 'LCJ EDITIONS': 489, 'LCJ': 489, 'LE CHAT QUI FUME': 490, 'LE PACTE': 491, 'LEDICK FILMHANDEL': 492, 'LEGEND': 493, 'LEOMARK STUDIOS': 494, 'LEOMARK': 494, 'LEONINE FILMS': 495, 'LEONINE': 495, 'LICHTUNG MEDIA LTD': 496, 'LICHTUNG LTD': 496, 'LICHTUNG MEDIA LTD.': 496, 'LICHTUNG LTD.': 496, 'LICHTUNG MEDIA': 496, 'LICHTUNG': 496, 'LIGHTHOUSE HOME ENTERTAINMENT': 497, 'LIGHTHOUSE ENTERTAINMENT': 497, 'LIGHTHOUSE HOME': 497, 'LIGHTHOUSE': 497, 'LIGHTYEAR': 498, 'LIONSGATE FILMS': 499, 'LIONSGATE': 499, 'LIZARD CINEMA TRADE': 500, 'LLAMENTOL': 501, 'LOBSTER FILMS': 502, 'LOBSTER': 502, 'LOGON': 503, 'LORBER FILMS': 504, 'LORBER': 504, 'LOS BANDITOS FILMS': 505, 'LOS BANDITOS': 505, 'LOUD & PROUD RECORDS': 506, 'LOUD AND PROUD RECORDS': 506, 'LOUD & PROUD': 506, 'LOUD AND PROUD': 506, 'LSO LIVE': 507, 'LUCASFILM': 508, 'LUCKY RED': 509, 'LUMIƈRE HOME ENTERTAINMENT': 510, 'LUMIERE HOME ENTERTAINMENT': 510, 'LUMIERE ENTERTAINMENT': 510, 'LUMIERE HOME': 510, 'LUMIERE': 510, 'M6 VIDEO': 511, 'M6': 511, 'MAD DIMENSION': 512, 'MADMAN ENTERTAINMENT': 513, 'MADMAN': 513, 'MAGIC BOX': 514, 'MAGIC PLAY': 515, 'MAGNA HOME ENTERTAINMENT': 516, 'MAGNA ENTERTAINMENT': 516, 'MAGNA HOME': 516, 'MAGNA': 516, 'MAGNOLIA PICTURES': 517, 'MAGNOLIA': 517, 'MAIDEN JAPAN': 518, 'MAIDEN': 518, 'MAJENG MEDIA': 519, 'MAJENG': 519, 'MAJESTIC HOME ENTERTAINMENT': 520, 'MAJESTIC ENTERTAINMENT': 520, 'MAJESTIC HOME': 520, 'MAJESTIC': 520, 'MANGA HOME ENTERTAINMENT': 521, 'MANGA ENTERTAINMENT': 521, 'MANGA HOME': 521, 'MANGA': 521, 'MANTA LAB': 522, 'MAPLE STUDIOS': 523, 'MAPLE': 523, 'MARCO POLO PRODUCTION': + 524, 'MARCO POLO': 524, 'MARIINSKY': 525, 'MARVEL STUDIOS': 526, 'MARVEL': 526, 'MASCOT RECORDS': 527, 'MASCOT': 527, 'MASSACRE VIDEO': 528, 'MASSACRE': 528, 'MATCHBOX': 529, 'MATRIX D': 530, 'MAXAM': 531, 'MAYA HOME ENTERTAINMENT': 532, 'MAYA ENTERTAINMENT': 532, 'MAYA HOME': 532, 'MAYAT': 532, 'MDG': 533, 'MEDIA BLASTERS': 534, 'MEDIA FACTORY': 535, 'MEDIA TARGET DISTRIBUTION': 536, 'MEDIA TARGET': 536, 'MEDIAINVISION': 537, 'MEDIATOON': 538, 'MEDIATRES ESTUDIO': 539, 'MEDIATRES STUDIO': 539, 'MEDIATRES': 539, 'MEDICI ARTS': 540, 'MEDICI CLASSICS': 541, 'MEDIUMRARE ENTERTAINMENT': 542, 'MEDIUMRARE': 542, 'MEDUSA': 543, 'MEGASTAR': 544, 'MEI AH': 545, 'MELI MƉDIAS': 546, 'MELI MEDIAS': 546, 'MEMENTO FILMS': 547, 'MEMENTO': 547, 'MENEMSHA FILMS': 548, 'MENEMSHA': 548, 'MERCURY': 549, 
'MERCURY STUDIOS': 550, 'MERGE SOFT PRODUCTIONS': 551, 'MERGE PRODUCTIONS': 551, 'MERGE SOFT': 551, 'MERGE': 551, 'METAL BLADE RECORDS': 552, 'METAL BLADE': 552, 'METEOR': 553, 'METRO-GOLDWYN-MAYER': 554, 'METRO GOLDWYN MAYER': 554, 'METROGOLDWYNMAYER': 554, 'METRODOME VIDEO': 555, 'METRODOME': 555, 'METROPOLITAN': 556, 'MFA+': + 557, 'MFA': 557, 'MIG FILMGROUP': 558, 'MIG': 558, 'MILESTONE': 559, 'MILL CREEK ENTERTAINMENT': 560, 'MILL CREEK': 560, 'MILLENNIUM MEDIA': 561, 'MILLENNIUM': 561, 'MIRAGE ENTERTAINMENT': 562, 'MIRAGE': 562, 'MIRAMAX': 563, + 'MISTERIYA ZVUKA': 564, 'MK2': 565, 'MODE RECORDS': 566, 'MODE': 566, 'MOMENTUM PICTURES': 567, 'MONDO HOME ENTERTAINMENT': 568, 'MONDO ENTERTAINMENT': 568, 'MONDO HOME': 568, 'MONDO MACABRO': 569, 'MONGREL MEDIA': 570, 'MONOLIT': 571, 'MONOLITH VIDEO': 572, 'MONOLITH': 572, 'MONSTER PICTURES': 573, 'MONSTER': 573, 'MONTEREY VIDEO': 574, 'MONTEREY': 574, 'MONUMENT RELEASING': 575, 'MONUMENT': 575, 'MORNINGSTAR': 576, 'MORNING STAR': 576, 'MOSERBAER': 577, 'MOVIEMAX': 578, 'MOVINSIDE': 579, 'MPI MEDIA GROUP': 580, 'MPI MEDIA': 580, 'MPI': 580, 'MR. BONGO FILMS': 581, 'MR BONGO FILMS': 581, 'MR BONGO': 581, 'MRG (MERIDIAN)': 582, 'MRG MERIDIAN': 582, 'MRG': 582, 'MERIDIAN': 582, 'MUBI': 583, 'MUG SHOT PRODUCTIONS': 584, 'MUG SHOT': 584, 'MULTIMUSIC': 585, 'MULTI-MUSIC': 585, 'MULTI MUSIC': 585, 'MUSE': 586, 'MUSIC BOX FILMS': 587, 'MUSIC BOX': 587, 'MUSICBOX': 587, 'MUSIC BROKERS': 588, 'MUSIC THEORIES': 589, 'MUSIC VIDEO DISTRIBUTORS': 590, 'MUSIC VIDEO': 590, 'MUSTANG ENTERTAINMENT': 591, 'MUSTANG': 591, 'MVD VISUAL': 592, 'MVD': 592, 'MVD/VSC': 593, 'MVL': 594, 'MVM ENTERTAINMENT': 595, 'MVM': 595, 'MYNDFORM': 596, 'MYSTIC NIGHT PICTURES': 597, 'MYSTIC NIGHT': 597, 'NAMELESS MEDIA': 598, 'NAMELESS': 598, 'NAPALM RECORDS': 599, 'NAPALM': 599, 'NATIONAL ENTERTAINMENT MEDIA': 600, 'NATIONAL ENTERTAINMENT': 600, 'NATIONAL MEDIA': 600, 'NATIONAL FILM ARCHIVE': 601, 'NATIONAL ARCHIVE': 601, 'NATIONAL FILM': 601, 'NATIONAL GEOGRAPHIC': 602, 'NAT GEO TV': 602, 'NAT GEO': 602, 'NGO': 602, 'NAXOS': 603, 'NBCUNIVERSAL ENTERTAINMENT JAPAN': 604, 'NBC UNIVERSAL ENTERTAINMENT JAPAN': 604, 'NBCUNIVERSAL JAPAN': 604, 'NBC UNIVERSAL JAPAN': 604, 'NBC JAPAN': 604, 'NBO ENTERTAINMENT': 605, 'NBO': 605, 'NEOS': 606, 'NETFLIX': 607, 'NETWORK': 608, 'NEW BLOOD': 609, 'NEW DISC': 610, 'NEW KSM': 611, 'NEW LINE CINEMA': 612, 'NEW LINE': 612, 'NEW MOVIE TRADING CO. 
LTD': 613, 'NEW MOVIE TRADING CO LTD': 613, 'NEW MOVIE TRADING CO': 613, 'NEW MOVIE TRADING': 613, 'NEW WAVE FILMS': 614, 'NEW WAVE': 614, 'NFI': 615, + 'NHK': 616, 'NIPPONART': 617, 'NIS AMERICA': 618, 'NJUTAFILMS': 619, 'NOBLE ENTERTAINMENT': 620, 'NOBLE': 620, 'NORDISK FILM': 621, 'NORDISK': 621, 'NORSK FILM': 622, 'NORSK': 622, 'NORTH AMERICAN MOTION PICTURES': 623, 'NOS AUDIOVISUAIS': 624, 'NOTORIOUS PICTURES': 625, 'NOTORIOUS': 625, 'NOVA MEDIA': 626, 'NOVA': 626, 'NOVA SALES AND DISTRIBUTION': 627, 'NOVA SALES & DISTRIBUTION': 627, 'NSM': 628, 'NSM RECORDS': 629, 'NUCLEAR BLAST': 630, 'NUCLEUS FILMS': 631, 'NUCLEUS': 631, 'OBERLIN MUSIC': 632, 'OBERLIN': 632, 'OBRAS-PRIMAS DO CINEMA': 633, 'OBRAS PRIMAS DO CINEMA': 633, 'OBRASPRIMAS DO CINEMA': 633, 'OBRAS-PRIMAS CINEMA': 633, 'OBRAS PRIMAS CINEMA': 633, 'OBRASPRIMAS CINEMA': 633, 'OBRAS-PRIMAS': 633, 'OBRAS PRIMAS': 633, 'OBRASPRIMAS': 633, 'ODEON': 634, 'OFDB FILMWORKS': 635, 'OFDB': 635, 'OLIVE FILMS': 636, 'OLIVE': 636, 'ONDINE': 637, 'ONSCREEN FILMS': 638, 'ONSCREEN': 638, 'OPENING DISTRIBUTION': 639, 'OPERA AUSTRALIA': 640, 'OPTIMUM HOME ENTERTAINMENT': 641, 'OPTIMUM ENTERTAINMENT': 641, 'OPTIMUM HOME': 641, 'OPTIMUM': 641, 'OPUS ARTE': 642, 'ORANGE STUDIO': 643, 'ORANGE': 643, 'ORLANDO EASTWOOD FILMS': 644, 'ORLANDO FILMS': 644, 'ORLANDO EASTWOOD': 644, 'ORLANDO': 644, 'ORUSTAK PICTURES': 645, 'ORUSTAK': 645, 'OSCILLOSCOPE PICTURES': 646, 'OSCILLOSCOPE': 646, 'OUTPLAY': 647, 'PALISADES TARTAN': 648, 'PAN VISION': 649, 'PANVISION': 649, 'PANAMINT CINEMA': 650, 'PANAMINT': 650, 'PANDASTORM ENTERTAINMENT': 651, 'PANDA STORM ENTERTAINMENT': 651, 'PANDASTORM': 651, 'PANDA STORM': 651, 'PANDORA FILM': 652, 'PANDORA': 652, 'PANEGYRIC': 653, 'PANORAMA': 654, 'PARADE DECK FILMS': 655, 'PARADE DECK': 655, 'PARADISE': 656, 'PARADISO FILMS': 657, 'PARADOX': 658, 'PARAMOUNT PICTURES': 659, 'PARAMOUNT': 659, 'PARIS FILMES': 660, 'PARIS FILMS': 660, 'PARIS': 660, 'PARK CIRCUS': 661, 'PARLOPHONE': 662, 'PASSION RIVER': 663, 'PATHE DISTRIBUTION': 664, 'PATHE': 664, 'PBS': 665, 'PEACE ARCH TRINITY': 666, 'PECCADILLO PICTURES': 667, 'PEPPERMINT': 668, 'PHASE 4 FILMS': 669, 'PHASE 4': 669, 'PHILHARMONIA BAROQUE': 670, 'PICTURE HOUSE ENTERTAINMENT': 671, 'PICTURE ENTERTAINMENT': 671, 'PICTURE HOUSE': 671, 'PICTURE': 671, 'PIDAX': 672, 'PINK FLOYD RECORDS': 673, 'PINK FLOYD': 673, 'PINNACLE FILMS': 674, 'PINNACLE': 674, 'PLAIN': 675, 'PLATFORM ENTERTAINMENT LIMITED': 676, 'PLATFORM ENTERTAINMENT LTD': 676, 'PLATFORM ENTERTAINMENT LTD.': 676, 'PLATFORM ENTERTAINMENT': 676, 'PLATFORM': 676, 'PLAYARTE': 677, 'PLG UK CLASSICS': 678, 'PLG UK': + 678, 'PLG': 678, 'POLYBAND & TOPPIC VIDEO/WVG': 679, 'POLYBAND AND TOPPIC VIDEO/WVG': 679, 'POLYBAND & TOPPIC VIDEO WVG': 679, 'POLYBAND & TOPPIC VIDEO AND WVG': 679, 'POLYBAND & TOPPIC VIDEO & WVG': 679, 'POLYBAND AND TOPPIC VIDEO WVG': 679, 'POLYBAND AND TOPPIC VIDEO AND WVG': 679, 'POLYBAND AND TOPPIC VIDEO & WVG': 679, 'POLYBAND & TOPPIC VIDEO': 679, 'POLYBAND AND TOPPIC VIDEO': 679, 'POLYBAND & TOPPIC': 679, 'POLYBAND AND TOPPIC': 679, 'POLYBAND': 679, 'WVG': 679, 'POLYDOR': 680, 'PONY': 681, 'PONY CANYON': 682, 'POTEMKINE': 683, 'POWERHOUSE FILMS': 684, 'POWERHOUSE': 684, 'POWERSTATIOM': 685, 'PRIDE & JOY': 686, 'PRIDE AND JOY': 686, 'PRINZ MEDIA': 687, 'PRINZ': 687, 'PRIS AUDIOVISUAIS': 688, 'PRO VIDEO': 689, 'PRO-VIDEO': 689, 'PRO-MOTION': 690, 'PRO MOTION': 690, 'PROD. 
JRB': 691, 'PROD JRB': 691, 'PRODISC': 692, 'PROKINO': 693, 'PROVOGUE RECORDS': 694, 'PROVOGUE': 694, 'PROWARE': 695, 'PULP VIDEO': 696, 'PULP': 696, 'PULSE VIDEO': 697, 'PULSE': 697, 'PURE AUDIO RECORDINGS': 698, 'PURE AUDIO': 698, 'PURE FLIX ENTERTAINMENT': 699, 'PURE FLIX': 699, 'PURE ENTERTAINMENT': 699, 'PYRAMIDE VIDEO': 700, 'PYRAMIDE': 700, 'QUALITY FILMS': 701, 'QUALITY': 701, 'QUARTO VALLEY RECORDS': 702, 'QUARTO VALLEY': 702, 'QUESTAR': 703, 'R SQUARED FILMS': 704, 'R SQUARED': 704, 'RAPID EYE MOVIES': 705, 'RAPID EYE': 705, 'RARO VIDEO': 706, 'RARO': 706, 'RAROVIDEO U.S.': 707, 'RAROVIDEO US': 707, 'RARO VIDEO US': 707, 'RARO VIDEO U.S.': 707, 'RARO U.S.': 707, 'RARO US': 707, 'RAVEN BANNER RELEASING': 708, 'RAVEN BANNER': 708, 'RAVEN': 708, 'RAZOR DIGITAL ENTERTAINMENT': 709, 'RAZOR DIGITAL': 709, 'RCA': 710, 'RCO LIVE': 711, 'RCO': 711, 'RCV': 712, 'REAL GONE MUSIC': 713, 'REAL GONE': 713, 'REANIMEDIA': 714, 'REANI MEDIA': 714, 'REDEMPTION': 715, 'REEL': 716, 'RELIANCE HOME VIDEO & GAMES': 717, 'RELIANCE HOME VIDEO AND GAMES': 717, 'RELIANCE HOME VIDEO': 717, 'RELIANCE VIDEO': 717, 'RELIANCE HOME': 717, 'RELIANCE': 717, 'REM CULTURE': 718, 'REMAIN IN LIGHT': 719, 'REPRISE': 720, 'RESEN': 721, 'RETROMEDIA': 722, 'REVELATION FILMS LTD.': 723, 'REVELATION FILMS LTD': 723, 'REVELATION FILMS': 723, 'REVELATION LTD.': 723, 'REVELATION LTD': 723, 'REVELATION': 723, 'REVOLVER ENTERTAINMENT': 724, 'REVOLVER': 724, 'RHINO MUSIC': 725, 'RHINO': 725, 'RHV': 726, 'RIGHT STUF': 727, 'RIMINI EDITIONS': 728, 'RISING SUN MEDIA': 729, 'RLJ ENTERTAINMENT': 730, 'RLJ': 730, 'ROADRUNNER RECORDS': 731, 'ROADSHOW ENTERTAINMENT': 732, 'ROADSHOW': 732, 'RONE': 733, 'RONIN FLIX': 734, 'ROTANA HOME ENTERTAINMENT': 735, 'ROTANA ENTERTAINMENT': 735, 'ROTANA HOME': 735, 'ROTANA': 735, 'ROUGH TRADE': 736, 'ROUNDER': 737, 'SAFFRON HILL FILMS': 738, 'SAFFRON HILL': 738, 'SAFFRON': 738, 'SAMUEL GOLDWYN FILMS': 739, 'SAMUEL GOLDWYN': 739, 'SAN FRANCISCO SYMPHONY': 740, 'SANDREW METRONOME': 741, 'SAPHRANE': 742, 'SAVOR': 743, 'SCANBOX ENTERTAINMENT': 744, 'SCANBOX': 744, 'SCENIC LABS': 745, 'SCHRÖDERMEDIA': 746, 'SCHRODERMEDIA': 746, 'SCHRODER MEDIA': 746, 'SCORPION RELEASING': 747, 'SCORPION': 747, 'SCREAM TEAM RELEASING': 748, 'SCREAM TEAM': 748, 'SCREEN MEDIA': 749, 'SCREEN': 749, 'SCREENBOUND PICTURES': 750, 'SCREENBOUND': 750, 'SCREENWAVE MEDIA': 751, 'SCREENWAVE': 751, 'SECOND RUN': 752, 'SECOND SIGHT': 753, 'SEEDSMAN GROUP': 754, 'SELECT VIDEO': 755, 'SELECTA VISION': 756, 'SENATOR': 757, 'SENTAI FILMWORKS': 758, 'SENTAI': 758, 'SEVEN7': 759, 'SEVERIN FILMS': 760, 'SEVERIN': 760, 'SEVILLE': 761, 'SEYONS ENTERTAINMENT': 762, 'SEYONS': 762, 'SF STUDIOS': 763, 'SGL ENTERTAINMENT': 764, 'SGL': 764, 'SHAMELESS': 765, 'SHAMROCK MEDIA': 766, 'SHAMROCK': 766, 'SHANGHAI EPIC MUSIC ENTERTAINMENT': 767, 'SHANGHAI EPIC ENTERTAINMENT': 767, 'SHANGHAI EPIC MUSIC': 767, 'SHANGHAI MUSIC ENTERTAINMENT': 767, 'SHANGHAI ENTERTAINMENT': 767, 'SHANGHAI MUSIC': 767, 'SHANGHAI': 767, 'SHEMAROO': 768, 'SHOCHIKU': 769, 'SHOCK': 770, 'SHOGAKU KAN': 771, 'SHOUT FACTORY': 772, 'SHOUT! 
FACTORY': 772, 'SHOUT': 772, 'SHOUT!': 772, 'SHOWBOX': 773, 'SHOWTIME ENTERTAINMENT': 774, 'SHOWTIME': 774, 'SHRIEK SHOW': 775, 'SHUDDER': 776, 'SIDONIS': 777, 'SIDONIS CALYSTA': 778, 'SIGNAL ONE ENTERTAINMENT': 779, 'SIGNAL ONE': 779, 'SIGNATURE ENTERTAINMENT': 780, 'SIGNATURE': 780, 'SILVER VISION': 781, 'SINISTER FILM': 782, 'SINISTER': 782, 'SIREN VISUAL ENTERTAINMENT': 783, 'SIREN VISUAL': 783, 'SIREN ENTERTAINMENT': 783, 'SIREN': 783, 'SKANI': 784, 'SKY DIGI': 785, 'SLASHER // VIDEO': 786, 'SLASHER / VIDEO': 786, 'SLASHER VIDEO': 786, 'SLASHER': 786, 'SLOVAK FILM INSTITUTE': 787, 'SLOVAK FILM': 787, + 'SFI': 787, 'SM LIFE DESIGN GROUP': 788, 'SMOOTH PICTURES': 789, 'SMOOTH': 789, 'SNAPPER MUSIC': 790, 'SNAPPER': 790, 'SODA PICTURES': 791, 'SODA': 791, 'SONO LUMINUS': 792, 'SONY MUSIC': 793, 'SONY PICTURES': 794, 'SONY': 794, 'SONY PICTURES CLASSICS': 795, 'SONY CLASSICS': 795, 'SOUL MEDIA': 796, 'SOUL': 796, 'SOULFOOD MUSIC DISTRIBUTION': 797, 'SOULFOOD DISTRIBUTION': 797, 'SOULFOOD MUSIC': 797, 'SOULFOOD': 797, 'SOYUZ': 798, 'SPECTRUM': 799, + 'SPENTZOS FILM': 800, 'SPENTZOS': 800, 'SPIRIT ENTERTAINMENT': 801, 'SPIRIT': 801, 'SPIRIT MEDIA GMBH': 802, 'SPIRIT MEDIA': 802, 'SPLENDID ENTERTAINMENT': 803, 'SPLENDID FILM': 804, 'SPO': 805, 'SQUARE ENIX': 806, 'SRI BALAJI VIDEO': 807, 'SRI BALAJI': 807, 'SRI': 807, 'SRI VIDEO': 807, 'SRS CINEMA': 808, 'SRS': 808, 'SSO RECORDINGS': 809, 'SSO': 809, 'ST2 MUSIC': 810, 'ST2': 810, 'STAR MEDIA ENTERTAINMENT': 811, 'STAR ENTERTAINMENT': 811, 'STAR MEDIA': 811, 'STAR': 811, 'STARLIGHT': 812, 'STARZ / ANCHOR BAY': 813, 'STARZ ANCHOR BAY': 813, 'STARZ': 813, 'ANCHOR BAY': 813, 'STER KINEKOR': 814, 'STERLING ENTERTAINMENT': 815, 'STERLING': 815, 'STINGRAY': 816, 'STOCKFISCH RECORDS': 817, 'STOCKFISCH': 817, 'STRAND RELEASING': 818, 'STRAND': 818, 'STUDIO 4K': 819, 'STUDIO CANAL': 820, 'STUDIO GHIBLI': 821, 'GHIBLI': 821, 'STUDIO HAMBURG ENTERPRISES': 822, 'HAMBURG ENTERPRISES': 822, 'STUDIO HAMBURG': 822, 'HAMBURG': 822, 'STUDIO S': 823, 'SUBKULTUR ENTERTAINMENT': 824, 'SUBKULTUR': 824, 'SUEVIA FILMS': 825, 'SUEVIA': 825, 'SUMMIT ENTERTAINMENT': 826, 'SUMMIT': 826, 'SUNFILM ENTERTAINMENT': 827, 'SUNFILM': 827, 'SURROUND RECORDS': 828, 'SURROUND': 828, 'SVENSK FILMINDUSTRI': 829, 'SVENSK': 829, 'SWEN FILMES': 830, 'SWEN FILMS': 830, 'SWEN': 830, 'SYNAPSE FILMS': 831, 'SYNAPSE': 831, 'SYNDICADO': 832, 'SYNERGETIC': 833, 'T- SERIES': 834, 'T-SERIES': 834, 'T SERIES': 834, 'TSERIES': 834, 'T.V.P.': 835, 'TVP': 835, 'TACET RECORDS': 836, 'TACET': 836, 'TAI SENG': 837, 'TAI SHENG': 838, 'TAKEONE': 839, 'TAKESHOBO': 840, 'TAMASA DIFFUSION': 841, 'TC ENTERTAINMENT': 842, 'TC': 842, 'TDK': 843, 'TEAM MARKETING': 844, 'TEATRO REAL': 845, 'TEMA DISTRIBUCIONES': 846, 'TEMPE DIGITAL': 847, 'TF1 VIDÉO': 848, 'TF1 VIDEO': 848, 'TF1': 848, 'THE BLU': 849, 'BLU': 849, 'THE ECSTASY OF FILMS': 850, 'THE FILM DETECTIVE': 851, 'FILM DETECTIVE': 851, 'THE JOKERS': 852, 'JOKERS': 852, 'THE ON': 853, 'ON': 853, 'THIMFILM': 854, 'THIM FILM': 854, 'THIM': 854, 'THIRD WINDOW FILMS': 855, 'THIRD WINDOW': 855, '3RD WINDOW FILMS': 855, '3RD WINDOW': 855, 'THUNDERBEAN ANIMATION': 856, 'THUNDERBEAN': 856, 'THUNDERBIRD RELEASING': 857, 'THUNDERBIRD': 857, 'TIBERIUS FILM': 858, 'TIME LIFE': 859, 'TIMELESS MEDIA GROUP': 860, 'TIMELESS MEDIA': 860, 'TIMELESS GROUP': 860, 'TIMELESS': 860, 'TLA RELEASING': 861, 'TLA': 861, 'TOBIS FILM': 862, 'TOBIS': 862, 'TOEI': 863, 'TOHO': 864, 'TOKYO SHOCK': 865, 'TOKYO': 865, 'TONPOOL MEDIEN GMBH': 866, 'TONPOOL MEDIEN': 866, 'TOPICS 
ENTERTAINMENT': 867, 'TOPICS': 867, 'TOUCHSTONE PICTURES': 868, 'TOUCHSTONE': 868, 'TRANSMISSION FILMS': 869, 'TRANSMISSION': 869, 'TRAVEL VIDEO STORE': 870, 'TRIART': 871, 'TRIGON FILM': 872, 'TRIGON': 872, 'TRINITY HOME ENTERTAINMENT': 873, 'TRINITY ENTERTAINMENT': 873, 'TRINITY HOME': 873, 'TRINITY': 873, 'TRIPICTURES': 874, 'TRI-PICTURES': 874, 'TRI PICTURES': 874, 'TROMA': 875, 'TURBINE MEDIEN': 876, 'TURTLE RECORDS': 877, 'TURTLE': 877, 'TVA FILMS': 878, 'TVA': 878, 'TWILIGHT TIME': 879, 'TWILIGHT': 879, 'TT': 879, 'TWIN CO., LTD.': 880, 'TWIN CO, LTD.': 880, 'TWIN CO., LTD': 880, 'TWIN CO, LTD': 880, 'TWIN CO LTD': 880, 'TWIN LTD': 880, 'TWIN CO.': 880, 'TWIN CO': 880, 'TWIN': 880, 'UCA': 881, 'UDR': 882, 'UEK': 883, 'UFA/DVD': 884, 'UFA DVD': 884, 'UFADVD': 884, 'UGC PH': 885, 'ULTIMATE3DHEAVEN': 886, 'ULTRA': 887, 'UMBRELLA ENTERTAINMENT': 888, 'UMBRELLA': 888, 'UMC': 889, "UNCORK'D ENTERTAINMENT": 890, 'UNCORKD ENTERTAINMENT': 890, 'UNCORK D ENTERTAINMENT': 890, "UNCORK'D": 890, 'UNCORK D': 890, 'UNCORKD': 890, 'UNEARTHED FILMS': 891, 'UNEARTHED': 891, 'UNI DISC': 892, 'UNIMUNDOS': 893, 'UNITEL': 894, 'UNIVERSAL MUSIC': 895, 'UNIVERSAL SONY PICTURES HOME ENTERTAINMENT': 896, 'UNIVERSAL SONY PICTURES ENTERTAINMENT': 896, 'UNIVERSAL SONY PICTURES HOME': 896, 'UNIVERSAL SONY PICTURES': 896, 'UNIVERSAL HOME ENTERTAINMENT': + 896, 'UNIVERSAL ENTERTAINMENT': 896, 'UNIVERSAL HOME': 896, 'UNIVERSAL STUDIOS': 897, 'UNIVERSAL': 897, 'UNIVERSE LASER & VIDEO CO.': 898, 'UNIVERSE LASER AND VIDEO CO.': 898, 'UNIVERSE LASER & VIDEO CO': 898, 'UNIVERSE LASER AND VIDEO CO': 898, 'UNIVERSE LASER CO.': 898, 'UNIVERSE LASER CO': 898, 'UNIVERSE LASER': 898, 'UNIVERSUM FILM': 899, 'UNIVERSUM': 899, 'UTV': 900, 'VAP': 901, 'VCI': 902, 'VENDETTA FILMS': 903, 'VENDETTA': 903, 'VERSÁTIL HOME VIDEO': 904, 'VERSÁTIL VIDEO': 904, 'VERSÁTIL HOME': 904, 'VERSÁTIL': 904, 'VERSATIL HOME VIDEO': 904, 'VERSATIL VIDEO': 904, 'VERSATIL HOME': 904, 'VERSATIL': 904, 'VERTICAL ENTERTAINMENT': 905, 'VERTICAL': 905, 'VÉRTICE 360º': 906, 'VÉRTICE 360': 906, 'VERTICE 360o': 906, 'VERTICE 360': 906, 'VERTIGO BERLIN': 907, 'VÉRTIGO FILMS': 908, 'VÉRTIGO': 908, 'VERTIGO FILMS': 908, 'VERTIGO': 908, 'VERVE PICTURES': 909, 'VIA VISION ENTERTAINMENT': 910, 'VIA VISION': 910, 'VICOL ENTERTAINMENT': 911, 'VICOL': 911, 'VICOM': 912, 'VICTOR ENTERTAINMENT': 913, 'VICTOR': 913, 'VIDEA CDE': 914, 'VIDEO FILM EXPRESS': 915, 'VIDEO FILM': 915, 'VIDEO EXPRESS': 915, 'VIDEO MUSIC, INC.': 916, 'VIDEO MUSIC, INC': 916, 'VIDEO MUSIC INC.': 916, 'VIDEO MUSIC INC': 916, 'VIDEO MUSIC': 916, 'VIDEO SERVICE CORP.': 917, 'VIDEO SERVICE CORP': 917, 'VIDEO SERVICE': 917, 'VIDEO TRAVEL': 918, 'VIDEOMAX': 919, 'VIDEO MAX': 919, 'VII PILLARS ENTERTAINMENT': 920, 'VII PILLARS': 920, 'VILLAGE FILMS': 921, 'VINEGAR SYNDROME': 922, 'VINEGAR': 922, 'VS': 922, 'VINNY MOVIES': 923, 'VINNY': 923, 'VIRGIL FILMS & ENTERTAINMENT': 924, 'VIRGIL FILMS AND ENTERTAINMENT': 924, 'VIRGIL ENTERTAINMENT': 924, 'VIRGIL FILMS': 924, 'VIRGIL': 924, 'VIRGIN RECORDS': 925, 'VIRGIN': 925, 'VISION FILMS': 926, 'VISION': 926, 'VISUAL ENTERTAINMENT GROUP': 927, 'VISUAL GROUP': 927, 'VISUAL ENTERTAINMENT': 927, 'VISUAL': 927, 'VIVENDI VISUAL ENTERTAINMENT': 928, 'VIVENDI VISUAL': 928, 'VIVENDI': 928, 'VIZ PICTURES': 929, 'VIZ': 929, 'VLMEDIA': 930, 'VL MEDIA': 930, 'VL': 930, 'VOLGA': 931, 'VVS FILMS': 932, + 'VVS': 932, 'VZ HANDELS GMBH': 933, 'VZ HANDELS': 933, 'WARD RECORDS': 934, 'WARD': 934, 'WARNER BROS.': 935, 'WARNER BROS': 935, 'WARNER ARCHIVE': 935, 'WARNER ARCHIVE 
COLLECTION': 935, 'WAC': 935, 'WARNER': 935, 'WARNER MUSIC': 936, 'WEA': 937, 'WEINSTEIN COMPANY': 938, 'WEINSTEIN': 938, 'WELL GO USA': 939, 'WELL GO': 939, 'WELTKINO FILMVERLEIH': 940, 'WEST VIDEO': 941, 'WEST': 941, 'WHITE PEARL MOVIES': 942, 'WHITE PEARL': 942, 'WICKED-VISION MEDIA': 943, 'WICKED VISION MEDIA': 943, 'WICKEDVISION MEDIA': 943, 'WICKED-VISION': 943, 'WICKED VISION': 943, 'WICKEDVISION': 943, 'WIENERWORLD': 944, 'WILD BUNCH': 945, 'WILD EYE RELEASING': 946, 'WILD EYE': 946, 'WILD SIDE VIDEO': 947, 'WILD SIDE': 947, 'WME': 948, 'WOLFE VIDEO': 949, 'WOLFE': 949, 'WORD ON FIRE': 950, 'WORKS FILM GROUP': 951, 'WORLD WRESTLING': 952, 'WVG MEDIEN': 953, 'WWE STUDIOS': 954, 'WWE': 954, 'X RATED KULT': 955, 'X-RATED KULT': 955, 'X RATED CULT': 955, 'X-RATED CULT': 955, 'X RATED': 955, 'X-RATED': 955, 'XCESS': 956, 'XLRATOR': 957, 'XT VIDEO': 958, 'XT': 958, 'YAMATO VIDEO': 959, 'YAMATO': 959, 'YASH RAJ FILMS': 960, 'YASH RAJS': 960, 'ZEITGEIST FILMS': 961, 'ZEITGEIST': 961, 'ZENITH PICTURES': 962, 'ZENITH': 962, 'ZIMA': 963, 'ZYLO': 964, 'ZYX MUSIC': 965, 'ZYX': 965 }.get(distributor, 0) return distributor_id - async def unit3d_torrent_info(self, tracker, torrent_url, id): - tmdb = imdb = tvdb = description = category = infohash = mal = None + async def prompt_user_for_id_selection(self, meta, tmdb=None, imdb=None, tvdb=None, mal=None, filename=None, tracker_name=None): + if not tracker_name: + tracker_name = "Tracker" # Fallback if tracker_name is not provided + + if imdb: + imdb = str(imdb).zfill(7) # Convert to string and ensure IMDb ID is 7 characters long by adding leading zeros + # console.print(f"[cyan]Found IMDb ID: https://www.imdb.com/title/tt{imdb}[/cyan]") + + if any([tmdb, imdb, tvdb, mal]): + console.print(f"[cyan]Found the following IDs on {tracker_name}:") + if tmdb: + console.print(f"TMDb ID: {tmdb}") + if imdb: + console.print(f"IMDb ID: https://www.imdb.com/title/tt{imdb}") + if tvdb: + console.print(f"TVDb ID: {tvdb}") + if mal: + console.print(f"MAL ID: {mal}") + + if filename: + console.print(f"Filename: {filename}") # Ensure filename is printed if available + + if not meta['unattended']: + selection = input(f"Do you want to use these IDs from {tracker_name}? (Y/n): ").strip().lower() + try: + if selection == '' or selection == 'y' or selection == 'yes': + return True + else: + return False + except (KeyboardInterrupt, EOFError): + sys.exit(1) + else: + return True + + async def prompt_user_for_confirmation(self, message): + response = input(f"{message} (Y/n): ").strip().lower() + if response == '' or response == 'y': + return True + return False + + async def unit3d_torrent_info(self, tracker, torrent_url, search_url, meta, id=None, file_name=None): + tmdb = imdb = tvdb = description = category = infohash = mal = files = None # noqa F841 imagelist = [] - params = {'api_token' : self.config['TRACKERS'][tracker].get('api_key', '')} - url = f"{torrent_url}{id}" + + # Build the params for the API request + params = {'api_token': self.config['TRACKERS'][tracker].get('api_key', '')} + + # Determine the URL based on whether we're searching by file name or ID + if file_name: + url = f"{search_url}?file_name={file_name}" + console.print(f"[green]Searching {tracker} by file name: [bold yellow]{file_name}[/bold yellow]") + elif id: + url = f"{torrent_url}{id}?" 
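# --- Editor's sketch (illustrative only, not part of the diff): the two request
# shapes unit3d_torrent_info now builds. The base URL and token below are
# hypothetical placeholders; real values come from self.config['TRACKERS'][tracker].
import requests

params = {'api_token': 'YOUR_API_KEY'}  # hypothetical placeholder token

# 1) A file-name search hits the tracker's filter endpoint; the JSON carries a 'data' list.
by_name = requests.get('https://tracker.example/api/torrents/filter',
                       params={**params, 'file_name': 'Movie.2020.1080p.WEB-DL.H.264-GRP'}).json()
data = by_name.get('data')
attributes = data[0].get('attributes', {}) if isinstance(data, list) and data else {}

# 2) An ID lookup hits /api/torrents/{id}; the JSON carries one top-level 'attributes' object.
by_id = requests.get('https://tracker.example/api/torrents/123456', params=params).json()
attributes = by_id.get('attributes', {})

# Either way, the method below normalizes the result into the 9-tuple
# (tmdb, imdb, tvdb, mal, description, category, infohash, imagelist, file_name).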
+ console.print(f"[green]Searching {tracker} by ID: [bold yellow]{id}[/bold yellow] via {url}") + else: + console.print("[red]No ID or file name provided for search.[/red]") + return None, None, None, None, None, None, None, None, None + response = requests.get(url=url, params=params) + # console.print(f"[blue]Raw API Response: {response}[/blue]") + try: - response = response.json() - attributes = response['attributes'] - category = attributes.get('category') - description = attributes.get('description') - tmdb = attributes.get('tmdb_id') - tvdb = attributes.get('tvdb_id') - mal = attributes.get('mal_id') - imdb = attributes.get('imdb_id') - infohash = attributes.get('info_hash') - - bbcode = BBCODE() - description, imagelist = bbcode.clean_unit3d_description(description, torrent_url) - console.print(f"[green]Successfully grabbed description from {tracker}") - except Exception: - console.print(traceback.print_exc()) - console.print(f"[yellow]Invalid Response from {tracker} API.") - + json_response = response.json() + + # console.print(f"Raw API Response: {json_response}", markup=False) + + except ValueError: + return None, None, None, None, None, None, None, None, None + + try: + # Handle response when searching by file name (which might return a 'data' array) + data = json_response.get('data', []) + if data == "404": + console.print("[yellow]No data found (404). Returning None.[/yellow]") + return None, None, None, None, None, None, None, None, None + + if data and isinstance(data, list): # Ensure data is a list before accessing it + attributes = data[0].get('attributes', {}) + + # Extract data from the attributes + category = attributes.get('category') + description = attributes.get('description') + tmdb = attributes.get('tmdb_id') + tvdb = attributes.get('tvdb_id') + mal = attributes.get('mal_id') + imdb = attributes.get('imdb_id') + infohash = attributes.get('info_hash') + tmdb = None if tmdb == 0 else tmdb + tvdb = None if tvdb == 0 else tvdb + mal = None if mal == 0 else mal + imdb = None if imdb == 0 else imdb + else: + # Handle response when searching by ID + if id and not data: + attributes = json_response.get('attributes', {}) + + # Extract data from the attributes + category = attributes.get('category') + description = attributes.get('description') + tmdb = attributes.get('tmdb_id') + tvdb = attributes.get('tvdb_id') + mal = attributes.get('mal_id') + imdb = attributes.get('imdb_id') + infohash = attributes.get('info_hash') + tmdb = None if tmdb == 0 else tmdb + tvdb = None if tvdb == 0 else tvdb + mal = None if mal == 0 else mal + imdb = None if imdb == 0 else imdb + # Handle file name extraction + files = attributes.get('files', []) + if files: + if len(files) == 1: + file_name = files[0]['name'] + else: + file_name = [file['name'] for file in files[:5]] # Return up to 5 filenames + + console.print(f"[blue]Extracted filename(s): {file_name}[/blue]") # Print the extracted filename(s) + + # Skip the ID selection prompt if searching by ID + console.print(f"[green]Valid IDs found: TMDb: {tmdb}, IMDb: {imdb}, TVDb: {tvdb}, MAL: {mal}[/green]") + + if tmdb or imdb or tvdb: + if not id: + # Only prompt the user for ID selection if not searching by ID + try: + if not await self.prompt_user_for_id_selection(meta, tmdb, imdb, tvdb, mal, file_name): + console.print("[yellow]User chose to skip based on IDs.[/yellow]") + return None, None, None, None, None, None, None, None, None + except (KeyboardInterrupt, EOFError): + sys.exit(1) + + if description: + bbcode = BBCODE() + description, 
imagelist = bbcode.clean_unit3d_description(description, torrent_url) + console.print(f"[green]Successfully grabbed description from {tracker}") + console.print(f"Extracted description: {description}", markup=False) + + if meta.get('unattended') or (meta.get('blu') or meta.get('aither') or meta.get('lst') or meta.get('oe') or meta.get('tik')): + meta['description'] = description + meta['saved_description'] = True + else: + console.print("[cyan]Do you want to edit, discard or keep the description?[/cyan]") + edit_choice = input("Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is:") + + if edit_choice.lower() == 'e': + edited_description = click.edit(description) + if edited_description: + description = edited_description.strip() + meta['description'] = description + meta['saved_description'] = True + elif edit_choice.lower() == 'd': + description = None + console.print("[yellow]Description discarded.[/yellow]") + else: + console.print("[green]Keeping the original description.[/green]") + meta['description'] = description + meta['saved_description'] = True - return tmdb, imdb, tvdb, mal, description, category, infohash, imagelist + return tmdb, imdb, tvdb, mal, description, category, infohash, imagelist, file_name + + except Exception as e: + console.print_exception() + console.print(f"[yellow]Invalid Response from {tracker} API. Error: {str(e)}[/yellow]") + return None, None, None, None, None, None, None, None, None async def parseCookieFile(self, cookiefile): """Parse a cookies.txt file and return a dictionary of key value pairs compatible with requests.""" cookies = {} - with open (cookiefile, 'r') as fp: + with open(cookiefile, 'r') as fp: for line in fp: if not line.startswith(("# ", "\n", "#\n")): lineFields = re.split(' |\t', line.strip()) @@ -185,45 +568,43 @@ async def parseCookieFile(self, cookiefile): cookies[lineFields[5]] = lineFields[6] return cookies - - async def ptgen(self, meta, ptgen_site="", ptgen_retry=3): ptgen = "" url = 'https://ptgen.zhenzhen.workers.dev' if ptgen_site != '': url = ptgen_site params = {} - data={} - #get douban url + data = {} + # get douban url if int(meta.get('imdb_id', '0')) != 0: data['search'] = f"tt{meta['imdb_id']}" ptgen = requests.get(url, params=data) - if ptgen.json()["error"] != None: + if ptgen.json()["error"] is not None: for retry in range(ptgen_retry): try: ptgen = requests.get(url, params=params) - if ptgen.json()["error"] == None: + if ptgen.json()["error"] is None: break except requests.exceptions.JSONDecodeError: continue try: - params['url'] = ptgen.json()['data'][0]['link'] + params['url'] = ptgen.json()['data'][0]['link'] except Exception: console.print("[red]Unable to get data from ptgen using IMDb") - params['url'] = console.input(f"[red]Please enter [yellow]Douban[/yellow] link: ") + params['url'] = console.input("[red]Please enter [yellow]Douban[/yellow] link: ") else: console.print("[red]No IMDb id was found.") - params['url'] = console.input(f"[red]Please enter [yellow]Douban[/yellow] link: ") + params['url'] = console.input("[red]Please enter [yellow]Douban[/yellow] link: ") try: ptgen = requests.get(url, params=params) - if ptgen.json()["error"] != None: + if ptgen.json()["error"] is not None: for retry in range(ptgen_retry): ptgen = requests.get(url, params=params) - if ptgen.json()["error"] == None: + if ptgen.json()["error"] is None: break ptgen = ptgen.json() meta['ptgen'] = ptgen - with open (f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: + with 
open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: json.dump(meta, f, indent=4) f.close() ptgen = ptgen['format'] @@ -237,105 +618,462 @@ async def ptgen(self, meta, ptgen_site="", ptgen_retry=3): return "" return ptgen - - - # async def ptgen(self, meta): - # ptgen = "" - # url = "https://api.iyuu.cn/App.Movie.Ptgen" - # params = {} - # if int(meta.get('imdb_id', '0')) != 0: - # params['url'] = f"tt{meta['imdb_id']}" - # else: - # console.print("[red]No IMDb id was found.") - # params['url'] = console.input(f"[red]Please enter [yellow]Douban[/yellow] link: ") - # try: - # ptgen = requests.get(url, params=params) - # ptgen = ptgen.json() - # ptgen = ptgen['data']['format'] - # if "[/img]" in ptgen: - # ptgen = ptgen.split("[/img]")[1] - # ptgen = f"[img]{meta.get('imdb_info', {}).get('cover', meta.get('cover', ''))}[/img]{ptgen}" - # except: - # console.print_exception() - # console.print("[bold red]There was an error getting the ptgen") - # console.print(ptgen) - # return ptgen - - - async def filter_dupes(self, dupes, meta): + """ + Filter duplicates by applying exclusion rules. Only non-excluded entries are returned. + Everything is a dupe, until it matches a criteria to be excluded. + """ if meta['debug']: console.log("[cyan]Pre-filtered dupes") console.log(dupes) + new_dupes = [] + + has_repack_in_uuid = "repack" in meta.get('uuid', '').lower() + video_encode = meta.get("video_encode") + if video_encode is not None: + has_encoder_in_name = video_encode.lower() + normalized_encoder = self.normalize_filename(has_encoder_in_name) + else: + normalized_encoder = False + has_is_disc = bool(meta.get('is_disc', False)) + target_hdr = self.refine_hdr_terms(meta.get("hdr")) + target_season = meta.get("season") + target_episode = meta.get("episode") + target_resolution = meta.get("resolution") + tag = meta.get("tag").lower().replace("-", " ") + is_dvd = meta['is_disc'] == "DVD" + web_dl = meta.get('type') == "WEBDL" + + attribute_checks = [ + { + "key": "repack", + "uuid_flag": has_repack_in_uuid, + "condition": lambda each: meta['tag'].lower() in each and has_repack_in_uuid and "repack" not in each.lower(), + "exclude_msg": lambda each: f"Excluding result because it lacks 'repack' and matches tag '{meta['tag']}': {each}" + }, + { + "key": "remux", + "uuid_flag": "remux" in meta.get('name', '').lower(), + "condition": lambda each: "remux" in each.lower(), + "exclude_msg": lambda each: f"Excluding result due to 'remux' mismatch: {each}" + }, + { + "key": "uhd", + "uuid_flag": "uhd" in meta.get('name', '').lower(), + "condition": lambda each: "uhd" in each.lower(), + "exclude_msg": lambda each: f"Excluding result due to 'UHD' mismatch: {each}" + }, + { + "key": "hdtv", + "uuid_flag": "hdtv" in meta.get('name', '').lower(), + "condition": lambda each: "hdtv" in each.lower(), + "exclude_msg": lambda each: f"Excluding result due to 'HDTV' mismatch: {each}" + }, + ] + + def log_exclusion(reason, item): + if meta['debug']: + console.log(f"[yellow]Excluding result due to {reason}: {item}") + + def process_exclusion(each): + """ + Determine if an entry should be excluded. + Returns True if the entry should be excluded, otherwise allowed as dupe. 
+ """ + normalized = self.normalize_filename(each) + file_hdr = self.refine_hdr_terms(normalized) + + if meta['debug']: + console.log(f"[debug] Evaluating dupe: {each}") + console.log(f"[debug] Normalized dupe: {normalized}") + console.log(f"[debug] File HDR terms: {file_hdr}") + console.log(f"[debug] Target HDR terms: {target_hdr}") + console.log(f"[debug] TAG: {tag}") + console.log("[debug] Evaluating repack condition:") + console.log(f" has_repack_in_uuid: {has_repack_in_uuid}") + console.log(f" 'repack' in each.lower(): {'repack' in each.lower()}") + console.log(f"[debug] meta['uuid']: {meta.get('uuid', '')}") + console.log(f"[debug] meta['tag']: {meta.get('tag', '').lower()}") + console.log(f"[debug] normalized encoder: {normalized_encoder}") + + if has_is_disc and each.lower().endswith(".m2ts"): + return False + + if has_is_disc and re.search(r'\.\w{2,4}$', each): + log_exclusion("file extension mismatch (is_disc=True)", each) + return True + + if not is_dvd: + if target_resolution and target_resolution not in each: + log_exclusion(f"resolution '{target_resolution}' mismatch", each) + return True + + if is_dvd: + if any(str(res) in each for res in [1080, 720, 2160]): + log_exclusion(f"resolution '{target_resolution}' mismatch", each) + return True + + for check in attribute_checks: + if check["key"] == "repack": + if has_repack_in_uuid and "repack" not in normalized: + if tag and tag in normalized: + log_exclusion("missing 'repack'", each) + return True + elif check["uuid_flag"] != check["condition"](each): + log_exclusion(f"{check['key']} mismatch", each) + return True + + if not is_dvd: + if not self.has_matching_hdr(file_hdr, target_hdr, meta): + log_exclusion(f"HDR mismatch: Expected {target_hdr}, got {file_hdr}", each) + return True + + season_episode_match = self.is_season_episode_match(normalized, target_season, target_episode) + if meta['debug']: + console.log(f"[debug] Season/Episode match result: {season_episode_match}") + if not season_episode_match: + log_exclusion("season/episode mismatch", each) + return True + + if not is_dvd: + if normalized_encoder and normalized_encoder in each: + log_exclusion(f"Encoder '{has_encoder_in_name}' mismatch", each) + return False + + if web_dl and ("web-dl" in normalized or "webdl" in normalized or "web dl" in normalized): + return False + + console.log(f"[debug] Passed all checks: {each}") + return False + for each in dupes: - if meta.get('sd', 0) == 1: - remove_set = set() - else: - remove_set = set({meta['resolution']}) - search_combos = [ - { - 'search' : meta['hdr'], - 'search_for' : {'HDR', 'PQ10'}, - 'update' : {'HDR|PQ10'} - }, - { - 'search' : meta['hdr'], - 'search_for' : {'DV'}, - 'update' : {'DV|DoVi'} - }, - { - 'search' : meta['hdr'], - 'search_not' : {'DV', 'DoVi', 'HDR', 'PQ10'}, - 'update' : {'!(DV)|(DoVi)|(HDR)|(PQ10)'} - }, - { - 'search' : str(meta.get('tv_pack', 0)), - 'search_for' : '1', - 'update' : {f"{meta['season']}(?!E\d+)"} - }, - { - 'search' : meta['episode'], - 'search_for' : meta['episode'], - 'update' : {meta['season'], meta['episode']} - } - ] - search_matches = [ - { - 'if' : {'REMUX', 'WEBDL', 'WEBRip', 'HDTV'}, - 'in' : meta['type'] - } - ] - for s in search_combos: - if s.get('search_for') not in (None, ''): - if any(re.search(x, s['search'], flags=re.IGNORECASE) for x in s['search_for']): - remove_set.update(s['update']) - if s.get('search_not') not in (None, ''): - if not any(re.search(x, s['search'], flags=re.IGNORECASE) for x in s['search_not']): - remove_set.update(s['update']) - for sm in 
search_matches: - for a in sm['if']: - if a in sm['in']: - remove_set.add(a) - - search = each.lower().replace('-', '').replace(' ', '').replace('.', '') - for x in remove_set.copy(): - if "|" in x: - look_for = x.split('|') - for y in look_for: - if y.lower() in search: - if x in remove_set: - remove_set.remove(x) - remove_set.add(y) - - allow = True - for x in remove_set: - if not x.startswith("!"): - if not re.search(x, search, flags=re.I): - allow = False - else: - if re.search(x.replace("!", "", 1), search, flags=re.I) not in (None, False): - allow = False - if allow and each not in new_dupes: + console.log(f"[debug] Evaluating dupe: {each}") + if not process_exclusion(each): new_dupes.append(each) + + if meta['debug']: + console.log(f"[cyan]Final dupes: {new_dupes}") + return new_dupes + + def normalize_filename(self, filename): + """ + Normalize a filename for easier matching. + Retain season/episode information in the format SxxExx. + """ + normalized = filename.lower().replace("-", " ").replace(" ", " ").replace(".", " ") + + return normalized + + def is_season_episode_match(self, filename, target_season, target_episode): + """ + Check if the filename matches the given season and episode. + """ + if target_season: + target_season = int(str(target_season).lstrip('sS')) + if target_episode: + target_episode = int(str(target_episode).lstrip('eE')) + + season_pattern = f"s{target_season:02}" if target_season else None + episode_pattern = f"e{target_episode:02}" if target_episode else None + + if season_pattern and episode_pattern: + return season_pattern in filename and episode_pattern in filename + if season_pattern: + return season_pattern in filename + if episode_pattern: + return episode_pattern in filename + return True + + def refine_hdr_terms(self, hdr): + """ + Normalize HDR terms for consistent comparison. + Simplifies all HDR entries to 'HDR' and DV entries to 'DV'. + """ + if hdr is None: + return set() + hdr = hdr.upper() + terms = set() + if "DV" in hdr or "DOVI" in hdr: + terms.add("DV") + if "HDR" in hdr: # Any HDR-related term is normalized to 'HDR' + terms.add("HDR") + return terms + + def has_matching_hdr(self, file_hdr, target_hdr, meta): + """ + Check if the HDR terms match or are compatible. 
+ """ + def simplify_hdr(hdr_set): + """Simplify HDR terms to just HDR and DV.""" + simplified = set() + if any(h in hdr_set for h in {"HDR", "HDR10", "HDR10+"}): + simplified.add("HDR") + if "DV" in hdr_set or "DOVI" in hdr_set: + simplified.add("DV") + if "framestor" in meta['tag'].lower(): + simplified.add("HDR") + return simplified + + file_hdr_simple = simplify_hdr(file_hdr) + target_hdr_simple = simplify_hdr(target_hdr) + + if file_hdr_simple == {"DV", "HDR"} or file_hdr_simple == {"HDR", "DV"}: + file_hdr_simple = {"HDR"} + if target_hdr_simple == {"DV", "HDR"} or target_hdr_simple == {"HDR", "DV"}: + target_hdr_simple = {"HDR"} + + return file_hdr_simple == target_hdr_simple + + class MediaInfoParser: + # Language to ISO country code mapping + LANGUAGE_CODE_MAP = { + "afrikaans": ("https://ptpimg.me/i9pt6k.png", "20"), + "albanian": ("https://ptpimg.me/sfhik8.png", "20"), + "amharic": ("https://ptpimg.me/zm816y.png", "20"), + "arabic": ("https://ptpimg.me/5g8i9u.png", "26x10"), + "armenian": ("https://ptpimg.me/zm816y.png", "20"), + "azerbaijani": ("https://ptpimg.me/h3rbe0.png", "20"), + "basque": ("https://ptpimg.me/xj51b9.png", "20"), + "belarusian": ("https://ptpimg.me/iushg1.png", "20"), + "bengali": ("https://ptpimg.me/jq996n.png", "20"), + "bosnian": ("https://ptpimg.me/19t9rv.png", "20"), + "brazilian": ("https://ptpimg.me/p8sgla.png", "20"), + "bulgarian": ("https://ptpimg.me/un9dc6.png", "20"), + "catalan": ("https://ptpimg.me/v4h5bf.png", "20"), + "chinese": ("https://ptpimg.me/ea3yv3.png", "20"), + "croatian": ("https://ptpimg.me/rxi533.png", "20"), + "czech": ("https://ptpimg.me/5m75n3.png", "20"), + "danish": ("https://ptpimg.me/m35c41.png", "20"), + "dutch": ("https://ptpimg.me/6nmwpx.png", "20"), + "dzongkha": ("https://ptpimg.me/56e7y5.png", "20"), + "english": ("https://ptpimg.me/ine2fd.png", "25x10"), + "english (gb)": ("https://ptpimg.me/a9w539.png", "20"), + "estonian": ("https://ptpimg.me/z25pmk.png", "20"), + "filipino": ("https://ptpimg.me/9d3z9w.png", "20"), + "finnish": ("https://ptpimg.me/p4354c.png", "20"), + "french (canada)": ("https://ptpimg.me/ei4s6u.png", "20"), + "french canadian": ("https://ptpimg.me/ei4s6u.png", "20"), + "french": ("https://ptpimg.me/m7mfoi.png", "20"), + "galician": ("https://ptpimg.me/xj51b9.png", "20"), + "georgian": ("https://ptpimg.me/pp412q.png", "20"), + "german": ("https://ptpimg.me/dw8d04.png", "30x10"), + "greek": ("https://ptpimg.me/px1u3e.png", "20"), + "gujarati": ("https://ptpimg.me/d0l479.png", "20"), + "haitian creole": ("https://ptpimg.me/f64wlp.png", "20"), + "hebrew": ("https://ptpimg.me/5jw1jp.png", "20"), + "hindi": ("https://ptpimg.me/d0l479.png", "20"), + "hungarian": ("https://ptpimg.me/fr4aj7.png", "30x10"), + "icelandic": ("https://ptpimg.me/40o553.png", "20"), + "indonesian": ("https://ptpimg.me/f00c8u.png", "20"), + "irish": ("https://ptpimg.me/71x9mk.png", "20"), + "italian": ("https://ptpimg.me/ao762a.png", "20"), + "japanese": ("https://ptpimg.me/o1amm3.png", "20"), + "kannada": ("https://ptpimg.me/d0l479.png", "20"), + "kazakh": ("https://ptpimg.me/tq1h8b.png", "20"), + "khmer": ("https://ptpimg.me/0p1tli.png", "20"), + "korean": ("https://ptpimg.me/2tvwgn.png", "20"), + "kurdish": ("https://ptpimg.me/g290wo.png", "20"), + "kyrgyz": ("https://ptpimg.me/336unh.png", "20"), + "lao": ("https://ptpimg.me/n3nan1.png", "20"), + "latin american": ("https://ptpimg.me/11350x.png", "20"), + "latvian": ("https://ptpimg.me/3x2y1b.png", "25x10"), + "lithuanian": ("https://ptpimg.me/b444z8.png", "20"), + 
"luxembourgish": ("https://ptpimg.me/52x189.png", "20"), + "macedonian": ("https://ptpimg.me/2g5lva.png", "20"), + "malagasy": ("https://ptpimg.me/n5120r.png", "20"), + "malay": ("https://ptpimg.me/02e17w.png", "30x10"), + "malayalam": ("https://ptpimg.me/d0l479.png", "20"), + "maltese": ("https://ptpimg.me/ua46c2.png", "20"), + "maori": ("https://ptpimg.me/2fw03g.png", "20"), + "marathi": ("https://ptpimg.me/d0l479.png", "20"), + "mongolian": ("https://ptpimg.me/z2h682.png", "20"), + "nepali": ("https://ptpimg.me/5yd3sp.png", "20"), + "norwegian": ("https://ptpimg.me/1t11u4.png", "20"), + "pashto": ("https://ptpimg.me/i9pt6k.png", "20"), + "persian": ("https://ptpimg.me/i0y103.png", "20"), + "polish": ("https://ptpimg.me/m73uwa.png", "20"), + "portuguese": ("https://ptpimg.me/5j1a7q.png", "20"), + "portuguese (brazil)": ("https://ptpimg.me/p8sgla.png", "20"), + "punjabi": ("https://ptpimg.me/d0l479.png", "20"), + "romanian": ("https://ptpimg.me/ux94x0.png", "20"), + "russian": ("https://ptpimg.me/v33j64.png", "20"), + "samoan": ("https://ptpimg.me/8nt3zq.png", "20"), + "serbian": ("https://ptpimg.me/2139p2.png", "20"), + "slovak": ("https://ptpimg.me/70994n.png", "20"), + "slovenian": ("https://ptpimg.me/61yp81.png", "25x10"), + "somali": ("https://ptpimg.me/320pa6.png", "20"), + "spanish": ("https://ptpimg.me/xj51b9.png", "20"), + "spanish (latin america)": ("https://ptpimg.me/11350x.png", "20"), + "swahili": ("https://ptpimg.me/d0l479.png", "20"), + "swedish": ("https://ptpimg.me/082090.png", "20"), + "tamil": ("https://ptpimg.me/d0l479.png", "20"), + "telugu": ("https://ptpimg.me/d0l479.png", "20"), + "thai": ("https://ptpimg.me/38ru43.png", "20"), + "turkish": ("https://ptpimg.me/g4jg39.png", "20"), + "ukrainian": ("https://ptpimg.me/d8fp6k.png", "20"), + "urdu": ("https://ptpimg.me/z23gg5.png", "20"), + "uzbek": ("https://ptpimg.me/89854s.png", "20"), + "vietnamese": ("https://ptpimg.me/qnuya2.png", "20"), + "welsh": ("https://ptpimg.me/a9w539.png", "20"), + "xhosa": ("https://ptpimg.me/7teg09.png", "20"), + "yiddish": ("https://ptpimg.me/5jw1jp.png", "20"), + "yoruba": ("https://ptpimg.me/9l34il.png", "20"), + "zulu": ("https://ptpimg.me/7teg09.png", "20") + } + + def parse_mediainfo(self, mediainfo_text): + # Patterns for matching sections and fields + section_pattern = re.compile(r"^(General|Video|Audio|Text|Menu)(?:\s#\d+)?", re.IGNORECASE) + parsed_data = {"general": {}, "video": [], "audio": [], "text": []} + current_section = None + current_track = {} + + # Field lists based on PHP definitions + general_fields = {'file_name', 'format', 'duration', 'file_size', 'bit_rate'} + video_fields = { + 'format', 'format_version', 'codec', 'width', 'height', 'stream_size', + 'framerate_mode', 'frame_rate', 'aspect_ratio', 'bit_rate', 'bit_rate_mode', 'bit_rate_nominal', + 'bit_pixel_frame', 'bit_depth', 'language', 'format_profile', + 'color_primaries', 'title', 'scan_type', 'transfer_characteristics', 'hdr_format' + } + audio_fields = { + 'codec', 'format', 'bit_rate', 'channels', 'title', 'language', 'format_profile', 'stream_size' + } + # text_fields = {'title', 'language'} + + # Split MediaInfo by lines and process each line + for line in mediainfo_text.splitlines(): + line = line.strip() + + # Detect a new section + section_match = section_pattern.match(line) + if section_match: + # Save the last track data if moving to a new section + if current_section and current_track: + if current_section in ["video", "audio", "text"]: + parsed_data[current_section].append(current_track) + 
else: + parsed_data[current_section] = current_track + # Debug output for finalizing the current track data + # print(f"Final processed track data for section '{current_section}': {current_track}") + current_track = {} # Reset current track + + # Update the current section + current_section = section_match.group(1).lower() + continue + + # Split each line on the first colon to separate property and value + if ":" in line: + property_name, property_value = map(str.strip, line.split(":", 1)) + property_name = property_name.lower().replace(" ", "_") + + # Add property if it's a recognized field for the current section + if current_section == "general" and property_name in general_fields: + current_track[property_name] = property_value + elif current_section == "video" and property_name in video_fields: + current_track[property_name] = property_value + elif current_section == "audio" and property_name in audio_fields: + current_track[property_name] = property_value + elif current_section == "text": + # Processing specific properties for text + # Process title field + if property_name == "title" and "title" not in current_track: + title_lower = property_value.lower() + # print(f"\nProcessing Title: '{property_value}'") # Debugging output + + # Store the title as-is since it should remain descriptive + current_track["title"] = property_value + # print(f"Stored title: '{property_value}'") + + # If there's an exact match in LANGUAGE_CODE_MAP, add country code to language field + if title_lower in self.LANGUAGE_CODE_MAP: + country_code, size = self.LANGUAGE_CODE_MAP[title_lower] + current_track["language"] = f"[img={size}]{country_code}[/img]" + # print(f"Exact match found for title '{title_lower}' with country code: {country_code}") + + # Process language field only if it hasn't already been set + elif property_name == "language" and "language" not in current_track: + language_lower = property_value.lower() + # print(f"\nProcessing Language: '{property_value}'") # Debugging output + + if language_lower in self.LANGUAGE_CODE_MAP: + country_code, size = self.LANGUAGE_CODE_MAP[language_lower] + current_track["language"] = f"[img={size}]{country_code}[/img]" + # print(f"Matched language '{language_lower}' to country code: {country_code}") + else: + # If no match in LANGUAGE_CODE_MAP, store language as-is + current_track["language"] = property_value + # print(f"No match found for language '{property_value}', stored as-is.") + + # Append the last track to the parsed data if it exists + if current_section and current_track: + if current_section in ["video", "audio", "text"]: + parsed_data[current_section].append(current_track) + else: + parsed_data[current_section] = current_track + # Final debug output for the last track data + # print(f"Final processed track data for last section '{current_section}': {current_track}") + + # Debug output for the complete parsed_data + # print("\nComplete Parsed Data:") + # for section, data in parsed_data.items(): + # print(f"{section}: {data}") + + return parsed_data + + def format_bbcode(self, parsed_mediainfo): + bbcode_output = "\n" + + # Format General Section + if "general" in parsed_mediainfo: + bbcode_output += "[b]General[/b]\n" + for prop, value in parsed_mediainfo["general"].items(): + bbcode_output += f"[b]{prop.replace('_', ' ').capitalize()}:[/b] {value}\n" + + # Format Video Section + if "video" in parsed_mediainfo: + bbcode_output += "\n[b]Video[/b]\n" + for track in parsed_mediainfo["video"]: + for prop, value in track.items(): + bbcode_output += 
f"[b]{prop.replace('_', ' ').capitalize()}:[/b] {value}\n" + + # Format Audio Section + if "audio" in parsed_mediainfo: + bbcode_output += "\n[b]Audio[/b]\n" + for index, track in enumerate(parsed_mediainfo["audio"], start=1): # Start enumeration at 1 + parts = [f"{index}."] # Start with track number without a trailing slash + + # Language flag image + language = track.get("language", "").lower() + result = self.LANGUAGE_CODE_MAP.get(language) + + # Check if the language was found in LANGUAGE_CODE_MAP + if result is not None: + country_code, size = result + parts.append(f"[img={size}]{country_code}[/img]") + else: + # If language is not found, use a fallback or display the language as plain text + parts.append(language.capitalize() if language else "") + + # Other properties to concatenate + properties = ["language", "codec", "format", "channels", "bit_rate", "format_profile", "stream_size"] + for prop in properties: + if prop in track and track[prop]: # Only add non-empty properties + parts.append(track[prop]) + + # Join parts (starting from index 1, after the track number) with slashes and add to bbcode_output + bbcode_output += f"{parts[0]} " + " / ".join(parts[1:]) + "\n" + + # Format Text Section - Centered with flags or text, spaced apart + if "text" in parsed_mediainfo: + bbcode_output += "\n[b]Subtitles[/b]\n" + subtitle_entries = [] + for track in parsed_mediainfo["text"]: + language_display = track.get("language", "") + subtitle_entries.append(language_display) + bbcode_output += " ".join(subtitle_entries) + + bbcode_output += "\n" + return bbcode_output diff --git a/src/trackers/FL.py b/src/trackers/FL.py index 06cd4bb0b..5d376c917 100644 --- a/src/trackers/FL.py +++ b/src/trackers/FL.py @@ -2,20 +2,19 @@ import asyncio import re import os -from pathlib import Path -import distutils.util -import json +from str2bool import str2bool import glob import pickle from unidecode import unidecode -from urllib.parse import urlparse, quote +from urllib.parse import urlparse import cli_ui from bs4 import BeautifulSoup from src.trackers.COMMON import COMMON -from src.exceptions import * +from src.exceptions import * # noqa F403 from src.console import console + class FL(): def __init__(self, config): @@ -28,7 +27,6 @@ def __init__(self, config): self.uploader_name = config['TRACKERS'][self.tracker].get('uploader_name') self.signature = None self.banned_groups = [""] - async def get_category_id(self, meta): has_ro_audio, has_ro_sub = await self.get_ro_tracks(meta) @@ -51,7 +49,7 @@ async def get_category_id(self, meta): if has_ro_sub and meta.get('sd', 0) == 0 and meta['resolution'] != '2160p': # 19 = Movie + RO cat_id = 19 - + if meta['category'] == 'TV': # 21 = TV HD cat_id = 21 @@ -61,15 +59,15 @@ async def get_category_id(self, meta): elif meta.get('sd', 0) == 1: # 23 = TV SD cat_id = 23 - + if meta['is_disc'] == "DVD": # 2 = DVD cat_id = 2 if has_ro_sub: - # 3 = DVD + RO + # 3 = DVD + RO cat_id = 3 - if meta.get('anime', False) == True: + if meta.get('anime', False) is True: # 24 = Anime cat_id = 24 return cat_id @@ -98,28 +96,23 @@ async def edit_name(self, meta): fl_name = fl_name.replace('DTS7.1', 'DTS').replace('DTS5.1', 'DTS').replace('DTS2.0', 'DTS').replace('DTS1.0', 'DTS') fl_name = fl_name.replace('Dubbed', '').replace('Dual-Audio', '') fl_name = ' '.join(fl_name.split()) - fl_name = re.sub("[^0-9a-zA-Zƀ-Ćæ. &+'\-\[\]]+", "", fl_name) + fl_name = re.sub(r"[^0-9a-zA-Zƀ-Ćæ. 
&+'\-\[\]]+", "", fl_name) fl_name = fl_name.replace(' ', '.').replace('..', '.') - return fl_name - - - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### + return fl_name - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await self.edit_desc(meta) fl_name = await self.edit_name(meta) cat_id = await self.get_category_id(meta) has_ro_audio, has_ro_sub = await self.get_ro_tracks(meta) - + # Confirm the correct naming order for FL cli_ui.info(f"Filelist name: {fl_name}") - if meta.get('unattended', False) == False: + if meta.get('unattended', False) is False: fl_confirm = cli_ui.ask_yes_no("Correct?", default=False) - if fl_confirm != True: + if fl_confirm is not True: fl_name_manually = cli_ui.ask_string("Please enter a proper name", default="") if fl_name_manually == "": console.print('No proper name given') @@ -130,38 +123,38 @@ async def upload(self, meta): # Torrent File Naming # Note: Don't Edit .torrent filename after creation, SubsPlease anime releases (because of their weird naming) are an exception - if meta.get('anime', True) == True and meta.get('tag', '') == '-SubsPlease': + if meta.get('anime', True) is True and meta.get('tag', '') == '-SubsPlease': torrentFileName = fl_name else: - if meta.get('isdir', False) == False: + if meta.get('isdir', False) is False: torrentFileName = meta.get('uuid') torrentFileName = os.path.splitext(torrentFileName)[0] else: torrentFileName = meta.get('uuid') # Download new .torrent from site - fl_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', newline='').read() + fl_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', newline='', encoding='utf-8').read() torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read() with open(torrent_path, 'rb') as torrentFile: torrentFileName = unidecode(torrentFileName) files = { - 'file' : (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent") + 'file': (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent") } data = { - 'name' : fl_name, - 'type' : cat_id, - 'descr' : fl_desc.strip(), - 'nfo' : mi_dump + 'name': fl_name, + 'type': cat_id, + 'descr': fl_desc.strip(), + 'nfo': mi_dump } if int(meta.get('imdb_id', '').replace('tt', '')) != 0: data['imdbid'] = meta.get('imdb_id', '').replace('tt', '') data['description'] = meta['imdb_info'].get('genres', '') - if self.uploader_name not in ("", None) and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if self.uploader_name not in ("", None) and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: data['epenis'] = self.uploader_name if has_ro_audio: data['materialro'] = 'on' @@ -184,7 +177,7 @@ async def upload(self, meta): session.cookies.update(pickle.load(cf)) up = session.post(url=url, data=data, files=files) torrentFile.close() - + # Match url to verify successful upload match = 
re.match(r".*?filelist\.io/details\.php\?id=(\d+)&uploaded=(\d+)", up.url) if match: @@ -194,31 +187,30 @@ async def upload(self, meta): console.print(data) console.print("\n\n") console.print(up.text) - raise UploadException(f"Upload to FL Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') + raise UploadException(f"Upload to FL Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') # noqa F405 return - - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] with requests.Session() as session: cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/FL.pkl") with open(cookiefile, 'rb') as cf: session.cookies.update(pickle.load(cf)) - - search_url = f"https://filelist.io/browse.php" + + search_url = "https://filelist.io/browse.php" if int(meta['imdb_id'].replace('tt', '')) != 0: params = { - 'search' : meta['imdb_id'], - 'cat' : await self.get_category_id(meta), - 'searchin' : '3' + 'search': meta['imdb_id'], + 'cat': await self.get_category_id(meta), + 'searchin': '3' } else: params = { - 'search' : meta['title'], - 'cat' : await self.get_category_id(meta), - 'searchin' : '0' + 'search': meta['title'], + 'cat': await self.get_category_id(meta), + 'searchin': '0' } - + r = session.get(search_url, params=params) await asyncio.sleep(0.5) soup = BeautifulSoup(r.text, 'html.parser') @@ -229,18 +221,15 @@ async def search_existing(self, meta): return dupes - - - async def validate_credentials(self, meta): cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/FL.pkl") if not os.path.exists(cookiefile): await self.login(cookiefile) vcookie = await self.validate_cookies(meta, cookiefile) - if vcookie != True: + if vcookie is not True: console.print('[red]Failed to validate cookies. 
Please confirm that the site is up and your passkey is valid.') recreate = cli_ui.ask_yes_no("Log in again and create new session?") - if recreate == True: + if recreate is True: if os.path.exists(cookiefile): os.remove(cookiefile) await self.login(cookiefile) @@ -249,8 +238,7 @@ async def validate_credentials(self, meta): else: return False return True - - + async def validate_cookies(self, meta, cookiefile): url = "https://filelist.io/index.php" if os.path.exists(cookiefile): @@ -268,18 +256,18 @@ async def validate_cookies(self, meta, cookiefile): return False else: return False - + async def login(self, cookiefile): with requests.Session() as session: r = session.get("https://filelist.io/login.php") await asyncio.sleep(0.5) soup = BeautifulSoup(r.text, 'html.parser') - validator = soup.find('input', {'name' : 'validator'}).get('value') + validator = soup.find('input', {'name': 'validator'}).get('value') data = { - 'validator' : validator, - 'username' : self.username, - 'password' : self.password, - 'unlock' : '1', + 'validator': validator, + 'username': self.username, + 'password': self.password, + 'unlock': '1', } response = session.post('https://filelist.io/takelogin.php', data=data) await asyncio.sleep(0.5) @@ -306,24 +294,22 @@ async def download_new_torrent(self, session, id, torrent_path): console.print(r.text) return - - async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', newline='') as descfile: + base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', newline='', encoding='utf-8') as descfile: from src.bbcode import BBCODE bbcode = BBCODE() - + desc = base desc = bbcode.remove_spoiler(desc) desc = bbcode.convert_code_to_quote(desc) desc = bbcode.convert_comparison_to_centered(desc, 900) desc = desc.replace('[img]', '[img]').replace('[/img]', '[/img]') - desc = re.sub("(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) + desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) if meta['is_disc'] != 'BDMV': url = "https://up.img4k.net/api/description" data = { - 'mediainfo' : open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r').read(), + 'mediainfo': open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r').read(), } if int(meta['imdb_id'].replace('tt', '')) != 0: data['imdbURL'] = f"tt{meta['imdb_id']}" @@ -336,10 +322,10 @@ async def edit_desc(self, meta): else: # BD Description Generator final_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_EXT.txt", 'r', encoding='utf-8').read() - if final_desc.strip() != "": # Use BD_SUMMARY_EXT and bbcode format it + if final_desc.strip() != "": # Use BD_SUMMARY_EXT and bbcode format it final_desc = final_desc.replace('[/pre][/quote]', f'[/pre][/quote]\n\n{desc}\n', 1) final_desc = final_desc.replace('DISC INFO:', '[pre][quote=BD_Info][b][color=#FF0000]DISC INFO:[/color][/b]').replace('PLAYLIST REPORT:', '[b][color=#FF0000]PLAYLIST REPORT:[/color][/b]').replace('VIDEO:', '[b][color=#FF0000]VIDEO:[/color][/b]').replace('AUDIO:', '[b][color=#FF0000]AUDIO:[/color][/b]').replace('SUBTITLES:', '[b][color=#FF0000]SUBTITLES:[/color][/b]') - final_desc += "[/pre][/quote]\n" # Closed bbcode tags + final_desc += "[/pre][/quote]\n" # Closed bbcode tags # Upload screens and append to the end of the 
description url = "https://up.img4k.net/api/description" screen_glob = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['filename']}-*.png") @@ -350,11 +336,10 @@ async def edit_desc(self, meta): final_desc += response.text.replace('\r\n', '\n') descfile.write(final_desc) - if self.signature != None: + if self.signature is not None: descfile.write(self.signature) descfile.close() - async def get_ro_tracks(self, meta): has_ro_audio = has_ro_sub = False if meta.get('is_disc', '') != 'BDMV': diff --git a/src/trackers/FNP.py b/src/trackers/FNP.py new file mode 100644 index 000000000..eac00ec0b --- /dev/null +++ b/src/trackers/FNP.py @@ -0,0 +1,219 @@ +# -*- coding: utf-8 -*- +# import discord +import asyncio +import requests +from str2bool import str2bool +import platform +import bencodepy +import os +import glob + +from src.trackers.COMMON import COMMON +from src.console import console + + +class FNP(): + """ + Edit for Tracker: + Edit BASE.torrent with announce and source + Check for duplicates + Set type/category IDs + Upload + """ + + def __init__(self, config): + self.config = config + self.tracker = 'FNP' + self.source_flag = 'FnP' + self.upload_url = 'https://fearnopeer.com/api/torrents/upload' + self.search_url = 'https://fearnopeer.com/api/torrents/filter' + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" + self.banned_groups = [""] + pass + + async def get_cat_id(self, category_name): + category_id = { + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '0') + return category_id + + async def get_type_id(self, type): + type_id = { + 'DISC': '1', + 'REMUX': '2', + 'WEBDL': '4', + 'WEBRIP': '5', + 'HDTV': '6', + 'ENCODE': '3' + }.get(type, '0') + return type_id + + async def get_res_id(self, resolution): + resolution_id = { + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', + '1080p': '3', + '1080i': '4', + '720p': '5', + '576p': '6', + '576i': '7', + '480p': '8', + '480i': '9' + }.get(resolution, '10') + return resolution_id + + async def upload(self, meta, disctype): + common = COMMON(config=self.config) + await common.edit_torrent(meta, self.tracker, self.source_flag) + cat_id = await self.get_cat_id(meta['category']) + type_id = await self.get_type_id(meta['type']) + resolution_id = await self.get_res_id(meta['resolution']) + await common.unit3d_edit_desc(meta, self.tracker, self.signature) + region_id = await common.unit3d_region_ids(meta.get('region')) + distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + anon = 0 + else: + anon = 1 + + if meta['bdinfo'] is not None: + mi_dump = None + bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + else: + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + bd_dump = None + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') + files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + 
files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") + data = { + 'name': meta['name'], + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, + } + # Internal + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: + if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + data['internal'] = 1 + + if region_id != 0: + data['region_id'] = region_id + if distributor_id != 0: + data['distributor_id'] = distributor_id + if meta.get('category') == "TV": + data['season_number'] = meta.get('season_int', '0') + data['episode_number'] = meta.get('episode_int', '0') + headers = { + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' + } + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + } + + if meta['debug'] is False: + response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + try: + console.print(response.json()) + except Exception: + console.print("It may have uploaded, go check") + return + else: + console.print("[cyan]Request Data:") + console.print(data) + open_torrent.close() + + async def search_existing(self, meta, disctype): + dupes = [] + console.print("[yellow]Searching for existing torrents on FNP...") + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" + } + if meta.get('edition', "") != "": + params['name'] = params['name'] + f" {meta['edition']}" + try: + response = requests.get(url=self.search_url, params=params) + response = response.json() + for each in response['data']: + result = [each][0]['attributes']['name'] + # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() + # if difference >= 0.05: + dupes.append(result) + except Exception: + console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') + await asyncio.sleep(5) + + return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py index 7fab14991..decfb328d 100644 --- a/src/trackers/HDB.py +++ b/src/trackers/HDB.py @@ -3,15 +3,16 @@ import re import os from pathlib import Path -import traceback import json import glob from unidecode import unidecode from urllib.parse import urlparse, quote from src.trackers.COMMON import COMMON -from src.bbcode import BBCODE -from src.exceptions import * +from src.exceptions import * # noqa F403 from src.console import console +from datetime import datetime +from torf import Torrent + class HDB(): @@ -21,10 +22,9 @@ def __init__(self, config): self.source_flag = 'HDBits' self.username = config['TRACKERS']['HDB'].get('username', '').strip() self.passkey = config['TRACKERS']['HDB'].get('passkey', '').strip() - self.rehost_images = config['TRACKERS']['HDB'].get('img_rehost', False) + self.rehost_images = config['TRACKERS']['HDB'].get('img_rehost', True) self.signature = None self.banned_groups = [""] - async def get_type_category_id(self, meta): cat_id = "EXIT" @@ -46,12 +46,12 @@ async def get_type_category_id(self, meta): async def get_type_codec_id(self, meta): codecmap = { - "AVC" : 1, "H.264" : 1, - "HEVC" : 5, "H.265" : 5, - "MPEG-2" : 2, - "VC-1" : 3, - "XviD" : 4, - "VP9" : 6 + "AVC": 1, "H.264": 1, + "HEVC": 5, "H.265": 5, + "MPEG-2": 2, + "VC-1": 3, + "XviD": 4, + "VP9": 6 } searchcodec = meta.get('video_codec', meta.get('video_encode')) codec_id = codecmap.get(searchcodec, "EXIT") @@ -65,8 +65,8 @@ async def get_type_medium_id(self, meta): # 4 = Capture if meta.get('type', '') == "HDTV": medium_id = 4 - if meta.get('has_encode_settings', False) == True: - medium_id = 3 + if meta.get('has_encode_settings', False) is True: + medium_id = 3 # 3 = Encode if meta.get('type', '') in ("ENCODE", "WEBRIP"): medium_id = 3 @@ -80,18 +80,18 @@ async def get_type_medium_id(self, meta): async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', - '4320p': '1', - '2160p': '2', - '1440p' : '3', + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', '1080p': '3', - '1080i':'4', - '720p': '5', - '576p': '6', + '1080i': '4', + '720p': '5', + '576p': '6', '576i': '7', - '480p': '8', + '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') 
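# Aside: the search_torrent_page helper that just closed out FNP.py above (and is repeated verbatim in the HHD and HP trackers later in this diff) fetches the first matching result's details_link and stamps it into the local .torrent's top-level 'comment' field so the client shows where the upload lives. A minimal, self-contained sketch of that bencode round-trip; the path and URL below are hypothetical, not taken from this diff:
import bencodepy

def stamp_comment(torrent_file_path, details_link):
    # Decode the existing torrent (bencodepy works with byte keys/values)
    with open(torrent_file_path, 'rb') as f:
        torrent = bencodepy.decode(f.read())
    torrent[b'comment'] = details_link.encode('utf-8')
    # Re-encode and overwrite the file in place
    with open(torrent_file_path, 'wb') as f:
        f.write(bencodepy.encode(torrent))

# e.g. stamp_comment("tmp/example.torrent", "https://fearnopeer.com/torrents/12345")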
return resolution_id async def get_tags(self, meta): @@ -99,27 +99,27 @@ async def get_tags(self, meta): # Web Services: service_dict = { - "AMZN" : 28, - "NF" : 29, - "HULU" : 34, - "DSNP" : 33, - "HMAX" : 30, - "ATVP" : 27, - "iT" : 38, - "iP" : 56, - "STAN" : 32, - "PCOK" : 31, - "CR" : 72, - "PMTP" : 69, - "MA" : 77, - "SHO" : 76, - "BCORE" : 66, "CORE" : 66, - "CRKL" : 73, - "FUNI" : 74, - "HLMK" : 71, - "HTSR" : 79, - "CRAV" : 80, - 'MAX' : 88 + "AMZN": 28, + "NF": 29, + "HULU": 34, + "DSNP": 33, + "HMAX": 30, + "ATVP": 27, + "iT": 38, + "iP": 56, + "STAN": 32, + "PCOK": 31, + "CR": 72, + "PMTP": 69, + "MA": 77, + "SHO": 76, + "BCORE": 66, "CORE": 66, + "CRKL": 73, + "FUNI": 74, + "HLMK": 71, + "HTSR": 79, + "CRAV": 80, + 'MAX': 88 } if meta.get('service') in service_dict.keys(): tags.append(service_dict.get(meta['service'])) @@ -127,19 +127,18 @@ async def get_tags(self, meta): # Collections # Masters of Cinema, The Criterion Collection, Warner Archive Collection distributor_dict = { - "WARNER ARCHIVE" : 68, "WARNER ARCHIVE COLLECTION" : 68, "WAC" : 68, - "CRITERION" : 18, "CRITERION COLLECTION" : 18, "CC" : 18, - "MASTERS OF CINEMA" : 19, "MOC" : 19, - "KINO LORBER" : 55, "KINO" : 55, - "BFI VIDEO" : 63, "BFI" : 63, "BRITISH FILM INSTITUTE" : 63, - "STUDIO CANAL" : 65, - "ARROW" : 64 + "WARNER ARCHIVE": 68, "WARNER ARCHIVE COLLECTION": 68, "WAC": 68, + "CRITERION": 18, "CRITERION COLLECTION": 18, "CC": 18, + "MASTERS OF CINEMA": 19, "MOC": 19, + "KINO LORBER": 55, "KINO": 55, + "BFI VIDEO": 63, "BFI": 63, "BRITISH FILM INSTITUTE": 63, + "STUDIO CANAL": 65, + "ARROW": 64 } if meta.get('distributor') in distributor_dict.keys(): tags.append(distributor_dict.get(meta['distributor'])) - - # 4K Remaster, + # 4K Remaster, if "IMAX" in meta.get('edition', ''): tags.append(14) if "OPEN MATTE" in meta.get('edition', '').upper(): @@ -151,20 +150,20 @@ async def get_tags(self, meta): tags.append(7) if "Atmos" in meta['audio']: tags.append(5) - if meta.get('silent', False) == True: - console.print('[yellow]zxx audio track found, suggesting you tag as silent') #57 + if meta.get('silent', False) is True: + console.print('[yellow]zxx audio track found, suggesting you tag as silent') # 57 # Video Metadata - # HDR10, HDR10+, Dolby Vision, 10-bit, + # HDR10, HDR10+, Dolby Vision, 10-bit, if "HDR" in meta.get('hdr', ''): if "HDR10+" in meta['hdr']: - tags.append(25) #HDR10+ + tags.append(25) # HDR10+ else: - tags.append(9) #HDR10 + tags.append(9) # HDR10 if "DV" in meta.get('hdr', ''): - tags.append(6) #DV + tags.append(6) # DV if "HLG" in meta.get('hdr', ''): - tags.append(10) #HLG + tags.append(10) # HLG return tags @@ -192,17 +191,12 @@ async def edit_name(self, meta): hdb_name = hdb_name.replace('Dubbed', '').replace('Dual-Audio', '') hdb_name = hdb_name.replace('REMUX', 'Remux') hdb_name = ' '.join(hdb_name.split()) - hdb_name = re.sub("[^0-9a-zA-Zƀ-Ćæ. :&+'\-\[\]]+", "", hdb_name) + hdb_name = re.sub(r"[^0-9a-zA-Zƀ-Ćæ. 
:&+'\-\[\]]+", "", hdb_name) hdb_name = hdb_name.replace(' .', '.').replace('..', '.') - return hdb_name - + return hdb_name - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### - - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await self.edit_desc(meta) @@ -216,50 +210,76 @@ async def upload(self, meta): if each == "EXIT": console.print("[bold red]Something didn't map correctly, or this content is not allowed on HDB") return - if "Dual-Audio" in meta['audio'] and meta['is_disc'] not in ("BDMV", "HDDVD", "DVD"): - console.print("[bold red]Dual-Audio Encodes are not allowed") + if "Dual-Audio" in meta['audio']: + if not (meta['anime'] or meta['is_disc']): + console.print("[bold red]Dual-Audio Encodes are not allowed for non-anime and non-disc content") return - # FORM - # file : .torent file (needs renaming) - # name : name - # type_category : get_type_category_id - # type_codec : get_type_codec_id - # type_medium : get_type_medium_id - # type_origin : 0 unless internal (1) - # descr : description - # techinfo : mediainfo only, no bdinfo - # tags[] : get_tags - # imdb : imdb link - # tvdb_id : tvdb id - # season : season number - # episode : episode number - # anidb_id - # POST > upload/upload # Download new .torrent from site - hdb_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + hdb_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + torrent = Torrent.read(torrent_path) + + # Check if the piece size exceeds 16 MiB and regenerate the torrent if needed + if torrent.piece_size > 16777216: # 16 MiB in bytes + console.print("[red]Piece size is OVER 16M and does not work on HDB. 
Generating a new .torrent") + + # Import Prep and regenerate the torrent with 16 MiB piece size limit + from src.prep import Prep + prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) + + if meta['is_disc'] == 1: + include = [] + exclude = [] + else: + include = ["*.mkv", "*.mp4", "*.ts"] + exclude = ["*.*", "*sample.mkv", "!sample*.*"] + + # Create a new torrent with piece size explicitly set to 16 MiB + new_torrent = prep.CustomTorrent( + meta=meta, + path=Path(meta['path']), + trackers=["https://fake.tracker"], + source="L4G", + private=True, + exclude_globs=exclude, # Ensure this is always a list + include_globs=include, # Ensure this is always a list + creation_date=datetime.now(), + comment="Created by L4G's Upload Assistant", + created_by="L4G's Upload Assistant" + ) + + # Explicitly set the piece size and update metainfo + new_torrent.piece_size = 16777216 # 16 MiB in bytes + new_torrent.metainfo['info']['piece length'] = 16777216 # Ensure 'piece length' is set + + # Validate and write the new torrent + new_torrent.validate_piece_size() + new_torrent.generate(callback=prep.torf_cb, interval=5) + new_torrent.write(torrent_path, overwrite=True) + + # Proceed with the upload process with open(torrent_path, 'rb') as torrentFile: if len(meta['filelist']) == 1: torrentFileName = unidecode(os.path.basename(meta['video']).replace(' ', '.')) else: torrentFileName = unidecode(os.path.basename(meta['path']).replace(' ', '.')) files = { - 'file' : (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent") + 'file': (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorrent") } data = { - 'name' : hdb_name, - 'category' : cat_id, - 'codec' : codec_id, - 'medium' : medium_id, - 'origin' : 0, - 'descr' : hdb_desc.rstrip(), - 'techinfo' : '', - 'tags[]' : hdb_tags, + 'name': hdb_name, + 'category': cat_id, + 'codec': codec_id, + 'medium': medium_id, + 'origin': 0, + 'descr': hdb_desc.rstrip(), + 'techinfo': '', + 'tags[]': hdb_tags, } # If internal, set 1 - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 # If not BDMV fill mediainfo @@ -275,7 +295,6 @@ async def upload(self, meta): data['tvdb_episode'] = int(meta.get('episode_int', 1)) # aniDB - url = "https://hdbits.org/upload/upload" # Submit if meta['debug']: @@ -297,66 +316,62 @@ async def upload(self, meta): console.print(data) console.print("\n\n") console.print(up.text) - raise UploadException(f"Upload to HDB Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') + raise UploadException(f"Upload to HDB Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') # noqa F405 return - - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on HDB...") url = "https://hdbits.org/api/torrents" data = { - 'username' : self.username, - 'passkey' : self.passkey, - 'category' : await self.get_type_category_id(meta), - 'codec' : await self.get_type_codec_id(meta), - 'medium' : await self.get_type_medium_id(meta), - 'search' : meta['resolution'] + 'username': self.username, + 'passkey': self.passkey, + 'category': await self.get_type_category_id(meta), + 'codec': await 
self.get_type_codec_id(meta), + 'medium': await self.get_type_medium_id(meta), + 'search': meta['resolution'] } if int(meta.get('imdb_id', '0').replace('tt', '0')) != 0: - data['imdb'] = {'id' : meta['imdb_id']} + data['imdb'] = {'id': meta['imdb_id']} if int(meta.get('tvdb_id', '0')) != 0: - data['tvdb'] = {'id' : meta['tvdb_id']} + data['tvdb'] = {'id': meta['tvdb_id']} try: response = requests.get(url=url, data=json.dumps(data)) response = response.json() for each in response['data']: result = each['name'] dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your passkey is incorrect') await asyncio.sleep(5) return dupes - - - async def validate_credentials(self, meta): - vapi = await self.validate_api() + vapi = await self.validate_api() vcookie = await self.validate_cookies(meta) - if vapi != True: + if vapi is not True: console.print('[red]Failed to validate API. Please confirm that the site is up and your passkey is valid.') return False - if vcookie != True: + if vcookie is not True: console.print('[red]Failed to validate cookies. Please confirm that the site is up and your passkey is valid.') return False return True - + async def validate_api(self): url = "https://hdbits.org/api/test" data = { - 'username' : self.username, - 'passkey' : self.passkey + 'username': self.username, + 'passkey': self.passkey } try: r = requests.post(url, data=json.dumps(data)).json() if r.get('status', 5) == 0: return True return False - except: + except Exception: return False - + async def validate_cookies(self, meta): common = COMMON(config=self.config) url = "https://hdbits.org" @@ -382,9 +397,9 @@ async def download_new_torrent(self, id, torrent_path): # Get HDB .torrent filename api_url = "https://hdbits.org/api/torrents" data = { - 'username' : self.username, - 'passkey' : self.passkey, - 'id' : id + 'username': self.username, + 'passkey': self.passkey, + 'id': id } r = requests.get(url=api_url, data=json.dumps(data)) filename = r.json()['data'][0]['filename'] @@ -392,8 +407,8 @@ async def download_new_torrent(self, id, torrent_path): # Download new .torrent download_url = f"https://hdbits.org/download.php/{quote(filename)}" params = { - 'passkey' : self.passkey, - 'id' : id + 'passkey': self.passkey, + 'id': id } r = requests.get(url=download_url, params=params) @@ -402,11 +417,11 @@ async def download_new_torrent(self, id, torrent_path): return async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as descfile: + base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as descfile: from src.bbcode import BBCODE # Add This line for all web-dls - if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '' and meta.get('description', None) == None: + if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '' and meta.get('description', None) is None: descfile.write(f"[center][quote]This release is sourced from {meta['service_longname']}[/quote][/center]") bbcode = BBCODE() if meta.get('discs', []) != []: @@ -431,101 +446,142 @@ async def edit_desc(self, meta): desc = bbcode.convert_code_to_quote(desc) desc = bbcode.convert_spoiler_to_hide(desc) desc = 
bbcode.convert_comparison_to_centered(desc, 1000) - desc = re.sub("(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) + desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) descfile.write(desc) - if self.rehost_images == True: + if self.rehost_images is True: console.print("[green]Rehosting Images...") hdbimg_bbcode = await self.hdbimg_upload(meta) descfile.write(f"{hdbimg_bbcode}") else: images = meta['image_list'] - if len(images) > 0: + if len(images) > 0: descfile.write("[center]") for each in range(len(images[:int(meta['screens'])])): img_url = images[each]['img_url'] web_url = images[each]['web_url'] descfile.write(f"[url={web_url}][img]{img_url}[/img][/url]") descfile.write("[/center]") - if self.signature != None: + if self.signature is not None: descfile.write(self.signature) descfile.close() - async def hdbimg_upload(self, meta): images = glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['filename']}-*.png") url = "https://img.hdbits.org/upload_api.php" data = { - 'username' : self.username, - 'passkey' : self.passkey, - 'galleryoption' : 1, - 'galleryname' : meta['name'], - 'thumbsize' : 'w300' + 'username': self.username, + 'passkey': self.passkey, + 'galleryoption': 1, + 'galleryname': meta['name'], + 'thumbsize': 'w300' } files = {} # Set maximum screenshots to 3 for tv singles and 6 for everything else - hdbimg_screen_count = 3 if meta['category'] == "TV" and meta.get('tv_pack', 0) == 0 else 6 + hdbimg_screen_count = 3 if meta['category'] == "TV" and meta.get('tv_pack', 0) == 0 else 6 if len(images) < hdbimg_screen_count: - hdbimg_screen_count = len(images) + hdbimg_screen_count = len(images) for i in range(hdbimg_screen_count): files[f'images_files[{i}]'] = open(images[i], 'rb') r = requests.post(url=url, data=data, files=files) image_bbcode = r.text return image_bbcode - - async def get_info_from_torrent_id(self, hdb_id): hdb_imdb = hdb_name = hdb_torrenthash = None url = "https://hdbits.org/api/torrents" data = { - "username" : self.username, - "passkey" : self.passkey, - "id" : hdb_id + "username": self.username, + "passkey": self.passkey, + "id": hdb_id } response = requests.get(url, json=data) if response.ok: try: response = response.json() if response['data'] != []: - hdb_imdb = response['data'][0].get('imdb', {'id' : None}).get('id') - hdb_tvdb = response['data'][0].get('tvdb', {'id' : None}).get('id') + hdb_imdb = response['data'][0].get('imdb', {'id': None}).get('id') + hdb_tvdb = response['data'][0].get('tvdb', {'id': None}).get('id') hdb_name = response['data'][0]['name'] hdb_torrenthash = response['data'][0]['hash'] - except: + except Exception: console.print_exception() else: console.print("Failed to get info from HDB ID. 
Either the site is down or your credentials are invalid") return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash - async def search_filename(self, filelist): + async def search_filename(self, search_term, search_file_folder, meta): hdb_imdb = hdb_tvdb = hdb_name = hdb_torrenthash = hdb_id = None url = "https://hdbits.org/api/torrents" - data = { - "username" : self.username, - "passkey" : self.passkey, - "limit" : 100, - "file_in_torrent" : os.path.basename(filelist[0]) - } + + # Handle disc case + if search_file_folder == 'folder' and meta.get('is_disc'): + bd_summary_path = os.path.join(meta['base_dir'], 'tmp', meta['uuid'], 'BD_SUMMARY_00.txt') + bd_summary = None + + # Parse the BD_SUMMARY_00.txt file to extract the Disc Title + try: + with open(bd_summary_path, 'r', encoding='utf-8') as file: + for line in file: + if "Disc Title:" in line: + bd_summary = line.split("Disc Title:")[1].strip() + break + + if bd_summary: + data = { + "username": self.username, + "passkey": self.passkey, + "limit": 100, + "search": bd_summary # Using the Disc Title for search + } + console.print(f"[green]Searching HDB for disc title: [bold yellow]{bd_summary}[/bold yellow]") + # console.print(f"[yellow]Using this data: {data}") + else: + console.print(f"[red]Error: 'Disc Title' not found in {bd_summary_path}[/red]") + return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash, hdb_id + + except FileNotFoundError: + console.print(f"[red]Error: File not found at {bd_summary_path}[/red]") + return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash, hdb_id + + else: # Handling non-disc case + data = { + "username": self.username, + "passkey": self.passkey, + "limit": 100, + "file_in_torrent": os.path.basename(search_term) + } + console.print(f"[green]Searching HDB for file: [bold yellow]{os.path.basename(search_term)}[/bold yellow]") + # console.print(f"[yellow]Using this data: {data}") + response = requests.get(url, json=data) - console.print(f"[green]Searching HDB for: [bold yellow]{os.path.basename(filelist[0])}[/bold yellow]") + if response.ok: try: - response = response.json() - if response['data'] != []: - for each in response['data']: - if each['numfiles'] == len(filelist): - hdb_imdb = each.get('imdb', {'id' : None}).get('id') - hdb_tvdb = each.get('tvdb', {'id' : None}).get('id') - hdb_name = each['name'] - hdb_torrenthash = each['hash'] - hdb_id = each['id'] - console.print(f'[bold green]Matched release with HDB ID: [yellow]{hdb_id}[/yellow][/bold green]') - return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash, hdb_id - except: + response_json = response.json() + # console.print(f"[green]HDB API response: {response_json}[/green]") # Log the entire response for debugging + + if 'data' not in response_json: + console.print(f"[red]Error: 'data' key not found in HDB API response. Full response: {response_json}[/red]") + return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash, hdb_id + + if response_json['data'] != []: + for each in response_json['data']: + hdb_imdb = each.get('imdb', {'id': None}).get('id') + hdb_tvdb = each.get('tvdb', {'id': None}).get('id') + hdb_name = each['name'] + hdb_torrenthash = each['hash'] + hdb_id = each['id'] + console.print(f'[bold green]Matched release with HDB ID: [yellow]https://hdbits.org/details.php?id={hdb_id}[/yellow][/bold green]') + return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash, hdb_id + else: + console.print('[yellow]No data found in the HDB API response[/yellow]') + except Exception as e: console.print_exception() + console.print(f"[red]Failed to parse HDB API response. 
Error: {str(e)}[/red]") else: - console.print("Failed to get info from HDB ID. Either the site is down or your credentials are invalid") - console.print(f'[yellow]Could not find a matching release on HDB') - return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash, hdb_id \ No newline at end of file + console.print(f"[red]Failed to get info from HDB. Status code: {response.status_code}, Reason: {response.reason}[/red]") + + console.print('[yellow]Could not find a matching release on HDB[/yellow]') + return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash, hdb_id diff --git a/src/trackers/HDT.py b/src/trackers/HDT.py index 6b9fa0320..735a7f5ff 100644 --- a/src/trackers/HDT.py +++ b/src/trackers/HDT.py @@ -2,22 +2,18 @@ import asyncio import re import os -import json -import glob import cli_ui -import pickle -import distutils -from pathlib import Path +from str2bool import str2bool from bs4 import BeautifulSoup from unidecode import unidecode from pymediainfo import MediaInfo - from src.trackers.COMMON import COMMON -from src.exceptions import * +from src.exceptions import * # noqa F403 from src.console import console + class HDT(): - + def __init__(self, config): self.config = config self.tracker = 'HDT' @@ -26,7 +22,7 @@ def __init__(self, config): self.password = config['TRACKERS'][self.tracker].get('password', '').strip() self.signature = None self.banned_groups = [""] - + async def get_category_id(self, meta): if meta['category'] == 'MOVIE': # BDMV @@ -37,7 +33,7 @@ async def get_category_id(self, meta): if meta['resolution'] in ('1080p', '1080i'): # 1 = Movie/Blu-Ray cat_id = 1 - + # REMUX if meta.get('type', '') == 'REMUX': if meta.get('uhd', '') == 'UHD' and meta['resolution'] == '2160p': @@ -46,7 +42,7 @@ async def get_category_id(self, meta): else: # 2 = Movie/Remux cat_id = 2 - + # REST OF THE STUFF if meta.get('type', '') not in ("DISC", "REMUX"): if meta['resolution'] == '2160p': @@ -68,7 +64,7 @@ async def get_category_id(self, meta): if meta['resolution'] in ('1080p', '1080i'): # 59 = TV Show/Blu-ray cat_id = 59 - + # REMUX if meta.get('type', '') == 'REMUX': if meta.get('uhd', '') == 'UHD' and meta['resolution'] == '2160p': @@ -77,7 +73,7 @@ async def get_category_id(self, meta): else: # 60 = TV Show/Remux cat_id = 60 - + # REST OF THE STUFF if meta.get('type', '') not in ("DISC", "REMUX"): if meta['resolution'] == '2160p': @@ -89,11 +85,8 @@ async def get_category_id(self, meta): elif meta['resolution'] == '720p': # 38 = TV Show/720p cat_id = 38 - - return cat_id - - + return cat_id async def edit_name(self, meta): hdt_name = meta['name'] @@ -103,17 +96,13 @@ async def edit_name(self, meta): hdt_name = hdt_name.replace(meta['audio'], meta['audio'].replace(' ', '', 1)) if 'DV' in meta.get('hdr', ''): hdt_name = hdt_name.replace(' DV ', ' DoVi ') - + hdt_name = ' '.join(hdt_name.split()) - hdt_name = re.sub("[^0-9a-zA-Zƀ-Ćæ. &+'\-\[\]]+", "", hdt_name) + hdt_name = re.sub(r"[^0-9a-zA-Zƀ-Ćæ. 
&+'\-\[\]]+", "", hdt_name) hdt_name = hdt_name.replace(':', '').replace('..', ' ').replace(' ', ' ') return hdt_name - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### - - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await self.edit_desc(meta) @@ -122,9 +111,9 @@ async def upload(self, meta): # Confirm the correct naming order for HDT cli_ui.info(f"HDT name: {hdt_name}") - if meta.get('unattended', False) == False: + if meta.get('unattended', False) is False: hdt_confirm = cli_ui.ask_yes_no("Correct?", default=False) - if hdt_confirm != True: + if hdt_confirm is not True: hdt_name_manually = cli_ui.ask_string("Please enter a proper name", default="") if hdt_name_manually == "": console.print('No proper name given') @@ -132,26 +121,26 @@ async def upload(self, meta): return else: hdt_name = hdt_name_manually - + # Upload - hdt_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', newline='').read() + hdt_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', newline='', encoding='utf-8').read() torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" with open(torrent_path, 'rb') as torrentFile: torrentFileName = unidecode(hdt_name) files = { - 'torrent' : (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent") + 'torrent': (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent") } data = { - 'filename' : hdt_name, - 'category' : cat_id, - 'info' : hdt_desc.strip() + 'filename': hdt_name, + 'category': cat_id, + 'info': hdt_desc.strip() } # 3D if "3D" in meta.get('3d', ''): data['3d'] = 'true' - + # HDR if "HDR" in meta.get('hdr', ''): if "HDR10+" in meta['hdr']: @@ -161,74 +150,96 @@ async def upload(self, meta): data['HDR10'] = 'true' if "DV" in meta.get('hdr', ''): data['DolbyVision'] = 'true' - + # IMDB if int(meta.get('imdb_id', '').replace('tt', '')) != 0: data['infosite'] = f"https://www.imdb.com/title/tt{meta['imdb_id']}/" - + # Full Season Pack if int(meta.get('tv_pack', '0')) != 0: data['season'] = 'true' else: data['season'] = 'false' - + # Anonymous check - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: data['anonymous'] = 'false' else: data['anonymous'] = 'true' # Send - url = "https://hd-torrents.org/upload.php" + url = "https://hd-torrents.net/upload.php" if meta['debug']: console.print(url) + console.print("Data to be sent:", style="bold blue") console.print(data) - else: - with requests.Session() as session: - cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/HDT.txt") - - session.cookies.update(await common.parseCookieFile(cookiefile)) - up = session.post(url=url, data=data, files=files) - torrentFile.close() - - # Match url to verify successful upload - search = re.search(r"download\.php\?id\=([a-z0-9]+)", up.text).group(1) - if search: - # modding existing torrent for adding to client instead of downloading torrent from site. 
- await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS']['HDT'].get('my_announce_url'), "https://hd-torrents.org/details.php?id=" + search) - else: - console.print(data) - console.print("\n\n") - console.print(up.text) - raise UploadException(f"Upload to HDT Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') + console.print("Files being sent:", style="bold blue") + console.print(files) + with requests.Session() as session: + cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/HDT.txt") + + if meta['debug']: + console.print(f"Cookie file path: {cookiefile}") + + session.cookies.update(await common.parseCookieFile(cookiefile)) + + if meta['debug']: + console.print(f"Session cookies: {session.cookies}") + + up = session.post(url=url, data=data, files=files) + torrentFile.close() + + # Debug response + if meta['debug']: + console.print(f"Response URL: {up.url}") + console.print(f"Response Status Code: {up.status_code}") + console.print("Response Headers:", style="bold blue") + console.print(up.headers) + console.print("Response Text (truncated):", style="dim") + console.print(up.text[:500] + "...") + + # Match url to verify successful upload + search = re.search(r"download\.php\?id\=([a-z0-9]+)", up.text) + if search: + torrent_id = search.group(1) + if meta['debug']: + console.print(f"Upload Successful: Torrent ID {torrent_id}", style="bold green") + + # Modding existing torrent for adding to client instead of downloading torrent from site + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS']['HDT'].get('my_announce_url'), "") + else: + console.print(data) + console.print("Failed to find download link in response text.", style="bold red") + console.print("Response Data (full):", style="dim") + console.print(up.text) + raise UploadException(f"Upload to HDT Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') # noqa F405 return - - - async def search_existing(self, meta): + + async def search_existing(self, meta, disctype): dupes = [] with requests.Session() as session: common = COMMON(config=self.config) cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/HDT.txt") session.cookies.update(await common.parseCookieFile(cookiefile)) - - search_url = f"https://hd-torrents.org/torrents.php" + + search_url = "https://hd-torrents.net/torrents.php" csrfToken = await self.get_csrfToken(session, search_url) if int(meta['imdb_id'].replace('tt', '')) != 0: params = { - 'csrfToken' : csrfToken, - 'search' : meta['imdb_id'], - 'active' : '0', - 'options' : '2', - 'category[]' : await self.get_category_id(meta) + 'csrfToken': csrfToken, + 'search': meta['imdb_id'], + 'active': '0', + 'options': '2', + 'category[]': await self.get_category_id(meta) } else: params = { - 'csrfToken' : csrfToken, - 'search' : meta['title'], - 'category[]' : await self.get_category_id(meta), - 'options' : '3' + 'csrfToken': csrfToken, + 'search': meta['title'], + 'category[]': await self.get_category_id(meta), + 'options': '3' } - + r = session.get(search_url, params=params) await asyncio.sleep(0.5) soup = BeautifulSoup(r.text, 'html.parser') @@ -236,22 +247,20 @@ async def search_existing(self, meta): for each in find: if each['href'].startswith('details.php?id='): dupes.append(each.text) - + return dupes - async def validate_credentials(self, meta): cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/HDT.txt") vcookie = await self.validate_cookies(meta, cookiefile) - if 
vcookie != True: + if vcookie is not True: console.print('[red]Failed to validate cookies. Please confirm that the site is up or export a fresh cookie file from the site') return False return True - - + async def validate_cookies(self, meta, cookiefile): common = COMMON(config=self.config) - url = "https://hd-torrents.org/index.php" + url = "https://hd-torrents.net/index.php" cookiefile = f"{meta['base_dir']}/data/cookies/HDT.txt" if os.path.exists(cookiefile): with requests.Session() as session: @@ -267,66 +276,34 @@ async def validate_cookies(self, meta, cookiefile): return False else: return False - - - - """ - Old login method, disabled because of site's DDOS protection. Better to use exported cookies. - - async def login(self, cookiefile): - with requests.Session() as session: - url = "https://hd-torrents.org/login.php" - csrfToken = await self.get_csrfToken(session, url) - data = { - 'csrfToken' : csrfToken, - 'uid' : self.username, - 'pwd' : self.password, - 'submit' : 'Confirm' - } - response = session.post('https://hd-torrents.org/login.php', data=data) - await asyncio.sleep(0.5) - index = 'https://hd-torrents.org/index.php' - response = session.get(index) - if response.text.find("Logout") != -1: - console.print('[green]Successfully logged into HDT') - with open(cookiefile, 'wb') as cf: - pickle.dump(session.cookies, cf) - else: - console.print('[bold red]Something went wrong while trying to log into HDT. Make sure your username and password are correct') - await asyncio.sleep(1) - console.print(response.url) - return - """ - - async def get_csrfToken(self, session, url): r = session.get(url) await asyncio.sleep(0.5) soup = BeautifulSoup(r.text, 'html.parser') - csrfToken = soup.find('input', {'name' : 'csrfToken'}).get('value') + csrfToken = soup.find('input', {'name': 'csrfToken'}).get('value') return csrfToken - + async def edit_desc(self, meta): # base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', newline='') as descfile: + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', newline='', encoding='utf-8') as descfile: if meta['is_disc'] != 'BDMV': # Beautify MediaInfo for HDT using custom template video = meta['filelist'][0] mi_template = os.path.abspath(f"{meta['base_dir']}/data/templates/MEDIAINFO.txt") if os.path.exists(mi_template): - media_info = MediaInfo.parse(video, output="STRING", full=False, mediainfo_options={"inform" : f"file://{mi_template}"}) + media_info = MediaInfo.parse(video, output="STRING", full=False, mediainfo_options={"inform": f"file://{mi_template}"}) descfile.write(f"""[left][font=consolas]\n{media_info}\n[/font][/left]\n""") else: console.print("[bold red]Couldn't find the MediaInfo template") console.print("[green]Using normal MediaInfo for the description.") - + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8') as MI: descfile.write(f"""[left][font=consolas]\n{MI.read()}\n[/font][/left]\n\n""") else: with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8') as BD_SUMMARY: descfile.write(f"""[left][font=consolas]\n{BD_SUMMARY.read()}\n[/font][/left]\n\n""") - + # Add Screenshots images = meta['image_list'] if len(images) > 0: @@ -336,4 +313,3 @@ async def edit_desc(self, meta): descfile.write(f' ') descfile.close() - diff --git a/src/trackers/HHD.py b/src/trackers/HHD.py new file mode 100644 index 
000000000..67630ea9b --- /dev/null +++ b/src/trackers/HHD.py @@ -0,0 +1,214 @@ +# -*- coding: utf-8 -*- +# import discord +import asyncio +import requests +from str2bool import str2bool +import platform +import bencodepy +import os +import glob + +from src.trackers.COMMON import COMMON +from src.console import console + + +class HHD(): + def __init__(self, config): + self.config = config + self.tracker = 'HHD' + self.source_flag = 'HHD' + self.upload_url = 'https://homiehelpdesk.net/api/torrents/upload' + self.search_url = 'https://homiehelpdesk.net/api/torrents/filter' + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" + self.banned_groups = [ + 'aXXo', 'BONE', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'dAV1nci', 'd3g', 'DNL', 'FaNGDiNG0', 'GalaxyTV', 'HD2DVD', 'HDTime', 'iHYTECH', 'ION10', + 'iPlanet', 'KiNGDOM', 'LAMA', 'MeGusta', 'mHD', 'mSD', 'NaNi', 'NhaNc3', 'nHD', 'nikt0', 'nSD', 'OFT', 'PRODJi', 'RARBG', 'Rifftrax', 'SANTi', 'SasukeducK', + 'ShAaNiG', 'Sicario', 'STUTTERSHIT', 'TGALAXY', 'TORRENTGALAXY', 'TSP', 'TSPxL', 'ViSION', 'VXT', 'WAF', 'WKS', 'x0r', 'YAWNiX', 'YIFY', 'YTS', 'PSA'] + pass + + async def get_cat_id(self, category_name): + category_id = { + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '0') + return category_id + + async def get_type_id(self, type): + type_id = { + 'DISC': '1', + 'REMUX': '2', + 'ENCODE': '3', + 'WEBDL': '4', + 'WEBRIP': '5', + 'HDTV': '6', + }.get(type, '0') + return type_id + + async def get_res_id(self, resolution): + resolution_id = { + '4320p': '1', + '2160p': '2', + '1440p': '3', + '1080p': '3', + '1080i': '4', + '720p': '5', + '576p': '6', + '576i': '7', + '480p': '8', + '480i': '9', + 'Other': '10' + }.get(resolution, '10') + return resolution_id + + async def upload(self, meta, disctype): + common = COMMON(config=self.config) + await common.edit_torrent(meta, self.tracker, self.source_flag) + cat_id = await self.get_cat_id(meta['category']) + type_id = await self.get_type_id(meta['type']) + resolution_id = await self.get_res_id(meta['resolution']) + await common.unit3d_edit_desc(meta, self.tracker, self.signature) + region_id = await common.unit3d_region_ids(meta.get('region')) + distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + anon = 0 + else: + anon = 1 + + if meta['bdinfo'] is not None: + mi_dump = None + bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + else: + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + bd_dump = None + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') + files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") + data = { + 'name': meta['name'], + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], 
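# Aside on the nfo handling just above (shared verbatim by FNP earlier and HHD here): it attaches the first *.nfo found in the upload's tmp directory as an extra multipart field. A self-contained sketch of that lookup with hypothetical paths; note the real code leaves the handle open until requests has sent the form:
import glob
import os

def find_nfo(base_dir, uuid):
    # Any .nfo dropped into tmp/<uuid>/ by earlier steps qualifies
    candidates = glob.glob(os.path.join(base_dir, "tmp", uuid, "*.nfo"))
    if not candidates:
        return None
    # Only the first match is uploaded, under a fixed field name
    return ("nfo_file.nfo", open(candidates[0], 'rb'), "text/plain")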
+ 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, + } + # Internal + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: + if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + data['internal'] = 1 + + if region_id != 0: + data['region_id'] = region_id + if distributor_id != 0: + data['distributor_id'] = distributor_id + if meta.get('category') == "TV": + data['season_number'] = meta.get('season_int', '0') + data['episode_number'] = meta.get('episode_int', '0') + headers = { + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' + } + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + } + + if meta['debug'] is False: + response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + try: + console.print(response.json()) + except Exception: + console.print("It may have uploaded, go check") + return + else: + console.print("[cyan]Request Data:") + console.print(data) + open_torrent.close() + + async def search_existing(self, meta, disctype): + dupes = [] + console.print("[yellow]Searching for existing torrents on HHD...") + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" + } + if meta.get('edition', "") != "": + params['name'] = params['name'] + f" {meta['edition']}" + try: + response = requests.get(url=self.search_url, params=params) + response = response.json() + for each in response['data']: + result = [each][0]['attributes']['name'] + # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() + # if difference >= 0.05: + dupes.append(result) + except Exception: + console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') + await asyncio.sleep(5) + + return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/HP.py b/src/trackers/HP.py index 250e9e851..d458c572d 100644 --- a/src/trackers/HP.py +++ b/src/trackers/HP.py @@ -2,12 +2,13 @@ # import discord import asyncio import requests -import distutils.util -import os import platform +from str2bool import str2bool +import bencodepy from src.trackers.COMMON import COMMON -from src.console import console +from src.console import console + class HP(): """ @@ -18,9 +19,6 @@ class HP(): Upload """ - ############################################################### - ######## EDIT ME ######## - ############################################################### def __init__(self, config): self.config = config self.tracker = 'HP' @@ -30,46 +28,42 @@ def __init__(self, config): self.signature = None self.banned_groups = [""] pass - + async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '0') return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', + 'DISC': '1', 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', + 'WEBDL': '4', + 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '3' - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', - '4320p': '1', - '2160p': '2', - '1440p' : '3', + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', '1080p': '3', - '1080i':'4', - '720p': '5', - '576p': '6', + '1080i': '4', + '720p': '5', + '576p': '6', '576i': '7', - '480p': '8', + '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### - - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta['category']) @@ -78,49 +72,49 @@ async def upload(self, meta): await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await 
common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : meta['name'], - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': meta['name'], + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - + if region_id != 0: data['region_id'] = region_id if distributor_id != 0: @@ -129,44 +123,40 @@ async def upload(self, meta): data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - - if meta['debug'] == False: + + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] - 
console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on HP...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta['category'] == 'TV': params['name'] = params['name'] + f"{meta.get('season', '')}{meta.get('episode', '')}" if meta.get('edition', "") != "": params['name'] = params['name'] + meta['edition'] - + try: response = requests.get(url=self.search_url, params=params) response = response.json() @@ -175,8 +165,47 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes \ No newline at end of file + return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index 0bd8c746d..8eabb871c 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -2,15 +2,17 @@ # import discord import asyncio import requests -from difflib import SequenceMatcher -import distutils.util +from str2bool import str2bool import os import re import platform +import bencodepy +import cli_ui from src.trackers.COMMON import COMMON from src.console import console + class HUNO(): """ Edit for Tracker: @@ -25,54 +27,48 @@ def __init__(self, config): self.source_flag = 'HUNO' self.search_url = 'https://hawke.uno/api/torrents/filter' self.upload_url = 'https://hawke.uno/api/torrents/upload' - self.signature = "\n[center][url=https://github.com/L4GSP1KE/Upload-Assistant]Created by HUNO's Upload Assistant[/url][/center]" - self.banned_groups = [""] + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload 
Assistant[/url][/center]" + self.banned_groups = ["4K4U, Bearfish, BiTOR, BONE, D3FiL3R, d3g, DTR, ELiTE, EVO, eztv, EzzRips, FGT, HashMiner, HETeam, HEVCBay, HiQVE, HR-DR, iFT, ION265, iVy, JATT, Joy, LAMA, m3th, MeGusta, MRN, Musafirboy, OEPlus, Pahe.in, PHOCiS, PSA, RARBG, RMTeam, ShieldBearer, SiQ, TBD, Telly, TSP, VXT, WKS, YAWNiX, YIFY, YTS"] pass - - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.unit3d_edit_desc(meta, self.tracker, self.signature) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta['category']) type_id = await self.get_type_id(meta) resolution_id = await self.get_res_id(meta['resolution']) - if meta['anon'] == 0 and bool(distutils.util.strtobool(self.config['TRACKERS']['HUNO'].get('anon', "False"))) == False: + if meta['anon'] == 0 and bool(str2bool(self.config['TRACKERS']['HUNO'].get('anon', "False"))) is False: anon = 0 else: anon = 1 - # adding logic to check if its an encode or webrip and not HEVC as only HEVC encodes and webrips are allowed - if meta['video_codec'] != "HEVC" and (meta['type'] == "ENCODE" or meta['type'] == "WEBRIP"): - console.print(f'[bold red]Only x265/HEVC encodes are allowed') - return - - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[HUNO]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[HUNO]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[HUNO]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : await self.get_name(meta), - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : await self.is_plex_friendly(meta), - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], + 'name': await self.get_name(meta), + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': await self.is_plex_friendly(meta), + 'sd': meta['sd'], + 'keywords': meta['keywords'], 'season_pack': meta.get('tv_pack', 0), # 'featured' : 0, # 'free' : 0, @@ -89,24 +85,24 @@ async def upload(self, meta): data['internal'] = 0 headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { 'api_token': tracker_config['api_key'].strip() } - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) # adding torrent link to comment of torrent file t_id = response.json()['data'].split(".")[1].split("/")[3] await common.add_tracker_torrent(meta, 
self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://hawke.uno/torrents/" + t_id) - except: + except Exception: console.print("It may have uploaded, go check") return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() @@ -118,12 +114,46 @@ def get_audio(self, meta): if dual: language = "DUAL" - elif 'mediainfo' in meta: - language = next(x for x in meta["mediainfo"]["media"]["track"] if x["@type"] == "Audio").get('Language_String', "English") - language = re.sub(r'\(.+\)', '', language) + else: + if meta['is_disc'] == "BDMV": + summary_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt" + with open(summary_path, 'r', encoding='utf-8') as f: + summary_text = f.read() + + audio_tracks = re.findall(r'Audio:\s*(.+)', summary_text) + if audio_tracks: + first_audio = audio_tracks[0] + language_match = re.search(r'([A-Za-z]+)\s*/', first_audio) + if language_match: + language = language_match.group(1).strip() + else: + print("DEBUG: No language found in the first audio track.") + + else: + media_info_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt" + with open(media_info_path, 'r', encoding='utf-8') as f: + media_info_text = f.read() + + audio_sections = re.findall(r'Audio\s+.*?(?=\n\n|Text|Menu|$)', media_info_text, re.DOTALL) + if audio_sections: + first_audio_section = audio_sections[0] + language_match = re.search(r'Language\s*:\s*(\w+.*)', first_audio_section) + + if language_match: + language = language_match.group(1).strip() + language = re.sub(r'\(.+\)', '', language) + else: + print("DEBUG: No Language match found in the first audio section.") + else: + print("DEBUG: No Audio sections found in MEDIAINFO.txt.") + + if language == "zxx": + language = "Silent" + elif not language: + language = cli_ui.ask_string('No audio language present, you must enter one:') return f'{codec} {channels} {language}' - + def get_basename(self, meta): path = next(iter(meta['filelist']), meta['path']) return os.path.basename(path) @@ -133,9 +163,10 @@ async def get_name(self, meta): # It was much easier to build the name from scratch than to alter the existing name. 
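# A minimal, self-contained sketch of the MEDIAINFO.txt parsing strategy that the
# reworked get_audio() above relies on: isolate the first "Audio" section of a
# MediaInfo text dump and pull out its "Language" field, falling back to a default
# when nothing matches. The sample text, function name and default value are
# illustrative assumptions, not part of the tracker code.
import re

def first_audio_language(media_info_text, default="English"):
    # Everything from the first "Audio" heading up to a blank line or the next
    # "Text"/"Menu" section, mirroring the regex used in the diff above.
    sections = re.findall(r'Audio\s+.*?(?=\n\n|Text|Menu|$)', media_info_text, re.DOTALL)
    if not sections:
        return default
    match = re.search(r'Language\s*:\s*(\w+.*)', sections[0])
    if not match:
        return default
    # Drop parenthesised qualifiers, e.g. "English (US)" -> "English".
    return re.sub(r'\(.+\)', '', match.group(1)).strip()

sample = "Audio\nFormat : DTS\nLanguage : English (US)\n\nText\nLanguage : German"
print(first_audio_language(sample))  # English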
basename = self.get_basename(meta) - type = meta.get('type', "") - title = meta.get('title',"") - alt_title = meta.get('aka', "") + hc = meta.get('hardcoded-subs') + type = meta.get('type', "").upper() + title = meta.get('title', "") + alt_title = meta.get('aka', "") # noqa F841 year = meta.get('year', "") resolution = meta.get('resolution', "") audio = self.get_audio(meta) @@ -154,7 +185,7 @@ async def get_name(self, meta): hdr = meta.get('hdr', "") if not hdr.strip(): hdr = "SDR" - distributor = meta.get('distributor', "") + distributor = meta.get('distributor', "") # noqa F841 video_codec = meta.get('video_codec', "") video_encode = meta.get('video_encode', "").replace(".", "") if 'x265' in basename: @@ -168,63 +199,63 @@ async def get_name(self, meta): search_year = year scale = "DS4K" if "DS4K" in basename.upper() else "RM4K" if "RM4K" in basename.upper() else "" - #YAY NAMING FUN - if meta['category'] == "MOVIE": #MOVIE SPECIFIC - if type == "DISC": #Disk + # YAY NAMING FUN + if meta['category'] == "MOVIE": # MOVIE SPECIFIC + if type == "DISC": # Disk if meta['is_disc'] == 'BDMV': name = f"{title} ({year}) {three_d} {edition} ({resolution} {region} {uhd} {source} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" elif meta['is_disc'] == 'DVD': name = f"{title} ({year}) {edition} ({resolution} {dvd_size} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" elif meta['is_disc'] == 'HDDVD': name = f"{title} ({year}) {edition} ({resolution} {source} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" - elif type == "REMUX" and source == "BluRay": #BluRay Remux + elif type == "REMUX" and source == "BluRay": # BluRay Remux name = f"{title} ({year}) {three_d} {edition} ({resolution} {uhd} {source} {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" - elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD"): #DVD Remux - name = f"{title} ({year}) {edition} (DVD {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" - elif type == "ENCODE": #Encode + elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD"): # DVD Remux + name = f"{title} ({year}) {edition} ({resolution} DVD {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" + elif type == "ENCODE": # Encode name = f"{title} ({year}) {edition} ({resolution} {scale} {uhd} {source} {hybrid} {video_encode} {hdr} {audio} {tag}) {repack}" - elif type in ("WEBDL", "WEBRIP"): #WEB + elif type in ("WEBDL", "WEBRIP"): # WEB name = f"{title} ({year}) {edition} ({resolution} {scale} {uhd} {service} WEB-DL {hybrid} {video_encode} {hdr} {audio} {tag}) {repack}" - elif type == "HDTV": #HDTV + elif type == "HDTV": # HDTV name = f"{title} ({year}) {edition} ({resolution} HDTV {hybrid} {video_encode} {audio} {tag}) {repack}" - elif meta['category'] == "TV": #TV SPECIFIC - if type == "DISC": #Disk + elif meta['category'] == "TV": # TV SPECIFIC + if type == "DISC": # Disk if meta['is_disc'] == 'BDMV': name = f"{title} ({search_year}) {season}{episode} {three_d} {edition} ({resolution} {region} {uhd} {source} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" if meta['is_disc'] == 'DVD': name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} {dvd_size} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" elif meta['is_disc'] == 'HDDVD': name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} {source} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" - elif type == "REMUX" and source == "BluRay": #BluRay Remux - name = f"{title} ({search_year}) {season}{episode} {three_d} 
{edition} ({resolution} {uhd} {source} {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" #SOURCE - elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD"): #DVD Remux - name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} DVD {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" #SOURCE - elif type == "ENCODE": #Encode - name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} {scale} {uhd} {source} {hybrid} {video_encode} {hdr} {audio} {tag}) {repack}" #SOURCE - elif type in ("WEBDL", "WEBRIP"): #WEB + elif type == "REMUX" and source == "BluRay": # BluRay Remux + name = f"{title} ({search_year}) {season}{episode} {three_d} {edition} ({resolution} {uhd} {source} {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" # SOURCE + elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD"): # DVD Remux + name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} DVD {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" # SOURCE + elif type == "ENCODE": # Encode + name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} {scale} {uhd} {source} {hybrid} {video_encode} {hdr} {audio} {tag}) {repack}" # SOURCE + elif type in ("WEBDL", "WEBRIP"): # WEB name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} {scale} {uhd} {service} WEB-DL {hybrid} {video_encode} {hdr} {audio} {tag}) {repack}" - elif type == "HDTV": #HDTV + elif type == "HDTV": # HDTV name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} HDTV {hybrid} {video_encode} {audio} {tag}) {repack}" + if hc: + name = re.sub(r'((\([0-9]{4}\)))', r'\1 Ensubbed', name) return ' '.join(name.split()).replace(": ", " - ") - async def get_cat_id(self, category_name): category_id = { 'MOVIE': '1', 'TV': '2', - }.get(category_name, '0') + }.get(category_name, '0') return category_id - async def get_type_id(self, meta): - basename = self.get_basename(meta) - type = meta['type'] + type = meta.get('type').upper() + video_encode = meta.get('video_encode') if type == 'REMUX': return '2' elif type in ('WEBDL', 'WEBRIP'): - return '15' if 'x265' in basename else '3' + return '15' if 'x265' in video_encode else '3' elif type in ('ENCODE', 'HDTV'): return '15' elif type == 'DISC': @@ -232,43 +263,44 @@ async def get_type_id(self, meta): else: return '0' - async def get_res_id(self, resolution): resolution_id = { - 'Other':'10', + 'Other': '10', '4320p': '1', '2160p': '2', '1080p': '3', - '1080i':'4', + '1080i': '4', '720p': '5', '576p': '6', '576i': '7', '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id - async def is_plex_friendly(self, meta): lossy_audio_codecs = ["AAC", "DD", "DD+", "OPUS"] - if any(l in meta["audio"] for l in lossy_audio_codecs): + if any(l in meta["audio"] for l in lossy_audio_codecs): # noqa E741 return 1 return 0 - - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): + if meta['video_codec'] != "HEVC" and (meta['type'] == "ENCODE" or meta['type'] == "WEBRIP" or meta['type'] == "DVDRIP"): + console.print('[bold red]Only x265/HEVC encodes are allowed') + meta['skipping'] = "HUNO" + return dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on HUNO...") params = { - 'api_token' : self.config['TRACKERS']['HUNO']['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 
'types[]' : await self.get_type_id(meta), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS']['HUNO']['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta['category'] == 'TV': params['name'] = f"{meta.get('season', '')}{meta.get('episode', '')}" @@ -282,8 +314,47 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/JPTV.py b/src/trackers/JPTV.py index 354b1be1a..4b1aae56a 100644 --- a/src/trackers/JPTV.py +++ b/src/trackers/JPTV.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests -import distutils.util -import os import platform +from str2bool import str2bool +import bencodepy from src.trackers.COMMON import COMMON from src.console import console @@ -19,12 +19,6 @@ class JPTV(): Upload """ - ############################################################### - ######## EDIT ME ######## - ############################################################### - - # ALSO EDIT CLASS NAME ABOVE - def __init__(self, config): self.config = config self.tracker = 'JPTV' @@ -34,55 +28,47 @@ def __init__(self, config): self.signature = None self.banned_groups = [""] pass - + async def get_cat_id(self, meta): category_id = { - 'MOVIE': '1', - 'TV': '2', + 'MOVIE': '1', + 'TV': '2', }.get(meta['category'], '0') if meta['anime']: category_id = { - 'MOVIE': '7', - 'TV': '9', + 'MOVIE': '7', + 'TV': '9', }.get(meta['category'], '0') return category_id async def get_type_id(self, type): type_id = { - 'DISC': '16', + 'DISC': '16', 'REMUX': '18', - 'WEBDL': '4', - 'WEBRIP': '5', + 'WEBDL': '4', + 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '3' - }.get(type, '0') - # DVDISO 17 - # DVDRIP 1 - # TS (Raw) 14 - # Re-encode 15 + }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', - '4320p': '1', - '2160p': '2', - '1440p' : '3', + '8640p': '10', 
+ '4320p': '1', + '2160p': '2', + '1440p': '3', '1080p': '3', - '1080i':'4', - '720p': '5', - '576p': '6', + '1080i': '4', + '720p': '5', + '576p': '6', '576i': '7', - '480p': '8', + '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### - - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta) @@ -92,50 +78,50 @@ async def upload(self, meta): region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) jptv_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = "" for each in meta['discs']: mi_dump = mi_dump + each['summary'].strip() + "\n\n" else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() # bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : jptv_name, - 'description' : desc, - 'mediainfo' : mi_dump, - # 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': jptv_name, + 'description': desc, + 'mediainfo': mi_dump, + # 'bdinfo' : bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - + if region_id != 0: data['region_id'] = region_id if distributor_id != 0: @@ -144,38 +130,34 @@ async def upload(self, meta): data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') headers = { - 'User-Agent': f'Upload Assistant/2.1 
({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - - if meta['debug'] == False: + + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on JPTV...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdb' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdb': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta.get('edition', "") != "": params['name'] = params['name'] + f" {meta['edition']}" @@ -190,21 +172,20 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') await asyncio.sleep(5) return dupes - async def edit_name(self, meta): name = meta.get('name') aka = meta.get('aka') original_title = meta.get('original_title') - year = str(meta.get('year')) + year = str(meta.get('year')) # noqa F841 audio = meta.get('audio') - source = meta.get('source') - is_disc = meta.get('is_disc') + source = meta.get('source') # noqa F841 + is_disc = meta.get('is_disc') # noqa F841 if aka != '': # ugly fix to remove the extra space in the title aka = aka + ' ' @@ -217,4 +198,43 @@ async def edit_name(self, meta): name = name.replace(audio.strip().replace(" ", " "), audio.replace(" ", "")) name = name.replace("DD+ ", "DD+") - return name \ No newline at end of file + return name + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/LCD.py b/src/trackers/LCD.py index 5c3f14309..463fe2ea6 100644 --- a/src/trackers/LCD.py +++ b/src/trackers/LCD.py @@ -2,15 +2,16 @@ # import discord import asyncio import requests -import distutils.util -import os import platform +from str2bool import str2bool +import bencodepy +import os +import glob from src.trackers.COMMON import COMMON from src.console import console - class LCD(): """ Edit for Tracker: @@ -25,12 +26,12 @@ def __init__(self, config): self.source_flag = 'LOCADORA' self.search_url = 'https://locadora.cc/api/torrents/filter' self.torrent_url = 'https://locadora.cc/api/torrents/' - self.upload_url = 'https://locadora.cc/api/torrents/upload' - self.signature = f"\n[center]Criado usando L4G's Upload Assistant[/center]" + self.upload_url = 'https://locadora.cc/api/torrents/upload' + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" self.banned_groups = [""] pass - - async def upload(self, meta): + + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.signature) @@ -40,46 +41,55 @@ async def upload(self, meta): region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if 
meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[LCD]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[LCD]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[LCD]{meta['clean_name']}.torrent", 'rb') files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") data = { - 'name' : name, - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 @@ -91,79 +101,72 @@ async def upload(self, meta): data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - - if meta['debug'] == False: + + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - async def get_cat_id(self, category_name, edition, meta): category_id = { - 'MOVIE': '1', + 'MOVIE': '1', 'TV': '2', 'ANIMES': '6' - }.get(category_name, '0') 
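# A hedged sketch of the optional-NFO pattern that LCD's upload() above introduces
# (LST and LT below gain the same block): glob the job's tmp directory for an .nfo
# and, when one exists, attach it to the multipart payload alongside the .torrent.
# The helper name and the example directory are assumptions for illustration; the
# 'nfo' field name and "nfo_file.nfo" placeholder mirror the diff.
import glob
import os

def attach_nfo(files, base_dir, uuid):
    # Only the first matching .nfo is sent.
    nfo_files = glob.glob(os.path.join(base_dir, "tmp", uuid, "*.nfo"))
    if nfo_files:
        files['nfo'] = ("nfo_file.nfo", open(nfo_files[0], 'rb'), "text/plain")
    return files

files = attach_nfo({}, "/tmp/ua-example", "job-uuid")  # adds 'nfo' only when a file exists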
- if meta['anime'] == True and category_id == '2': + }.get(category_name, '0') + if meta['anime'] is True and category_id == '2': category_id = '6' return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', + 'DISC': '1', 'REMUX': '2', 'ENCODE': '3', - 'WEBDL': '4', - 'WEBRIP': '5', + 'WEBDL': '4', + 'WEBRIP': '5', 'HDTV': '6' - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { -# '8640p':'10', - '4320p': '1', - '2160p': '2', -# '1440p' : '2', + # '8640p':'10', + '4320p': '1', + '2160p': '2', + # '1440p' : '2', '1080p': '3', - '1080i':'34', - '720p': '5', - '576p': '6', + '1080i': '34', + '720p': '5', + '576p': '6', '576i': '7', - '480p': '8', + '480p': '8', '480i': '9', 'Other': '10', - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id - - - - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Buscando por duplicatas no tracker...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category'], meta.get('edition', ''), meta), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category'], meta.get('edition', ''), meta), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta['category'] == 'TV': params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" @@ -177,15 +180,53 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Não foi possível buscar no tracker torrents duplicados. 
O tracker está offline ou sua api está incorreta') await asyncio.sleep(5) return dupes async def edit_name(self, meta): - - - name = meta['uuid'].replace('.mkv','').replace('.mp4','').replace(".", " ").replace("DDP2 0","DDP2.0").replace("DDP5 1","DDP5.1").replace("H 264","H.264").replace("H 265","H.264").replace("DD+7 1","DD+7.1").replace("AAC2 0","AAC2.0").replace('DD5 1','DD5.1').replace('DD2 0','DD2.0').replace('TrueHD 7 1','TrueHD 7.1').replace('DTS-HD MA 7 1','DTS-HD MA 7.1').replace('-C A A','-C.A.A') - + + name = meta['uuid'].replace('.mkv', '').replace('.mp4', '').replace(".", " ").replace("DDP2 0", "DDP2.0").replace("DDP5 1", "DDP5.1").replace("H 264", "H.264").replace("H 265", "H.265").replace("DD+7 1", "DD+7.1").replace("AAC2 0", "AAC2.0").replace('DD5 1', 'DD5.1').replace('DD2 0', 'DD2.0').replace('TrueHD 7 1', 'TrueHD 7.1').replace('DTS-HD MA 7 1', 'DTS-HD MA 7.1').replace('-C A A', '-C.A.A') + return name + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/LST.py b/src/trackers/LST.py index 21368bd39..503ded332 100644 --- a/src/trackers/LST.py +++ b/src/trackers/LST.py @@ -2,9 +2,11 @@ # import discord import asyncio import requests -import distutils.util -import os import platform +from str2bool import str2bool +import bencodepy +import os +import glob from src.trackers.COMMON import COMMON from src.console import console @@ -19,28 +21,28 @@ class LST(): Upload """ - ############################################################### - ######## EDIT ME ######## - ############################################################### - - # ALSO EDIT CLASS NAME ABOVE - def __init__(self, config): self.config = config self.tracker = 'LST' self.source_flag = 'LST.GG' self.upload_url = 'https://lst.gg/api/torrents/upload' self.search_url = 'https://lst.gg/api/torrents/filter' - self.signature = f"\n[center]Created by L4G's Upload Assistant[/center]" - self.banned_groups = [""] + self.torrent_url = 'https://lst.gg/api/torrents/' + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" + self.banned_groups = ['aXXo', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'DNL', 'FaNGDiNG0', 'HD2DVD', 'HDTime', 'ION10', 'iPlanet', 'KiNGDOM', + 'mHD', 'mSD', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'OFT', 'PRODJi', 'SANTi', 'STUTTERSHIT', 'ViSION', 'VXT', 'WAF', + 'x0r', 'YIFY', 'Sicario', 'RARBG', 'MeGusta', 
'TSP', 'TSPxL', 'GalaxyTV', 'TGALAXY', 'TORRENTGALAXY', 'NaNi', + 'BONE', 'dAV1nci', 'iHYTECH', 'LAMA', 'Rifftrax', 'SasukeducK', 'ShAaNiG', 'WKS', 'YTS', 'HDT', 'FGT', + ['EVO', 'Raw Content Only'], + ] pass - + async def get_cat_id(self, category_name, keywords, service): category_id = { - 'MOVIE': '1', + 'MOVIE': '1', 'TV': '2', - 'Anime': '6', - }.get(category_name, '0') + 'Anime': '6', + }.get(category_name, '0') if category_name == 'TV' and 'anime' in keywords: category_id = '6' elif category_name == 'TV' and 'hentai' in service: @@ -49,93 +51,103 @@ async def get_cat_id(self, category_name, keywords, service): async def get_type_id(self, type): type_id = { - 'DISC': '1', + 'DISC': '1', 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', + 'WEBDL': '4', + 'WEBRIP': '5', 'HDTV': '6', - 'ENCODE': '3' - }.get(type, '0') + 'ENCODE': '3', + 'DVDRIP': '3' + }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', - '4320p': '1', - '2160p': '2', - '1440p' : '3', + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', '1080p': '3', - '1080i':'4', - '720p': '5', - '576p': '6', + '1080i': '4', + '720p': '5', + '576p': '6', '576i': '7', - '480p': '8', + '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### - - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta['category'], meta.get('keywords', ''), meta.get('service', '')) type_id = await self.get_type_id(meta['type']) resolution_id = await self.get_res_id(meta['resolution']) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) + modq = await self.get_flag(meta, 'modq') + draft = await self.get_flag(meta, 'draft') + name = await self.edit_name(meta) + await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() if meta.get('service') == "hentai": - desc = "[center]" + "[img]" + str(meta['poster']) + "[/img][/center]" + f"\n[center]" + "https://www.themoviedb.org/tv/" + str(meta['tmdb']) + f"\nhttps://myanimelist.net/anime/" + str(meta['mal']) + "[/center]" + desc - + desc = "[center]" + "[img]" + str(meta['poster']) + "[/img][/center]" + "\n[center]" + "https://www.themoviedb.org/tv/" + str(meta['tmdb']) + "\nhttps://myanimelist.net/anime/" + 
str(meta['mal']) + "[/center]" + desc + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") data = { - 'name' : meta['name'], - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, + 'mod_queue_opt_in': modq, + 'draft_queue_opt_in': draft, } - - + # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - + if region_id != 0: data['region_id'] = region_id if distributor_id != 0: @@ -144,38 +156,57 @@ async def upload(self, meta): data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - - if meta['debug'] == False: + + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() + async def edit_name(self, meta): + lst_name = meta['name'] + resolution = meta.get('resolution') + video_encode = meta.get('video_encode') + name_type = meta.get('type', "") + + if name_type == "DVDRIP": + if meta.get('category') == "MOVIE": + lst_name = lst_name.replace(f"{meta['source']}{meta['video_encode']}", f"{resolution}", 1) + lst_name = lst_name.replace((meta['audio']), f"{meta['audio']}{video_encode}", 1) + else: + lst_name = lst_name.replace(f"{meta['source']}", f"{resolution}", 1) + lst_name = lst_name.replace(f"{meta['video_codec']}", f"{meta['audio']} 
{meta['video_codec']}", 1) + + return lst_name - + async def get_flag(self, meta, flag_name): + config_flag = self.config['TRACKERS'][self.tracker].get(flag_name) + if config_flag is not None: + return 1 if config_flag else 0 + return 1 if meta.get(flag_name, False) else 0 - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on LST...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category'], meta.get('keywords', ''), meta.get('service', '')), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category'], meta.get('keywords', ''), meta.get('service', '')), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta['category'] == 'TV': params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" @@ -189,8 +220,47 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/LT.py b/src/trackers/LT.py index 2e06a0df2..0a698c165 100644 --- a/src/trackers/LT.py +++ b/src/trackers/LT.py @@ -2,9 +2,11 @@ # import discord import asyncio import requests -import distutils.util -import os import platform +from str2bool import str2bool +import bencodepy +import os +import glob from src.trackers.COMMON import COMMON from src.console import console @@ -19,158 +21,187 @@ class LT(): Upload """ - ############################################################### - ######## EDIT ME ######## - ############################################################### - - # ALSO EDIT CLASS NAME ABOVE - def __init__(self, config): self.config = 
config self.tracker = 'LT' self.source_flag = 'Lat-Team "Poder Latino"' self.upload_url = 'https://lat-team.com/api/torrents/upload' self.search_url = 'https://lat-team.com/api/torrents/filter' - self.signature = '' + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" self.banned_groups = [""] pass - - async def get_cat_id(self, category_name): + + async def get_cat_id(self, category_name, meta): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') + 'MOVIE': '1', + 'TV': '2', + 'ANIME': '5', + 'TELENOVELAS': '8', + 'Doramas & Turcas': '20', + }.get(category_name, '0') + # if it is anime + if meta['anime'] is True and category_id == '2': + category_id = '5' + # elif it is a telenovela + elif category_id == '2' and ("telenovela" in meta['keywords'] or "telenovela" in meta['overview']): + category_id = '8' + # if it is Turcas or Doramas + # elif meta["original_language"] in ['ja', 'ko', 'tr'] and category_id == '2' and 'Drama' in meta['genres'] : + # category_id = '20' return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', + 'DISC': '1', 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', + 'WEBDL': '4', + 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '3' - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', - '4320p': '1', - '2160p': '2', - '1440p' : '3', + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', '1080p': '3', - '1080i':'4', - '720p': '5', - '576p': '6', + '1080i': '4', + '720p': '5', + '576p': '6', '576i': '7', - '480p': '8', + '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id async def edit_name(self, meta): - lt_name = meta['name'] - lt_name = lt_name.replace('Dubbed', '').replace('Dual-Audio', '') - return lt_name + lt_name = meta['name'].replace('Dual-Audio', '').replace('Dubbed', '').replace(meta['aka'], '').replace('  ', ' ').strip() + if meta['type'] != 'DISC': # DISCs don't have mediainfo + # Check if it is HYBRID (copied from BLU.py) + if 'hybrid' in meta.get('uuid').lower(): + if "repack" in meta.get('uuid').lower(): + lt_name = lt_name.replace('REPACK', 'Hybrid REPACK') + else: + lt_name = lt_name.replace(meta['resolution'], f"Hybrid {meta['resolution']}") + # Check if the original language is "es"; if so, replace the title with the AKA when available + if meta.get('original_language') == 'es' and meta.get('aka') != "": + lt_name = lt_name.replace(meta.get('title'), meta.get('aka').replace('AKA', '')).strip() + # Check if Spanish audio exists + # Get all 'es-419' or 'es' audio tracks + audios = [ + audio for audio in meta['mediainfo']['media']['track'][2:] + if audio.get('@type') == 'Audio' + and audio.get('Language') in {'es-419', 'es'} + and "commentary" not in str(audio.get('Title', '')).lower() + ] + if len(audios) > 0: # If there is at least one Spanish audio track + lt_name = lt_name + # if no Spanish audio exists, add "[SUBS]" + elif not meta.get('tag'): + lt_name = lt_name + " [SUBS]" + else: + lt_name = lt_name.replace(meta['tag'], f" [SUBS]{meta['tag']}") - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### + return lt_name - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await 
self.get_cat_id(meta['category']) + cat_id = await self.get_cat_id(meta['category'], meta) type_id = await self.get_type_id(meta['type']) resolution_id = await self.get_res_id(meta['resolution']) await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) + # region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) lt_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") data = { - 'name' : lt_name, - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': lt_name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - - if region_id != 0: - data['region_id'] = region_id + if distributor_id != 0: data['distributor_id'] = distributor_id if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') + data['season_number'] = int(meta.get('season_int', '0')) + data['episode_number'] = 
int(meta.get('episode_int', '0')) headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - - if meta['debug'] == False: + + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on LT...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category'], meta), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta['category'] == 'TV': params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" @@ -184,8 +215,47 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') await asyncio.sleep(5) return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index d4de506d1..fc5aa09d3 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -8,9 +8,14 @@ import cli_ui import pickle import re -import distutils.util from pathlib import Path +from str2bool import str2bool from src.trackers.COMMON import COMMON +from datetime import datetime +import glob +import multiprocessing +from urllib.parse import urlparse + class MTV(): """ @@ -29,74 +34,141 @@ def __init__(self, config): self.forum_link = 'https://www.morethantv.me/wiki.php?action=article&id=73' self.search_url = 'https://www.morethantv.me/api/torznab' self.banned_groups = [ - '3LTON', 'mRS', 'CM8', 'BRrip', 'Leffe', 'aXXo', 'FRDS', 'XS', 'KiNGDOM', 'WAF', 'nHD', - 'h65', 'CrEwSaDe', 'TM', 'ViSiON', 'x0r', 'PandaRG', 'HD2DVD', 'iPlanet', 'JIVE', 'ELiTE', - 'nikt0', 'STUTTERSHIT', 'ION10', 'RARBG', 'FaNGDiNG0', 'YIFY', 'FUM', 'ViSION', 'NhaNc3', - 'nSD', 'PRODJi', 'DNL', 'DeadFish', 'HDTime', 'mHD', 'TERMiNAL', - '[Oj]', 'QxR', 'ZmN', 'RDN', 'mSD', 'LOAD', 'BDP', 'SANTi', 'ZKBL', ['EVO', 'WEB-DL Only'] + 'aXXo', 'BRrip', 'CM8', 'CrEwSaDe', 'DNL', 'FaNGDiNG0', 'FRDS', 'HD2DVD', 'HDTime', 'iPlanet', + 'KiNGDOM', 'Leffe', 'mHD', 'mSD', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'PRODJi', 'RDN', 'SANTi', + 'STUTTERSHIT', 'TERMiNAL', 'ViSION', 'WAF', 'x0r', 'YIFY', ['EVO', 'WEB-DL Only'] ] pass - async def upload(self, meta): + def match_host(self, hostname, approved_hosts): + for approved_host in approved_hosts: + if hostname == approved_host or hostname.endswith(f".{approved_host}"): + return approved_host + return hostname + + async def upload(self, meta, disctype): common = COMMON(config=self.config) cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/MTV.pkl") + await self.upload_with_retry(meta, cookiefile, common) + + async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + if not os.path.exists(torrent_file_path): + torrent_filename = "BASE" + torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent" + torrent = Torrent.read(torrent_path) + + if torrent.piece_size > 8388608: + tracker_config = self.config['TRACKERS'].get(self.tracker, {}) + 
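# The decision made just below, rendered as a standalone sketch: MTV only accepts
# piece sizes up to 8 MiB, so an oversized .torrent is either re-hashed with 8 MiB
# pieces or, when the user enables skip_if_rehash, the upload is abandoned. This
# assumes piece sizes are in bytes (as torf reports them) and that the config value
# may arrive as a string or a bool; the function name is illustrative.

def should_rehash(piece_size_bytes, tracker_config):
    # True -> regenerate the torrent with 8 MiB pieces; False -> keep it (already
    # small enough) or skip the upload entirely (skip_if_rehash enabled).
    if piece_size_bytes <= 8 * 1024 * 1024:
        return False
    return str(tracker_config.get('skip_if_rehash', 'false')).lower() == 'false'

print(should_rehash(16 * 1024 * 1024, {}))                        # True
print(should_rehash(16 * 1024 * 1024, {'skip_if_rehash': True}))  # False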
if str(tracker_config.get('skip_if_rehash', 'false')).lower() == "false": + console.print("[red]Piece size is OVER 8M and does not work on MTV. Generating a new .torrent") + + meta['max_piece_size'] = '8' + if meta['is_disc']: + include = [] + exclude = [] + else: + include = ["*.mkv", "*.mp4", "*.ts"] + exclude = ["*.*", "*sample.mkv", "!sample*.*"] + + from src.prep import Prep + prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) + new_torrent = prep.CustomTorrent( + meta=meta, + path=Path(meta['path']), + trackers=["https://fake.tracker"], + source="L4G", + private=True, + exclude_globs=exclude, # Ensure this is always a list + include_globs=include, # Ensure this is always a list + creation_date=datetime.now(), + comment="Created by L4G's Upload Assistant", + created_by="L4G's Upload Assistant" + ) + + new_torrent.piece_size = 8 * 1024 * 1024 + new_torrent.validate_piece_size() + new_torrent.generate(callback=prep.torf_cb, interval=5) + new_torrent.write(f"{meta['base_dir']}/tmp/{meta['uuid']}/MTV.torrent", overwrite=True) + + torrent_filename = "MTV" + + else: + console.print("[red]Piece size is OVER 8M and skip_if_rehash enabled. Skipping upload.") + return + await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename) + + approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb'] + url_host_mapping = { + "ibb.co": "imgbb", + "ptpimg.me": "ptpimg", + "imgbox.com": "imgbox", + } + + for image in meta['image_list']: + raw_url = image['raw_url'] + parsed_url = urlparse(raw_url) + hostname = parsed_url.netloc + mapped_host = self.match_host(hostname, url_host_mapping.keys()) + mapped_host = url_host_mapping.get(mapped_host, mapped_host) + if meta['debug']: + if mapped_host in approved_image_hosts: + console.print(f"[green]URL '{raw_url}' is correctly matched to approved host '{mapped_host}'.") + else: + console.print(f"[red]URL '{raw_url}' is not recognized as part of an approved host.") + + if all( + url_host_mapping.get( + self.match_host(urlparse(image['raw_url']).netloc, url_host_mapping.keys()), + self.match_host(urlparse(image['raw_url']).netloc, url_host_mapping.keys()), + ) in approved_image_hosts + for image in meta['image_list'] + ): + console.print("[green]Images are already hosted on an approved image host. Skipping re-upload.") + image_list = meta['image_list'] + + else: + images_reuploaded = False + while img_host_index <= len(approved_image_hosts): + image_list, retry_mode, images_reuploaded = await self.handle_image_upload(meta, img_host_index, approved_image_hosts) + + if retry_mode: + console.print(f"[yellow]Switching to the next image host. Current index: {img_host_index}") + img_host_index += 1 + continue + + new_images_key = 'mtv_images_key' + if image_list is not None: + image_list = meta[new_images_key] + break + + if image_list is None: + console.print("[red]All image hosts failed. Please check your configuration.") + return - torrent_filename = "BASE" - if not Torrent.read(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent").piece_size <= 8388608: - console.print("[red]Piece size is OVER 8M and does not work on MTV. 
Generating a new .torrent") - from src.prep import Prep - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) - prep.create_torrent(meta, Path(meta['path']), "MTV", piece_size_max=8) - torrent_filename = "MTV" - # Hash to f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename) - - # getting category HD Episode, HD Movies, SD Season, HD Season, SD Episode, SD Movies cat_id = await self.get_cat_id(meta) - # res 480 720 1080 1440 2160 4k 6k Other resolution_id = await self.get_res_id(meta['resolution']) - # getting source HDTV SDTV TV Rip DVD DVD Rip VHS BluRay BDRip WebDL WebRip Mixed Unknown source_id = await self.get_source_id(meta) - # get Origin Internal Scene P2P User Mixed Other. P2P will be selected if not scene origin_id = await self.get_origin_id(meta) - # getting tags des_tags = await self.get_tags(meta) - # check for approved imghosts - approved_imghosts = ['ptpimg', 'imgbox', 'empornium', 'ibb'] - if not all(any(x in image['raw_url'] for x in approved_imghosts) for image in meta['image_list']): - console.print("[red]Unsupported image host detected, please use one of the approved imagehosts") - return - # getting description await self.edit_desc(meta) - # getting groups des so things like imdb link, tmdb link etc.. group_desc = await self.edit_group_desc(meta) - #poster is optional so no longer getting it as its a pain with having to use supported image provider - # poster = await self.get_poster(meta) - - #edit name to match MTV standards mtv_name = await self.edit_name(meta) - # anon - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: - anon = 0 - else: - anon = 1 + anon = 1 if meta['anon'] != 0 or bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) else 0 - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" + desc = open(desc_path, 'r', encoding='utf-8').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') as f: + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + with open(torrent_file_path, 'rb') as f: tfile = f.read() - f.close() - ## todo need to check the torrent and make sure its not more than 8MB - - # need to pass the name of the file along with the torrent files = { 'file_input': (f"{meta['name']}.torrent", tfile) } data = { - # 'image': poster, 'image': '', 'title': mtv_name, 'category': cat_id, @@ -116,24 +188,7 @@ async def upload(self, meta): 'submit': 'true', } - # cookie = {'sid': self.config['TRACKERS'][self.tracker].get('sid'), 'cid': self.config['TRACKERS'][self.tracker].get('cid')} - - param = { - } - - if meta['imdb_id'] not in ("0", "", None): - param['imdbID'] = "tt" + meta['imdb_id'] - if meta['tmdb'] != 0: - param['tmdbID'] = meta['tmdb'] - if meta['tvdb_id'] != 0: - param['thetvdbID'] = meta['tvdb_id'] - if meta['tvmaze_id'] != 0: - param['tvmazeID'] = meta['tvmaze_id'] - # if meta['mal_id'] != 0: - # param['malid'] = meta['mal_id'] - - - if meta['debug'] == False: + if not meta['debug']: with requests.Session() as session: with open(cookiefile, 'rb') as cf: session.cookies.update(pickle.load(cf)) @@ -143,53 +198,182 @@ async def upload(self, 
meta): console.print(response.url) else: if "authkey.php" in response.url: - console.print(f"[red]No DL link in response, So unable to download torrent but It may have uploaded, go check") - print(response.content) - console.print(f"[red]Got response code = {response.status_code}") - print(data) + console.print("[red]No DL link in response, It may have uploaded, check manually.") else: - console.print(f"[red]Upload Failed, Doesnt look like you are logged in") - print(response.content) - print(data) - except: - console.print(f"[red]It may have uploaded, go check") - console.print(data) + console.print("[red]Upload Failed. Either you are not logged in......") + console.print("[red]You are hitting this site bug: https://www.morethantv.me/forum/thread/3338?") + console.print("[red]Or you hit some other error with the torrent upload.") + except Exception: + console.print("[red]It may have uploaded, check manually.") print(traceback.print_exc()) else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) return + async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts=None, file=None): + if approved_image_hosts is None: + approved_image_hosts = ['ptpimg', 'imgbox', 'imgbb'] + + url_host_mapping = { + "ibb.co": "imgbb", + "ptpimg.me": "ptpimg", + "imgbox.com": "imgbox", + } + + retry_mode = False + images_reuploaded = False + new_images_key = 'mtv_images_key' + discs = meta.get('discs', []) # noqa F841 + filelist = meta.get('video', []) + filename = meta['filename'] + path = meta['path'] + + if isinstance(filelist, str): + filelist = [filelist] + + multi_screens = int(self.config['DEFAULT'].get('screens', 6)) + base_dir = meta['base_dir'] + folder_id = meta['uuid'] + from src.prep import Prep + prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) + meta[new_images_key] = [] + + screenshots_dir = os.path.join(base_dir, 'tmp', folder_id) + all_screenshots = [] + + for i, file in enumerate(filelist): + filename_pattern = f"{filename}*.png" + + if meta['is_disc'] == "DVD": + existing_screens = glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][0]['name']}-*.png") + else: + existing_screens = glob.glob(os.path.join(screenshots_dir, filename_pattern)) + + if len(existing_screens) < multi_screens: + if meta.get('debug'): + console.print("[yellow]The image host of existing images is not supported.") + console.print(f"[yellow]Insufficient screenshots found: generating {multi_screens} screenshots.") + + if meta['is_disc'] == "BDMV": + s = multiprocessing.Process( + target=prep.disc_screenshots, + args=(f"FILE_{img_host_index}", meta['bdinfo'], folder_id, base_dir, + meta.get('vapoursynth', False), [], meta.get('ffdebug', False), img_host_index) + ) + elif meta['is_disc'] == "DVD": + s = multiprocessing.Process( + target=prep.dvd_screenshots, + args=(meta, 0, None, True) + ) + else: + s = multiprocessing.Process( + target=prep.screenshots, + args=(path, f"{filename}", meta['uuid'], base_dir, + meta, multi_screens + 1, True, None) + ) + + s.start() + while s.is_alive(): + await asyncio.sleep(1) + + if meta['is_disc'] == "DVD": + existing_screens = glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][0]['name']}-*.png") + else: + existing_screens = glob.glob(os.path.join(screenshots_dir, filename_pattern)) + + all_screenshots.extend(existing_screens) + + if not all_screenshots: + console.print("[red]No screenshots were generated or found. 
Please check the screenshot generation process.") + return [], True, images_reuploaded + + uploaded_images = [] + while True: + current_img_host_key = f'img_host_{img_host_index}' + current_img_host = self.config.get('DEFAULT', {}).get(current_img_host_key) + + if not current_img_host: + console.print("[red]No more image hosts left to try.") + raise Exception("No valid image host found in the config.") + + if current_img_host not in approved_image_hosts: + console.print(f"[red]Your preferred image host '{current_img_host}' is not supported at MTV, trying next host.") + retry_mode = True + images_reuploaded = True + img_host_index += 1 + continue + else: + meta['imghost'] = current_img_host + console.print(f"[green]Uploading to approved host '{current_img_host}'.") + break + + uploaded_images, _ = prep.upload_screens( + meta, multi_screens, img_host_index, 0, multi_screens, + all_screenshots, {new_images_key: meta[new_images_key]}, retry_mode + ) + + if uploaded_images: + meta[new_images_key] = uploaded_images + + if meta['debug']: + for image in uploaded_images: + console.print(f"[debug] Response in upload_image_task: {image['img_url']}, {image['raw_url']}, {image['web_url']}") + + for image in meta.get(new_images_key, []): + raw_url = image['raw_url'] + parsed_url = urlparse(raw_url) + hostname = parsed_url.netloc + mapped_host = self.match_host(hostname, url_host_mapping.keys()) + mapped_host = url_host_mapping.get(mapped_host, mapped_host) + + if mapped_host not in approved_image_hosts: + console.print(f"[red]Unsupported image host detected in URL '{raw_url}'. Please use one of the approved image hosts.") + return meta[new_images_key], True, images_reuploaded # Trigger retry_mode if switching hosts + + if all( + url_host_mapping.get( + self.match_host(urlparse(image['raw_url']).netloc, url_host_mapping.keys()), + self.match_host(urlparse(image['raw_url']).netloc, url_host_mapping.keys()), + ) in approved_image_hosts + for image in meta[new_images_key] + ): + + return meta[new_images_key], False, images_reuploaded async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as desc: - # adding bd_dump to description if it exits and adding empty string to mediainfo - if meta['bdinfo'] != None: + base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() + + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as desc: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read()[:-65].strip() bd_dump = None + if bd_dump: desc.write("[mediainfo]" + bd_dump + "[/mediainfo]\n\n") elif mi_dump: desc.write("[mediainfo]" + mi_dump + "[/mediainfo]\n\n") - images = meta['image_list'] + + if 'mtv_images_key' in meta: + images = meta['mtv_images_key'] + else: + images = meta['image_list'] if len(images) > 0: - desc.write(f"[spoiler=Screenshots]") - for each in range(len(images)): - raw_url = images[each]['raw_url'] - img_url = images[each]['img_url'] + for image in images: + raw_url = image['raw_url'] + img_url = image['img_url'] desc.write(f"[url={raw_url}][img=250]{img_url}[/img][/url]") - desc.write(f"[/spoiler]") + desc.write(f"\n\n{base}") desc.close() 
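[editor note] The host-approval logic in this hunk leans on two structures: match_host() normalizes a hostname to an approved base domain (so i.ibb.co still matches ibb.co), and url_host_mapping translates that domain to the short host name used in the config. A condensed sketch of the same decision, under the same mapping assumptions as the code above:

from urllib.parse import urlparse

APPROVED_IMAGE_HOSTS = {'ptpimg', 'imgbox', 'imgbb'}
URL_HOST_MAPPING = {"ibb.co": "imgbb", "ptpimg.me": "ptpimg", "imgbox.com": "imgbox"}

def is_approved(raw_url: str) -> bool:
    hostname = urlparse(raw_url).netloc
    for domain, short_name in URL_HOST_MAPPING.items():
        # endswith covers subdomains, e.g. "i.ibb.co" still maps to imgbb
        if hostname == domain or hostname.endswith(f".{domain}"):
            return short_name in APPROVED_IMAGE_HOSTS
    return False

Every entry in meta['image_list'] has to pass this predicate before the re-upload path is skipped.
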
return async def edit_group_desc(self, meta): description = "" - if meta['imdb_id'] not in ("0", "", None): + if meta['imdb_id'] not in ("0", "", None): description += f"https://www.imdb.com/title/tt{meta['imdb_id']}" if meta['tmdb'] != 0: description += f"\nhttps://www.themoviedb.org/{str(meta['category'].lower())}/{str(meta['tmdb'])}" @@ -202,13 +386,12 @@ async def edit_group_desc(self, meta): return description - async def edit_name(self, meta): - mtv_name = meta['uuid'] - # Try to use original filename if possible - if meta['source'].lower().replace('-', '') in mtv_name.replace('-', '').lower(): - if not meta['isdir']: - mtv_name = os.path.splitext(mtv_name)[0] + if meta['scene'] is True: + if meta.get('scene_name') != "": + mtv_name = meta.get('scene_name') + else: + mtv_name = meta['uuid'] else: mtv_name = meta['name'] if meta.get('type') in ('WEBDL', 'WEBRIP', 'ENCODE') and "DD" in meta['audio']: @@ -219,69 +402,34 @@ async def edit_name(self, meta): if 'DD+' in meta.get('audio', '') and 'DDP' in meta['uuid']: mtv_name = mtv_name.replace('DD+', 'DDP') mtv_name = mtv_name.replace('Dubbed', '').replace('Dual-Audio', 'DUAL') + if meta['source'].lower().replace('-', '') in mtv_name.replace('-', '').lower(): + if not meta['isdir']: + # Check if there is a valid file extension, otherwise, skip the split + if '.' in mtv_name and mtv_name.split('.')[-1].isalpha() and len(mtv_name.split('.')[-1]) <= 4: + mtv_name = os.path.splitext(mtv_name)[0] # Add -NoGrp if missing tag if meta['tag'] == "": mtv_name = f"{mtv_name}-NoGrp" mtv_name = ' '.join(mtv_name.split()) - mtv_name = re.sub("[^0-9a-zA-Zƀ-Ćæ. &+'\-\[\]]+", "", mtv_name) + mtv_name = re.sub(r"[^0-9a-zA-Zƀ-Ćæ. &+'\-\[\]]+", "", mtv_name) mtv_name = mtv_name.replace(' ', '.').replace('..', '.') + console.print(f"[yellow]Sent this name: {mtv_name}[/yellow]") return mtv_name - - - # Not needed as its optional - # async def get_poster(self, meta): - # if 'poster_image' in meta: - # return meta['poster_image'] - # else: - # if meta['poster'] is not None: - # poster = meta['poster'] - # else: - # if 'cover' in meta['imdb_info'] and meta['imdb_info']['cover'] is not None: - # poster = meta['imdb_info']['cover'] - # else: - # console.print(f'[red]No poster can be found for this EXITING!!') - # return - # with requests.get(url=poster, stream=True) as r: - # with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['clean_name']}-poster.jpg", - # 'wb') as f: - # shutil.copyfileobj(r.raw, f) - # - # url = "https://api.imgbb.com/1/upload" - # data = { - # 'key': self.config['DEFAULT']['imgbb_api'], - # 'image': base64.b64encode(open(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['clean_name']}-poster.jpg", "rb").read()).decode('utf8') - # } - # try: - # console.print("[yellow]uploading poster to imgbb") - # response = requests.post(url, data=data) - # response = response.json() - # if response.get('success') != True: - # console.print(response, 'red') - # img_url = response['data'].get('medium', response['data']['image'])['url'] - # th_url = response['data']['thumb']['url'] - # web_url = response['data']['url_viewer'] - # raw_url = response['data']['image']['url'] - # meta['poster_image'] = raw_url - # console.print(f'[green]{raw_url} ') - # except Exception: - # console.print("[yellow]imgbb failed to upload cover") - # - # return raw_url async def get_res_id(self, resolution): resolution_id = { - '8640p':'0', + '8640p': '0', '4320p': '4000', '2160p': '2160', - '1440p' : '1440', + '1440p': '1440', '1080p': '1080', - '1080i':'1080', + '1080i': '1080', 
'720p': '720', '576p': '0', '576i': '0', '480p': '480', '480i': '480' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id async def get_cat_id(self, meta): @@ -302,7 +450,6 @@ async def get_cat_id(self, meta): else: return 3 - async def get_source_id(self, meta): if meta['is_disc'] == 'DVD': return '1' @@ -323,10 +470,9 @@ async def get_source_id(self, meta): 'MIXED': '11', 'Unknown': '12', 'ENCODE': '7' - }.get(meta['type'], '0') + }.get(meta['type'], '0') return type_id - async def get_origin_id(self, meta): if meta['personalrelease']: return '4' @@ -336,11 +482,12 @@ async def get_origin_id(self, meta): else: return '3' - async def get_tags(self, meta): tags = [] # Genres - tags.extend([x.strip().lower() for x in meta['genres'].split()]) + # MTV takes issue with some of the pulled TMDB tags, and I'm not hand checking and attempting + # to regex however many tags need changing, so they're just geting skipped + # tags.extend([x.strip(', ').lower().replace(' ', '.') for x in meta['genres'].split(',')]) # Resolution tags.append(meta['resolution'].lower()) if meta['sd'] == 1: @@ -350,37 +497,37 @@ async def get_tags(self, meta): else: tags.append('hd') # Streaming Service + # disney+ should be disneyplus, assume every other service is same. + # If I'm wrong, then they can either allowing editing tags or service will just get skipped also if str(meta['service_longname']) != "": - tags.append(f"{meta['service_longname'].lower().replace(' ', '.')}.source") + service_name = meta['service_longname'].lower().replace(' ', '.') + service_name = service_name.replace('+', 'plus') # Replace '+' with 'plus' + tags.append(f"{service_name}.source") # Release Type/Source for each in ['remux', 'WEB.DL', 'WEBRip', 'HDTV', 'BluRay', 'DVD', 'HDDVD']: if (each.lower().replace('.', '') in meta['type'].lower()) or (each.lower().replace('-', '') in meta['source']): tags.append(each) - - # series tags if meta['category'] == "TV": if meta.get('tv_pack', 0) == 0: # Episodes if meta['sd'] == 1: - tags.extend(['episode.release', 'sd.episode']) + tags.extend(['sd.episode']) else: - tags.extend(['episode.release', 'hd.episode']) + tags.extend(['hd.episode']) else: # Seasons if meta['sd'] == 1: tags.append('sd.season') else: tags.append('hd.season') - + # movie tags if meta['category'] == 'MOVIE': if meta['sd'] == 1: tags.append('sd.movie') else: tags.append('hd.movie') - - # Audio tags audio_tag = "" @@ -417,17 +564,15 @@ async def get_tags(self, meta): tags = ' '.join(tags) return tags - - async def validate_credentials(self, meta): cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/MTV.pkl") if not os.path.exists(cookiefile): await self.login(cookiefile) vcookie = await self.validate_cookies(meta, cookiefile) - if vcookie != True: + if vcookie is not True: console.print('[red]Failed to validate cookies. Please confirm that the site is up and your username and password is valid.') recreate = cli_ui.ask_yes_no("Log in again and create new session?") - if recreate == True: + if recreate is True: if os.path.exists(cookiefile): os.remove(cookiefile) await self.login(cookiefile) @@ -436,14 +581,14 @@ async def validate_credentials(self, meta): else: return False vapi = await self.validate_api() - if vapi != True: + if vapi is not True: console.print('[red]Failed to validate API. 
Please confirm that the site is up and your API key is valid.') return True async def validate_api(self): url = self.search_url params = { - 'apikey' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'apikey': self.config['TRACKERS'][self.tracker]['api_key'].strip(), } try: r = requests.get(url, params=params) @@ -452,7 +597,7 @@ async def validate_api(self): console.print("[red]Invalid API Key") return False return True - except: + except Exception: return False async def validate_cookies(self, meta, cookiefile): @@ -487,12 +632,12 @@ async def login(self, cookiefile): with requests.Session() as session: url = 'https://www.morethantv.me/login' payload = { - 'username' : self.config['TRACKERS'][self.tracker].get('username'), - 'password' : self.config['TRACKERS'][self.tracker].get('password'), - 'keeploggedin' : 1, - 'cinfo' : '1920|1080|24|0', - 'submit' : 'login', - 'iplocked' : 1, + 'username': self.config['TRACKERS'][self.tracker].get('username'), + 'password': self.config['TRACKERS'][self.tracker].get('password'), + 'keeploggedin': 1, + 'cinfo': '1920|1080|24|0', + 'submit': 'login', + 'iplocked': 1, # 'ssl' : 'yes' } res = session.get(url="https://www.morethantv.me/login") @@ -509,11 +654,11 @@ async def login(self, cookiefile): mfa_code = pyotp.parse_uri(otp_uri).now() else: mfa_code = console.input('[yellow]MTV 2FA Code: ') - + two_factor_payload = { - 'token' : resp.text.rsplit('name="token" value="', 1)[1][:48], - 'code' : mfa_code, - 'submit' : 'login' + 'token': resp.text.rsplit('name="token" value="', 1)[1][:48], + 'code': mfa_code, + 'submit': 'login' } resp = session.post(url="https://www.morethantv.me/twofactor/login", data=two_factor_payload) # checking if logged in @@ -527,13 +672,13 @@ async def login(self, cookiefile): console.print(resp.url) return - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on MTV...") params = { - 't' : 'search', - 'apikey' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'q' : "" + 't': 'search', + 'apikey': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'q': "" } if meta['imdb_id'] not in ("0", "", None): params['imdbid'] = "tt" + meta['imdb_id'] @@ -557,9 +702,9 @@ async def search_existing(self, meta): console.print(f"[yellow]{rr.get('status_message')}") await asyncio.sleep(5) else: - console.print(f"[red]Site Seems to be down or not responding to API") - except: - console.print(f"[red]Unable to search for existing torrents on site. Most likely the site is down.") + console.print("[red]Site Seems to be down or not responding to API") + except Exception: + console.print("[red]Unable to search for existing torrents on site. 
Most likely the site is down.") dupes.append("FAILED SEARCH") print(traceback.print_exc()) await asyncio.sleep(5) diff --git a/src/trackers/NBL.py b/src/trackers/NBL.py index 489c21902..0f58e2fb7 100644 --- a/src/trackers/NBL.py +++ b/src/trackers/NBL.py @@ -2,9 +2,7 @@ # import discord import asyncio import requests -import distutils.util -import os -from guessit import guessit +from guessit import guessit from src.trackers.COMMON import COMMON from src.console import console @@ -18,13 +16,6 @@ class NBL(): Set type/category IDs Upload """ - - ############################################################### - ######## EDIT ME ######## - ############################################################### - - # ALSO EDIT CLASS NAME ABOVE - def __init__(self, config): self.config = config self.tracker = 'NBL' @@ -32,9 +23,14 @@ def __init__(self, config): self.upload_url = 'https://nebulance.io/upload.php' self.search_url = 'https://nebulance.io/api.php' self.api_key = self.config['TRACKERS'][self.tracker]['api_key'].strip() - self.banned_groups = ['0neshot', '3LTON', '4yEo', '[Oj]', 'AFG', 'AkihitoSubs', 'AniHLS', 'Anime', 'Time', 'AnimeRG', 'AniURL', 'ASW', 'BakedFish', 'bonkai77', 'Cleo', 'DeadFish', 'DeeJayAhmed', 'ELiTE', 'EMBER', 'eSc', 'FGT', 'FUM', 'GERMini', 'HAiKU', 'Hi10', 'ION10', 'JacobSwaggedUp', 'JIVE', 'Judas', 'LOAD', 'MeGusta', 'Mr.Deadpool', 'mSD', 'NemDiggers', 'neoHEVC', 'NhaNc3', 'NOIVTC', 'PlaySD', 'playXD', 'project-gxs', 'PSA', 'QaS', 'Ranger', 'RAPiDCOWS', 'Raze', 'Reaktor', 'REsuRRecTioN', 'RMTeam', 'SpaceFish', 'SPASM', 'SSA', 'Telly', 'Tenrai-Sensei', 'TM', 'Trix', 'URANiME', 'VipapkStudios', 'ViSiON', 'Wardevil', 'xRed', 'XS', 'YakuboEncodes', 'YuiSubs', 'ZKBL', 'ZmN', 'ZMNT'] + self.banned_groups = ['0neshot', '3LTON', '4yEo', '[Oj]', 'AFG', 'AkihitoSubs', 'AniHLS', 'Anime Time', 'AnimeRG', 'AniURL', 'ASW', 'BakedFish', + 'bonkai77', 'Cleo', 'DeadFish', 'DeeJayAhmed', 'ELiTE', 'EMBER', 'eSc', 'EVO', 'FGT', 'FUM', 'GERMini', 'HAiKU', 'Hi10', 'ION10', + 'JacobSwaggedUp', 'JIVE', 'Judas', 'LOAD', 'MeGusta', 'Mr.Deadpool', 'mSD', 'NemDiggers', 'neoHEVC', 'NhaNc3', 'NOIVTC', + 'PlaySD', 'playXD', 'project-gxs', 'PSA', 'QaS', 'Ranger', 'RAPiDCOWS', 'Raze', 'Reaktor', 'REsuRRecTioN', 'RMTeam', 'ROBOTS', + 'SpaceFish', 'SPASM', 'SSA', 'Telly', 'Tenrai-Sensei', 'TM', 'Trix', 'URANiME', 'VipapkStudios', 'ViSiON', 'Wardevil', 'xRed', + 'XS', 'YakuboEncodes', 'YuiSubs', 'ZKBL', 'ZmN', 'ZMNT'] + pass - async def get_cat_id(self, meta): if meta.get('tv_pack', 0) == 1: @@ -43,35 +39,29 @@ async def get_cat_id(self, meta): cat_id = 1 return cat_id - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### async def edit_desc(self, meta): # Leave this in so manual works return - async def upload(self, meta): - if meta['category'] != 'TV': - console.print("[red]Only TV Is allowed at NBL") - return + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read()[:-65].strip() + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read()[:-65].strip() 
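[editor note] NBL's duplicate search further down this file speaks JSON-RPC rather than the UNIT3D filter API used elsewhere in this changeset. A hedged sketch of the call, assuming api.php accepts the payload as a JSON POST body (the request call itself sits outside this hunk):

import requests

def nbl_get_torrents(api_key: str, search_term: dict) -> dict:
    # search_term is one of {'tvmaze': id}, {'imdb': digits} or
    # {'series': title}, matching the fallbacks in search_existing below.
    payload = {
        'jsonrpc': '2.0',
        'id': 1,
        'method': 'getTorrents',
        'params': [api_key, search_term],
    }
    response = requests.post('https://nebulance.io/api.php', json=payload)
    response.raise_for_status()
    return response.json()
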
open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'file_input': open_torrent} data = { - 'api_key' : self.api_key, - 'tvmazeid' : int(meta.get('tvmaze_id', 0)), - 'mediainfo' : mi_dump, - 'category' : await self.get_cat_id(meta), - 'ignoredupes' : 'on' + 'api_key': self.api_key, + 'tvmazeid': int(meta.get('tvmaze_id', 0)), + 'mediainfo': mi_dump, + 'category': await self.get_cat_id(meta), + 'ignoredupes': 'on' } - - if meta['debug'] == False: + + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data) try: if response.ok: @@ -80,34 +70,38 @@ async def upload(self, meta): else: console.print(response) console.print(response.text) - except: + except Exception: console.print_exception() console.print("[bold yellow]It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): + if meta['category'] != 'TV': + console.print("[red]Only TV Is allowed at NBL") + meta['skipping'] = "NBL" + return + if meta.get('is_disc') is not None: + console.print('[bold red]This site does not allow raw discs') + meta['skipping'] = "NBL" + return dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on NBL...") if int(meta.get('tvmaze_id', 0)) != 0: - search_term = {'tvmaze' : int(meta['tvmaze_id'])} + search_term = {'tvmaze': int(meta['tvmaze_id'])} elif int(meta.get('imdb_id', '0').replace('tt', '')) == 0: - search_term = {'imdb' : meta.get('imdb_id', '0').replace('tt', '')} + search_term = {'imdb': meta.get('imdb_id', '0').replace('tt', '')} else: - search_term = {'series' : meta['title']} + search_term = {'series': meta['title']} json = { - 'jsonrpc' : '2.0', - 'id' : 1, - 'method' : 'getTorrents', - 'params' : [ - self.api_key, + 'jsonrpc': '2.0', + 'id': 1, + 'method': 'getTorrents', + 'params': [ + self.api_key, search_term ] } @@ -137,4 +131,4 @@ async def search_existing(self, meta): except Exception: console.print_exception() - return dupes \ No newline at end of file + return dupes diff --git a/src/trackers/OE.py b/src/trackers/OE.py index bb69a3e02..91ac0b6f2 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -2,14 +2,16 @@ # import discord import asyncio import requests -from difflib import SequenceMatcher -import distutils.util -import json -import os +from str2bool import str2bool import platform - +import re +import os +import cli_ui +from src.bbcode import BBCODE from src.trackers.COMMON import COMMON from src.console import console +import bencodepy + class OE(): """ @@ -25,104 +27,175 @@ def __init__(self, config): self.source_flag = 'OE' self.search_url = 'https://onlyencodes.cc/api/torrents/filter' self.upload_url = 'https://onlyencodes.cc/api/torrents/upload' - self.signature = f"\n[center][url=https://onlyencodes.cc/pages/1]OnlyEncodes Uploader - Powered by L4G's Upload Assistant[/url][/center]" - self.banned_groups = ['0neshot', '3LT0N', '4K4U', '4yEo', '$andra', '[Oj]', 'AFG', 'AkihitoSubs', 'AniHLS', 'Anime Time', 'AnimeRG', 'AniURL', 'AR', 'AROMA', 'ASW', 'aXXo', 'BakedFish', 'BiTOR', 'BHDStudio', 'BRrip', 'bonkai', 'Cleo', 'CM8', 'C4K', 'CrEwSaDe', 'core', 'd3g', 'DDR', 'DeadFish', 'DeeJayAhmed', 'DNL', 'ELiTE', 'EMBER', 'eSc', 'EVO', 'EZTV', 'FaNGDiNG0', 'FGT', 'fenix', 
'FUM', 'FRDS', 'FROZEN', 'GalaxyTV', 'GalaxyRG', 'GERMini', 'Grym', 'GrymLegacy', 'HAiKU', 'HD2DVD', 'HDTime', 'Hi10', 'ION10', 'iPlanet', 'JacobSwaggedUp', 'JIVE', 'Judas', 'KiNGDOM', 'LAMA', 'Leffe', 'LiGaS', 'LOAD', 'LycanHD', 'MeGusta,' 'MezRips,' 'mHD,' 'Mr.Deadpool', 'mSD', 'NemDiggers', 'neoHEVC', 'NeXus', 'NhaNc3', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'NOIVTC', 'pahe.in', 'PlaySD', 'playXD', 'PRODJi', 'ProRes', 'project-gxs', 'PSA', 'QaS', 'Ranger', 'RAPiDCOWS', 'RARBG', 'Raze', 'RCDiVX', 'RDN', 'Reaktor', 'REsuRRecTioN', 'RMTeam', 'ROBOTS', 'rubix', 'SANTi', 'SHUTTERSHIT', 'SpaceFish', 'SPASM', 'SSA', 'TBS', 'Telly,' 'Tenrai-Sensei,' 'TERMiNAL,' 'TM', 'topaz', 'TSP', 'TSPxL', 'Trix', 'URANiME', 'UTR', 'VipapkSudios', 'ViSION', 'WAF', 'Wardevil', 'x0r', 'xRed', 'XS', 'YakuboEncodes', 'YIFY', 'YTS', 'YuiSubs', 'ZKBL', 'ZmN', 'ZMNT'] + self.torrent_url = 'https://onlyencodes.cc/api/torrents/' + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" + self.banned_groups = [ + '0neshot', '3LT0N', '4K4U', '4yEo', '$andra', '[Oj]', 'AFG', 'AkihitoSubs', 'AniHLS', 'Anime Time', + 'AnimeRG', 'AniURL', 'AOC', 'AR', 'AROMA', 'ASW', 'aXXo', 'BakedFish', 'BiTOR', 'BRrip', 'bonkai', + 'Cleo', 'CM8', 'C4K', 'CrEwSaDe', 'core', 'd3g', 'DDR', 'DE3PM', 'DeadFish', 'DeeJayAhmed', 'DNL', 'ELiTE', + 'EMBER', 'eSc', 'EVO', 'EZTV', 'FaNGDiNG0', 'FGT', 'fenix', 'FUM', 'FRDS', 'FROZEN', 'GalaxyTV', + 'GalaxyRG', 'GalaxyRG265', 'GERMini', 'Grym', 'GrymLegacy', 'HAiKU', 'HD2DVD', 'HDTime', 'Hi10', + 'HiQVE', 'ION10', 'iPlanet', 'JacobSwaggedUp', 'JIVE', 'Judas', 'KiNGDOM', 'LAMA', 'Leffe', 'LiGaS', + 'LOAD', 'LycanHD', 'MeGusta', 'MezRips', 'mHD', 'Mr.Deadpool', 'mSD', 'NemDiggers', 'neoHEVC', 'NeXus', + 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'NOIVTC', 'pahe.in', 'PlaySD', 'playXD', 'PRODJi', 'ProRes', + 'project-gxs', 'PSA', 'QaS', 'Ranger', 'RAPiDCOWS', 'RARBG', 'Raze', 'RCDiVX', 'RDN', 'Reaktor', + 'REsuRRecTioN', 'RMTeam', 'ROBOTS', 'rubix', 'SANTi', 'SHUTTERSHIT', 'SpaceFish', 'SPASM', 'SSA', + 'TBS', 'Telly', 'Tenrai-Sensei', 'TERMiNAL', 'TGx', 'TM', 'topaz', 'TSP', 'TSPxL', 'URANiME', 'UTR', + 'VipapkSudios', 'ViSION', 'WAF', 'Wardevil', 'x0r', 'xRed', 'XS', 'YakuboEncodes', 'YIFY', 'YTS', + 'YuiSubs', 'ZKBL', 'ZmN', 'ZMNT' + ] pass - - async def upload(self, meta): + + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) + await self.edit_desc(meta, self.tracker, self.signature) cat_id = await self.get_cat_id(meta['category']) + if meta.get('type') == "DVDRIP": + meta['type'] = "ENCODE" type_id = await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('video_codec'), meta.get('category', "")) resolution_id = await self.get_res_id(meta['resolution']) oe_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + region_id = await common.unit3d_region_ids(meta.get('region')) + distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', 
encoding='utf-8').read() else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : oe_name, - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': oe_name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - + + if region_id != 0: + data['region_id'] = region_id + if distributor_id != 0: + data['distributor_id'] = distributor_id + if meta.get('category') == "TV": data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - - if meta['debug'] == False: + + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: - + console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") open_torrent.close() - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - async def edit_name(self, meta): oe_name = meta.get('name') + media_info_tracks = meta.get('media_info_tracks', []) # noqa #F841 + resolution = meta.get('resolution') + video_encode = meta.get('video_encode') + name_type = meta.get('type', "") + tag_lower = meta['tag'].lower() + invalid_tags = ["nogrp", "nogroup", "unknown", "-unk-"] + + if name_type == "DVDRIP": + if meta.get('category') == "MOVIE": + oe_name = oe_name.replace(f"{meta['source']}{meta['video_encode']}", f"{resolution}", 1) + oe_name = oe_name.replace((meta['audio']), f"{meta['audio']}{video_encode}", 1) + else: + oe_name = oe_name.replace(f"{meta['source']}", 
f"{resolution}", 1) + oe_name = oe_name.replace(f"{meta['video_codec']}", f"{meta['audio']} {meta['video_codec']}", 1) + + if not meta['is_disc']: + def has_english_audio(media_info_text=None): + if media_info_text: + audio_section = re.findall(r'Audio[\s\S]+?Language\s+:\s+(\w+)', media_info_text) + for i, language in enumerate(audio_section): + language = language.lower().strip() + if language.lower().startswith('en'): # Check if it's English + return True + return False + + def get_audio_lang(media_info_text=None): + if media_info_text: + match = re.search(r'Audio[\s\S]+?Language\s+:\s+(\w+)', media_info_text) + if match: + return match.group(1).upper() + return "" + + try: + media_info_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt" + with open(media_info_path, 'r', encoding='utf-8') as f: + media_info_text = f.read() + + if not has_english_audio(media_info_text=media_info_text): + audio_lang = get_audio_lang(media_info_text=media_info_text) + if audio_lang: + oe_name = oe_name.replace(meta['resolution'], f"{audio_lang} {meta['resolution']}", 1) + except (FileNotFoundError, KeyError) as e: + print(f"Error processing MEDIAINFO.txt: {e}") + + if meta['tag'] == "" or any(invalid_tag in tag_lower for invalid_tag in invalid_tags): + for invalid_tag in invalid_tags: + oe_name = re.sub(f"-{invalid_tag}", "", oe_name, flags=re.IGNORECASE) + oe_name = f"{oe_name}-NOGRP" + return oe_name async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '0') return category_id async def get_type_id(self, type, tv_pack, video_codec, category): type_id = { - 'DISC': '19', + 'DISC': '19', 'REMUX': '20', 'WEBDL': '21', - }.get(type, '0') - if type == "WEBRIP": + }.get(type, '0') + if type == "WEBRIP": if video_codec == "HEVC": # x265 Encode type_id = '10' @@ -146,34 +219,116 @@ async def get_type_id(self, type, tv_pack, video_codec, category): async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', - '4320p': '1', - '2160p': '2', - '1440p' : '3', + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', '1080p': '3', - '1080i':'4', - '720p': '5', - '576p': '6', + '1080i': '4', + '720p': '5', + '576p': '6', '576i': '7', - '480p': '8', + '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id + async def edit_desc(self, meta, tracker, signature, comparison=False, desc_header=""): + base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf8').read() + + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]DESCRIPTION.txt", 'w', encoding='utf8') as descfile: + if desc_header != "": + descfile.write(desc_header) - + if not meta['is_disc']: + def process_languages(tracks): + audio_languages = [] + subtitle_languages = [] + for track in tracks: + if track.get('@type') == 'Audio': + language = track.get('Language') + if not language or language is None: + audio_lang = cli_ui.ask_string('No audio language present, you must enter one:') + if audio_lang: + audio_languages.append(audio_lang) + else: + audio_languages.append("") + if track.get('@type') == 'Text': + language = track.get('Language') + if not language or language is None: + subtitle_lang = cli_ui.ask_string('No subtitle language present, you must enter one:') + if subtitle_lang: + subtitle_languages.append(subtitle_lang) + else: + subtitle_languages.append("") - async def search_existing(self, meta): + return audio_languages, 
subtitle_languages + + media_data = meta.get('mediainfo', {}) + if media_data: + tracks = media_data.get('media', {}).get('track', []) + if tracks: + audio_languages, subtitle_languages = process_languages(tracks) + if audio_languages: + descfile.write(f"Audio Language: {', '.join(audio_languages)}\n") + + subtitle_tracks = [track for track in tracks if track.get('@type') == 'Text'] + if subtitle_tracks and subtitle_languages: + descfile.write(f"Subtitle Language: {', '.join(subtitle_languages)}\n") + else: + console.print("[red]No media information available in meta.[/red]") + + bbcode = BBCODE() + if meta.get('discs', []) != []: + discs = meta['discs'] + if discs[0]['type'] == "DVD": + descfile.write(f"[spoiler=VOB MediaInfo][code]{discs[0]['vob_mi']}[/code][/spoiler]\n\n") + if len(discs) >= 2: + for each in discs[1:]: + if each['type'] == "BDMV": + descfile.write(f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n\n") + elif each['type'] == "DVD": + descfile.write(f"{each['name']}:\n") + descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code][{each['vob_mi']}[/code][/spoiler] [spoiler={os.path.basename(each['ifo'])}][code][{each['ifo_mi']}[/code][/spoiler]\n\n") + elif each['type'] == "HDDVD": + descfile.write(f"{each['name']}:\n") + descfile.write(f"[spoiler={os.path.basename(each['largest_evo'])}][code][{each['evo_mi']}[/code][/spoiler]\n\n") + + desc = base + desc = bbcode.convert_pre_to_code(desc) + desc = bbcode.convert_hide_to_spoiler(desc) + desc = bbcode.convert_comparison_to_collapse(desc, 1000) + + desc = desc.replace('[img]', '[img=300]') + descfile.write(desc) + images = meta['image_list'] + if len(images) > 0: + descfile.write("[center]") + for each in range(len(images[:int(meta['screens'])])): + web_url = images[each]['web_url'] + raw_url = images[each]['raw_url'] + descfile.write(f"[url={web_url}][img=350]{raw_url}[/img][/url]") + descfile.write("[/center]") + + if signature is not None: + descfile.write(signature) + return + + async def search_existing(self, meta, disctype): + if 'concert' in meta['keywords']: + console.print('[bold red]Concerts not allowed.') + meta['skipping'] = "OE" + return dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on OE...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('sd', 0), meta.get('category', "")), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('sd', 0), meta.get('category', "")), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta['category'] == 'TV': params['name'] = f"{meta.get('season', '')}{meta.get('episode', '')}" @@ -185,8 +340,47 @@ async def search_existing(self, meta): for each in response['data']: result = [each][0]['attributes']['name'] dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes \ No newline at end of file + return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/OTW.py b/src/trackers/OTW.py new file mode 100644 index 000000000..db4457614 --- /dev/null +++ b/src/trackers/OTW.py @@ -0,0 +1,219 @@ +# -*- coding: utf-8 -*- +# import discord +import asyncio +import requests +from str2bool import str2bool +import platform +import bencodepy +import os +import glob + +from src.trackers.COMMON import COMMON +from src.console import console + + +class OTW(): + """ + Edit for Tracker: + Edit BASE.torrent with announce and source + Check for duplicates + Set type/category IDs + Upload + """ + + def __init__(self, config): + self.config = config + self.tracker = 'OTW' + self.source_flag = 'OLD' + self.upload_url = 'https://oldtoons.world/api/torrents/upload' + self.search_url = 'https://oldtoons.world/api/torrents/filter' + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" + self.banned_groups = [""] + pass + + async def get_cat_id(self, category_name): + category_id = { + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '0') + return category_id + + async def get_type_id(self, type): + type_id = { + 'DISC': '1', + 'REMUX': '2', + 'WEBDL': '4', + 'WEBRIP': '5', + 'HDTV': '6', + 'ENCODE': '3' + }.get(type, '0') + return type_id + + async def get_res_id(self, resolution): + resolution_id = { + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', + '1080p': '3', + '1080i': '4', + '720p': '5', + '576p': '6', + '576i': '7', + '480p': '8', + '480i': '9' + }.get(resolution, '10') + return resolution_id + + async def upload(self, meta, disctype): + common = COMMON(config=self.config) + await common.edit_torrent(meta, self.tracker, self.source_flag) + cat_id = await self.get_cat_id(meta['category']) + type_id = await self.get_type_id(meta['type']) + resolution_id = await self.get_res_id(meta['resolution']) + await common.unit3d_edit_desc(meta, self.tracker, self.signature) + region_id = await common.unit3d_region_ids(meta.get('region')) + distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + anon = 0 + else: + anon = 1 + 
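[editor note] The anonymity toggle that closes the hunk above repeats across every UNIT3D tracker in this changeset: a release is anonymous when either the run asked for it or the tracker config forces it. The one-liner unpacks to roughly this, using the str2bool dependency these files now import in place of distutils:

from str2bool import str2bool

def resolve_anon(meta_anon: int, tracker_config: dict) -> int:
    # Config values arrive as strings ("True"/"False"), hence the str2bool hop.
    config_anon = bool(str2bool(str(tracker_config.get('anon', 'False'))))
    return 0 if meta_anon == 0 and not config_anon else 1
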
+ if meta['bdinfo'] is not None: + mi_dump = None + bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + else: + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + bd_dump = None + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') + files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") + data = { + 'name': meta['name'], + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, + } + # Internal + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: + if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + data['internal'] = 1 + + if region_id != 0: + data['region_id'] = region_id + if distributor_id != 0: + data['distributor_id'] = distributor_id + if meta.get('category') == "TV": + data['season_number'] = meta.get('season_int', '0') + data['episode_number'] = meta.get('episode_int', '0') + headers = { + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' + } + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + } + + if meta['debug'] is False: + response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + try: + console.print(response.json()) + except Exception: + console.print("It may have uploaded, go check") + return + else: + console.print("[cyan]Request Data:") + console.print(data) + open_torrent.close() + + async def search_existing(self, meta, disctype): + dupes = [] + console.print("[yellow]Searching for existing torrents on OTW...") + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" + } + if meta.get('edition', "") != "": + params['name'] = params['name'] + f" {meta['edition']}" + try: + response = requests.get(url=self.search_url, params=params) + response = response.json() + for each in response['data']: + result = [each][0]['attributes']['name'] + # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() + # if difference >= 0.05: + dupes.append(result) + except Exception: + console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') + await asyncio.sleep(5) + + return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/PSS.py b/src/trackers/PSS.py new file mode 100644 index 000000000..c4abe60f4 --- /dev/null +++ b/src/trackers/PSS.py @@ -0,0 +1,221 @@ +# -*- coding: utf-8 -*- +# import discord +import asyncio +import requests +import platform +from str2bool import str2bool +import bencodepy +import os +import glob + +from src.trackers.COMMON import COMMON +from src.console import console + + +class PSS(): + """ + Edit for Tracker: + Edit BASE.torrent with announce and source + Check for duplicates + Set type/category IDs + Upload + """ + + def __init__(self, config): + self.config = config + self.tracker = 'PSS' + self.source_flag = 'PSS' + self.upload_url = 'https://privatesilverscreen.cc/api/torrents/upload' + self.search_url = 'https://privatesilverscreen.cc/api/torrents/filter' + self.signature = '\n[center][url=https://privatesilverscreen.cc/pages/1]Please Seed[/url][/center]' + self.banned_groups = ['4K4U', 'AROMA', 'd3g', 'edge2020', 'EMBER', 'EVO', 'FGT', 'NeXus', 'ION10', 'iVy', 'Judas', 'LAMA', 'MeGusta', 'nikt0', 'OEPlus', 'OFT', 'OsC', 'PYC', + 'QxR', 'Ralphy', 'RARBG', 'SAMPA', 'Sicario', 'Silence', 'STUTTERSHIT', 'Tigole', 'TSP', 'TSPxL', 'Will1869', 'x0r', 'YIFY', 'core', 'ZMNT', + 'msd', 'nikt0', 'aXXo', 'BRrip', 'CM8', 'CrEwSaDe', 'DNL', 'FaNGDiNG0', 'FRDS', 'HD2DVD', 'HDTime', 'Leffe', 'mHD', 'mSD', 'nHD', 'nSD', 'NhaNc3', 'PRODJi', + 'RDN', 'SANTi', 'ViSION', 'WAF', 'YTS', 'FROZEN', 'UTR', 'Grym', 'GrymLegacy', 'CK4', 'ProRes', 'MezRips', 'GalaxyRG', 'RCDiVX', 'LycanHD'] + pass + + async def get_cat_id(self, category_name): + category_id = { + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '0') + return category_id + + async def get_type_id(self, type): + type_id = { + 'DISC': '1', + 'REMUX': '2', + 'ENCODE': '3', + 'WEBDL': '4', + 'WEBRIP': '5', + 'HDTV': '6', + }.get(type, '0') + return type_id + + async def get_res_id(self, resolution): + resolution_id = { + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1080p': '3', + '1080i': '4', + '720p': '5', + '576p': '6', + '576i': '7', + '480p': '8', + '480i': '9' + }.get(resolution, '10') + return resolution_id + + async def upload(self, meta, disctype): + common = COMMON(config=self.config) + await common.edit_torrent(meta, self.tracker, self.source_flag) 
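[editor note] Both OTW above and PSS below attach an optional NFO to the multipart upload: the first *.nfo found in the job's tmp directory is sent under the 'nfo' field. Sketched out with the same glob the code uses; the caller is responsible for closing the handle once the POST completes:

import glob
import os

def attach_nfo(files: dict, base_dir: str, uuid: str) -> None:
    # Mirrors the upload body: only the first matching NFO is attached.
    nfo_files = glob.glob(os.path.join(base_dir, "tmp", uuid, "*.nfo"))
    if nfo_files:
        files['nfo'] = ("nfo_file.nfo", open(nfo_files[0], 'rb'), "text/plain")
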
+ cat_id = await self.get_cat_id(meta['category']) + type_id = await self.get_type_id(meta['type']) + resolution_id = await self.get_res_id(meta['resolution']) + await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True) + region_id = await common.unit3d_region_ids(meta.get('region')) + distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + anon = 0 + else: + anon = 1 + + if meta['bdinfo'] is not None: + mi_dump = None + bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + else: + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + bd_dump = None + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') + files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") + data = { + 'name': meta['name'], + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, + } + # Internal + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: + if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + data['internal'] = 1 + + if region_id != 0: + data['region_id'] = region_id + if distributor_id != 0: + data['distributor_id'] = distributor_id + if meta.get('category') == "TV": + data['season_number'] = meta.get('season_int', '0') + data['episode_number'] = meta.get('episode_int', '0') + headers = { + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' + } + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + } + + if meta['debug'] is False: + response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + try: + console.print(response.json()) + except Exception: + console.print("It may have uploaded, go check") + return + else: + console.print("[cyan]Request Data:") + console.print(data) + open_torrent.close() + + async def search_existing(self, meta, disctype): + dupes = [] + console.print("[yellow]Searching for existing torrents on PSS...") + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" + } + if meta.get('edition', "") != "": + params['name'] = params['name'] + f" {meta['edition']}" + try: + response = 
requests.get(url=self.search_url, params=params) + response = response.json() + for each in response['data']: + result = [each][0]['attributes']['name'] + # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() + # if difference >= 0.05: + dupes.append(result) + except Exception: + console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + await asyncio.sleep(5) + + return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/PTER.py b/src/trackers/PTER.py index b9fcecfa0..88fea3a80 100644 --- a/src/trackers/PTER.py +++ b/src/trackers/PTER.py @@ -1,19 +1,16 @@ from bs4 import BeautifulSoup import requests -import asyncio import re import os from pathlib import Path -import traceback import json import glob -import distutils.util -import cli_ui +from str2bool import str2bool import pickle from unidecode import unidecode -from urllib.parse import urlparse, quote +from urllib.parse import urlparse from src.trackers.COMMON import COMMON -from src.exceptions import * +from src.exceptions import * # noqa E403 from src.console import console @@ -23,23 +20,23 @@ def __init__(self, config): self.config = config self.tracker = 'PTER' self.source_flag = 'PTER' - self.passkey = str(config['TRACKERS']['PTER'].get('passkey', '')).strip() + self.passkey = str(config['TRACKERS']['PTER'].get('passkey', '')).strip() self.username = config['TRACKERS']['PTER'].get('username', '').strip() self.password = config['TRACKERS']['PTER'].get('password', '').strip() self.rehost_images = config['TRACKERS']['PTER'].get('img_rehost', False) self.ptgen_api = config['TRACKERS']['PTER'].get('ptgen_api').strip() - self.ptgen_retry=3 + self.ptgen_retry = 3 self.signature = None self.banned_groups = [""] async def validate_credentials(self, meta): vcookie = await self.validate_cookies(meta) - if vcookie != True: + if vcookie is not True: console.print('[red]Failed to validate cookies. 
Please confirm that the site is up and your passkey is valid.') return False return True - + async def validate_cookies(self, meta): common = COMMON(config=self.config) url = "https://pterclub.com" @@ -48,7 +45,7 @@ async def validate_cookies(self, meta): with requests.Session() as session: session.cookies.update(await common.parseCookieFile(cookiefile)) resp = session.get(url=url) - + if meta['debug']: console.print('[cyan]Cookies:') console.print(session.cookies.get_dict()) @@ -61,8 +58,8 @@ async def validate_cookies(self, meta): else: console.print("[bold red]Missing Cookie File. (data/cookies/PTER.txt)") return False - - async def search_existing(self, meta): + + async def search_existing(self, meta, disctype): dupes = [] common = COMMON(config=self.config) cookiefile = f"{meta['base_dir']}/data/cookies/PTER.txt" @@ -79,9 +76,9 @@ async def search_existing(self, meta): soup = BeautifulSoup(r.text, 'lxml') rows = soup.select('table.torrents > tr:has(table.torrentname)') for row in rows: - text=row.select_one('a[href^="details.php?id="]') - if text != None: - release=text.attrs['title'] + text = row.select_one('a[href^="details.php?id="]') + if text is not None: + release = text.attrs['title'] if release: dupes.append(release) else: @@ -91,27 +88,27 @@ async def search_existing(self, meta): async def get_type_category_id(self, meta): cat_id = "EXIT" - + if meta['category'] == 'MOVIE': cat_id = 401 - + if meta['category'] == 'TV': cat_id = 404 - + if 'documentary' in meta.get("genres", "").lower() or 'documentary' in meta.get("keywords", "").lower(): cat_id = 402 - + if 'Animation' in meta.get("genres", "").lower() or 'Animation' in meta.get("keywords", "").lower(): cat_id = 403 - + return cat_id - + async def get_area_id(self, meta): - - area_id=8 - area_map = { #To do + + area_id = 8 + area_map = { # To do "中国大陆": 1, "中国香港": 2, "中国台湾": 3, "美国": 4, "日本": 6, "韩国": 5, - "印度": 7, "法国": 4, "意大利": 4, "德国": 4, "西班牙": 4, "葡萄牙": 4, + "印度": 7, "法国": 4, "意大利": 4, "德国": 4, "西班牙": 4, "葡萄牙": 4, "英国": 4, "阿根廷": 8, "澳大利亚": 4, "比利时": 4, "巴西": 8, "加拿大": 4, "瑞士": 4, "智利": 8, } @@ -120,25 +117,23 @@ async def get_area_id(self, meta): if area in regions: return area_map[area] return area_id - - async def get_type_medium_id(self, meta): medium_id = "EXIT" # 1 = UHD Discs if meta.get('is_disc', '') in ("BDMV", "HD DVD"): - if meta['resolution']=='2160p': + if meta['resolution'] == '2160p': medium_id = 1 else: - medium_id = 2 #BD Discs - + medium_id = 2 # BD Discs + if meta.get('is_disc', '') == "DVD": - medium_id = 7 - + medium_id = 7 + # 4 = HDTV if meta.get('type', '') == "HDTV": medium_id = 4 - + # 6 = Encode if meta.get('type', '') in ("ENCODE", "WEBRIP"): medium_id = 6 @@ -154,8 +149,8 @@ async def get_type_medium_id(self, meta): return medium_id async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as descfile: + base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as descfile: from src.bbcode import BBCODE from src.trackers.COMMON import COMMON common = COMMON(config=self.config) @@ -163,9 +158,8 @@ async def edit_desc(self, meta): if int(meta.get('imdb_id', '0').replace('tt', '')) != 0:
ptgen = await common.ptgen(meta, self.ptgen_api, self.ptgen_retry) if ptgen.strip() != '': - descfile.write(ptgen) + descfile.write(ptgen) - bbcode = BBCODE() if meta.get('discs', []) != []: discs = meta['discs'] @@ -187,35 +181,35 @@ async def edit_desc(self, meta): desc = bbcode.convert_spoiler_to_hide(desc) desc = bbcode.convert_comparison_to_centered(desc, 1000) desc = desc.replace('[img]', '[img]') - desc = re.sub("(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) + desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) descfile.write(desc) - - if self.rehost_images == True: + + if self.rehost_images is True: console.print("[green]Rehosting Images...") images = await self.pterimg_upload(meta) - if len(images) > 0: + if len(images) > 0: descfile.write("[center]") for each in range(len(images[:int(meta['screens'])])): web_url = images[each]['web_url'] img_url = images[each]['img_url'] descfile.write(f"[url={web_url}][img]{img_url}[/img][/url]") - descfile.write("[/center]") + descfile.write("[/center]") else: images = meta['image_list'] - if len(images) > 0: + if len(images) > 0: descfile.write("[center]") for each in range(len(images[:int(meta['screens'])])): web_url = images[each]['web_url'] img_url = images[each]['img_url'] descfile.write(f"[url={web_url}][img]{img_url}[/img][/url]") descfile.write("[/center]") - - if self.signature != None: + + if self.signature is not None: descfile.write("\n\n") descfile.write(self.signature) descfile.close() - async def get_auth_token(self,meta): + async def get_auth_token(self, meta): if not os.path.exists(f"{meta['base_dir']}/data/cookies"): Path(f"{meta['base_dir']}/data/cookies").mkdir(parents=True, exist_ok=True) cookiefile = f"{meta['base_dir']}/data/cookies/Pterimg.pickle" @@ -228,23 +222,23 @@ async def get_auth_token(self,meta): loggedIn = await self.validate_login(r) else: console.print("[yellow]Pterimg Cookies not found. Creating new session.") - if loggedIn == True: + if loggedIn is True: auth_token = re.search(r'auth_token.*?\"(\w+)\"', r.text).groups()[0] else: data = { - 'login-subject': self.username, - 'password': self.password, + 'login-subject': self.username, + 'password': self.password, 'keep-login': 1 } r = session.get("https://s3.pterclub.com") data['auth_token'] = re.search(r'auth_token.*?\"(\w+)\"', r.text).groups()[0] - loginresponse = session.post(url='https://s3.pterclub.com/login',data=data) + loginresponse = session.post(url='https://s3.pterclub.com/login', data=data) if not loginresponse.ok: - raise LoginException("Failed to login to Pterimg. ") + raise LoginException("Failed to login to Pterimg. 
") # noqa #F405 auth_token = re.search(r'auth_token = *?\"(\w+)\"', loginresponse.text).groups()[0] with open(cookiefile, 'wb') as cf: pickle.dump(session.cookies, cf) - + return auth_token async def validate_login(self, response): @@ -256,14 +250,14 @@ async def validate_login(self, response): async def pterimg_upload(self, meta): images = glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['filename']}-*.png") - url='https://s3.pterclub.com' - image_list=[] + url = 'https://s3.pterclub.com' + image_list = [] data = { 'type': 'file', - 'action': 'upload', - 'nsfw': 0, + 'action': 'upload', + 'nsfw': 0, 'auth_token': await self.get_auth_token(meta) - } + } cookiefile = f"{meta['base_dir']}/data/cookies/Pterimg.pickle" with requests.Session() as session: if os.path.exists(cookiefile): @@ -278,17 +272,17 @@ async def pterimg_upload(self, meta): except json.decoder.JSONDecodeError: res = {} if not req.ok: - if res['error']['message'] in ('重复äøŠä¼ ','Duplicated upload'): + if res['error']['message'] in ('重复äøŠä¼ ', 'Duplicated upload'): continue - raise(f'HTTP {req.status_code}, reason: {res["error"]["message"]}') + raise (f'HTTP {req.status_code}, reason: {res["error"]["message"]}') image_dict = {} image_dict['web_url'] = res['image']['url'] image_dict['img_url'] = res['image']['url'] - image_list.append(image_dict) + image_list.append(image_dict) return image_list async def get_anon(self, anon): - if anon == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if anon == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 'no' else: anon = 'yes' @@ -304,11 +298,11 @@ async def edit_name(self, meta): pter_name = pter_name.replace(meta["aka"], '') pter_name = pter_name.replace('PQ10', 'HDR') - if meta['type'] == 'WEBDL' and meta.get('has_encode_settings', False) == True: + if meta['type'] == 'WEBDL' and meta.get('has_encode_settings', False) is True: pter_name = pter_name.replace('H.264', 'x264') return pter_name - + async def is_zhongzi(self, meta): if meta.get('is_disc', '') != 'BDMV': mi = meta['mediainfo'] @@ -316,51 +310,51 @@ async def is_zhongzi(self, meta): if track['@type'] == "Text": language = track.get('Language') if language == "zh": - return 'yes' + return 'yes' else: for language in meta['bdinfo']['subtitles']: if language == "Chinese": - return 'yes' + return 'yes' return None - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - desc_file=f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" + desc_file = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" if not os.path.exists(desc_file): await self.edit_desc(meta) - + pter_name = await self.edit_name(meta) - - if meta['bdinfo'] != None: + + if meta['bdinfo'] is not None: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8') else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8') pter_desc = open(desc_file, 'r').read() torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - + with open(torrent_path, 'rb') as torrentFile: if len(meta['filelist']) == 1: torrentFileName = unidecode(os.path.basename(meta['video']).replace(' ', '.')) else: torrentFileName = unidecode(os.path.basename(meta['path']).replace(' ', '.')) files = { 
- 'file' : (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent"), + 'file': (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent"), } - #use chinese small_descr + # use chinese small_descr if meta['ptgen']["trans_title"] != ['']: - small_descr='' + small_descr = '' for title_ in meta['ptgen']["trans_title"]: - small_descr+=f'{title_} / ' - small_descr+="| ē±»åˆ«:"+meta['ptgen']["genre"][0] - small_descr=small_descr.replace('/ |','|') + small_descr += f'{title_} / ' + small_descr += "| ē±»åˆ«:" + meta['ptgen']["genre"][0] + small_descr = small_descr.replace('/ |', '|') else: - small_descr=meta['title'] - data= { + small_descr = meta['title'] + data = { "name": pter_name, "small_descr": small_descr, "descr": pter_desc, @@ -371,11 +365,11 @@ async def upload(self, meta): "zhongzi": await self.is_zhongzi(meta) } - if meta.get('personalrelease', False) == True: - data["pr"] = "yes" + if meta.get('personalrelease', False) is True: + data["pr"] = "yes" url = "https://pterclub.com/takeupload.php" - + # Submit if meta['debug']: console.print(url) @@ -388,15 +382,15 @@ async def upload(self, meta): up = session.post(url=url, data=data, files=files) torrentFile.close() mi_dump.close() - + if up.url.startswith("https://pterclub.com/details.php?id="): - console.print(f"[green]Uploaded to: [yellow]{up.url.replace('&uploaded=1','')}[/yellow][/green]") + console.print(f"[green]Uploaded to: [yellow]{up.url.replace('&uploaded=1', '')}[/yellow][/green]") id = re.search(r"(id=)(\d+)", urlparse(up.url).query).group(2) await self.download_new_torrent(id, torrent_path) else: console.print(data) console.print("\n\n") - raise UploadException(f"Upload to Pter Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') + raise UploadException(f"Upload to Pter Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') # noqa #F405 return async def download_new_torrent(self, id, torrent_path): @@ -408,6 +402,3 @@ async def download_new_torrent(self, id, torrent_path): else: console.print("[red]There was an issue downloading the new .torrent from pter") console.print(r.text) - - - \ No newline at end of file diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 01ec975fd..41865687a 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -2,24 +2,22 @@ import requests import asyncio import re -import distutils.util import os from pathlib import Path -import time -import traceback +from str2bool import str2bool import json import glob import multiprocessing import platform import pickle +import click from pymediainfo import MediaInfo - - from src.trackers.COMMON import COMMON from src.bbcode import BBCODE -from src.exceptions import * +from src.exceptions import * # noqa F403 from src.console import console - +from torf import Torrent +from datetime import datetime class PTP(): @@ -30,121 +28,139 @@ def __init__(self, config): self.source_flag = 'PTP' self.api_user = config['TRACKERS']['PTP'].get('ApiUser', '').strip() self.api_key = config['TRACKERS']['PTP'].get('ApiKey', '').strip() - self.announce_url = config['TRACKERS']['PTP'].get('announce_url', '').strip() - self.username = config['TRACKERS']['PTP'].get('username', '').strip() + self.announce_url = config['TRACKERS']['PTP'].get('announce_url', '').strip() + self.username = config['TRACKERS']['PTP'].get('username', '').strip() self.password = config['TRACKERS']['PTP'].get('password', '').strip() - self.web_source = 
distutils.util.strtobool(str(config['TRACKERS']['PTP'].get('add_web_source_to_desc', True))) + self.web_source = str2bool(str(config['TRACKERS']['PTP'].get('add_web_source_to_desc', True))) self.user_agent = f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' - self.banned_groups = ['aXXo', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'DNL', 'FaNGDiNG0', 'HD2DVD', 'HDTime', 'ION10', 'iPlanet', 'KiNGDOM', 'mHD', 'mSD', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'OFT', 'PRODJi', 'SANTi', 'STUTTERSHIT', 'ViSION', 'VXT', 'WAF', 'd3g', 'x0r', 'YIFY', 'BMDru'] - + self.banned_groups = ['aXXo', 'BMDru', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'd3g', 'DNL', 'FaNGDiNG0', 'HD2DVD', 'HDTime', 'ION10', 'iPlanet', + 'KiNGDOM', 'mHD', 'mSD', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'OFT', 'PRODJi', 'SANTi', 'SPiRiT', 'STUTTERSHIT', 'ViSION', 'VXT', + 'WAF', 'x0r', 'YIFY', 'LAMA', 'WORLD'] + self.sub_lang_map = { - ("Arabic", "ara", "ar") : 22, - ("Brazilian Portuguese", "Brazilian", "Portuguese-BR", 'pt-br') : 49, - ("Bulgarian", "bul", "bg") : 29, - ("Chinese", "chi", "zh", "Chinese (Simplified)", "Chinese (Traditional)") : 14, - ("Croatian", "hrv", "hr", "scr") : 23, - ("Czech", "cze", "cz", "cs") : 30, - ("Danish", "dan", "da") : 10, - ("Dutch", "dut", "nl") : 9, - ("English", "eng", "en", "English (CC)", "English - SDH") : 3, - ("English - Forced", "English (Forced)", "en (Forced)") : 50, - ("English Intertitles", "English (Intertitles)", "English - Intertitles", "en (Intertitles)") : 51, - ("Estonian", "est", "et") : 38, - ("Finnish", "fin", "fi") : 15, - ("French", "fre", "fr") : 5, - ("German", "ger", "de") : 6, - ("Greek", "gre", "el") : 26, - ("Hebrew", "heb", "he") : 40, - ("Hindi" "hin", "hi") : 41, - ("Hungarian", "hun", "hu") : 24, - ("Icelandic", "ice", "is") : 28, - ("Indonesian", "ind", "id") : 47, - ("Italian", "ita", "it") : 16, - ("Japanese", "jpn", "ja") : 8, - ("Korean", "kor", "ko") : 19, - ("Latvian", "lav", "lv") : 37, - ("Lithuanian", "lit", "lt") : 39, - ("Norwegian", "nor", "no") : 12, - ("Persian", "fa", "far") : 52, - ("Polish", "pol", "pl") : 17, - ("Portuguese", "por", "pt") : 21, - ("Romanian", "rum", "ro") : 13, - ("Russian", "rus", "ru") : 7, - ("Serbian", "srp", "sr", "scc") : 31, - ("Slovak", "slo", "sk") : 42, - ("Slovenian", "slv", "sl") : 43, - ("Spanish", "spa", "es") : 4, - ("Swedish", "swe", "sv") : 11, - ("Thai", "tha", "th") : 20, - ("Turkish", "tur", "tr") : 18, - ("Ukrainian", "ukr", "uk") : 34, - ("Vietnamese", "vie", "vi") : 25, + ("Arabic", "ara", "ar"): 22, + ("Brazilian Portuguese", "Brazilian", "Portuguese-BR", 'pt-br'): 49, + ("Bulgarian", "bul", "bg"): 29, + ("Chinese", "chi", "zh", "Chinese (Simplified)", "Chinese (Traditional)"): 14, + ("Croatian", "hrv", "hr", "scr"): 23, + ("Czech", "cze", "cz", "cs"): 30, + ("Danish", "dan", "da"): 10, + ("Dutch", "dut", "nl"): 9, + ("English", "eng", "en", "en-US", "English (CC)", "English - SDH"): 3, + ("English - Forced", "English (Forced)", "en (Forced)", "en-US (Forced)"): 50, + ("English Intertitles", "English (Intertitles)", "English - Intertitles", "en (Intertitles)", "en-US (Intertitles)"): 51, + ("Estonian", "est", "et"): 38, + ("Finnish", "fin", "fi"): 15, + ("French", "fre", "fr"): 5, + ("German", "ger", "de"): 6, + ("Greek", "gre", "el"): 26, + ("Hebrew", "heb", "he"): 40, + ("Hindi", "hin", "hi"): 41, + ("Hungarian", "hun", "hu"): 24, + ("Icelandic", "ice", "is"): 28, + ("Indonesian", "ind", "id"): 47, + ("Italian", "ita", "it"): 16, + ("Japanese", "jpn", "ja"): 8, + ("Korean", "kor", "ko"): 19, + ("Latvian", 
"lav", "lv"): 37, + ("Lithuanian", "lit", "lt"): 39, + ("Norwegian", "nor", "no"): 12, + ("Persian", "fa", "far"): 52, + ("Polish", "pol", "pl"): 17, + ("Portuguese", "por", "pt"): 21, + ("Romanian", "rum", "ro"): 13, + ("Russian", "rus", "ru"): 7, + ("Serbian", "srp", "sr", "scc"): 31, + ("Slovak", "slo", "sk"): 42, + ("Slovenian", "slv", "sl"): 43, + ("Spanish", "spa", "es"): 4, + ("Swedish", "swe", "sv"): 11, + ("Thai", "tha", "th"): 20, + ("Turkish", "tur", "tr"): 18, + ("Ukrainian", "ukr", "uk"): 34, + ("Vietnamese", "vie", "vi"): 25, } - - - - async def get_ptp_id_imdb(self, search_term, search_file_folder): + async def get_ptp_id_imdb(self, search_term, search_file_folder, meta): imdb_id = ptp_torrent_id = None filename = str(os.path.basename(search_term)) params = { - 'filelist' : filename + 'filelist': filename } headers = { - 'ApiUser' : self.api_user, - 'ApiKey' : self.api_key, - 'User-Agent' : self.user_agent + 'ApiUser': self.api_user, + 'ApiKey': self.api_key, + 'User-Agent': self.user_agent } url = 'https://passthepopcorn.me/torrents.php' response = requests.get(url, params=params, headers=headers) await asyncio.sleep(1) console.print(f"[green]Searching PTP for: [bold yellow]{filename}[/bold yellow]") + try: if response.status_code == 200: response = response.json() + # console.print(f"[blue]Raw API Response: {response}[/blue]") + if int(response['TotalResults']) >= 1: for movie in response['Movies']: if len(movie['Torrents']) >= 1: for torrent in movie['Torrents']: - if search_file_folder == 'file': - for file in torrent['FileList']: - if file['Path'] == filename: - imdb_id = movie['ImdbId'] - ptp_torrent_id = torrent['Id'] - dummy, ptp_torrent_hash = await self.get_imdb_from_torrent_id(ptp_torrent_id) - console.print(f'[bold green]Matched release with PTP ID: [yellow]{ptp_torrent_id}[/yellow][/bold green]') - return imdb_id, ptp_torrent_id, ptp_torrent_hash - if search_file_folder == 'folder': - if str(torrent['FilePath']) == filename: + # First, try matching in filelist > path + for file in torrent['FileList']: + if file.get('Path') == filename: imdb_id = movie['ImdbId'] ptp_torrent_id = torrent['Id'] - dummy, ptp_torrent_hash = await self.get_imdb_from_torrent_id(ptp_torrent_id) + dummy, ptp_torrent_hash, *_ = await self.get_imdb_from_torrent_id(ptp_torrent_id) console.print(f'[bold green]Matched release with PTP ID: [yellow]{ptp_torrent_id}[/yellow][/bold green]') + + # Call get_torrent_info and print the results + tinfo = await self.get_torrent_info(imdb_id, meta) + console.print(f"[cyan]Torrent Info: {tinfo}[/cyan]") + return imdb_id, ptp_torrent_id, ptp_torrent_hash - else: - console.print(f'[yellow]Could not find any release matching [bold yellow]{filename}[/bold yellow] on PTP') - return None, None, None - elif int(response.status_code) in [400, 401, 403]: + + # If no match in filelist > path, check directly in filepath + if torrent.get('FilePath') == filename: + imdb_id = movie['ImdbId'] + ptp_torrent_id = torrent['Id'] + dummy, ptp_torrent_hash, *_ = await self.get_imdb_from_torrent_id(ptp_torrent_id) + console.print(f'[bold green]Matched release with PTP ID: [yellow]{ptp_torrent_id}[/yellow][/bold green]') + + # Call get_torrent_info and print the results + tinfo = await self.get_torrent_info(imdb_id, meta) + console.print(f"[cyan]Torrent Info: {tinfo}[/cyan]") + + return imdb_id, ptp_torrent_id, ptp_torrent_hash + + console.print(f'[yellow]Could not find any release matching [bold yellow]{filename}[/bold yellow] on PTP') + return None, None, None + + elif 
response.status_code in [400, 401, 403]: console.print(f"[bold red]PTP: {response.text}") return None, None, None - elif int(response.status_code) == 503: + + elif response.status_code == 503: console.print("[bold yellow]PTP Unavailable (503)") return None, None, None + else: return None, None, None - except Exception: - pass + + except Exception as e: + console.print(f'[red]An error occurred: {str(e)}[/red]') + console.print(f'[yellow]Could not find any release matching [bold yellow]{filename}[/bold yellow] on PTP') return None, None, None - + async def get_imdb_from_torrent_id(self, ptp_torrent_id): params = { - 'torrentid' : ptp_torrent_id + 'torrentid': ptp_torrent_id } headers = { - 'ApiUser' : self.api_user, - 'ApiKey' : self.api_key, - 'User-Agent' : self.user_agent + 'ApiUser': self.api_user, + 'ApiKey': self.api_key, + 'User-Agent': self.user_agent } url = 'https://passthepopcorn.me/torrents.php' response = requests.get(url, params=params, headers=headers) @@ -153,59 +169,88 @@ async def get_imdb_from_torrent_id(self, ptp_torrent_id): if response.status_code == 200: response = response.json() imdb_id = response['ImdbId'] + ptp_infohash = None for torrent in response['Torrents']: if torrent.get('Id', 0) == str(ptp_torrent_id): ptp_infohash = torrent.get('InfoHash', None) - return imdb_id, ptp_infohash + return imdb_id, ptp_infohash, None elif int(response.status_code) in [400, 401, 403]: console.print(response.text) - return None, None + return None, None, None elif int(response.status_code) == 503: console.print("[bold yellow]PTP Unavailable (503)") - return None, None + return None, None, None else: - return None, None + return None, None, None except Exception: - return None, None - - async def get_ptp_description(self, ptp_torrent_id, is_disc): + return None, None, None + + async def get_ptp_description(self, ptp_torrent_id, meta, is_disc): params = { - 'id' : ptp_torrent_id, - 'action' : 'get_description' + 'id': ptp_torrent_id, + 'action': 'get_description' } headers = { - 'ApiUser' : self.api_user, - 'ApiKey' : self.api_key, - 'User-Agent' : self.user_agent + 'ApiUser': self.api_user, + 'ApiKey': self.api_key, + 'User-Agent': self.user_agent } url = 'https://passthepopcorn.me/torrents.php' + console.print(f"[yellow]Requesting description from {url} with ID {ptp_torrent_id}") response = requests.get(url, params=params, headers=headers) await asyncio.sleep(1) + ptp_desc = response.text + # console.print(f"[yellow]Raw description received:\n{ptp_desc[:6800]}...") # Show first 500 characters for brevity + bbcode = BBCODE() - desc = bbcode.clean_ptp_description(ptp_desc, is_disc) - console.print(f"[bold green]Successfully grabbed description from PTP") - return desc - + desc, imagelist = bbcode.clean_ptp_description(ptp_desc, is_disc) + console.print("[bold green]Successfully grabbed description from PTP") + console.print(f"[cyan]Description after cleaning:[yellow]\n{desc[:1000]}...", markup=False) # Show first 1000 characters for brevity + + if not meta.get('skipit') and not meta['unattended']: + # Allow user to edit or discard the description + console.print("[cyan]Do you want to edit, discard or keep the description?[/cyan]") + edit_choice = input("Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: ") + + if edit_choice.lower() == 'e': + edited_description = click.edit(desc) + if edited_description: + desc = edited_description.strip() + meta['description'] = desc + meta['saved_description'] = True + console.print(f"[green]Final description after 
editing:[/green] {desc}") + elif edit_choice.lower() == 'd': + desc = None + console.print("[yellow]Description discarded.[/yellow]") + else: + console.print("[green]Keeping the original description.[/green]") + meta['description'] = ptp_desc + meta['saved_description'] = True + else: + meta['description'] = ptp_desc + meta['saved_description'] = True + + return desc, imagelist async def get_group_by_imdb(self, imdb): params = { - 'imdb' : imdb, + 'imdb': imdb, } headers = { - 'ApiUser' : self.api_user, - 'ApiKey' : self.api_key, - 'User-Agent' : self.user_agent + 'ApiUser': self.api_user, + 'ApiKey': self.api_key, + 'User-Agent': self.user_agent } url = 'https://passthepopcorn.me/torrents.php' response = requests.get(url=url, headers=headers, params=params) await asyncio.sleep(1) try: response = response.json() - if response.get("Page") == "Browse": # No Releases on Site with ID + if response.get("Page") == "Browse": # No Releases on Site with ID return None - elif response.get('Page') == "Details": # Group Found + elif response.get('Page') == "Details": # Group Found groupID = response.get('GroupId') console.print(f"[green]Matched IMDb: [yellow]tt{imdb}[/yellow] to Group ID: [yellow]{groupID}[/yellow][/green]") console.print(f"[green]Title: [yellow]{response.get('Name')}[/yellow] ([yellow]{response.get('Year')}[/yellow])") @@ -215,17 +260,16 @@ async def get_group_by_imdb(self, imdb): console.print("[red]Please check that the site is online and your ApiUser/ApiKey values are correct") return None - async def get_torrent_info(self, imdb, meta): params = { - 'imdb' : imdb, - 'action' : 'torrent_info', - 'fast' : 1 + 'imdb': imdb, + 'action': 'torrent_info', + 'fast': 1 } headers = { - 'ApiUser' : self.api_user, - 'ApiKey' : self.api_key, - 'User-Agent' : self.user_agent + 'ApiUser': self.api_user, + 'ApiKey': self.api_key, + 'User-Agent': self.user_agent } url = "https://passthepopcorn.me/ajax.php" response = requests.get(url=url, params=params, headers=headers) @@ -233,6 +277,7 @@ async def get_torrent_info(self, imdb, meta): tinfo = {} try: response = response.json() + # console.print(f"[blue]Raw info API Response: {response}[/blue]") # title, plot, art, year, tags, Countries, Languages for key, value in response[0].items(): if value not in (None, ""): @@ -246,9 +291,9 @@ async def get_torrent_info(self, imdb, meta): async def get_torrent_info_tmdb(self, meta): tinfo = { - "title" : meta.get("title", ""), - "year" : meta.get("year", ""), - "album_desc" : meta.get("overview", ""), + "title": meta.get("title", ""), + "year": meta.get("year", ""), + "album_desc": meta.get("overview", ""), } tags = await self.get_tags([meta.get("genres", ""), meta.get("keywords", "")]) tinfo['tags'] = ", ".join(tags) @@ -269,24 +314,23 @@ async def get_tags(self, check_against): tags.append(each) return tags - async def search_existing(self, groupID, meta): + async def search_existing(self, groupID, meta, disctype): # Map resolutions to SD / HD / UHD quality = None - if meta.get('sd', 0) == 1: # 1 is SD + if meta.get('sd', 0) == 1: # 1 is SD quality = "Standard Definition" elif meta['resolution'] in ["1440p", "1080p", "1080i", "720p"]: quality = "High Definition" elif meta['resolution'] in ["2160p", "4320p", "8640p"]: quality = "Ultra High Definition" - params = { - 'id' : groupID, + 'id': groupID, } headers = { - 'ApiUser' : self.api_user, - 'ApiKey' : self.api_key, - 'User-Agent' : self.user_agent + 'ApiUser': self.api_user, + 'ApiKey': self.api_key, + 'User-Agent': self.user_agent } url = 
'https://passthepopcorn.me/torrents.php' response = requests.get(url=url, headers=headers, params=params) @@ -297,20 +341,19 @@ async def search_existing(self, groupID, meta): torrents = response.get('Torrents', []) if len(torrents) != 0: for torrent in torrents: - if torrent.get('Quality') == quality and quality != None: + if torrent.get('Quality') == quality and quality is not None: existing.append(f"[{torrent.get('Resolution')}] {torrent.get('ReleaseName', 'RELEASE NAME NOT FOUND')}") except Exception: console.print("[red]An error has occured trying to find existing releases") return existing - async def ptpimg_url_rehost(self, image_url): payload = { - 'format' : 'json', - 'api_key' : self.config["DEFAULT"]["ptpimg_api"], - 'link-upload' : image_url + 'format': 'json', + 'api_key': self.config["DEFAULT"]["ptpimg_api"], + 'link-upload': image_url } - headers = { 'referer': 'https://ptpimg.me/index.php'} + headers = {'referer': 'https://ptpimg.me/index.php'} url = "https://ptpimg.me/upload.php" response = requests.post(url, headers=headers, data=payload) @@ -319,13 +362,12 @@ async def ptpimg_url_rehost(self, image_url): ptpimg_code = response[0]['code'] ptpimg_ext = response[0]['ext'] img_url = f"https://ptpimg.me/{ptpimg_code}.{ptpimg_ext}" - except: + except Exception: console.print("[red]PTPIMG image rehost failed") img_url = image_url # img_url = ptpimg_upload(image_url, ptpimg_api) return img_url - def get_type(self, imdb_info, meta): ptpType = None if imdb_info['type'] is not None: @@ -359,7 +401,7 @@ def get_type(self, imdb_info, meta): ptpType = "Stand-up Comedy" elif "concert" in keywords: ptpType = "Concert" - if ptpType == None: + if ptpType is None: if meta.get('mode', 'discord') == 'cli': ptpTypeList = ["Feature Film", "Short Film", "Miniseries", "Stand-up Comedy", "Concert", "Movie Collection"] ptpType = cli_ui.ask_choice("Select the proper type", choices=ptpTypeList) @@ -380,14 +422,14 @@ def get_codec(self, meta): codec = "DVD9" else: codecmap = { - "AVC" : "H.264", - "H.264" : "H.264", - "HEVC" : "H.265", - "H.265" : "H.265", + "AVC": "H.264", + "H.264": "H.264", + "HEVC": "H.265", + "H.265": "H.265", } searchcodec = meta.get('video_codec', meta.get('video_encode')) codec = codecmap.get(searchcodec, searchcodec) - if meta.get('has_encode_settings') == True: + if meta.get('has_encode_settings') is True: codec = codec.replace("H.", "x") return codec @@ -411,29 +453,27 @@ def get_container(self, meta): else: ext = os.path.splitext(meta['filelist'][0])[1] containermap = { - '.mkv' : "MKV", - '.mp4' : 'MP4' + '.mkv': "MKV", + '.mp4': 'MP4' } container = containermap.get(ext, 'Other') return container - def get_source(self, source): sources = { - "Blu-ray" : "Blu-ray", - "BluRay" : "Blu-ray", - "HD DVD" : "HD-DVD", - "HDDVD" : "HD-DVD", - "Web" : "WEB", - "HDTV" : "HDTV", - 'UHDTV' : 'HDTV', - "NTSC" : "DVD", - "PAL" : "DVD" + "Blu-ray": "Blu-ray", + "BluRay": "Blu-ray", + "HD DVD": "HD-DVD", + "HDDVD": "HD-DVD", + "Web": "WEB", + "HDTV": "HDTV", + 'UHDTV': 'HDTV', + "NTSC": "DVD", + "PAL": "DVD" } source_id = sources.get(source, "OtherR") return source_id - def get_subtitles(self, meta): sub_lang_map = self.sub_lang_map @@ -448,7 +488,8 @@ def get_subtitles(self, meta): if language == "en": if track.get('Forced', "") == "Yes": language = "en (Forced)" - if "intertitles" in track.get('Title', "").lower(): + title = track.get('Title', "") + if isinstance(title, str) and "intertitles" in title.lower(): language = "en (Intertitles)" for lang, subID in sub_lang_map.items(): if 
language in lang and subID not in sub_langs: @@ -458,29 +499,29 @@ def get_subtitles(self, meta): for lang, subID in sub_lang_map.items(): if language in lang and subID not in sub_langs: sub_langs.append(subID) - + if sub_langs == []: - sub_langs = [44] # No Subtitle + sub_langs = [44] # No Subtitle return sub_langs def get_trumpable(self, sub_langs): trumpable_values = { - "English Hardcoded Subs (Full)" : 4, - "English Hardcoded Subs (Forced)" : 50, - "No English Subs" : 14, - "English Softsubs Exist (Mislabeled)" : None, - "Hardcoded Subs (Non-English)" : "OTHER" + "English Hardcoded Subs (Full)": 4, + "English Hardcoded Subs (Forced)": 50, + "No English Subs": 14, + "English Softsubs Exist (Mislabeled)": None, + "Hardcoded Subs (Non-English)": "OTHER" } opts = cli_ui.select_choices("English subtitles not found. Please select any/all applicable options:", choices=list(trumpable_values.keys())) trumpable = [] if opts: for t, v in trumpable_values.items(): if t in ''.join(opts): - if v == None: + if v is None: break - elif v != 50: # Hardcoded, Forced + elif v != 50: # Hardcoded, Forced trumpable.append(v) - elif v == "OTHER": #Hardcoded, Non-English + elif v == "OTHER": # Hardcoded, Non-English trumpable.append(14) hc_sub_langs = cli_ui.ask_string("Enter language code for HC Subtitle languages") for lang, subID in self.sub_lang_map.items(): @@ -489,7 +530,7 @@ def get_trumpable(self, sub_langs): else: sub_langs.append(v) trumpable.append(4) - + sub_langs = list(set(sub_langs)) trumpable = list(set(trumpable)) if trumpable == []: @@ -506,7 +547,7 @@ def get_remaster_title(self, meta): remaster_title.append('The Criterion Collection') elif meta.get('distributor') in ('MASTERS OF CINEMA', 'MOC'): remaster_title.append('Masters of Cinema') - + # Editions # Director's Cut, Extended Edition, Rifftrax, Theatrical Cut, Uncut, Unrated if "director's cut" in meta.get('edition', '').lower(): @@ -527,7 +568,7 @@ def get_remaster_title(self, meta): # Features # 2-Disc Set, 2in1, 2D/3D Edition, 3D Anaglyph, 3D Full SBS, 3D Half OU, 3D Half SBS, - # 4K Restoration, 4K Remaster, + # 4K Restoration, 4K Remaster, # Extras, Remux, if meta.get('type') == "REMUX": remaster_title.append("Remux") @@ -541,11 +582,10 @@ def get_remaster_title(self, meta): remaster_title.append('Dual Audio') if "Dubbed" in meta['audio']: remaster_title.append('English Dub') - if meta.get('has_commentary', False) == True: + if meta.get('has_commentary', False) is True: remaster_title.append('With Commentary') - - # HDR10, HDR10+, Dolby Vision, 10-bit, + # HDR10, HDR10+, Dolby Vision, 10-bit, # if "Hi10P" in meta.get('video_encode', ''): # remaster_title.append('10-bit') if meta.get('hdr', '').strip() == '' and meta.get('bit_depth') == '10': @@ -578,94 +618,215 @@ async def edit_desc(self, meta): from src.prep import Prep prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding="utf-8").read() + multi_screens = int(self.config['DEFAULT'].get('multiScreens', 2)) + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding="utf-8") as desc: images = meta['image_list'] discs = meta.get('discs', []) - # For Discs - if len(discs) >= 1: - for i in range(len(discs)): - each = discs[i] + filelist = meta.get('filelist', []) + + # Handle single disc case + if len(discs) == 1: + each = discs[0] + new_screens = [] + if each['type'] == "BDMV": + 
desc.write(f"[mediainfo]{each['summary']}[/mediainfo]\n\n") + base2ptp = self.convert_bbcode(base) + if base2ptp.strip() != "": + desc.write(base2ptp) + desc.write("\n\n") + for img_index in range(len(images[:int(meta['screens'])])): + raw_url = meta['image_list'][img_index]['raw_url'] + desc.write(f"[img]{raw_url}[/img]\n") + desc.write("\n") + elif each['type'] == "DVD": + desc.write(f"[b][size=3]{each['name']}:[/size][/b]\n") + desc.write(f"[mediainfo]{each['ifo_mi_full']}[/mediainfo]\n") + desc.write(f"[mediainfo]{each['vob_mi_full']}[/mediainfo]\n\n") + base2ptp = self.convert_bbcode(base) + if base2ptp.strip() != "": + desc.write(base2ptp) + desc.write("\n\n") + for img_index in range(len(images[:int(meta['screens'])])): + raw_url = meta['image_list'][img_index]['raw_url'] + desc.write(f"[img]{raw_url}[/img]\n") + desc.write("\n") + + # Handle multiple discs case + elif len(discs) > 1: + if 'retry_count' not in meta: + meta['retry_count'] = 0 + if multi_screens < 2: + multi_screens = 2 + console.print("[yellow]PTP requires at least 2 screenshots for multi disc content, overriding config") + for i, each in enumerate(discs): + new_images_key = f'new_images_disc_{i}' if each['type'] == "BDMV": - desc.write(f"[mediainfo]{each['summary']}[/mediainfo]\n\n") if i == 0: + desc.write(f"[mediainfo]{each['summary']}[/mediainfo]\n\n") base2ptp = self.convert_bbcode(base) if base2ptp.strip() != "": desc.write(base2ptp) desc.write("\n\n") - mi_dump = each['summary'] + for img_index in range(min(multi_screens, len(meta['image_list']))): + raw_url = meta['image_list'][img_index]['raw_url'] + desc.write(f"[img]{raw_url}[/img]\n") + desc.write("\n") else: - mi_dump = each['summary'] - if meta.get('vapoursynth', False) == True: - use_vs = True + desc.write(f"[mediainfo]{each['summary']}[/mediainfo]\n\n") + base2ptp = self.convert_bbcode(base) + if base2ptp.strip() != "": + desc.write(base2ptp) + desc.write("\n\n") + if new_images_key in meta and meta[new_images_key]: + for img in meta[new_images_key]: + raw_url = img['raw_url'] + desc.write(f"[img]{raw_url}[/img]\n") + desc.write("\n") else: - use_vs = False - ds = multiprocessing.Process(target=prep.disc_screenshots, args=(f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), 2)) - ds.start() - while ds.is_alive() == True: - await asyncio.sleep(1) - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}",f"FILE_{i}-*.png") - images, dummy = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, {}) - - if each['type'] == "DVD": - desc.write(f"[b][size=3]{each['name']}:[/size][/b]\n") - desc.write(f"[mediainfo]{each['ifo_mi_full']}[/mediainfo]\n") - desc.write(f"[mediainfo]{each['vob_mi_full']}[/mediainfo]\n") - desc.write("\n") + meta['retry_count'] += 1 + meta[new_images_key] = [] + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + if not new_screens: + use_vs = meta.get('vapoursynth', False) + ds = multiprocessing.Process(target=prep.disc_screenshots, args=(meta, f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), multi_screens, True)) + ds.start() + while ds.is_alive() is True: + await asyncio.sleep(1) + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + if new_screens: + uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]}) + for img in uploaded_images: + meta[new_images_key].append({ + 'img_url': 
img['img_url'], + 'raw_url': img['raw_url'], + 'web_url': img['web_url'] + }) + raw_url = img['raw_url'] + desc.write(f"[img]{raw_url}[/img]\n") + desc.write("\n") + + meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json" + with open(meta_filename, 'w') as f: + json.dump(meta, f, indent=4) + + elif each['type'] == "DVD": if i == 0: + desc.write(f"[b][size=3]{each['name']}:[/size][/b]\n") + desc.write(f"[mediainfo]{each['ifo_mi_full']}[/mediainfo]\n") + desc.write(f"[mediainfo]{each['vob_mi_full']}[/mediainfo]\n\n") base2ptp = self.convert_bbcode(base) if base2ptp.strip() != "": desc.write(base2ptp) desc.write("\n\n") + for img_index in range(min(multi_screens, len(meta['image_list']))): + raw_url = meta['image_list'][img_index]['raw_url'] + desc.write(f"[img]{raw_url}[/img]\n") + desc.write("\n") else: - ds = multiprocessing.Process(target=prep.dvd_screenshots, args=(meta, i, 2)) - ds.start() - while ds.is_alive() == True: - await asyncio.sleep(1) - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['discs'][i]['name']}-*.png") - images, dummy = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, {}) - - if len(images) > 0: - for each in range(len(images[:int(meta['screens'])])): - raw_url = images[each]['raw_url'] - desc.write(f"[img]{raw_url}[/img]\n") - desc.write("\n") - # For non-discs - elif len(meta.get('filelist', [])) >= 1: - for i in range(len(meta['filelist'])): - file = meta['filelist'][i] + desc.write(f"[b][size=3]{each['name']}:[/size][/b]\n") + desc.write(f"[mediainfo]{each['ifo_mi_full']}[/mediainfo]\n") + desc.write(f"[mediainfo]{each['vob_mi_full']}[/mediainfo]\n\n") + base2ptp = self.convert_bbcode(base) + if base2ptp.strip() != "": + desc.write(base2ptp) + desc.write("\n\n") + if new_images_key in meta and meta[new_images_key]: + for img in meta[new_images_key]: + raw_url = img['raw_url'] + desc.write(f"[img]{raw_url}[/img]\n") + desc.write("\n") + else: + meta['retry_count'] += 1 + meta[new_images_key] = [] + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['discs'][i]['name']}-*.png") + if not new_screens: + ds = multiprocessing.Process(target=prep.dvd_screenshots, args=(meta, i, multi_screens, True)) + ds.start() + while ds.is_alive() is True: + await asyncio.sleep(1) + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['discs'][i]['name']}-*.png") + if new_screens: + uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]}) + for img in uploaded_images: + meta[new_images_key].append({ + 'img_url': img['img_url'], + 'raw_url': img['raw_url'], + 'web_url': img['web_url'] + }) + raw_url = img['raw_url'] + desc.write(f"[img]{raw_url}[/img]\n") + desc.write("\n") + + meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json" + with open(meta_filename, 'w') as f: + json.dump(meta, f, indent=4) + + # Handle single file case + elif len(filelist) == 1: + file = filelist[0] + if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '' and meta.get('description', None) is None and self.web_source is True: + desc.write(f"[quote][align=center]This release is sourced from {meta['service_longname']}[/align][/quote]") + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + desc.write(f"[mediainfo]{mi_dump}[/mediainfo]\n") + for img_index in range(len(images[:int(meta['screens'])])): + raw_url = meta['image_list'][img_index]['raw_url'] + 
desc.write(f"[img]{raw_url}[/img]\n") + desc.write("\n") + + # Handle multiple files case + elif len(filelist) > 1: + if multi_screens < 2: + multi_screens = 2 + console.print("[yellow]PTP requires at least 2 screenshots for multi disc/file content, overriding config") + for i in range(len(filelist)): + file = filelist[i] if i == 0: - # Add This line for all web-dls - if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '' and meta.get('description', None) == None and self.web_source == True: + if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '' and meta.get('description', None) is None and self.web_source is True: desc.write(f"[quote][align=center]This release is sourced from {meta['service_longname']}[/align][/quote]") mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + desc.write(f"[mediainfo]{mi_dump}[/mediainfo]\n") + for img_index in range(min(multi_screens, len(meta['image_list']))): + raw_url = meta['image_list'][img_index]['raw_url'] + desc.write(f"[img]{raw_url}[/img]\n") + desc.write("\n") else: - # Export Mediainfo - mi_dump = MediaInfo.parse(file, output="STRING", full=False, mediainfo_options={'inform_version' : '1'}) - # mi_dump = mi_dump.replace(file, os.path.basename(file)) + mi_dump = MediaInfo.parse(file, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/TEMP_PTP_MEDIAINFO.txt", "w", newline="", encoding="utf-8") as f: f.write(mi_dump) mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/TEMP_PTP_MEDIAINFO.txt", "r", encoding="utf-8").read() - # Generate and upload screens for other files - s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, 2)) - s.start() - while s.is_alive() == True: - await asyncio.sleep(3) - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}",f"FILE_{i}-*.png") - images, dummy = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, {}) - - desc.write(f"[mediainfo]{mi_dump}[/mediainfo]\n") - if i == 0: - base2ptp = self.convert_bbcode(base) - if base2ptp.strip() != "": - desc.write(base2ptp) - desc.write("\n\n") - if len(images) > 0: - for each in range(len(images[:int(meta['screens'])])): - raw_url = images[each]['raw_url'] - desc.write(f"[img]{raw_url}[/img]\n") - desc.write("\n") + desc.write(f"[mediainfo]{mi_dump}[/mediainfo]\n") + new_images_key = f'new_images_file_{i}' + if new_images_key in meta and meta[new_images_key]: + for img in meta[new_images_key]: + raw_url = img['raw_url'] + desc.write(f"[img]{raw_url}[/img]\n") + desc.write("\n") + else: + meta['retry_count'] = meta.get('retry_count', 0) + 1 + meta[new_images_key] = [] + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + if not new_screens: + s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, multi_screens, True, None)) + s.start() + while s.is_alive() is True: + await asyncio.sleep(3) + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + if new_screens: + uploaded_images, _ = prep.upload_screens(meta, multi_screens, 1, 0, 2, new_screens, {new_images_key: meta[new_images_key]}) + for img in uploaded_images: + meta[new_images_key].append({ + 'img_url': img['img_url'], + 'raw_url': img['raw_url'], + 'web_url': img['web_url'] + }) + raw_url = img['raw_url'] + desc.write(f"[img]{raw_url}[/img]\n") + desc.write("\n") - + 
meta_filename = f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json" + with open(meta_filename, 'w') as f: + json.dump(meta, f, indent=4) async def get_AntiCsrfToken(self, meta): if not os.path.exists(f"{meta['base_dir']}/data/cookies"): @@ -680,17 +841,17 @@ async def get_AntiCsrfToken(self, meta): loggedIn = await self.validate_login(uploadresponse) else: console.print("[yellow]PTP Cookies not found. Creating new session.") - if loggedIn == True: + if loggedIn is True: AntiCsrfToken = re.search(r'data-AntiCsrfToken="(.*)"', uploadresponse.text).group(1) else: - passKey = re.match(r"https?://please\.passthepopcorn\.me:?\d*/(.+)/announce",self.announce_url).group(1) + passKey = re.match(r"https?://please\.passthepopcorn\.me:?\d*/(.+)/announce", self.announce_url).group(1) data = { "username": self.username, "password": self.password, "passkey": passKey, "keeplogged": "1", } - headers = {"User-Agent" : self.user_agent} + headers = {"User-Agent": self.user_agent} loginresponse = session.post("https://passthepopcorn.me/ajax.php?action=login", data=data, headers=headers) await asyncio.sleep(2) try: @@ -703,14 +864,14 @@ async def get_AntiCsrfToken(self, meta): resp = loginresponse.json() try: if resp["Result"] != "Ok": - raise LoginException("Failed to login to PTP. Probably due to the bad user name, password, announce url, or 2FA code.") + raise LoginException("Failed to login to PTP. Probably due to the bad user name, password, announce url, or 2FA code.") # noqa F405 AntiCsrfToken = resp["AntiCsrfToken"] with open(cookiefile, 'wb') as cf: pickle.dump(session.cookies, cf) except Exception: - raise LoginException(f"Got exception while loading JSON login response from PTP. Response: {loginresponse.text}") + raise LoginException(f"Got exception while loading JSON login response from PTP. Response: {loginresponse.text}") # noqa F405 except Exception: - raise LoginException(f"Got exception while loading JSON login response from PTP. Response: {loginresponse.text}") + raise LoginException(f"Got exception while loading JSON login response from PTP. Response: {loginresponse.text}") # noqa F405 return AntiCsrfToken async def validate_login(self, response): @@ -718,7 +879,7 @@ async def validate_login(self, response): if response.text.find("""""") != -1: console.print("Looks like you are not logged in to PTP. Probably due to the bad user name, password, or expired session.") elif "Your popcorn quota has been reached, come back later!" 
in response.text: - raise LoginException("Your PTP request/popcorn quota has been reached, try again later") + raise LoginException("Your PTP request/popcorn quota has been reached, try again later") # noqa F405 else: loggedIn = True return loggedIn @@ -728,7 +889,7 @@ async def fill_upload_form(self, groupID, meta): await common.edit_torrent(meta, self.tracker, self.source_flag) resolution, other_resolution = self.get_resolution(meta) await self.edit_desc(meta) - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", "r").read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", "r", encoding='utf-8').read() ptp_subtitles = self.get_subtitles(meta) ptp_trumpable = None if not any(x in [3, 50] for x in ptp_subtitles) or meta['hardcoded-subs']: @@ -736,26 +897,26 @@ async def fill_upload_form(self, groupID, meta): data = { "submit": "true", "remaster_year": "", - "remaster_title": self.get_remaster_title(meta), #Eg.: Hardcoded English + "remaster_title": self.get_remaster_title(meta), # Eg.: Hardcoded English "type": self.get_type(meta['imdb_info'], meta), - "codec": "Other", # Sending the codec as custom. + "codec": "Other", # Sending the codec as custom. "other_codec": self.get_codec(meta), "container": "Other", "other_container": self.get_container(meta), "resolution": resolution, - "source": "Other", # Sending the source as custom. + "source": "Other", # Sending the source as custom. "other_source": self.get_source(meta['source']), "release_desc": desc, "nfo_text": "", - "subtitles[]" : ptp_subtitles, - "trumpable[]" : ptp_trumpable, - "AntiCsrfToken" : await self.get_AntiCsrfToken(meta) - } + "subtitles[]": ptp_subtitles, + "trumpable[]": ptp_trumpable, + "AntiCsrfToken": await self.get_AntiCsrfToken(meta) + } if data["remaster_year"] != "" or data["remaster_title"] != "": data["remaster"] = "on" if resolution == "Other": data["other_resolution"] = other_resolution - if meta.get('personalrelease', False) == True: + if meta.get('personalrelease', False) is True: data["internalrip"] = "on" # IF SPECIAL (idk how to check for this automatically) # data["special"] = "on" @@ -764,19 +925,18 @@ async def fill_upload_form(self, groupID, meta): else: data["imdb"] = meta["imdb_id"] - - if groupID == None: # If need to make new group + if groupID is None: # If need to make new group url = "https://passthepopcorn.me/upload.php" if data["imdb"] == "0": tinfo = await self.get_torrent_info_tmdb(meta) else: tinfo = await self.get_torrent_info(meta.get("imdb_id", "0"), meta) cover = meta["imdb_info"].get("cover") - if cover == None: + if cover is None: cover = meta.get('poster') - if cover != None and "ptpimg" not in cover: + if cover is not None and "ptpimg" not in cover: cover = await self.ptpimg_url_rehost(cover) - while cover == None: + while cover is None: cover = cli_ui.ask_string("No Poster was found. 
Please input a link to a poster: \n", default="") if "ptpimg" not in str(cover) and str(cover).endswith(('.jpg', '.png')): cover = await self.ptpimg_url_rehost(cover) @@ -791,29 +951,74 @@ async def fill_upload_form(self, groupID, meta): if new_data['year'] in ['', '0', 0, None] and meta.get('manual_year') not in [0, '', None]: new_data['year'] = meta['manual_year'] while new_data["tags"] == "": - if meta.get('mode', 'discord') == 'cli': + if meta.get('mode', 'discord') == 'cli': console.print('[yellow]Unable to match any tags') console.print("Valid tags can be found on the PTP upload form") new_data["tags"] = console.input("Please enter at least one tag. Comma separated (action, animation, short):") data.update(new_data) - if meta["imdb_info"].get("directors", None) != None: + if meta["imdb_info"].get("directors", None) is not None: data["artist[]"] = tuple(meta['imdb_info'].get('directors')) data["importance[]"] = "1" - else: # Upload on existing group + else: # Upload on existing group url = f"https://passthepopcorn.me/upload.php?groupid={groupID}" data["groupid"] = groupID return url, data - async def upload(self, meta, url, data): - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') as torrentFile: + async def upload(self, meta, url, data, disctype): + torrent_filename = f"[{self.tracker}]{meta['clean_name']}.torrent" + torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/{torrent_filename}" + torrent = Torrent.read(torrent_path) + + # Check if the piece size exceeds 16 MiB and regenerate the torrent if needed + if torrent.piece_size > 16777216: # 16 MiB in bytes + console.print("[red]Piece size is OVER 16M and does not work on PTP. Generating a new .torrent") + + # Import Prep and regenerate the torrent with a 16 MiB piece size limit + from src.prep import Prep + prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) + + if meta['is_disc']: + include = [] + exclude = [] + else: + include = ["*.mkv", "*.mp4", "*.ts"] + exclude = ["*.*", "*sample.mkv", "!sample*.*"] + + # Create a new torrent with the piece size explicitly capped at 16 MiB + new_torrent = prep.CustomTorrent( + meta=meta, + path=Path(meta['path']), + trackers=[self.announce_url], + source="L4G", + private=True, + exclude_globs=exclude, # Ensure this is always a list + include_globs=include, # Ensure this is always a list + creation_date=datetime.now(), + comment="Created by L4G's Upload Assistant", + created_by="L4G's Upload Assistant" + ) + + # Explicitly set the piece size and update metainfo + new_torrent.piece_size = 16777216 # 16 MiB in bytes + new_torrent.metainfo['info']['piece length'] = 16777216 # Ensure 'piece length' is set + + # Validate and write the new torrent + new_torrent.validate_piece_size() + new_torrent.generate(callback=prep.torf_cb, interval=5) + new_torrent.write(torrent_path, overwrite=True) + + # Proceed with the upload process + with open(torrent_path, 'rb') as torrentFile: files = { - "file_input" : ("placeholder.torrent", torrentFile, "application/x-bittorent") + "file_input": ("placeholder.torrent", torrentFile, "application/x-bittorrent") } headers = { # 'ApiUser' : self.api_user, # 'ApiKey' : self.api_key, - "User-Agent": self.user_agent + "User-Agent": self.user_agent } if meta['debug']: console.log(url) @@ -826,23 +1031,19 @@ async def upload(self, meta, url, data): response = session.post(url=url, 
data=data, headers=headers, files=files) console.print(f"[cyan]{response.url}") responsetext = response.text - # If the repsonse contains our announce url then we are on the upload page and the upload wasn't successful. + # If the response contains our announce URL, then we are on the upload page and the upload wasn't successful. if responsetext.find(self.announce_url) != -1: # Get the error message. - #
No torrent file uploaded, or file is empty.
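(The hunk around this point detects a failed PTP upload by checking whether the POST response still contains the announce URL, i.e. we were bounced back to the upload form, and then scrapes the human-readable error out of the page's HTML. The exact regex the project uses is garbled in this diff; the following is a minimal self-contained sketch of the same technique, in which the function name and the alert-div class pattern are assumptions for illustration, not PTP's actual markup.)

import re

def extract_upload_error(response_text: str, announce_url: str) -> str | None:
    """Return the site's error message if the upload failed, else None."""
    # If our announce URL is echoed back, we are still on the upload form,
    # so the upload failed and an error box should be present somewhere.
    if announce_url not in response_text:
        return None  # redirected away from the form: upload presumably succeeded
    # Hypothetical markup: an error box rendered as a div with an "alert" class.
    match = re.search(r'<div class="alert[^"]*">(.*?)</div>', response_text, re.DOTALL)
    # Fall back to an empty message when the box cannot be located.
    return match.group(1).strip() if match else ""

(On a failure this would yield the message shown in the scraped box, e.g. "No torrent file uploaded, or file is empty.", which the surrounding code then raises as an UploadException.)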
errorMessage = "" match = re.search(r"""
= 0.05: + dupes.append(result) + except Exception: + console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + await asyncio.sleep(5) + + return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/R4E.py b/src/trackers/R4E.py index 67d33c997..c340c17dc 100644 --- a/src/trackers/R4E.py +++ b/src/trackers/R4E.py @@ -2,16 +2,17 @@ # import discord import asyncio import requests -from difflib import SequenceMatcher -import distutils.util -import json +from str2bool import str2bool import tmdbsimple as tmdb -import os import platform +import bencodepy +import os +import glob from src.trackers.COMMON import COMMON from src.console import console + class R4E(): """ Edit for Tracker: @@ -28,43 +29,52 @@ def __init__(self, config): self.signature = None self.banned_groups = [""] pass - - async def upload(self, meta): + + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta['category'], meta['tmdb']) type_id = await self.get_type_id(meta['resolution']) await common.unit3d_edit_desc(meta, self.tracker, self.signature) name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS']['R4E'].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS']['R4E'].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[R4E]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[R4E]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[R4E]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if 
nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") data = { - 'name' : name, - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], + 'name': name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], # 'personal_release' : int(meta.get('personalrelease', False)), NOT IMPLEMENTED on R4E # 'internal' : 0, # 'featured' : 0, @@ -73,27 +83,25 @@ async def upload(self, meta): # 'sticky' : 0, } headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } url = f"https://racing4everyone.eu/api/torrents/upload?api_token={self.config['TRACKERS']['R4E']['api_key'].strip()}" if meta.get('category') == "TV": data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=url, files=files, data=data, headers=headers) try: - + console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - async def edit_name(self, meta): name = meta['name'] return name @@ -103,34 +111,34 @@ async def get_cat_id(self, category_name, tmdb_id): movie = tmdb.Movies(tmdb_id) movie_info = movie.info() is_docu = self.is_docu(movie_info['genres']) - category_id = '70' # Motorsports Movie + category_id = '70' # Motorsports Movie if is_docu: - category_id = '66' # Documentary + category_id = '66' # Documentary elif category_name == 'TV': tv = tmdb.TV(tmdb_id) tv_info = tv.info() is_docu = self.is_docu(tv_info['genres']) - category_id = '79' # TV Series + category_id = '79' # TV Series if is_docu: - category_id = '2' # TV Documentary + category_id = '2' # TV Documentary else: - category_id = '24' + category_id = '24' return category_id async def get_type_id(self, type): type_id = { - '8640p':'2160p', - '4320p': '2160p', - '2160p': '2160p', - '1440p' : '1080p', + '8640p': '2160p', + '4320p': '2160p', + '2160p': '2160p', + '1440p': '1080p', '1080p': '1080p', - '1080i':'1080i', - '720p': '720p', - '576p': 'SD', + '1080i': '1080i', + '720p': '720p', + '576p': 'SD', '576i': 'SD', - '480p': 'SD', + '480p': 'SD', '480i': 'SD' - }.get(type, '10') + }.get(type, '10') return type_id async def is_docu(self, genres): @@ -138,18 +146,18 @@ async def is_docu(self, genres): for each in genres: if each['id'] == 99: is_docu = True - return is_docu + return is_docu - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on R4E...") url = "https://racing4everyone.eu/api/torrents/filter" params = { - 'api_token' : 
self.config['TRACKERS']['R4E']['api_key'].strip(), - 'tmdb' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type']), - 'name' : "" + 'api_token': self.config['TRACKERS']['R4E']['api_key'].strip(), + 'tmdb': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'name': "" } if meta['category'] == 'TV': params['name'] = f"{meta.get('season', '')}{meta.get('episode', '')}" @@ -163,8 +171,47 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes \ No newline at end of file + return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/RF.py b/src/trackers/RF.py index ca94837b9..306432cac 100644 --- a/src/trackers/RF.py +++ b/src/trackers/RF.py @@ -2,13 +2,17 @@ # import discord import asyncio import requests -import distutils.util -import os import platform +import re +from str2bool import str2bool +import bencodepy +import os +import glob from src.trackers.COMMON import COMMON from src.console import console + class RF(): """ Edit for Tracker: @@ -18,105 +22,115 @@ class RF(): Upload """ - ############################################################### - ######## EDIT ME ######## - ############################################################### def __init__(self, config): self.config = config self.tracker = 'RF' self.source_flag = 'ReelFliX' self.upload_url = 'https://reelflix.xyz/api/torrents/upload' self.search_url = 'https://reelflix.xyz/api/torrents/filter' - self.forum_link = "\n[center][url=https://github.com/L4GSP1KE/Upload-Assistant]Created by Upload Assistant[/url][/center]" + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" self.banned_groups = [""] pass - - async def upload(self, meta): + + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - await common.unit3d_edit_desc(meta, self.tracker, self.forum_link) + await 
common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) cat_id = await self.get_cat_id(meta['category']) type_id = await self.get_type_id(meta['type']) resolution_id = await self.get_res_id(meta['resolution']) - stt_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + rf_name = await self.edit_name(meta) + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") data = { - 'name' : stt_name, - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': rf_name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - + if region_id != 0: data['region_id'] = region_id if distributor_id != 0: data['distributor_id'] = distributor_id headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - if meta.get('category') == "TV": - 
console.print('[bold red]This site only ALLOWS Movies.') - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() + async def edit_name(self, meta): + rf_name = meta['name'] + tag_lower = meta['tag'].lower() + invalid_tags = ["nogrp", "nogroup", "unknown", "-unk-"] + if meta['tag'] == "" or any(invalid_tag in tag_lower for invalid_tag in invalid_tags): + for invalid_tag in invalid_tags: + rf_name = re.sub(f"-{invalid_tag}", "", rf_name, flags=re.IGNORECASE) + rf_name = f"{rf_name}-NoGroup" - async def edit_name(self, meta): - stt_name = meta['name'] - return stt_name + return rf_name async def get_cat_id(self, category_name): category_id = { 'MOVIE': '1', - }.get(category_name, '0') + }.get(category_name, '0') return category_id async def get_type_id(self, type): @@ -125,10 +139,10 @@ async def get_type_id(self, type): 'REMUX': '40', 'WEBDL': '42', 'WEBRIP': '45', - #'FANRES': '6', + # 'FANRES': '6', 'ENCODE': '41', 'HDTV': '35', - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): @@ -139,29 +153,34 @@ async def get_res_id(self, resolution): # '1440p' : '3', '1080p': '3', '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') + '720p': '5', + '576p': '6', + '576i': '7', + '480p': '8', + '480i': '9' + }.get(resolution, '10') return resolution_id - - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): + disallowed_keywords = {'XXX', 'Erotic'} + if any(keyword in meta['keywords'] for keyword in disallowed_keywords): + console.print('[bold red]Erotic not allowed.') + meta['skipping'] = "RF" + return + if meta.get('category') == "TV": + console.print('[bold red]This site only ALLOWS Movies.') + meta['skipping'] = "RF" + return dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on RF...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } - if meta['category'] == 'TV': - console.print('[bold red]Unable to search site for TV as this site only ALLOWS Movies') - # params['name'] = f"{meta.get('season', '')}{meta.get('episode', '')}" if meta.get('edition', "") != "": params['name'] = params['name'] + meta['edition'] try: @@ -172,8 +191,47 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') await asyncio.sleep(5) return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/RTF.py b/src/trackers/RTF.py index d67dc9bff..4f1576c6a 100644 --- a/src/trackers/RTF.py +++ b/src/trackers/RTF.py @@ -5,10 +5,12 @@ import base64 import re import datetime +import httpx from src.trackers.COMMON import COMMON from src.console import console + class RTF(): """ Edit for Tracker: @@ -17,10 +19,6 @@ class RTF(): Set type/category IDs Upload """ - - ############################################################### - ######## EDIT ME ######## - ############################################################### def __init__(self, config): self.config = config self.tracker = 'RTF' @@ -31,11 +29,11 @@ def __init__(self, config): self.banned_groups = [] pass - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.forum_link) - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -44,21 +42,21 @@ async def upload(self, meta): screenshots = [] for image in meta['image_list']: - if image['raw_url'] != None: + if image['raw_url'] is not None: screenshots.append(image['raw_url']) json_data = { - 'name' : meta['name'], + 'name': meta['name'], # description does not work for some reason # 'description' : meta['overview'] + "\n\n" + desc + "\n\n" + "Uploaded by L4G Upload Assistant", 'description': "this is a description", # editing mediainfo so that instead of 1 080p its 1,080p as site mediainfo parser wont work other wise. 
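Reviewer note: the hunk below adds the missing raw-string prefix on the pattern (a bare `"\d"` in a plain string literal raises a `SyntaxWarning` on recent Python, 3.12+) and swaps `== None` for `is None`; the substitution itself is unchanged. As a quick illustration of what it does to MediaInfo's spaced digit grouping (sample text invented for the example):

```python
import re

sample = "Width : 1 920 pixels\nHeight : 1 080 pixels"
# Collapse the space MediaInfo uses as a thousands separator into a comma,
# e.g. "1 080" -> "1,080", so the site's MediaInfo parser accepts the dump.
print(re.sub(r"(\d+)\s+(\d+)", r"\1,\2", sample))
# Width : 1,920 pixels
# Height : 1,080 pixels
```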
- 'mediaInfo': re.sub("(\d+)\s+(\d+)", r"\1,\2", mi_dump) if bd_dump == None else f"{bd_dump}", + 'mediaInfo': re.sub(r"(\d+)\s+(\d+)", r"\1,\2", mi_dump) if bd_dump is None else f"{bd_dump}", "nfo": "", "url": "https://www.imdb.com/title/" + (meta['imdb_id'] if str(meta['imdb_id']).startswith("tt") else "tt" + meta['imdb_id']) + "/", # auto pulled from IMDB "descr": "This is short description", - "poster": meta["poster"] if meta["poster"] != None else "", + "poster": meta["poster"] if meta["poster"] is not None else "", "type": "401" if meta['category'] == 'MOVIE'else "402", "screenshots": screenshots, 'isAnonymous': self.config['TRACKERS'][self.tracker]["anon"], @@ -76,37 +74,42 @@ async def upload(self, meta): 'Authorization': self.config['TRACKERS'][self.tracker]['api_key'].strip(), } - - if datetime.date.today().year - meta['year'] <= 9: - console.print(f"[red]ERROR: Not uploading!\nMust be older than 10 Years as per rules") - return - - - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, json=json_data, headers=headers) try: console.print(response.json()) - except: + + t_id = response.json()['torrent']['id'] + await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://retroflix.club/browse/t/" + str(t_id)) + + except Exception: console.print("It may have uploaded, go check") return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(json_data) - - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): + disallowed_keywords = {'XXX', 'Erotic'} + if any(keyword in meta['keywords'] for keyword in disallowed_keywords): + console.print('[bold red]XXX not allowed.') + meta['skipping'] = "RTF" + return + if datetime.date.today().year - meta['year'] <= 9: + console.print("[red]ERROR: Not uploading!\nMust be older than 10 Years as per rules") + meta['skipping'] = "RTF" + return dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on RTF...") headers = { 'accept': 'application/json', 'Authorization': self.config['TRACKERS'][self.tracker]['api_key'].strip(), } params = { - 'includingDead' : '1' + 'includingDead': '1' } - # search is intentionally vague and just uses IMDB if available as many releases are not named properly on site. if meta['imdb_id'] != "0": params['imdbId'] = meta['imdb_id'] if str(meta['imdb_id']).startswith("tt") else "tt" + meta['imdb_id'] else: @@ -118,8 +121,76 @@ async def search_existing(self, meta): for each in response: result = [each][0]['name'] dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes \ No newline at end of file + return dupes + + # Tests if stored API key is valid. Site API key expires every week so a new one has to be generated. 
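Reviewer note: the two methods added next implement a validate-then-regenerate flow for the weekly RTF token. A minimal sketch of how a caller might wire this in before an upload; the `data.config` import path is an assumption inferred from the `config_path` used in `generate_new_api`, and `meta` here carries only the key these methods actually read:

```python
import asyncio

from data.config import config   # assumed layout: data/config.py exposing a `config` dict
from src.trackers.RTF import RTF

async def main():
    rtf = RTF(config=config)
    meta = {'base_dir': '.'}     # api_test/generate_new_api only read base_dir from meta
    # Probes /api/test; on a non-200 response this logs in with the configured
    # username/password, swaps the new token into the in-memory config, and
    # rewrites the RTF api_key entry in data/config.py in place.
    await rtf.api_test(meta)

asyncio.run(main())
```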
+ async def api_test(self, meta): + headers = { + 'accept': 'application/json', + 'Authorization': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + } + + response = requests.get('https://retroflix.club/api/test', headers=headers) + + if response.status_code != 200: + console.print('[bold red]Your API key is incorrect SO generating a new one') + await self.generate_new_api(meta) + else: + return + + async def generate_new_api(self, meta): + headers = { + 'accept': 'application/json', + } + + json_data = { + 'username': self.config['TRACKERS'][self.tracker]['username'], + 'password': self.config['TRACKERS'][self.tracker]['password'], + } + + base_dir = meta.get('base_dir', '.') + config_path = f"{base_dir}/data/config.py" + + try: + async with httpx.AsyncClient() as client: + response = await client.post('https://retroflix.club/api/login', headers=headers, json=json_data) + + if response.status_code == 201: + token = response.json().get("token") + if token: + console.print('[bold green]Saving and using New API key generated for this upload') + console.print(f'[bold yellow]{token}') + + # Update the in-memory config dictionary + self.config['TRACKERS'][self.tracker]['api_key'] = token + + # Now we update the config file on disk using utf-8 encoding + with open(config_path, 'r', encoding='utf-8') as file: + config_data = file.read() + + # Find the RTF tracker and replace the api_key value + new_config_data = re.sub( + r'("RTF":\s*{[^}]*"api_key":\s*\')[^\']*(\'[^\}]*})', # Match the api_key content only between single quotes + rf'\1{token}\2', # Replace only the content inside the quotes without adding extra backslashes + config_data + ) + + # Write the updated config back to the file + with open(config_path, 'w', encoding='utf-8') as file: + file.write(new_config_data) + + console.print(f'[bold green]API Key successfully saved to {config_path}') + else: + console.print('[bold red]API response does not contain a token.') + else: + console.print(f'[bold red]Error getting new API key: {response.status_code}, please check username and password in the config.') + + except httpx.RequestError as e: + console.print(f'[bold red]An error occurred while requesting the API: {str(e)}') + + except Exception as e: + console.print(f'[bold red]An unexpected error occurred: {str(e)}') diff --git a/src/trackers/SHRI.py b/src/trackers/SHRI.py new file mode 100644 index 000000000..ebf7f298f --- /dev/null +++ b/src/trackers/SHRI.py @@ -0,0 +1,219 @@ +# -*- coding: utf-8 -*- +# import discord +import asyncio +import requests +from str2bool import str2bool +import platform +import bencodepy +import os +import glob + +from src.trackers.COMMON import COMMON +from src.console import console + + +class SHRI(): + """ + Edit for Tracker: + Edit BASE.torrent with announce and source + Check for duplicates + Set type/category IDs + Upload + """ + + def __init__(self, config): + self.config = config + self.tracker = 'SHRI' + self.source_flag = 'Shareisland' + self.upload_url = 'https://shareisland.org/api/torrents/upload' + self.search_url = 'https://shareisland.org/api/torrents/filter' + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" + self.banned_groups = [""] + pass + + async def get_cat_id(self, category_name): + category_id = { + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '0') + return category_id + + async def get_type_id(self, type): + type_id = { + 'DISC': '26', + 'REMUX': '7', + 'WEBDL': '27', + 'WEBRIP': '27', + 
'HDTV': '6', + 'ENCODE': '15' + }.get(type, '0') + return type_id + + async def get_res_id(self, resolution): + resolution_id = { + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', + '1080p': '3', + '1080i': '4', + '720p': '5', + '576p': '6', + '576i': '7', + '480p': '8', + '480i': '9' + }.get(resolution, '10') + return resolution_id + + async def upload(self, meta, disctype): + common = COMMON(config=self.config) + await common.edit_torrent(meta, self.tracker, self.source_flag) + cat_id = await self.get_cat_id(meta['category']) + type_id = await self.get_type_id(meta['type']) + resolution_id = await self.get_res_id(meta['resolution']) + await common.unit3d_edit_desc(meta, self.tracker, self.signature) + region_id = await common.unit3d_region_ids(meta.get('region')) + distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + anon = 0 + else: + anon = 1 + + if meta['bdinfo'] is not None: + mi_dump = None + bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + else: + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + bd_dump = None + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') + files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") + data = { + 'name': meta['name'], + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, + } + # Internal + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: + if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + data['internal'] = 1 + + if region_id != 0: + data['region_id'] = region_id + if distributor_id != 0: + data['distributor_id'] = distributor_id + if meta.get('category') == "TV": + data['season_number'] = meta.get('season_int', '0') + data['episode_number'] = meta.get('episode_int', '0') + headers = { + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' + } + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + } + + if meta['debug'] is False: + response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + try: + console.print(response.json()) + except Exception: + console.print("It may have uploaded, go check") + return + else: + console.print("[cyan]Request Data:") + console.print(data) + open_torrent.close() + + async def search_existing(self, meta, disctype): + dupes = [] + 
console.print("[yellow]Searching for existing torrents on SHRI...") + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" + } + if meta.get('edition', "") != "": + params['name'] = params['name'] + f" {meta['edition']}" + try: + response = requests.get(url=self.search_url, params=params) + response = response.json() + for each in response['data']: + result = [each][0]['attributes']['name'] + # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() + # if difference >= 0.05: + dupes.append(result) + except Exception: + console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + await asyncio.sleep(5) + + return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/SN.py b/src/trackers/SN.py index 54f13d64d..5f10c7eb4 100644 --- a/src/trackers/SN.py +++ b/src/trackers/SN.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- import requests import asyncio -import traceback from src.trackers.COMMON import COMMON from src.console import console @@ -15,7 +14,6 @@ class SN(): Set type/category IDs Upload """ - def __init__(self, config): self.config = config self.tracker = 'SN' @@ -31,19 +29,19 @@ async def get_type_id(self, type): 'BluRay': '3', 'Web': '1', # boxset is 4 - #'NA': '4', + # 'NA': '4', 'DVD': '2' }.get(type, '0') return type_id - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - #await common.unit3d_edit_desc(meta, self.tracker, self.forum_link) + # await common.unit3d_edit_desc(meta, self.tracker, self.forum_link) await self.edit_desc(meta) cat_id = "" sub_cat_id = "" - #cat_id = await self.get_cat_id(meta) + # cat_id = await self.get_cat_id(meta) if meta['category'] == 'MOVIE': cat_id = 1 # sub cat is source so using source to get @@ -56,14 +54,13 @@ async def upload(self, meta): sub_cat_id = 5 # todo need to do a check for docs and add as subcat - - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', 
encoding='utf-8').read() else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') as f: tfile = f.read() @@ -90,7 +87,7 @@ async def upload(self, meta): } - if meta['debug'] == False: + if meta['debug'] is False: response = requests.request("POST", url=self.upload_url, data=data, files=files) try: @@ -99,19 +96,18 @@ async def upload(self, meta): else: console.print("[red]Did not upload successfully") console.print(response.json()) - except: + except Exception: console.print("[red]Error! It may have uploaded, go check") console.print(data) console.print_exception() return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) - async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as desc: + base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as desc: desc.write(base) images = meta['image_list'] if len(images) > 0: @@ -125,13 +121,12 @@ async def edit_desc(self, meta): desc.close() return - - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on SN...") params = { - 'api_key' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'api_key': self.config['TRACKERS'][self.tracker]['api_key'].strip() } # using title if IMDB id does not exist to search @@ -141,7 +136,7 @@ async def search_existing(self, meta): else: params['filter'] = meta['title'] else: - #using IMDB_id to search if it exists. + # using IMDB_id to search if it exists. if meta['category'] == 'TV': params['media_ref'] = f"tt{meta['imdb_id']}" params['filter'] = f"{meta.get('season', '')}{meta.get('episode', '')}" + " " + meta['resolution'] @@ -155,7 +150,7 @@ async def search_existing(self, meta): for i in response['data']: result = i['name'] dupes.append(result) - except: + except Exception: console.print('[red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') await asyncio.sleep(5) diff --git a/src/trackers/SPD.py b/src/trackers/SPD.py new file mode 100644 index 000000000..6dfa2956b --- /dev/null +++ b/src/trackers/SPD.py @@ -0,0 +1,153 @@ +# -*- coding: utf-8 -*- +# import discord +import asyncio +from torf import Torrent +import requests +from src.console import console +from pprint import pprint +import base64 +import shutil +import os +import traceback + +from src.trackers.COMMON import COMMON + + +# from pprint import pprint + +class SPD(): + + def __init__(self, config): + self.url = "https://speedapp.io" + self.config = config + self.tracker = 'SPD' + self.source_flag = 'speedapp.io' + self.search_url = 'https://speedapp.io/api/torrent' + self.upload_url = 'https://speedapp.io/api/upload' + self.forum_link = 'https://speedapp.io/support/wiki/rules' + self.banned_groups = [''] + pass + + async def upload(self, meta, disctype): + common = COMMON(config=self.config) + await common.edit_torrent(meta, self.tracker, self.source_flag) + type_id = "" + if meta['anime']: + type_id = '3' + elif meta['category'] == 'TV': + if meta['tv_pack']: + type_id = '41' + elif meta['sd'] and not meta['tv_pack']: + type_id = '45' + # must be hd + else: + type_id = '43' + else: + if meta['type'] != "DISC" and meta['resolution'] == "2160p": + type_id = '61' + else: + type_id = { + 'DISC': '17', + 'REMUX': '8', + 'WEBDL': '8', + 'WEBRIP': '8', + 'HDTV': '8', + 'SD': '10', + 'ENCODE': '8' + }.get(type, '0') + + if meta['bdinfo'] is not None: + mi_dump = None + bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + else: + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read() + bd_dump = None + screenshots = [] + if len(meta['image_list']) != 0: + for image in meta['image_list']: + screenshots.append(image['raw_url']) + data = { + 'name': meta['name'].replace("'", '').replace(': ', '.').replace(':', '.').replace(' ', '.').replace(' ', '.').replace('DD+', 'DDP'), + 'screenshots': screenshots, + 'release_info': f"[center][url={self.forum_link}]Please seed[/url][/center]", + 'media_info': mi_dump, + 'bd_info': bd_dump, + 'type': type_id, + 'url': f"https://www.imdb.com/title/tt{meta['imdb_id']}", + 'shortDescription': meta['genres'], + 'keywords': meta['keywords'], + 'releaseInfo': self.forum_link + } + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') as binary_file: + binary_file_data = binary_file.read() + base64_encoded_data = base64.b64encode(binary_file_data) + base64_message = base64_encoded_data.decode('utf-8') + data['file'] = base64_message + + headers = {'Authorization': 'Bearer ' + self.config['TRACKERS'][self.tracker]['api_key'].strip()} + + if meta['debug'] is False: + response = requests.request("POST", url=self.upload_url, json=data, headers=headers) + try: + print(response.json()) + # response = {'status': True, 'error': False, 'downloadUrl': '/api/torrent/383435/download', 'torrent': {'id': 383435, 'name': 'name-with-full-stops', 'slug': 'name-with-dashs', 'category_id': 3}} + # downloading the torrent from site as it adds a tonne of different trackers and the source is different all the time. + try: + # torrent may not dl and may not provide error if machine is under load or network connection usage high. 
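Reviewer note: SPD hands back its own re-announced .torrent, so the code below streams that file over the top of the locally built one before torf stamps a comment URL into it. The same download pattern in isolation, with placeholder URL and token rather than the real response values:

```python
import shutil
import requests

url = "https://speedapp.io/api/torrent/383435/download"  # placeholder torrent id
headers = {"Authorization": "Bearer <api_key>"}          # placeholder token

# Stream the body straight to disk instead of buffering it in memory;
# r.raw is the undecoded underlying stream, which shutil can copy from.
with requests.get(url, stream=True, headers=headers) as r:
    r.raise_for_status()  # the patch relies on its broad except/traceback instead
    with open("[SPD]release.torrent", "wb") as f:
        shutil.copyfileobj(r.raw, f)
```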
+ with requests.get(url=self.url + response.json()['downloadUrl'], stream=True, headers=headers) as r: + # replacing L4g/torf created torrent so it will be added to the client. + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", + 'wb') as f: + shutil.copyfileobj(r.raw, f) + # adding as comment link to torrent + if os.path.exists(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent"): + new_torrent = Torrent.read(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent") + new_torrent.metainfo['comment'] = f"{self.url}/browse/{response.json()['torrent']['id']}" + Torrent.copy(new_torrent).write(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", overwrite=True) + except Exception: + console.print(traceback.print_exc()) + console.print("[red]Unable to Download torrent, try manually") + except Exception: + console.print(traceback.print_exc()) + console.print("[yellow]Unable to Download torrent, try manually") + return + else: + console.print("[cyan]Request Data:") + pprint(data) + + async def get_cat_id(self, category_name): + category_id = { + 'MOVIE': '1', + 'TV': '2', + 'FANRES': '3' + }.get(category_name, '0') + return category_id + + async def search_existing(self, meta, disctype): + dupes = [] + console.print("[yellow]Searching for existing torrents on SPD...") + headers = { + 'accept': 'application/json', + 'Authorization': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + } + + params = { + 'includingDead': '1' + } + + if meta['imdb_id'] != "0": + params['imdbId'] = meta['imdb_id'] if str(meta['imdb_id']).startswith("tt") else "tt" + meta['imdb_id'] + else: + params['search'] = meta['title'].replace(':', '').replace("'", '').replace(",", '') + + try: + response = requests.get(url=self.search_url, params=params, headers=headers) + response = response.json() + for each in response: + result = [each][0]['name'] + dupes.append(result) + except Exception: + console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') + await asyncio.sleep(5) + + return dupes diff --git a/src/trackers/STC.py b/src/trackers/STC.py index 224e89889..ff72fc63a 100644 --- a/src/trackers/STC.py +++ b/src/trackers/STC.py @@ -1,14 +1,15 @@ # -*- coding: utf-8 -*- import asyncio import requests -from difflib import SequenceMatcher -import distutils.util -import json -import os +from str2bool import str2bool import platform +import bencodepy +import os +import glob from src.trackers.COMMON import COMMON -from src.console import console +from src.console import console + class STC(): """ @@ -27,8 +28,8 @@ def __init__(self, config): self.signature = '\n[center][url=https://skipthecommericals.xyz/pages/1]Please Seed[/url][/center]' self.banned_groups = [""] pass - - async def upload(self, meta): + + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.signature) @@ -36,94 +37,101 @@ async def upload(self, meta): type_id = await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('sd', 0), meta.get('category', "")) resolution_id = await self.get_res_id(meta['resolution']) stc_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") data = { - 'name' : stc_name, - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': stc_name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': 
int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - + if meta.get('category') == "TV": data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - - if meta['debug'] == False: + + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: - + console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") open_torrent.close() - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - async def edit_name(self, meta): stc_name = meta.get('name') return stc_name async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '0') return category_id async def get_type_id(self, type, tv_pack, sd, category): type_id = { - 'DISC': '1', + 'DISC': '1', 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', + 'WEBDL': '4', + 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '3' - }.get(type, '0') + }.get(type, '0') if tv_pack == 1: if sd == 1: # Season SD @@ -146,37 +154,30 @@ async def get_type_id(self, type, tv_pack, sd, category): async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', - '4320p': '1', - '2160p': '2', - '1440p' : '3', + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', '1080p': '3', - '1080i':'4', - '720p': '5', - '576p': '6', + '1080i': '4', + '720p': '5', + '576p': '6', '576i': '7', - '480p': '8', + '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id - - - - - - - - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + console.print("[yellow]Searching for existing torrents on STC...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('sd', 0), meta.get('category', "")), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('sd', 0), meta.get('category', "")), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta['category'] == 'TV': params['name'] = f"{meta.get('season', '')}{meta.get('episode', '')}" @@ -188,8 +189,47 @@ async def search_existing(self, meta): for each in response['data']: result = [each][0]['attributes']['name'] 
dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes \ No newline at end of file + return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/STT.py b/src/trackers/STT.py index 0a72f7eab..076a0f505 100644 --- a/src/trackers/STT.py +++ b/src/trackers/STT.py @@ -2,15 +2,16 @@ # import discord import asyncio import requests -from difflib import SequenceMatcher -import distutils.util -import json -import os +from str2bool import str2bool import platform +import bencodepy +import os +import glob from src.trackers.COMMON import COMMON from src.console import console + class STT(): """ Edit for Tracker: @@ -28,8 +29,8 @@ def __init__(self, config): self.signature = '\n[center][url=https://skipthetrailers.xyz/pages/1]Please Seed[/url][/center]' self.banned_groups = [""] pass - - async def upload(self, meta): + + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.signature) @@ -37,70 +38,77 @@ async def upload(self, meta): type_id = await self.get_type_id(meta['type']) resolution_id = await self.get_res_id(meta['resolution']) stt_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path 
= os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") data = { - 'name' : stt_name, - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': stt_name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - + if meta.get('category') == "TV": console.print('[bold red]This site only ALLOWS Movies.') - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - async def edit_name(self, meta): stt_name = meta['name'] return stt_name @@ -108,47 +116,46 @@ async def edit_name(self, meta): async def get_cat_id(self, category_name): category_id = { 'MOVIE': '1', - }.get(category_name, '0') + }.get(category_name, '0') return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', + 'DISC': '1', 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', + 'WEBDL': '4', + 'WEBRIP': '5', 'FANRES': '6', 'ENCODE': '3' - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { - # '8640p':'10', - '4320p': '1', - '2160p': '2', + # '8640p':'10', + '4320p': '1', + '2160p': '2', # '1440p' : '3', '1080p': '3', '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '11') + '720p': '5', + '576p': '6', + '576i': '7', + '480p': '8', + '480i': '9' + }.get(resolution, '11') return resolution_id - - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] - console.print("[yellow]Searching for existing torrents on site...") + 
console.print("[yellow]Searching for existing torrents on STT...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta['category'] == 'TV': console.print('[bold red]Unable to search site for TV as this site only ALLOWS Movies.') @@ -163,8 +170,47 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes \ No newline at end of file + return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/TDC.py b/src/trackers/TDC.py deleted file mode 100644 index e201bcb83..000000000 --- a/src/trackers/TDC.py +++ /dev/null @@ -1,181 +0,0 @@ -# -*- coding: utf-8 -*- -# import discord -import asyncio -import requests -import distutils.util -import os - -from src.trackers.COMMON import COMMON -from src.console import console - -class TDC(): - """ - Edit for Tracker: - Edit BASE.torrent with announce and source - Check for duplicates - Set type/category IDs - Upload - """ - - ############################################################### - ######## EDIT ME ######## - ############################################################### - def __init__(self, config): - self.config = config - self.tracker = 'TDC' - self.source_flag = 'TDC' - self.upload_url = 'https://thedarkcommunity.cc/api/torrents/upload' - self.search_url = 'https://thedarkcommunity.cc/api/torrents/filter' - self.signature = "Created by L4G's Upload Assistant" - self.banned_groups = [""] - pass - - async def get_cat_id(self, category_name): - category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') - return category_id - - async def get_type_id(self, type): - 
type_id = { - 'DISC': '1', - 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', - 'HDTV': '6', - 'ENCODE': '3' - }.get(type, '0') - return type_id - - async def get_res_id(self, resolution): - resolution_id = { - '8640p':'10', - '4320p': '1', - '2160p': '2', - '1440p' : '3', - '1080p': '3', - '1080i':'4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') - return resolution_id - - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### - - async def upload(self, meta): - common = COMMON(config=self.config) - await common.edit_torrent(meta, self.tracker, self.source_flag) - cat_id = await self.get_cat_id(meta['category']) - type_id = await self.get_type_id(meta['type']) - resolution_id = await self.get_res_id(meta['resolution']) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) - region_id = await common.unit3d_region_ids(meta.get('region')) - distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: - anon = 0 - else: - anon = 1 - - if meta['bdinfo'] != None: - mi_dump = None - bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() - else: - mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() - bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() - open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') - files = {'torrent': open_torrent} - data = { - 'name' : meta['name'], - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, - } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: - if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): - data['internal'] = 1 - - if region_id != 0: - data['region_id'] = region_id - if distributor_id != 0: - data['distributor_id'] = distributor_id - if meta.get('category') == "TV": - data['season_number'] = meta.get('season_int', '0') - data['episode_number'] = meta.get('episode_int', '0') - headers = { - 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:53.0) Gecko/20100101 Firefox/53.0' - } - params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() - } - - if meta['debug'] == False: - response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) - try: - console.print(response.json()) - except: - console.print("It may have uploaded, go check") - return - else: - console.print(f"[cyan]Request Data:") - console.print(data) - open_torrent.close() - - - - - - async def search_existing(self, meta): - dupes = [] - console.print("[yellow]Searching for 
existing torrents on site...") - params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" - } - if meta['category'] == 'TV': - params['name'] = params['name'] + f"{meta.get('season', '')}{meta.get('episode', '')}" - if meta.get('edition', "") != "": - params['name'] = params['name'] + meta['edition'] - - try: - response = requests.get(url=self.search_url, params=params) - response = response.json() - for each in response['data']: - result = [each][0]['attributes']['name'] - # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() - # if difference >= 0.05: - dupes.append(result) - except: - console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') - await asyncio.sleep(5) - - return dupes diff --git a/src/trackers/THR.py b/src/trackers/THR.py index 3080ae581..f36556978 100644 --- a/src/trackers/THR.py +++ b/src/trackers/THR.py @@ -4,15 +4,13 @@ import requests import json import glob -from difflib import SequenceMatcher import cli_ui -import base64 import os import re import platform from unidecode import unidecode -from src.console import console +from src.console import console class THR(): @@ -29,19 +27,19 @@ def __init__(self, config): self.password = config['TRACKERS']['THR'].get('password') self.banned_groups = [""] pass - - async def upload(self, session, meta): + + async def upload(self, session, meta, disctype): await self.edit_torrent(meta) cat_id = await self.get_cat_id(meta) subs = self.get_subtitles(meta) - pronfo = await self.edit_desc(meta) + pronfo = await self.edit_desc(meta) # noqa #F841 thr_name = unidecode(meta['name'].replace('DD+', 'DDP')) # Confirm the correct naming order for FL cli_ui.info(f"THR name: {thr_name}") - if meta.get('unattended', False) == False: + if meta.get('unattended', False) is False: thr_confirm = cli_ui.ask_yes_no("Correct?", default=False) - if thr_confirm != True: + if thr_confirm is not True: thr_name_manually = cli_ui.ask_string("Please enter a proper name", default="") if thr_name_manually == "": console.print('No proper name given') @@ -49,8 +47,7 @@ async def upload(self, session, meta): return else: thr_name = thr_name_manually - torrent_name = re.sub("[^0-9a-zA-Z. '\-\[\]]+", " ", thr_name) - + torrent_name = re.sub(r"[^0-9a-zA-Z. 
'\-\[\]]+", " ", thr_name) if meta.get('is_disc', '') == 'BDMV': mi_file = None @@ -62,42 +59,41 @@ async def upload(self, session, meta): f.close() # bd_file = None - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[THR]DESCRIPTION.txt", 'r') as f: + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[THR]DESCRIPTION.txt", 'r', encoding='utf-8') as f: desc = f.read() f.close() - + torrent_path = os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/[THR]{meta['clean_name']}.torrent") with open(torrent_path, 'rb') as f: tfile = f.read() f.close() - - #Upload Form + + # Upload Form url = 'https://www.torrenthr.org/takeupload.php' files = { - 'tfile' : (f'{torrent_name}.torrent', tfile) + 'tfile': (f'{torrent_name}.torrent', tfile) } payload = { - 'name' : thr_name, - 'descr' : desc, - 'type' : cat_id, - 'url' : f"https://www.imdb.com/title/tt{meta.get('imdb_id').replace('tt', '')}/", - 'tube' : meta.get('youtube', '') + 'name': thr_name, + 'descr': desc, + 'type': cat_id, + 'url': f"https://www.imdb.com/title/tt{meta.get('imdb_id').replace('tt', '')}/", + 'tube': meta.get('youtube', '') } headers = { - 'User-Agent' : f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } - #If pronfo fails, put mediainfo into THR parser + # If pronfo fails, put mediainfo into THR parser if meta.get('is_disc', '') != 'BDMV': files['nfo'] = ("MEDIAINFO.txt", mi_file) if subs != []: payload['subs[]'] = tuple(subs) - - if meta['debug'] == False: + if meta['debug'] is False: thr_upload_prompt = True else: thr_upload_prompt = cli_ui.ask_yes_no("send to takeupload.php?", default=False) - if thr_upload_prompt == True: + if thr_upload_prompt is True: await asyncio.sleep(0.5) response = session.post(url=url, files=files, data=payload, headers=headers) try: @@ -105,18 +101,16 @@ async def upload(self, session, meta): console.print(response.text) if response.url.endswith('uploaded=1'): console.print(f'[green]Successfully Uploaded at: {response.url}') - #Check if actually uploaded - except: + # Check if actually uploaded + except Exception: if meta['debug']: console.print(response.text) console.print("It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(payload) - - - + async def get_cat_id(self, meta): if meta['category'] == "MOVIE": if meta.get('is_disc') == "BMDV": @@ -133,7 +127,7 @@ async def get_cat_id(self, meta): cat = '7' else: cat = '34' - elif meta.get('anime') != False: + elif meta.get('anime') is not False: cat = '31' return cat @@ -156,30 +150,26 @@ def get_subtitles(self, meta): if sub_langs != []: subs = [] sub_lang_map = { - 'hr' : 1, 'en' : 2, 'bs' : 3, 'sr' : 4, 'sl' : 5, - 'Croatian' : 1, 'English' : 2, 'Bosnian' : 3, 'Serbian' : 4, 'Slovenian' : 5 + 'hr': 1, 'en': 2, 'bs': 3, 'sr': 4, 'sl': 5, + 'Croatian': 1, 'English': 2, 'Bosnian': 3, 'Serbian': 4, 'Slovenian': 5 } for sub in sub_langs: language = sub_lang_map.get(sub) - if language != None: + if language is not None: subs.append(language) return subs - - - - async def edit_torrent(self, meta): if os.path.exists(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent"): THR_torrent = Torrent.read(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") THR_torrent.metainfo['announce'] = self.config['TRACKERS']['THR']['announce_url'] THR_torrent.metainfo['info']['source'] = "[https://www.torrenthr.org] TorrentHR.org" 
Torrent.copy(THR_torrent).write(f"{meta['base_dir']}/tmp/{meta['uuid']}/[THR]{meta['clean_name']}.torrent", overwrite=True) - return - + return + async def edit_desc(self, meta): pronfo = False - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() + base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[THR]DESCRIPTION.txt", 'w', encoding='utf-8') as desc: if meta['tag'] == "": tag = "" @@ -213,11 +203,11 @@ async def edit_desc(self, meta): for image in image_glob: url = "https://img2.torrenthr.org/api/1/upload" data = { - 'key' : self.config['TRACKERS']['THR'].get('img_api'), + 'key': self.config['TRACKERS']['THR'].get('img_api'), # 'source' : base64.b64encode(open(image, "rb").read()).decode('utf8') } - files = {'source' : open(image, 'rb')} - response = requests.post(url, data = data, files=files) + files = {'source': open(image, 'rb')} + response = requests.post(url, data=data, files=files) try: response = response.json() # med_url = response['image']['medium']['url'] @@ -239,22 +229,22 @@ async def edit_desc(self, meta): # ProNFO pronfo_url = f"https://www.pronfo.com/api/v1/access/upload/{self.config['TRACKERS']['THR'].get('pronfo_api_key', '')}" data = { - 'content' : open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r').read(), - 'theme' : self.config['TRACKERS']['THR'].get('pronfo_theme', 'gray'), - 'rapi' : self.config['TRACKERS']['THR'].get('pronfo_rapi_id') + 'content': open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r').read(), + 'theme': self.config['TRACKERS']['THR'].get('pronfo_theme', 'gray'), + 'rapi': self.config['TRACKERS']['THR'].get('pronfo_rapi_id') } response = requests.post(pronfo_url, data=data) try: response = response.json() - if response.get('error', True) == False: + if response.get('error', True) is False: mi_img = response.get('url') desc.write(f"\n[img]{mi_img}[/img]\n") pronfo = True - except: + except Exception: console.print('[bold red]Error parsing pronfo response, using THR parser instead') if meta['debug']: console.print(f"[red]{response}") - console.print(response.text) + console.print(response.text) for each in image_list[:int(meta['screens'])]: desc.write(f"\n[img]{each}[/img]\n") @@ -267,10 +257,7 @@ async def edit_desc(self, meta): desc.close() return pronfo - - - - def search_existing(self, session, imdb_id): + def search_existing(self, session, imdb_id, disctype): from bs4 import BeautifulSoup imdb_id = imdb_id.replace('tt', '') search_url = f"https://www.torrenthr.org/browse.php?search={imdb_id}&blah=2&incldead=1" @@ -288,12 +275,12 @@ def search_existing(self, session, imdb_id): def login(self, session): url = 'https://www.torrenthr.org/takelogin.php' payload = { - 'username' : self.username, - 'password' : self.password, - 'ssl' : 'yes' + 'username': self.username, + 'password': self.password, + 'ssl': 'yes' } headers = { - 'User-Agent' : f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } resp = session.post(url, headers=headers, data=payload) if resp.url == "https://www.torrenthr.org/index.php": diff --git a/src/trackers/TIK.py b/src/trackers/TIK.py new file mode 100644 index 000000000..788a2164c --- /dev/null +++ b/src/trackers/TIK.py @@ -0,0 +1,635 @@ +# -*- coding: utf-8 -*- +# import discord +import asyncio +import requests +import os +import re +import platform +import sys +import cli_ui 
+import urllib.request +import click +from str2bool import str2bool +import bencodepy + +from src.trackers.COMMON import COMMON +from src.console import console + + +class TIK(): + """ + Edit for Tracker: + Edit BASE.torrent with announce and source + Check for duplicates + Set type/category IDs + Upload + """ + + def __init__(self, config): + self.config = config + self.tracker = 'TIK' + self.source_flag = 'TIK' + self.search_url = 'https://cinematik.net/api/torrents/filter' + self.upload_url = 'https://cinematik.net/api/torrents/upload' + self.torrent_url = 'https://cinematik.net/api/torrents/' + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" + self.banned_groups = [""] + pass + + async def upload(self, meta, disctype): + common = COMMON(config=self.config) + await common.edit_torrent(meta, self.tracker, self.source_flag) + await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True) + cat_id = await self.get_cat_id(meta['category'], meta.get('foreign'), meta.get('opera'), meta.get('asian')) + type_id = await self.get_type_id(disctype) + resolution_id = await self.get_res_id(meta['resolution']) + modq = await self.get_flag(meta, 'modq') + region_id = await common.unit3d_region_ids(meta.get('region')) + distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + anon = 0 + else: + anon = 1 + + if not meta['is_disc']: + console.print("[red]Only disc-based content allowed at TIK") + return + elif meta['bdinfo'] is not None: + mi_dump = None + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8') as bd_file: + bd_dump = bd_file.read() + else: + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8') as mi_file: + mi_dump = mi_file.read() + bd_dump = None + + if meta.get('desclink'): + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", "r", encoding='utf-8').read() + print(f"Custom Description Link: {desc}") + + elif meta.get('descfile'): + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", "r", encoding='utf-8').read() + print(f"Custom Description File Path: {desc}") + + else: + await self.edit_desc(meta) + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", "r", encoding='utf-8').read() + + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') + files = {'torrent': open_torrent} + data = { + 'name': await self.get_name(meta, disctype), + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'region_id': region_id, + 'distributor_id': distributor_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': 0, + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, + 'mod_queue_opt_in': modq, + } + # Internal + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: + if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + 
data['internal'] = 1
+        if self.config['TRACKERS'][self.tracker].get('personal', False) is True:
+            if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('personal_group', [])):
+                data['personal_release'] = 1
+
+        if region_id != 0:
+            data['region_id'] = region_id
+        if distributor_id != 0:
+            data['distributor_id'] = distributor_id
+        if meta.get('category') == "TV":
+            data['season_number'] = meta.get('season_int', '0')
+            data['episode_number'] = meta.get('episode_int', '0')
+        headers = {
+            'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})'
+        }
+        params = {
+            'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip()
+        }
+
+        if meta['debug'] is False:
+            response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params)
+            console.print(data)
+            console.print(f"TIK response: {response}")
+            try:
+                console.print(response.json())
+            except Exception:
+                console.print("It may have uploaded, go check")
+                return
+        else:
+            console.print("[cyan]Request Data:")
+            console.print(data)
+        open_torrent.close()
+
+    def get_basename(self, meta):
+        path = next(iter(meta['filelist']), meta['path'])
+        return os.path.basename(path)
+
+    async def get_name(self, meta, disctype):
+        disctype = meta.get('disctype', None)
+        basename = self.get_basename(meta)
+        type = meta.get('type', "")
+        title = meta.get('title', "").replace('AKA', '/').strip()
+        alt_title = meta.get('aka', "").replace('AKA', '/').strip()
+        year = meta.get('year', "")
+        resolution = meta.get('resolution', "")
+        season = meta.get('season', "")
+        repack = meta.get('repack', "")
+        if repack.strip():
+            repack = f"[{repack}]"
+        three_d = meta.get('3D', "")
+        three_d_tag = f"[{three_d}]" if three_d else ""
+        tag = meta.get('tag', "").replace("-", "- ")
+        if tag == "":
+            tag = "- NOGRP"
+        source = meta.get('source', "")
+        uhd = meta.get('uhd', "")  # noqa: F841
+        hdr = meta.get('hdr', "")
+        if not hdr.strip():
+            hdr = "SDR"
+        distributor = meta.get('distributor', "")  # noqa: F841
+        video_codec = meta.get('video_codec', "")
+        video_encode = meta.get('video_encode', "").replace(".", "")
+        if 'x265' in basename:
+            video_encode = video_encode.replace('H', 'x')
+        dvd_size = meta.get('dvd_size', "")
+        search_year = meta.get('search_year', "")
+        if not str(search_year).strip():
+            search_year = year
+
+        category_name = meta.get('category', "")
+        foreign = meta.get('foreign')
+        opera = meta.get('opera')
+        asian = meta.get('asian')
+        meta['category_id'] = await self.get_cat_id(category_name, foreign, opera, asian)
+
+        name = ""
+        alt_title_part = f" / {alt_title}" if alt_title else ""
+        if meta['category_id'] in ("1", "3", "5", "6"):
+            if meta['is_disc'] == 'BDMV':
+                name = f"{title}{alt_title_part} ({year}) {disctype} {resolution} {video_codec} {three_d_tag}"
+            elif meta['is_disc'] == 'DVD':
+                name = f"{title}{alt_title_part} ({year}) {source} {dvd_size}"
+        elif meta['category'] == "TV":  # TV SPECIFIC
+            if type == "DISC":  # Disc
+                if meta['is_disc'] == 'BDMV':
+                    name = f"{title}{alt_title_part} ({search_year}) {season} {disctype} {resolution} {video_codec}"
+                if meta['is_disc'] == 'DVD':
+                    name = f"{title}{alt_title_part} ({search_year}) {season} {source} {dvd_size}"
+
+        # User confirmation
+        console.print(f"[yellow]Final generated name: [green]{name}")
+        confirmation = cli_ui.ask_yes_no("Do you want to use this name?", default=False)  # Default is 'No'
+
+        if confirmation:
+            return name
+        else:
+            console.print("[red]Sorry, this seems to be an edge case, please report 
at (insert_link)") + sys.exit(1) + + async def get_cat_id(self, category_name, foreign, opera, asian): + category_id = { + 'FILM': '1', + 'TV': '2', + 'Foreign Film': '3', + 'Foreign TV': '4', + 'Opera & Musical': '5', + 'Asian Film': '6', + }.get(category_name, '0') + + if category_name == 'MOVIE': + if foreign: + category_id = '3' + elif opera: + category_id = '5' + elif asian: + category_id = '6' + else: + category_id = '1' + elif category_name == 'TV': + if foreign: + category_id = '4' + elif opera: + category_id = '5' + else: + category_id = '2' + + return category_id + + async def get_type_id(self, disctype): + type_id_map = { + 'Custom': '1', + 'BD100': '3', + 'BD66': '4', + 'BD50': '5', + 'BD25': '6', + 'NTSC DVD9': '7', + 'NTSC DVD5': '8', + 'PAL DVD9': '9', + 'PAL DVD5': '10', + '3D': '11' + } + + if not disctype: + console.print("[red]You must specify a --disctype") + return None + + disctype_value = disctype[0] if isinstance(disctype, list) else disctype + type_id = type_id_map.get(disctype_value, '1') # '1' is the default fallback + + return type_id + + async def get_res_id(self, resolution): + resolution_id = { + 'Other': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', + '1080p': '3', + '1080i': '4', + '720p': '5', + '576p': '6', + '576i': '7', + '480p': '8', + '480i': '9' + }.get(resolution, '10') + return resolution_id + + async def get_flag(self, meta, flag_name): + config_flag = self.config['TRACKERS'][self.tracker].get(flag_name) + if config_flag is not None: + return 1 if config_flag else 0 + + return 1 if meta.get(flag_name, False) else 0 + + async def edit_desc(self, meta): + from src.prep import Prep + prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) + + # Fetch additional IMDb metadata + meta_imdb = await prep.imdb_other_meta(meta) # noqa #F841 + + if len(meta.get('discs', [])) > 0: + summary = meta['discs'][0].get('summary', '') + else: + summary = None + + # Proceed with matching Total Bitrate if the summary exists + if summary: + match = re.search(r"Total Bitrate: ([\d.]+ Mbps)", summary) + if match: + total_bitrate = match.group(1) + else: + total_bitrate = "Unknown" + else: + total_bitrate = "Unknown" + + country_name = self.country_code_to_name(meta.get('region')) + + # Rehost poster if tmdb_poster is available + poster_url = f"https://image.tmdb.org/t/p/original{meta.get('tmdb_poster', '')}" + + # Define the paths for both jpg and png poster images + poster_jpg_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/poster.jpg" + poster_png_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/poster.png" + + # Check if either poster.jpg or poster.png already exists + if os.path.exists(poster_jpg_path): + poster_path = poster_jpg_path + console.print("[green]Poster already exists as poster.jpg, skipping download.[/green]") + elif os.path.exists(poster_png_path): + poster_path = poster_png_path + console.print("[green]Poster already exists as poster.png, skipping download.[/green]") + else: + # No poster file exists, download the poster image + poster_path = poster_jpg_path # Default to saving as poster.jpg + try: + urllib.request.urlretrieve(poster_url, poster_path) + console.print(f"[green]Poster downloaded to {poster_path}[/green]") + except Exception as e: + console.print(f"[red]Error downloading poster: {e}[/red]") + + # Upload the downloaded or existing poster image once + if os.path.exists(poster_path): + try: + console.print("Uploading standard poster to image host....") + new_poster_url, _ = prep.upload_screens(meta, 1, 1, 0, 
1, [poster_path], {})
+
+                # Ensure that the new poster URL is assigned only once
+                if len(new_poster_url) > 0:
+                    poster_url = new_poster_url[0]['raw_url']
+            except Exception as e:
+                console.print(f"[red]Error uploading poster: {e}[/red]")
+        else:
+            console.print("[red]Poster file not found, cannot upload.[/red]")
+
+        # Generate the description text
+        desc_text = []
+
+        images = meta['image_list']
+        discs = meta.get('discs', [])  # noqa: F841
+
+        if len(images) >= 6:
+            image_link_1 = images[0]['raw_url']
+            image_link_2 = images[1]['raw_url']
+            image_link_3 = images[2]['raw_url']
+            image_link_4 = images[3]['raw_url']
+            image_link_5 = images[4]['raw_url']
+            image_link_6 = images[5]['raw_url']
+        else:
+            image_link_1 = image_link_2 = image_link_3 = image_link_4 = image_link_5 = image_link_6 = ""
+
+        # Write the cover section with rehosted poster URL
+        desc_text.append("[h3]Cover[/h3] [color=red]A stock poster has been automatically added, but you'll get more love if you include a proper cover, see rule 6.6[/color]\n")
+        desc_text.append("[center]\n")
+        desc_text.append(f"[IMG=500]{poster_url}[/IMG]\n")
+        desc_text.append("[/center]\n\n")
+
+        # Write screenshots section
+        desc_text.append("[h3]Screenshots[/h3]\n")
+        desc_text.append("[center]\n")
+        desc_text.append(f"[URL={image_link_1}][IMG=300]{image_link_1}[/IMG][/URL] ")
+        desc_text.append(f"[URL={image_link_2}][IMG=300]{image_link_2}[/IMG][/URL] ")
+        desc_text.append(f"[URL={image_link_3}][IMG=300]{image_link_3}[/IMG][/URL]\n ")
+        desc_text.append(f"[URL={image_link_4}][IMG=300]{image_link_4}[/IMG][/URL] ")
+        desc_text.append(f"[URL={image_link_5}][IMG=300]{image_link_5}[/IMG][/URL] ")
+        desc_text.append(f"[URL={image_link_6}][IMG=300]{image_link_6}[/IMG][/URL]\n")
+        desc_text.append("[/center]\n\n")
+
+        # Write synopsis section with the custom title
+        desc_text.append("[h3]Synopsis/Review/Personal Thoughts (edit as needed)[/h3]\n")
+        desc_text.append("[color=red]Default TMDB synopsis added, more love if you use a synopsis from credible film institutions such as the BFI or directly quoting well-known film critics, see rule 6.3[/color]\n")
+        desc_text.append("[quote]\n")
+        desc_text.append(f"{meta.get('overview', 'No synopsis available.')}\n")
+        desc_text.append("[/quote]\n\n")
+
+        # Write technical info section
+        desc_text.append("[h3]Technical Info[/h3]\n")
+        desc_text.append("[code]\n")
+        if meta['is_disc'] == 'BDMV':
+            desc_text.append(f" Disc Label.........:{meta.get('bdinfo', {}).get('label', '')}\n")
+        desc_text.append(f" IMDb...............: [url=https://www.imdb.com/title/tt{meta.get('imdb_id')}]{meta.get('imdb_rating', '')}[/url]\n")
+        desc_text.append(f" Year...............: {meta.get('year', '')}\n")
+        desc_text.append(f" Country............: {country_name}\n")
+        if meta['is_disc'] == 'BDMV':
+            desc_text.append(f" Runtime............: {meta.get('bdinfo', {}).get('length', '')} hrs [color=red](double check this is actual runtime)[/color]\n")
+        else:
+            desc_text.append(" Runtime............: [color=red]Insert the actual runtime[/color]\n")
+
+        if meta['is_disc'] == 'BDMV':
+            audio_languages = ', '.join([f"{track.get('language', 'Unknown')} {track.get('codec', 'Unknown')} {track.get('channels', 'Unknown')}" for track in meta.get('bdinfo', {}).get('audio', [])])
+            desc_text.append(f" Audio..............: {audio_languages}\n")
+            desc_text.append(f" Subtitles..........: {', '.join(meta.get('bdinfo', {}).get('subtitles', []))}\n")
+        else:
+            # Process each disc's `vob_mi` or `ifo_mi` to extract audio and subtitles separately
+            for disc in meta.get('discs', []):
+                vob_mi = disc.get('vob_mi', '')
+                ifo_mi = disc.get('ifo_mi', '')
+
+                unique_audio = set()  # Store unique audio strings
+
+                audio_section = vob_mi.split('\n\nAudio\n')[1].split('\n\n')[0] if 'Audio\n' in vob_mi else None
+                if audio_section:
+                    if "AC-3" in audio_section:
+                        codec = "AC-3"
+                    elif "DTS" in audio_section:
+                        codec = "DTS"
+                    elif "MPEG Audio" in audio_section:
+                        codec = "MPEG Audio"
+                    elif "PCM" in audio_section:
+                        codec = "PCM"
+                    elif "AAC" in audio_section:
+                        codec = "AAC"
+                    else:
+                        codec = "Unknown"
+
+                    channels = audio_section.split("Channel(s)")[1].split(":")[1].strip().split(" ")[0] if "Channel(s)" in audio_section else "Unknown"
+                    # Convert 6 channels to 5.1, otherwise leave as is
+                    channels = "5.1" if channels == "6" else channels
+                    language = disc.get('ifo_mi_full', '').split('Language')[1].split(":")[1].strip().split('\n')[0] if "Language" in disc.get('ifo_mi_full', '') else "Unknown"
+                    audio_info = f"{language} {codec} {channels}"
+                    unique_audio.add(audio_info)
+
+                # Append audio information to the description
+                if unique_audio:
+                    desc_text.append(f" Audio..............: {', '.join(sorted(unique_audio))}\n")
+
+                # Subtitle extraction using the helper function
+                unique_subtitles = self.parse_subtitles(ifo_mi)
+
+                # Append subtitle information to the description
+                if unique_subtitles:
+                    desc_text.append(f" Subtitles..........: {', '.join(sorted(unique_subtitles))}\n")
+
+        if meta['is_disc'] == 'BDMV':
+            video_info = meta.get('bdinfo', {}).get('video', [])
+            video_codec = video_info[0].get('codec', 'Unknown')
+            video_bitrate = video_info[0].get('bitrate', 'Unknown')
+            desc_text.append(f" Video Format.......: {video_codec} / {video_bitrate}\n")
+        else:
+            desc_text.append(f" DVD Format.........: {meta.get('source', 'Unknown')}\n")
+        desc_text.append(" Film Aspect Ratio..: [color=red]The actual aspect ratio of the content, not including the black bars[/color]\n")
+        if meta['is_disc'] == 'BDMV':
+            desc_text.append(f" Source.............: {meta.get('disctype', 'Unknown')}\n")
+        else:
+            desc_text.append(f" Source.............: {meta.get('dvd_size', 'Unknown')}\n")
+        desc_text.append(f" Film Distributor...: [url={meta.get('distributor_link', '')}]{meta.get('distributor', 'Unknown')}[/url] [color=red]Don't forget the actual distributor link[/color]\n")
+        desc_text.append(f" Average Bitrate....: {total_bitrate}\n")
+        desc_text.append(" Ripping Program....: [color=red]Specify - if it's your rip or custom version, otherwise 'Not my rip'[/color]\n")
+        desc_text.append("\n")
+        if meta.get('untouched') is True:
+            desc_text.append(" Menus......: [X] Untouched\n")
+            desc_text.append(" Video......: [X] Untouched\n")
+            desc_text.append(" Extras.....: [X] Untouched\n")
+            desc_text.append(" Audio......: [X] Untouched\n")
+        else:
+            desc_text.append(" Menus......: [ ] Untouched\n")
+            desc_text.append("              [ ] Stripped\n")
+            desc_text.append(" Video......: [ ] Untouched\n")
+            desc_text.append("              [ ] Re-encoded\n")
+            desc_text.append(" Extras.....: [ ] Untouched\n")
+            desc_text.append("              [ ] Stripped\n")
+            desc_text.append("              [ ] Re-encoded\n")
+            desc_text.append("              [ ] None\n")
+            desc_text.append(" Audio......: [ ] Untouched\n")
+            desc_text.append("              [ ] Stripped tracks\n")
+
+        desc_text.append("[/code]\n\n")
+
+        # Extras
+        desc_text.append("[h4]Extras[/h4]\n")
+        desc_text.append("[*] Insert special feature 1 here\n")
+        desc_text.append("[*] Insert special feature 2 here\n")
+        desc_text.append("... 
(add more special features as needed)\n\n") + + # Uploader Comments + desc_text.append("[h4]Uploader Comments[/h4]\n") + desc_text.append(f" - {meta.get('uploader_comments', 'No comments.')}\n") + + # Convert the list to a single string for the description + description = ''.join(desc_text) + + # Ask user if they want to edit or keep the description + console.print(f"Current description: {description}", markup=False) + console.print("[cyan]Do you want to edit or keep the description?[/cyan]") + edit_choice = input("Enter 'e' to edit, or press Enter to keep it as is: ") + + if edit_choice.lower() == 'e': + edited_description = click.edit(description) + if edited_description: + description = edited_description.strip() + console.print(f"Final description after editing: {description}", markup=False) + else: + console.print("[green]Keeping the original description.[/green]") + + # Write the final description to the file + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding="utf-8") as desc_file: + desc_file.write(description) + + def parse_subtitles(self, disc_mi): + unique_subtitles = set() # Store unique subtitle strings + lines = disc_mi.splitlines() # Split the multiline text into individual lines + current_block = None + + for line in lines: + # Detect the start of a subtitle block (Text #) + if line.startswith("Text #"): + current_block = "subtitle" + continue + + # Extract language information for subtitles + if current_block == "subtitle" and "Language" in line: + language = line.split(":")[1].strip() + unique_subtitles.add(language) + + return unique_subtitles + + def country_code_to_name(self, code): + country_mapping = { + 'AFG': 'Afghanistan', 'ALB': 'Albania', 'DZA': 'Algeria', 'AND': 'Andorra', 'AGO': 'Angola', + 'ARG': 'Argentina', 'ARM': 'Armenia', 'AUS': 'Australia', 'AUT': 'Austria', 'AZE': 'Azerbaijan', + 'BHS': 'Bahamas', 'BHR': 'Bahrain', 'BGD': 'Bangladesh', 'BRB': 'Barbados', 'BLR': 'Belarus', + 'BEL': 'Belgium', 'BLZ': 'Belize', 'BEN': 'Benin', 'BTN': 'Bhutan', 'BOL': 'Bolivia', + 'BIH': 'Bosnia and Herzegovina', 'BWA': 'Botswana', 'BRA': 'Brazil', 'BRN': 'Brunei', + 'BGR': 'Bulgaria', 'BFA': 'Burkina Faso', 'BDI': 'Burundi', 'CPV': 'Cabo Verde', 'KHM': 'Cambodia', + 'CMR': 'Cameroon', 'CAN': 'Canada', 'CAF': 'Central African Republic', 'TCD': 'Chad', 'CHL': 'Chile', + 'CHN': 'China', 'COL': 'Colombia', 'COM': 'Comoros', 'COG': 'Congo', 'CRI': 'Costa Rica', + 'HRV': 'Croatia', 'CUB': 'Cuba', 'CYP': 'Cyprus', 'CZE': 'Czech Republic', 'DNK': 'Denmark', + 'DJI': 'Djibouti', 'DMA': 'Dominica', 'DOM': 'Dominican Republic', 'ECU': 'Ecuador', 'EGY': 'Egypt', + 'SLV': 'El Salvador', 'GNQ': 'Equatorial Guinea', 'ERI': 'Eritrea', 'EST': 'Estonia', + 'SWZ': 'Eswatini', 'ETH': 'Ethiopia', 'FJI': 'Fiji', 'FIN': 'Finland', 'FRA': 'France', + 'GAB': 'Gabon', 'GMB': 'Gambia', 'GEO': 'Georgia', 'DEU': 'Germany', 'GHA': 'Ghana', + 'GRC': 'Greece', 'GRD': 'Grenada', 'GTM': 'Guatemala', 'GIN': 'Guinea', 'GNB': 'Guinea-Bissau', + 'GUY': 'Guyana', 'HTI': 'Haiti', 'HND': 'Honduras', 'HUN': 'Hungary', 'ISL': 'Iceland', 'IND': 'India', + 'IDN': 'Indonesia', 'IRN': 'Iran', 'IRQ': 'Iraq', 'IRL': 'Ireland', 'ISR': 'Israel', 'ITA': 'Italy', + 'JAM': 'Jamaica', 'JPN': 'Japan', 'JOR': 'Jordan', 'KAZ': 'Kazakhstan', 'KEN': 'Kenya', + 'KIR': 'Kiribati', 'KOR': 'Korea', 'KWT': 'Kuwait', 'KGZ': 'Kyrgyzstan', 'LAO': 'Laos', 'LVA': 'Latvia', + 'LBN': 'Lebanon', 'LSO': 'Lesotho', 'LBR': 'Liberia', 'LBY': 'Libya', 'LIE': 'Liechtenstein', + 'LTU': 'Lithuania', 
'LUX': 'Luxembourg', 'MDG': 'Madagascar', 'MWI': 'Malawi', 'MYS': 'Malaysia', + 'MDV': 'Maldives', 'MLI': 'Mali', 'MLT': 'Malta', 'MHL': 'Marshall Islands', 'MRT': 'Mauritania', + 'MUS': 'Mauritius', 'MEX': 'Mexico', 'FSM': 'Micronesia', 'MDA': 'Moldova', 'MCO': 'Monaco', + 'MNG': 'Mongolia', 'MNE': 'Montenegro', 'MAR': 'Morocco', 'MOZ': 'Mozambique', 'MMR': 'Myanmar', + 'NAM': 'Namibia', 'NRU': 'Nauru', 'NPL': 'Nepal', 'NLD': 'Netherlands', 'NZL': 'New Zealand', + 'NIC': 'Nicaragua', 'NER': 'Niger', 'NGA': 'Nigeria', 'MKD': 'North Macedonia', 'NOR': 'Norway', + 'OMN': 'Oman', 'PAK': 'Pakistan', 'PLW': 'Palau', 'PAN': 'Panama', 'PNG': 'Papua New Guinea', + 'PRY': 'Paraguay', 'PER': 'Peru', 'PHL': 'Philippines', 'POL': 'Poland', 'PRT': 'Portugal', + 'QAT': 'Qatar', 'ROU': 'Romania', 'RUS': 'Russia', 'RWA': 'Rwanda', 'KNA': 'Saint Kitts and Nevis', + 'LCA': 'Saint Lucia', 'VCT': 'Saint Vincent and the Grenadines', 'WSM': 'Samoa', 'SMR': 'San Marino', + 'STP': 'Sao Tome and Principe', 'SAU': 'Saudi Arabia', 'SEN': 'Senegal', 'SRB': 'Serbia', + 'SYC': 'Seychelles', 'SLE': 'Sierra Leone', 'SGP': 'Singapore', 'SVK': 'Slovakia', 'SVN': 'Slovenia', + 'SLB': 'Solomon Islands', 'SOM': 'Somalia', 'ZAF': 'South Africa', 'SSD': 'South Sudan', + 'ESP': 'Spain', 'LKA': 'Sri Lanka', 'SDN': 'Sudan', 'SUR': 'Suriname', 'SWE': 'Sweden', + 'CHE': 'Switzerland', 'SYR': 'Syria', 'TWN': 'Taiwan', 'TJK': 'Tajikistan', 'TZA': 'Tanzania', + 'THA': 'Thailand', 'TLS': 'Timor-Leste', 'TGO': 'Togo', 'TON': 'Tonga', 'TTO': 'Trinidad and Tobago', + 'TUN': 'Tunisia', 'TUR': 'Turkey', 'TKM': 'Turkmenistan', 'TUV': 'Tuvalu', 'UGA': 'Uganda', + 'UKR': 'Ukraine', 'ARE': 'United Arab Emirates', 'GBR': 'United Kingdom', 'USA': 'United States', + 'URY': 'Uruguay', 'UZB': 'Uzbekistan', 'VUT': 'Vanuatu', 'VEN': 'Venezuela', 'VNM': 'Vietnam', + 'YEM': 'Yemen', 'ZMB': 'Zambia', 'ZWE': 'Zimbabwe' + } + return country_mapping.get(code.upper(), 'Unknown Country') + + async def search_existing(self, meta, disctype): + dupes = [] + console.print("[yellow]Searching for existing torrents on TIK...") + disctype = meta.get('disctype', None) + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category'], meta.get('foreign'), meta.get('opera'), meta.get('asian')), + 'types[]': await self.get_type_id(disctype), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" + } + if meta.get('edition', "") != "": + params['name'] = params['name'] + f" {meta['edition']}" + try: + response = requests.get(url=self.search_url, params=params) + response = response.json() + for each in response['data']: + result = [each][0]['attributes']['name'] + # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() + # if difference >= 0.05: + dupes.append(result) + except Exception: + console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') + await asyncio.sleep(5) + + return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/TL.py b/src/trackers/TL.py index 9b98f602f..e662f2913 100644 --- a/src/trackers/TL.py +++ b/src/trackers/TL.py @@ -35,13 +35,13 @@ def __init__(self, config): self.upload_url = 'https://www.torrentleech.org/torrents/upload/apiupload' self.signature = None self.banned_groups = [""] - + self.announce_key = self.config['TRACKERS'][self.tracker]['announce_key'] self.config['TRACKERS'][self.tracker]['announce_url'] = f"https://tracker.torrentleech.org/a/{self.announce_key}/announce" pass - + async def get_cat_id(self, common, meta): - if meta.get('anime', 0): + if meta.get('anime', 0): return self.CATEGORIES['Anime'] if meta['category'] == 'MOVIE': @@ -64,7 +64,7 @@ async def get_cat_id(self, common, meta): elif meta['type'] == 'HDTV': return self.CATEGORIES['MovieHdRip'] elif meta['category'] == 'TV': - if meta['original_language'] != 'en': + if meta['original_language'] != 'en': return self.CATEGORIES['TvForeign'] elif meta.get('tv_pack', 0): return self.CATEGORIES['TvBoxsets'] @@ -75,20 +75,20 @@ async def get_cat_id(self, common, meta): raise NotImplementedError('Failed to determine TL category!') - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(common, meta) - await common.unit3d_edit_desc(meta, self.tracker, self.signature) + # await common.unit3d_edit_desc(meta, self.tracker, self.signature) - open_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'a+') - - info_filename = 'BD_SUMMARY_00' if meta['bdinfo'] != None else 'MEDIAINFO_CLEANPATH' + open_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'a+', encoding='utf-8') + + info_filename = 'BD_SUMMARY_00' if meta['bdinfo'] is not None else 'MEDIAINFO_CLEANPATH' open_info = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/{info_filename}.txt", 'r', encoding='utf-8') open_desc.write('\n\n') open_desc.write(open_info.read()) open_info.close() - + open_desc.seek(0) open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = { @@ -96,19 +96,19 @@ async def upload(self, meta): 
'torrent': (self.get_name(meta) + '.torrent', open_torrent) } data = { - 'announcekey' : self.announce_key, - 'category' : cat_id + 'announcekey': self.announce_key, + 'category': cat_id } headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } - - if meta['debug'] == False: + + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers) if not response.text.isnumeric(): console.print(f'[red]{response.text}') else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() open_desc.close() diff --git a/src/trackers/TTG.py b/src/trackers/TTG.py index 491a3bacc..9337e8a83 100644 --- a/src/trackers/TTG.py +++ b/src/trackers/TTG.py @@ -4,15 +4,12 @@ import asyncio import re import os -from pathlib import Path -import traceback -import json -import distutils.util import cli_ui +from str2bool import str2bool from unidecode import unidecode -from urllib.parse import urlparse, quote +from urllib.parse import urlparse from src.trackers.COMMON import COMMON -from src.exceptions import * +from src.exceptions import * # noqa #F405 from src.console import console @@ -28,11 +25,10 @@ def __init__(self, config): self.passan = str(config['TRACKERS']['TTG'].get('login_answer', '')).strip() self.uid = str(config['TRACKERS']['TTG'].get('user_id', '')).strip() self.passkey = str(config['TRACKERS']['TTG'].get('announce_url', '')).strip().split('/')[-1] - + self.signature = None self.banned_groups = [""] - async def edit_name(self, meta): ttg_name = meta['name'] @@ -48,46 +44,45 @@ async def get_type_id(self, meta): if meta['category'] == "MOVIE": # 51 = DVDRip if meta['resolution'].startswith("720"): - type_id = 52 # 720p + type_id = 52 # 720p if meta['resolution'].startswith("1080"): - type_id = 53 # 1080p/i + type_id = 53 # 1080p/i if meta['is_disc'] == "BDMV": - type_id = 54 # Blu-ray disc - + type_id = 54 # Blu-ray disc + elif meta['category'] == "TV": if meta.get('tv_pack', 0) != 1: # TV Singles if meta['resolution'].startswith("720"): - type_id = 69 # 720p TV EU/US + type_id = 69 # 720p TV EU/US if lang in ('ZH', 'CN', 'CMN'): - type_id = 76 # Chinese + type_id = 76 # Chinese if meta['resolution'].startswith("1080"): - type_id = 70 # 1080 TV EU/US + type_id = 70 # 1080 TV EU/US if lang in ('ZH', 'CN', 'CMN'): - type_id = 75 # Chinese + type_id = 75 # Chinese if lang in ('KR', 'KO'): - type_id = 75 # Korean + type_id = 75 # Korean if lang in ('JA', 'JP'): - type_id = 73 # Japanese + type_id = 73 # Japanese else: # TV Packs - type_id = 87 # EN/US + type_id = 87 # EN/US if lang in ('KR', 'KO'): - type_id = 99 # Korean + type_id = 99 # Korean if lang in ('JA', 'JP'): - type_id = 88 # Japanese + type_id = 88 # Japanese if lang in ('ZH', 'CN', 'CMN'): - type_id = 90 # Chinese - - + type_id = 90 # Chinese + if "documentary" in meta.get("genres", "").lower().replace(' ', '').replace('-', '') or 'documentary' in meta.get("keywords", "").lower().replace(' ', '').replace('-', ''): if meta['resolution'].startswith("720"): - type_id = 62 # 720p + type_id = 62 # 720p if meta['resolution'].startswith("1080"): - type_id = 63 # 1080 + type_id = 63 # 1080 if meta.get('is_disc', '') == 'BDMV': - type_id = 64 # BDMV - + type_id = 64 # BDMV + if "animation" in meta.get("genres", "").lower().replace(' ', '').replace('-', '') or 'animation' in meta.get("keywords", "").lower().replace(' ', 
'').replace('-', ''): if meta.get('sd', 1) == 0: type_id = 58 @@ -104,38 +99,34 @@ async def get_type_id(self, meta): return type_id async def get_anon(self, anon): - if anon == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if anon == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 'no' else: anon = 'yes' return anon - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### - - async def upload(self, meta): + async def upload(self, meta, disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await self.edit_desc(meta) ttg_name = await self.edit_name(meta) # FORM - # type = category dropdown - # name = name - # descr = description - # anonymity = "yes" / "no" - # nodistr = "yes" / "no" (exclusive?) not required - # imdb_c = tt123456 - # + # type = category dropdown + # name = name + # descr = description + # anonymity = "yes" / "no" + # nodistr = "yes" / "no" (exclusive?) not required + # imdb_c = tt123456 + # # POST > upload/upload - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8') else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8') - ttg_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + ttg_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" with open(torrent_path, 'rb') as torrentFile: if len(meta['filelist']) == 1: @@ -143,21 +134,20 @@ async def upload(self, meta): else: torrentFileName = unidecode(os.path.basename(meta['path']).replace(' ', '.')) files = { - 'file' : (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent"), - 'nfo' : ("torrent.nfo", mi_dump) + 'file': (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent"), + 'nfo': ("torrent.nfo", mi_dump) } data = { - 'MAX_FILE_SIZE' : '4000000', - 'team' : '', - 'hr' : 'no', - 'name' : ttg_name, - 'type' : await self.get_type_id(meta), - 'descr' : ttg_desc.rstrip(), - + 'MAX_FILE_SIZE': '4000000', + 'team': '', + 'hr': 'no', + 'name': ttg_name, + 'type': await self.get_type_id(meta), + 'descr': ttg_desc.rstrip(), + + 'anonymity': await self.get_anon(meta['anon']), + 'nodistr': 'no', - 'anonymity' : await self.get_anon(meta['anon']), - 'nodistr' : 'no', - } url = "https://totheglory.im/takeupload.php" if int(meta['imdb_id'].replace('tt', '')) != 0: @@ -175,7 +165,7 @@ async def upload(self, meta): up = session.post(url=url, data=data, files=files) torrentFile.close() mi_dump.close() - + if up.url.startswith("https://totheglory.im/details.php?id="): console.print(f"[green]Uploaded to: [yellow]{up.url}[/yellow][/green]") id = re.search(r"(id=)(\d+)", urlparse(up.url).query).group(2) @@ -184,17 +174,16 @@ async def upload(self, meta): console.print(data) console.print("\n\n") console.print(up.text) - raise UploadException(f"Upload to TTG Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') + raise UploadException(f"Upload to TTG Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') # noqa #F405 
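+        # Reached only on the success path; the failure branch above has already
+        # raised UploadException.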
return - - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] with requests.Session() as session: cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/TTG.pkl") with open(cookiefile, 'rb') as cf: session.cookies.update(pickle.load(cf)) - + if int(meta['imdb_id'].replace('tt', '')) != 0: imdb = f"imdb{meta['imdb_id'].replace('tt', '')}" else: @@ -218,18 +207,15 @@ async def search_existing(self, meta): return dupes - - - async def validate_credentials(self, meta): cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/TTG.pkl") if not os.path.exists(cookiefile): await self.login(cookiefile) vcookie = await self.validate_cookies(meta, cookiefile) - if vcookie != True: + if vcookie is not True: console.print('[red]Failed to validate cookies. Please confirm that the site is up and your passkey is valid.') recreate = cli_ui.ask_yes_no("Log in again and create new session?") - if recreate == True: + if recreate is True: if os.path.exists(cookiefile): os.remove(cookiefile) await self.login(cookiefile) @@ -238,7 +224,7 @@ async def validate_credentials(self, meta): else: return False return True - + async def validate_cookies(self, meta, cookiefile): url = "https://totheglory.im" if os.path.exists(cookiefile): @@ -259,7 +245,7 @@ async def validate_cookies(self, meta, cookiefile): async def login(self, cookiefile): url = "https://totheglory.im/takelogin.php" - data={ + data = { 'username': self.username, 'password': self.password, 'passid': self.passid, @@ -270,11 +256,11 @@ async def login(self, cookiefile): await asyncio.sleep(0.5) if response.url.endswith('2fa.php'): soup = BeautifulSoup(response.text, 'html.parser') - auth_token = soup.find('input', {'name' : 'authenticity_token'}).get('value') + auth_token = soup.find('input', {'name': 'authenticity_token'}).get('value') two_factor_data = { - 'otp' : console.input('[yellow]TTG 2FA Code: '), - 'authenticity_token' : auth_token, - 'uid' : self.uid + 'otp': console.input('[yellow]TTG 2FA Code: '), + 'authenticity_token': auth_token, + 'uid': self.uid } two_factor_url = "https://totheglory.im/take2fa.php" response = session.post(two_factor_url, data=two_factor_data) @@ -290,21 +276,19 @@ async def login(self, cookiefile): console.print(response.url) return - - async def edit_desc(self, meta): - base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() - with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as descfile: + base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read() + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', encoding='utf-8') as descfile: from src.bbcode import BBCODE from src.trackers.COMMON import COMMON common = COMMON(config=self.config) if int(meta.get('imdb_id', '0').replace('tt', '')) != 0: ptgen = await common.ptgen(meta) if ptgen.strip() != '': - descfile.write(ptgen) + descfile.write(ptgen) # Add This line for all web-dls - if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '' and meta.get('description', None) == None: + if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '' and meta.get('description', None) is None: descfile.write(f"[center][b][color=#ff00ff][size=3]{meta['service_longname']}ēš„ę— ęŸREMUXē‰‡ęŗļ¼Œę²”ęœ‰č½¬ē /This release is sourced from {meta['service_longname']} and is not transcoded, just remuxed from the direct {meta['service_longname']} 
stream[/size][/color][/b][/center]") bbcode = BBCODE() if meta.get('discs', []) != []: @@ -327,17 +311,17 @@ async def edit_desc(self, meta): desc = bbcode.convert_spoiler_to_hide(desc) desc = bbcode.convert_comparison_to_centered(desc, 1000) desc = desc.replace('[img]', '[img]') - desc = re.sub("(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) + desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) descfile.write(desc) images = meta['image_list'] - if len(images) > 0: + if len(images) > 0: descfile.write("[center]") for each in range(len(images[:int(meta['screens'])])): web_url = images[each]['web_url'] img_url = images[each]['img_url'] descfile.write(f"[url={web_url}][img]{img_url}[/img][/url]") descfile.write("[/center]") - if self.signature != None: + if self.signature is not None: descfile.write("\n\n") descfile.write(self.signature) descfile.close() @@ -350,4 +334,4 @@ async def download_new_torrent(self, id, torrent_path): tor.write(r.content) else: console.print("[red]There was an issue downloading the new .torrent from TTG") - console.print(r.text) \ No newline at end of file + console.print(r.text) diff --git a/src/trackers/TVC.py b/src/trackers/TVC.py new file mode 100644 index 000000000..7de6e800f --- /dev/null +++ b/src/trackers/TVC.py @@ -0,0 +1,435 @@ +# -*- coding: utf-8 -*- +# import discord +import asyncio +import requests +from str2bool import str2bool +import traceback +import cli_ui +import os +from src.bbcode import BBCODE +import json + +from src.trackers.COMMON import COMMON +from src.console import console + + +class TVC(): + """ + Edit for Tracker: + Edit BASE.torrent with announce and source + Check for duplicates + Set type/category IDs + Upload + """ + + def __init__(self, config): + self.config = config + self.tracker = 'TVC' + self.source_flag = 'TVCHAOS' + self.upload_url = 'https://tvchaosuk.com/api/torrents/upload' + self.search_url = 'https://tvchaosuk.com/api/torrents/filter' + self.signature = "" + self.banned_groups = [''] + self.images = { + "imdb_75": 'https://i.imgur.com/Mux5ObG.png', + "tmdb_75": 'https://i.imgur.com/r3QzUbk.png', + "tvdb_75": 'https://i.imgur.com/UWtUme4.png', + "tvmaze_75": 'https://i.imgur.com/ZHEF5nE.png', + "mal_75": 'https://i.imgur.com/PBfdP3M.png' + } + + pass + + async def get_cat_id(self, genres): + # Note sections are based on Genre not type, source, resolution etc.. 
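+        # (Editor's note, hedged): the two parallel lists below act as an ordered
+        # genre -> category-id table, e.g. the dict equivalent would start
+        # {"comedy": "29", "documentary": "5", "drama": "11", ...}; get_cat_id
+        # returns the id of the first section name that contains one of the
+        # release's genres, which is why a plain dict lookup is not used.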
+ self.tv_types = ["comedy", "documentary", "drama", "entertainment", "factual", "foreign", "kids", "movies", "News", "radio", "reality", "soaps", "sci-fi", "sport", "holding bin"] + self.tv_types_ids = ["29", "5", "11", "14", "19", "42", "32", "44", "45", "51", "52", "30", "33", "42", "53"] + + genres = genres.split(', ') + if len(genres) >= 1: + for i in genres: + g = i.lower().replace(',', '') + for s in self.tv_types: + if s.__contains__(g): + return self.tv_types_ids[self.tv_types.index(s)] + + # returning 14 as that is holding bin/misc + return self.tv_types_ids[14] + + async def get_res_id(self, tv_pack, resolution): + if tv_pack: + resolution_id = { + '1080p': 'HD1080p Pack', + '1080i': 'HD1080p Pack', + '720p': 'HD720p Pack', + '576p': 'SD Pack', + '576i': 'SD Pack', + '540p': 'SD Pack', + '540i': 'SD Pack', + '480p': 'SD Pack', + '480i': 'SD Pack' + }.get(resolution, 'SD') + else: + resolution_id = { + '1080p': 'HD1080p', + '1080i': 'HD1080p', + '720p': 'HD720p', + '576p': 'SD', + '576i': 'SD', + '540p': 'SD', + '540': 'SD', + '480p': 'SD', + '480i': 'SD' + }.get(resolution, 'SD') + return resolution_id + + async def upload(self, meta, disctype): + common = COMMON(config=self.config) + await common.edit_torrent(meta, self.tracker, self.source_flag) + await self.get_tmdb_data(meta) + if meta['category'] == 'TV': + cat_id = await self.get_cat_id(meta['genres']) + else: + cat_id = 44 + # type_id = await self.get_type_id(meta['type']) + resolution_id = await self.get_res_id(meta['tv_pack'] if 'tv_pack' in meta else 0, meta['resolution']) + await self.unit3d_edit_desc(meta, self.tracker, self.signature) + + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + anon = 0 + else: + anon = 1 + + if meta['bdinfo'] is not None: + mi_dump = None + bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + else: + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + bd_dump = None + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') + files = {'torrent': open_torrent} + + if meta['type'] == "ENCODE" and (str(meta['path']).lower().__contains__("bluray") or str(meta['path']).lower().__contains__("brrip") or str(meta['path']).lower().__contains__("bdrip")): + type = "BRRip" + else: + type = meta['type'].replace('WEBDL', 'WEB-DL') + + # Naming as per TVC rules. Site has unusual naming conventions. + if meta['category'] == "MOVIE": + tvc_name = f"{meta['title']} ({meta['year']}) [{meta['resolution']} {type} {str(meta['video'][-3:]).upper()}]" + else: + if meta['search_year'] != "": + year = meta['year'] + else: + year = "" + if meta.get('no_season', False) is True: + season = '' + if meta.get('no_year', False) is True: + year = '' + + if meta['category'] == "TV": + if meta['tv_pack']: + # seasons called series here. 
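+                    # (Editor's note, hedged, hypothetical values): the template below
+                    # renders a pack roughly as
+                    #     Example Show (2021) Series 2 [1080p WEB-DL MKV]
+                    # where the trailing token is the last three characters of
+                    # meta['video'] (the container extension), upper-cased.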
+                    tvc_name = f"{meta['title']} ({meta['year'] if 'season_air_first_date' not in meta or len(meta['season_air_first_date']) < 4 else meta['season_air_first_date'][:4]}) Series {meta['season_int']} [{meta['resolution']} {type} {str(meta['video'][-3:]).upper()}]".replace("  ", " ").replace(' () ', ' ')
+                else:
+                    if 'episode_airdate' in meta:
+                        tvc_name = f"{meta['title']} ({year}) {meta['season']}{meta['episode']} ({meta['episode_airdate']}) [{meta['resolution']} {type} {str(meta['video'][-3:]).upper()}]".replace("  ", " ").replace(' () ', ' ')
+                    else:
+                        tvc_name = f"{meta['title']} ({year}) {meta['season']}{meta['episode']} [{meta['resolution']} {type} {str(meta['video'][-3:]).upper()}]".replace("  ", " ").replace(' () ', ' ')
+
+        with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MediaInfo.json", 'r', encoding='utf-8') as f:
+            mi = json.load(f)
+
+        if not meta['is_disc']:
+            self.get_subs_info(meta, mi)
+
+        if 'eng_subs' in meta and meta['eng_subs']:
+            tvc_name = tvc_name.replace(']', ' SUBS]')
+        if 'sdh_subs' in meta and meta['sdh_subs']:
+            if 'eng_subs' in meta and meta['eng_subs']:
+                tvc_name = tvc_name.replace(' SUBS]', ' (ENG + SDH SUBS)]')
+            else:
+                tvc_name = tvc_name.replace(']', ' (SDH SUBS)]')
+
+        if 'origin_country_code' in meta:
+            if "IE" in meta['origin_country_code']:
+                tvc_name += " [IRL]"
+            elif "AU" in meta['origin_country_code']:
+                tvc_name += " [AUS]"
+            elif "NZ" in meta['origin_country_code']:
+                tvc_name += " [NZ]"
+            elif "CA" in meta['origin_country_code']:
+                tvc_name += " [CA]"
+
+        if meta.get('unattended', False) is False:
+            upload_to_tvc = cli_ui.ask_yes_no(f"Upload to {self.tracker} with the name {tvc_name}?", default=False)
+
+            if not upload_to_tvc:
+                tvc_name = cli_ui.ask_string("Please enter New Name:")
+                upload_to_tvc = cli_ui.ask_yes_no(f"Upload to {self.tracker} with the name {tvc_name}?", default=False)
+
+        data = {
+            'name': tvc_name,
+            # Newlines do not seem to survive the API push on this site; they only
+            # render after the description is edited and re-saved in the web UI.
+            'description': desc.replace('\n', '
').replace('\r', '
+    async def get_tmdb_data(self, meta):
+        import tmdbsimple as tmdb
+        if meta['category'] == "MOVIE":
+            movie = tmdb.Movies(meta['tmdb'])
+            response = movie.info()
+        else:
+            tv = tmdb.TV(meta['tmdb'])
+            response = tv.info()
+
+        # TVC-specific extras
+        if meta['category'] == "TV":
+            if hasattr(tv, 'release_dates'):
+                meta['release_dates'] = tv.release_dates()
+
+            if hasattr(tv, 'networks') and len(tv.networks) != 0 and 'name' in tv.networks[0]:
+                meta['networks'] = tv.networks[0]['name']
+
+            try:
+                if 'tv_pack' in meta and not meta['tv_pack']:
+                    episode_info = tmdb.TV_Episodes(meta['tmdb'], meta['season_int'], meta['episode_int']).info()
+                    meta['episode_airdate'] = episode_info['air_date']
+                    meta['episode_name'] = episode_info['name']
+                    meta['episode_overview'] = episode_info['overview']
+                if 'tv_pack' in meta and meta['tv_pack']:
+                    season_info = tmdb.TV_Seasons(meta['tmdb'], meta['season_int']).info()
+                    meta['season_air_first_date'] = season_info['air_date']
+
+                if hasattr(tv, 'first_air_date'):
+                    meta['first_air_date'] = tv.first_air_date
+            except Exception:
+                console.print(traceback.format_exc())
+                console.print(f"Unable to get episode information. Make sure episode {meta['season']}{meta['episode']} exists on TMDB. \nhttps://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}/season/{meta['season_int']}")
+                meta['season_air_first_date'] = str(meta["year"]) + "-N/A-N/A"
+                meta['first_air_date'] = str(meta["year"]) + "-N/A-N/A"
+
+        meta['origin_country_code'] = []
+        if 'origin_country' in response:
+            if isinstance(response['origin_country'], list):
+                for i in response['origin_country']:
+                    meta['origin_country_code'].append(i)
+            else:
+                meta['origin_country_code'].append(response['origin_country'])
+        elif len(response['production_countries']):
+            for i in response['production_countries']:
+                if 'iso_3166_1' in i:
+                    meta['origin_country_code'].append(i['iso_3166_1'])
+        elif len(response['production_companies']):
+            meta['origin_country_code'].append(response['production_companies'][0]['origin_country'])
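    # Editor's note (annotation, not part of the patch): both the naming code in
    # upload() and the fallback strings above rely on the first four characters of a
    # TMDB air date ('YYYY-MM-DD') carrying the year. A minimal sketch of that rule,
    # with the helper name purely illustrative:
    #
    #     def air_year(meta):
    #         date = str(meta.get('season_air_first_date', ''))
    #         return date[:4] if len(date) >= 4 and date[:4].isdigit() else meta['year']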
+    async def search_existing(self, meta, disctype):
+        # Search on TVCUK has been DISABLED due to issues with the site's search API.
+        # The code below is kept for future use, for when it is re-enabled.
+        console.print("[red]Cannot search for dupes as the search API is not working...")
+        console.print("[red]Please make sure you are not uploading duplicates.")
+        # https://tvchaosuk.com/api/torrents/filter?api_token=&tmdb=138108
+
+        dupes = []
+        console.print("[yellow]Searching for existing torrents on TVC...")
+        params = {
+            'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(),
+            'tmdb': meta['tmdb'],
+            'name': ""
+        }
+
+        try:
+            response = requests.get(url=self.search_url, params=params)
+            response = response.json()
+            if "message" in response and response["message"] == "No Torrents Found":
+                return
+            else:
+                for each in response['data']:
+                    result = each['attributes']['name']
+                    dupes.append(result)
+        except Exception:
+            console.print(response)
+            console.print(self.search_url, params)
+            console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect')
+            await asyncio.sleep(5)
+
+        return dupes
+
+    async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False):
+        base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf-8').read()
+        with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]DESCRIPTION.txt", 'w') as descfile:
+            bbcode = BBCODE()
+            if meta.get('discs', []) != []:
+                discs = meta['discs']
+                if discs[0]['type'] == "DVD":
+                    descfile.write(f"[spoiler=VOB MediaInfo][code]{discs[0]['vob_mi']}[/code][/spoiler]\n")
+                    descfile.write("\n")
+                if len(discs) >= 2:
+                    for each in discs[1:]:
+                        if each['type'] == "BDMV":
+                            descfile.write(f"[spoiler={each.get('name', 'BDINFO')}][code]{each['summary']}[/code][/spoiler]\n")
+                            descfile.write("\n")
+                        if each['type'] == "DVD":
+                            descfile.write(f"{each['name']}:\n")
+                            descfile.write(f"[spoiler={os.path.basename(each['vob'])}][code]{each['vob_mi']}[/code][/spoiler] [spoiler={os.path.basename(each['ifo'])}][code]{each['ifo_mi']}[/code][/spoiler]\n")
+                            descfile.write("\n")
+            desc = ""
+
+            # Release info
+            rd_info = ""
+            # Movie release info
+            if meta['category'] != "TV" and 'release_dates' in meta:
+                for cc in meta['release_dates']['results']:
+                    for rd in cc['release_dates']:
+                        if rd['type'] == 6:
+                            channel = str(rd['note']) if str(rd['note']) != "" else "N/A Channel"
+                            rd_info += "[color=orange][size=15]" + cc['iso_3166_1'] + " TV Release info [/size][/color]" + "\n" + str(rd['release_date'])[:10] + " on " + channel + "\n"
+            if rd_info != "":
+                desc += "[color=green][size=25]Release Info[/size][/color]" + "\n\n"
+                desc += rd_info + "\n\n"
+            # Season release info. TODO: fetch the season's own release info instead of the first episode's.
+            elif meta['category'] == "TV" and meta['tv_pack'] == 1 and 'first_air_date' in meta:
+                channel = meta['networks'] if 'networks' in meta and meta['networks'] != "" else "N/A"
+                desc += "[color=green][size=25]Release Info[/size][/color]" + "\n\n"
+                desc += f"[color=orange][size=15]First episode of this season aired {meta['season_air_first_date']} on channel {channel}[/size][/color]" + "\n\n"
+            elif meta['category'] == "TV" and meta['tv_pack'] != 1 and 'episode_airdate' in meta:
+                channel = meta['networks'] if 'networks' in meta and meta['networks'] != "" else "N/A"
+                desc += "[color=green][size=25]Release Info[/size][/color]" + "\n\n"
+                desc += f"[color=orange][size=15]Episode aired on channel {channel} on {meta['episode_airdate']}[/size][/color]" + "\n\n"
+            else:
+                desc += "[color=green][size=25]Release Info[/size][/color]" + "\n\n"
+                desc += "[color=orange][size=15]TMDB has no TV release info for this[/size][/color]" + "\n\n"
+
+            if meta['category'] == 'TV' and meta['tv_pack'] != 1 and 'episode_overview' in meta:
+                desc += "[color=green][size=25]PLOT[/size][/color]" + "\n" + "Episode Name: " + str(meta['episode_name']) + "\n" + str(meta['episode_overview']) + "\n\n"
+            else:
+                desc += "[color=green][size=25]PLOT[/size][/color]" + "\n" + str(meta['overview']) + "\n\n"
+            if len(base) > 2 and meta['description'] != "PTP":
+                desc += "[color=green][size=25]Notes/Extra Info[/size][/color]" + " \n \n" + str(base) + " \n \n "
+            desc += self.get_links(meta, "[color=green][size=25]", "[/size][/COLOR]")
+            desc = bbcode.convert_pre_to_code(desc)
+            desc = bbcode.convert_hide_to_spoiler(desc)
+            if comparison is False:
+                desc = bbcode.convert_comparison_to_collapse(desc, 1000)
+            descfile.write(desc)
+            images = meta['image_list']
+            # Only add two screenshots, as the site rules allow a maximum of two.
+            if len(images) > 0 and int(meta['screens']) >= 2:
+                descfile.write("[color=green][size=25]Screenshots[/size][/color]\n\n[center]")
+                for image in images[:2]:
+                    web_url = image['web_url']
+                    img_url = image['img_url']
+                    descfile.write(f"[url={web_url}][img=350]{img_url}[/img][/url]")
+                descfile.write("[/center]")
+
+            if signature is not None:
+                descfile.write(signature)
+        return
+
+    def get_links(self, movie, subheading, heading_end):
+        description = ""
+        description += "\n\n" + subheading + "Links" + heading_end + "\n"
+        if movie['imdb_id'] != "0":
+            description += f"[URL=https://www.imdb.com/title/tt{movie['imdb_id']}][img]{self.images['imdb_75']}[/img][/URL]"
+        if movie['tmdb'] != "0":
+            description += f" [URL=https://www.themoviedb.org/{str(movie['category'].lower())}/{str(movie['tmdb'])}][img]{self.images['tmdb_75']}[/img][/URL]"
+        if movie['tvdb_id'] != 0:
+            description += f" [URL=https://www.thetvdb.com/?id={str(movie['tvdb_id'])}&tab=series][img]{self.images['tvdb_75']}[/img][/URL]"
+        if movie['tvmaze_id'] != 0:
+            description += f" [URL=https://www.tvmaze.com/shows/{str(movie['tvmaze_id'])}][img]{self.images['tvmaze_75']}[/img][/URL]"
+        if movie['mal_id'] != 0:
+            description += f" [URL=https://myanimelist.net/anime/{str(movie['mal_id'])}][img]{self.images['mal_75']}[/img][/URL]"
+        return description + " \n \n "
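    # Editor's note (annotation, not part of the patch): get_subs_info() below walks
    # the MediaInfo JSON written earlier in the run, which has roughly this shape:
    # {"media": {"track": [{"@type": "General", ...}, {"@type": "Text", "Language_String": "English", ...}]}}
    # so the subtitle count reduces to a single pass over the track list:
    #
    #     subs_num = sum(1 for t in mi.get("media", {}).get("track", [])
    #                    if t.get("@type") == "Text")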
+    # Subtitle detection; the results feed the naming conventions in upload().
+    def get_subs_info(self, meta, mi):
+        subs = ""
+        subs_num = 0
+        for s in mi.get("media").get("track"):
+            if s["@type"] == "Text":
+                subs_num = subs_num + 1
+        if subs_num >= 1:
+            meta['has_subs'] = 1
+        else:
+            meta['has_subs'] = 0
+        for s in mi.get("media").get("track"):
+            if s["@type"] == "Text":
+                if "Language_String" in s and subs_num > 0:
+                    subs = subs + s["Language_String"] + ", "
+                    # Check for Romanian subs (data scene releases).
+                    if s["Language_String"] == "Romanian":
+                        meta['ro_sub'] = 1
+                    if "english" in str(s["Language_String"]).lower():
+                        meta['eng_subs'] = 1
+                if "sdh" in str(s).lower():
+                    meta['sdh_subs'] = 1
+        return

diff --git a/src/trackers/ULCX.py b/src/trackers/ULCX.py
new file mode 100644
index 000000000..440132904
--- /dev/null
+++ b/src/trackers/ULCX.py
@@ -0,0 +1,224 @@
+# -*- coding: utf-8 -*-
+# import discord
+import asyncio
+import requests
+import platform
+from str2bool import str2bool
+import bencodepy
+import os
+import glob
+
+from src.trackers.COMMON import COMMON
+from src.console import console
+
+
+class ULCX():
+
+    def __init__(self, config):
+        self.config = config
+        self.tracker = 'ULCX'
+        self.source_flag = 'ULCX'
+        self.upload_url = 'https://upload.cx/api/torrents/upload'
+        self.search_url = 'https://upload.cx/api/torrents/filter'
+        self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]"
+        self.banned_groups = ['Tigole', 'x0r', 'Judas', 'SPDVD', 'MeGusta', 'YIFY', 'SWTYBLZ', 'TAoE', 'TSP', 'TSPxL', 'LAMA', '4K4U',
+                              'ION10', 'Will1869', 'TGx', 'Sicario', 'QxR', 'Hi10', 'EMBER', 'FGT', 'AROMA', 'd3g', 'nikt0', 'Grym',
+                              'RARBG', 'iVy', 'FnP', 'EDGE2020', 'NuBz', 'NAHOM', 'Ralphy']
+        pass
+
+    async def get_cat_id(self, category_name):
+        category_id = {
+            'MOVIE': '1',
+            'TV': '2',
+        }.get(category_name, '0')
+        return category_id
+
+    async def get_type_id(self, type):
+        type_id = {
+            'DISC': '1',
+            'REMUX': '2',
+            'WEBDL': '4',
+            'WEBRIP': '5',
+            'HDTV': '6',
+            'ENCODE': '3'
+        }.get(type, '0')
+        return type_id
+
+    async def get_res_id(self, resolution, type):
+        if type not in ['DISC']:
+            if resolution not in ['8640p', '4320p', '2160p', '1440p', '1080p', '1080i', '720p']:
+                return None
+        resolution_id = {
+            '8640p': '10',
+            '4320p': '1',
+            '2160p': '2',
+            '1440p': '3',
+            '1080p': '3',
+            '1080i': '4',
+            '720p': '5',
+            '576p': '6',
+            '576i': '7',
+            '480p': '8',
+            '480i': '9'
+        }.get(resolution, '10')
+        return resolution_id
+
+    async def upload(self, meta, disctype):
+        common = COMMON(config=self.config)
+        await common.edit_torrent(meta, self.tracker, self.source_flag)
+        cat_id = await self.get_cat_id(meta['category'])
+        type_id = await self.get_type_id(meta['type'])
+        resolution_id = await self.get_res_id(meta['resolution'], meta['type'])
+        if resolution_id is None:
+            console.print("Resolution is below 720p; skipping.")
+            return
+        await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True)
+        region_id = await common.unit3d_region_ids(meta.get('region'))
+        distributor_id = await common.unit3d_distributor_ids(meta.get('distributor'))
+        if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False:
+            anon = 0
+        else:
+            anon = 1
+
+        if meta['bdinfo'] is not None:
+            mi_dump = None
+            bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read()
+        else:
+            mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read()
+            bd_dump = None
+        desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read()
+        open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb')
+        files = {'torrent': open_torrent}
+        base_dir = meta['base_dir']
+        uuid = meta['uuid']
specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") + data = { + 'name': meta['name'], + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, + } + # Internal + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: + if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + data['internal'] = 1 + + if region_id != 0: + data['region_id'] = region_id + if distributor_id != 0: + data['distributor_id'] = distributor_id + if meta.get('category') == "TV": + data['season_number'] = meta.get('season_int', '0') + data['episode_number'] = meta.get('episode_int', '0') + headers = { + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' + } + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + } + + if meta['debug'] is False: + response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + try: + console.print(response.json()) + except Exception: + console.print("It may have uploaded, go check") + return + else: + console.print("[cyan]Request Data:") + console.print(data) + open_torrent.close() + + async def search_existing(self, meta, disctype): + if 'concert' in meta['keywords']: + console.print('[bold red]Concerts not allowed.') + meta['skipping'] = "ULCX" + return + dupes = [] + console.print("[yellow]Searching for existing torrents on ULCX...") + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution'], meta['type']), + 'name': "" + } + if meta.get('edition', "") != "": + params['name'] = params['name'] + f" {meta['edition']}" + try: + response = requests.get(url=self.search_url, params=params) + response = response.json() + for each in response['data']: + result = [each][0]['attributes']['name'] + # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() + # if difference >= 0.05: + dupes.append(result) + except Exception: + console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') + await asyncio.sleep(5) + + return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/UNIT3D_TEMPLATE.py b/src/trackers/UNIT3D_TEMPLATE.py index 405e2c9f1..d4a530e2a 100644 --- a/src/trackers/UNIT3D_TEMPLATE.py +++ b/src/trackers/UNIT3D_TEMPLATE.py @@ -2,9 +2,9 @@ # import discord import asyncio import requests -import distutils.util -import os import platform +from str2bool import str2bool +import bencodepy from src.trackers.COMMON import COMMON from src.console import console @@ -20,7 +20,7 @@ class UNIT3D_TEMPLATE(): """ ############################################################### - ######## EDIT ME ######## + ######## EDIT ME ######## noqa E266 ############################################################### # ALSO EDIT CLASS NAME ABOVE @@ -31,49 +31,49 @@ def __init__(self, config): self.source_flag = 'Source flag for .torrent' self.upload_url = 'https://domain.tld/api/torrents/upload' self.search_url = 'https://domain.tld/api/torrents/filter' - self.signature = None + self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" self.banned_groups = [""] pass - + async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '0') + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '0') return category_id async def get_type_id(self, type): type_id = { - 'DISC': '1', + 'DISC': '1', 'REMUX': '2', - 'WEBDL': '4', - 'WEBRIP': '5', + 'WEBDL': '4', + 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '3' - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', - '4320p': '1', - '2160p': '2', - '1440p' : '3', + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', '1080p': '3', - '1080i':'4', - '720p': '5', - '576p': '6', + '1080i': '4', + '720p': '5', + '576p': '6', '576i': '7', - '480p': '8', + '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### + ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### noqa E266 ############################################################### - async def upload(self, meta): + async def upload(self, meta, 
disctype): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta['category']) @@ -82,49 +82,49 @@ async def upload(self, meta): await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() bd_dump = None - desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : meta['name'], - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': meta['name'], + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - + if region_id != 0: data['region_id'] = region_id if distributor_id != 0: @@ -133,38 +133,34 @@ async def upload(self, meta): data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') headers = { - 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' } params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - - if meta['debug'] == False: + + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: 
console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - - async def search_existing(self, meta): + async def search_existing(self, meta, disctype): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta.get('edition', "") != "": params['name'] = params['name'] + f" {meta['edition']}" @@ -176,8 +172,47 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes \ No newline at end of file + return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/UTP.py b/src/trackers/UTP.py new file mode 100644 index 000000000..375fc2170 --- /dev/null +++ b/src/trackers/UTP.py @@ -0,0 +1,218 @@ +# -*- coding: utf-8 -*- +# import discord +import asyncio +import requests +from str2bool import str2bool +import platform +import bencodepy +import os +import glob + +from src.trackers.COMMON import COMMON +from src.console import console + + +class UTP(): + """ + Edit for Tracker: + Edit BASE.torrent with announce and source + Check for duplicates + Set type/category IDs + Upload + """ + def __init__(self, config): + self.config = config + self.tracker = 'UTP' + self.source_flag = 'UTOPIA' + self.search_url = 'https://utp.to/api/torrents/filter' + self.torrent_url = 'https://utp.to/api/torrents/' + self.upload_url = 'https://utp.to/api/torrents/upload' + self.signature = 
"\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]" + self.banned_groups = [] + pass + + async def upload(self, meta, disctype): + common = COMMON(config=self.config) + await common.edit_torrent(meta, self.tracker, self.source_flag) + await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True) + cat_id = await self.get_cat_id(meta['category'], meta.get('edition', '')) + type_id = await self.get_type_id(meta['type']) + resolution_id = await self.get_res_id(meta['resolution']) + region_id = await common.unit3d_region_ids(meta.get('region')) + distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: + anon = 0 + else: + anon = 1 + + if meta['bdinfo'] is not None: + mi_dump = None + bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() + else: + mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() + bd_dump = None + desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[UTOPIA]DESCRIPTION.txt", 'r', encoding='utf-8').read() + open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[UTOPIA]{meta['clean_name']}.torrent", 'rb') + files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")} + base_dir = meta['base_dir'] + uuid = meta['uuid'] + specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo") + nfo_files = glob.glob(specified_dir_path) + nfo_file = None + if nfo_files: + nfo_file = open(nfo_files[0], 'rb') + if nfo_file: + files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain") + data = { + 'name': meta['name'], + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, + } + # Internal + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: + if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): + data['internal'] = 1 + + if region_id != 0: + data['region_id'] = region_id + if distributor_id != 0: + data['distributor_id'] = distributor_id + if meta.get('category') == "TV": + data['season_number'] = meta.get('season_int', '0') + data['episode_number'] = meta.get('episode_int', '0') + headers = { + 'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})' + } + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + } + + if meta['debug'] is False: + response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + try: + console.print(response.json()) + except Exception: + console.print("It may have uploaded, go check") + + return + else: + console.print("[cyan]Request Data:") + console.print(data) + open_torrent.close() + + async def get_cat_id(self, category_name, edition): + category_id = { + 'MOVIE': '1', + 'TV': '2', + 'FANRES': '3' + }.get(category_name, '0') + if category_name == 'MOVIE' and 'FANRES' in 
edition: + category_id = '3' + return category_id + + async def get_type_id(self, type): + type_id = { + 'DISC': '1', + 'REMUX': '2', + 'WEBDL': '4', + 'WEBRIP': '5', + 'HDTV': '6', + 'ENCODE': '3' + }.get(type, '0') + return type_id + + async def get_res_id(self, resolution): + resolution_id = { + '4320p': '1', + '2160p': '2', + '1080p': '3', + '1080i': '4' + }.get(resolution, '1') + return resolution_id + + async def search_existing(self, meta, disctype): + dupes = [] + console.print("[yellow]Searching for existing torrents on UTP...") + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category'], meta.get('edition', '')), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" + } + if meta['category'] == 'TV': + params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" + if meta.get('edition', "") != "": + params['name'] = params['name'] + f" {meta['edition']}" + try: + response = requests.get(url=self.search_url, params=params) + response = response.json() + for each in response['data']: + result = [each][0]['attributes']['name'] + # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() + # if difference >= 0.05: + dupes.append(result) + except Exception: + console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + await asyncio.sleep(5) + + return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackers/YOINK.py b/src/trackers/YOINK.py new file mode 100644 index 000000000..a6d2cc1d3 --- /dev/null +++ b/src/trackers/YOINK.py @@ -0,0 +1,219 @@ +# -*- coding: utf-8 -*- +# import discord +import asyncio +import requests +from str2bool import str2bool +import platform +import bencodepy +import os +import glob + +from src.trackers.COMMON import COMMON +from src.console import console + + +class YOINK(): + """ + Edit for Tracker: + Edit BASE.torrent with announce and source + Check for duplicates + Set type/category IDs + Upload + """ + + def __init__(self, config): + self.config = config + self.tracker = 'YOINK' + self.source_flag = 'YOINK' + self.upload_url = 'https://yoinked.org/api/torrents/upload' + self.search_url = 'https://yoinked.org/api/torrents/filter' + 
self.signature = "\n[center][url=https://github.com/Audionut/Upload-Assistant]Created by Audionut's Upload Assistant[/url][/center]"
+        self.banned_groups = ["YTS", "YiFY", "LAMA", "MeGUSTA", "NAHOM", "GalaxyRG", "RARBG"]
+        pass
+
+    async def get_cat_id(self, category_name):
+        category_id = {
+            'MOVIE': '1',
+            'TV': '2',
+        }.get(category_name, '0')
+        return category_id
+
+    async def get_type_id(self, type):
+        type_id = {
+            'DISC': '1',
+            'REMUX': '2',
+            'WEBDL': '4',
+            'WEBRIP': '5',
+            'HDTV': '6',
+            'ENCODE': '3'
+        }.get(type, '0')
+        return type_id
+
+    async def get_res_id(self, resolution):
+        resolution_id = {
+            '8640p': '10',
+            '4320p': '1',
+            '2160p': '2',
+            '1440p': '3',
+            '1080p': '3',
+            '1080i': '4',
+            '720p': '5',
+            '576p': '6',
+            '576i': '7',
+            '480p': '8',
+            '480i': '9'
+        }.get(resolution, '10')
+        return resolution_id
+
+    async def upload(self, meta, disctype):
+        common = COMMON(config=self.config)
+        await common.edit_torrent(meta, self.tracker, self.source_flag)
+        cat_id = await self.get_cat_id(meta['category'])
+        type_id = await self.get_type_id(meta['type'])
+        resolution_id = await self.get_res_id(meta['resolution'])
+        await common.unit3d_edit_desc(meta, self.tracker, self.signature, comparison=True)
+        region_id = await common.unit3d_region_ids(meta.get('region'))
+        distributor_id = await common.unit3d_distributor_ids(meta.get('distributor'))
+        if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False:
+            anon = 0
+        else:
+            anon = 1
+
+        if meta['bdinfo'] is not None:
+            mi_dump = None
+            bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read()
+        else:
+            mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read()
+            bd_dump = None
+        desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', encoding='utf-8').read()
+        open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb')
+        files = {'torrent': open_torrent}
+        base_dir = meta['base_dir']
+        uuid = meta['uuid']
+        specified_dir_path = os.path.join(base_dir, "tmp", uuid, "*.nfo")
+        nfo_files = glob.glob(specified_dir_path)
+        nfo_file = None
+        if nfo_files:
+            nfo_file = open(nfo_files[0], 'rb')
+        if nfo_file:
+            files['nfo'] = ("nfo_file.nfo", nfo_file, "text/plain")
+        data = {
+            'name': meta['name'],
+            'description': desc,
+            'mediainfo': mi_dump,
+            'bdinfo': bd_dump,
+            'category_id': cat_id,
+            'type_id': type_id,
+            'resolution_id': resolution_id,
+            'tmdb': meta['tmdb'],
+            'imdb': meta['imdb_id'].replace('tt', ''),
+            'tvdb': meta['tvdb_id'],
+            'mal': meta['mal_id'],
+            'igdb': 0,
+            'anonymous': anon,
+            'stream': meta['stream'],
+            'sd': meta['sd'],
+            'keywords': meta['keywords'],
+            'personal_release': int(meta.get('personalrelease', False)),
+            'internal': 0,
+            'featured': 0,
+            'free': 0,
+            'doubleup': 0,
+            'sticky': 0,
+        }
+        # Internal
+        if self.config['TRACKERS'][self.tracker].get('internal', False) is True:
+            if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])):
+                data['internal'] = 1
+
+        if region_id != 0:
+            data['region_id'] = region_id
+        if distributor_id != 0:
+            data['distributor_id'] = distributor_id
+        if meta.get('category') == "TV":
+            data['season_number'] = meta.get('season_int', '0')
+            data['episode_number'] = meta.get('episode_int', '0')
+        headers = {
+            'User-Agent': f'Upload Assistant/2.2 ({platform.system()} {platform.release()})'
+        }
+        params = {
'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() + } + + if meta['debug'] is False: + response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) + try: + console.print(response.json()) + except Exception: + console.print("It may have uploaded, go check") + return + else: + console.print("[cyan]Request Data:") + console.print(data) + open_torrent.close() + + async def search_existing(self, meta, disctype): + dupes = [] + console.print("[yellow]Searching for existing torrents on YOINK...") + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" + } + if meta.get('edition', "") != "": + params['name'] = params['name'] + f" {meta['edition']}" + try: + response = requests.get(url=self.search_url, params=params) + response = response.json() + for each in response['data']: + result = [each][0]['attributes']['name'] + # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() + # if difference >= 0.05: + dupes.append(result) + except Exception: + console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') + await asyncio.sleep(5) + + return dupes + + async def search_torrent_page(self, meta, disctype): + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + Name = meta['name'] + quoted_name = f'"{Name}"' + + params = { + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'name': quoted_name + } + + try: + response = requests.get(url=self.search_url, params=params) + response.raise_for_status() + response_data = response.json() + + if response_data['data'] and isinstance(response_data['data'], list): + details_link = response_data['data'][0]['attributes'].get('details_link') + + if details_link: + with open(torrent_file_path, 'rb') as open_torrent: + torrent_data = open_torrent.read() + + torrent = bencodepy.decode(torrent_data) + torrent[b'comment'] = details_link.encode('utf-8') + updated_torrent_data = bencodepy.encode(torrent) + + with open(torrent_file_path, 'wb') as updated_torrent_file: + updated_torrent_file.write(updated_torrent_data) + + return details_link + else: + return None + else: + return None + + except requests.exceptions.RequestException as e: + print(f"An error occurred during the request: {e}") + return None diff --git a/src/trackersetup.py b/src/trackersetup.py new file mode 100644 index 000000000..1f4787993 --- /dev/null +++ b/src/trackersetup.py @@ -0,0 +1,119 @@ +from src.trackers.HUNO import HUNO +from src.trackers.BLU import BLU +from src.trackers.BHD import BHD +from src.trackers.AITHER import AITHER +from src.trackers.STC import STC +from src.trackers.R4E import R4E +from src.trackers.THR import THR +from src.trackers.STT import STT +from src.trackers.HP import HP +from src.trackers.PTP import PTP +from src.trackers.SN import SN +from src.trackers.ACM import ACM +from src.trackers.HDB import HDB +from src.trackers.LCD import LCD +from src.trackers.TTG import TTG +from src.trackers.LST import LST +from src.trackers.FL import FL +from src.trackers.LT import LT +from src.trackers.NBL import NBL +from src.trackers.ANT import ANT +from src.trackers.PTER import PTER +from src.trackers.MTV import MTV +from 
src.trackers.JPTV import JPTV
+from src.trackers.TL import TL
+from src.trackers.HDT import HDT
+from src.trackers.RF import RF
+from src.trackers.OE import OE
+from src.trackers.BHDTV import BHDTV
+from src.trackers.RTF import RTF
+from src.trackers.OTW import OTW
+from src.trackers.FNP import FNP
+from src.trackers.CBR import CBR
+from src.trackers.UTP import UTP
+from src.trackers.AL import AL
+from src.trackers.SHRI import SHRI
+from src.trackers.TIK import TIK
+from src.trackers.TVC import TVC
+from src.trackers.PSS import PSS
+from src.trackers.ULCX import ULCX
+from src.trackers.SPD import SPD
+from src.trackers.YOINK import YOINK
+from src.trackers.HHD import HHD
+import cli_ui
+from src.console import console
+
+
+class TRACKER_SETUP:
+    def __init__(self, config):
+        self.config = config
+        # Add initialization details here
+        pass
+
+    def trackers_enabled(self, meta):
+        from data.config import config
+        if meta.get('trackers', None) is not None:
+            trackers = meta['trackers']
+        else:
+            trackers = config['TRACKERS']['default_trackers']
+        if isinstance(trackers, str):
+            trackers = trackers.split(',')
+        trackers = [s.strip().upper() for s in trackers]
+        if meta.get('manual', False):
+            trackers.insert(0, "MANUAL")
+        return trackers
+
+    def check_banned_group(self, tracker, banned_group_list, meta):
+        if meta['tag'] == "":
+            return False
+        else:
+            q = False
+            for tag in banned_group_list:
+                if isinstance(tag, list):
+                    if meta['tag'][1:].lower() == tag[0].lower():
+                        console.print(f"[bold yellow]{meta['tag'][1:]}[/bold yellow][bold red] was found on [bold yellow]{tracker}'s[/bold yellow] list of banned groups.")
+                        console.print(f"[bold red]NOTE: [bold yellow]{tag[1]}")
+                        q = True
+                else:
+                    if meta['tag'][1:].lower() == tag.lower():
+                        console.print(f"[bold yellow]{meta['tag'][1:]}[/bold yellow][bold red] was found on [bold yellow]{tracker}'s[/bold yellow] list of banned groups.")
+                        q = True
+            if q:
+                if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)):
+                    if not cli_ui.ask_yes_no(cli_ui.red, "Upload Anyway?", default=False):
+                        return True
+                else:
+                    return True
+        return False
+
+
+tracker_class_map = {
+    'ACM': ACM, 'AITHER': AITHER, 'AL': AL, 'ANT': ANT, 'BHD': BHD, 'BHDTV': BHDTV, 'BLU': BLU, 'CBR': CBR,
+    'FNP': FNP, 'FL': FL, 'HDB': HDB, 'HDT': HDT, 'HHD': HHD, 'HP': HP, 'HUNO': HUNO, 'JPTV': JPTV, 'LCD': LCD,
+    'LST': LST, 'LT': LT, 'MTV': MTV, 'NBL': NBL, 'OE': OE, 'OTW': OTW, 'PSS': PSS, 'PTP': PTP, 'PTER': PTER,
+    'R4E': R4E, 'RF': RF, 'RTF': RTF, 'SHRI': SHRI, 'SN': SN, 'SPD': SPD, 'STC': STC, 'STT': STT, 'THR': THR,
+    'TIK': TIK, 'TL': TL, 'TVC': TVC, 'TTG': TTG, 'ULCX': ULCX, 'UTP': UTP, 'YOINK': YOINK,
+}
+
+tracker_capabilities = {
+    'AITHER': {'mod_q': True, 'draft': False},
+    'BHD': {'draft_live': True},
+    'BLU': {'mod_q': True, 'draft': False},
+    'LST': {'mod_q': True, 'draft': True}
+}
+
+api_trackers = {
+    'ACM', 'AITHER', 'AL', 'BHD', 'BLU', 'CBR', 'FNP', 'HHD', 'HUNO', 'JPTV', 'LCD', 'LST', 'LT',
+    'OE', 'OTW', 'PSS', 'RF', 'R4E', 'SHRI', 'STC', 'STT', 'TIK', 'ULCX', 'UTP', 'YOINK'
+}
+
+other_api_trackers = {
+    'ANT', 'BHDTV', 'NBL', 'RTF', 'SN', 'SPD', 'TL', 'TVC'
+}
+
+http_trackers = {
+    'FL', 'HDB', 'HDT', 'MTV', 'PTER', 'TTG'
+}
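# Editor's note (annotation, not part of the patch): a minimal sketch of how the maps
# above are meant to be consumed from upload.py -- resolve each enabled tracker name
# to its class and instantiate it with the shared config:
#
#     tracker_setup = TRACKER_SETUP(config=config)
#     for name in tracker_setup.trackers_enabled(meta):
#         tracker_class = tracker_class_map.get(name)
#         if tracker_class is None:
#             continue  # e.g. "MANUAL" has no class entry
#         tracker = tracker_class(config=config)
#         if tracker_setup.check_banned_group(tracker.tracker, tracker.banned_groups, meta):
#             continue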
diff --git a/src/uphelper.py b/src/uphelper.py
new file mode 100644
index 000000000..f3a07213f
--- /dev/null
+++ b/src/uphelper.py
@@ -0,0 +1,123 @@
+import cli_ui
+from rich.console import Console
+from data.config import config
+
+console = Console()
+
+
+class UploadHelper:
+    def dupe_check(self, dupes, meta, tracker_name):
+        if not dupes:
+            console.print("[green]No dupes found")
+            meta['upload'] = True
+            return meta, False
+        else:
+            console.print()
+            dupe_text = "\n".join([d['name'] if isinstance(d, dict) else d for d in dupes])
+            console.print()
+            cli_ui.info_section(cli_ui.bold, f"Check if these are actually dupes from {tracker_name}!")
+            cli_ui.info(dupe_text)
+
+            if not meta['unattended'] or (meta['unattended'] and meta.get('unattended-confirm', False)):
+                if meta.get('dupe', False) is False:
+                    print()
+                    upload = cli_ui.ask_yes_no(f"Upload to {tracker_name} anyway?", default=False)
+                else:
+                    upload = True
+            else:
+                if meta.get('dupe', False) is False:
+                    console.print("[red]Found potential dupes. Aborting. If this is not a dupe, or you would like to upload anyway, pass --skip-dupe-check")
+                    upload = False
+                else:
+                    console.print("[yellow]Found potential dupes. --skip-dupe-check was passed. Uploading anyway")
+                    upload = True
+
+            console.print()
+            if upload is False:
+                meta['upload'] = False
+                return meta, True
+            else:
+                meta['upload'] = True
+                for each in dupes:
+                    each_name = each['name'] if isinstance(each, dict) else each
+                    if each_name == meta['name']:
+                        meta['name'] = f"{meta['name']} DUPE?"
+
+            return meta, False
+
+    def get_confirmation(self, meta):
+        if meta['debug'] is True:
+            console.print("[bold red]DEBUG: True")
+            console.print(f"Prep material saved to {meta['base_dir']}/tmp/{meta['uuid']}")
+        console.print()
+        console.print("[bold yellow]Database Info[/bold yellow]")
+        console.print(f"[bold]Title:[/bold] {meta['title']} ({meta['year']})")
+        console.print()
+        console.print(f"[bold]Overview:[/bold] {meta['overview']}")
+        console.print()
+        console.print(f"[bold]Category:[/bold] {meta['category']}")
+        if int(meta.get('tmdb', 0)) != 0:
+            console.print(f"[bold]TMDB:[/bold] https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}")
+        if int(meta.get('imdb_id', '0')) != 0:
+            console.print(f"[bold]IMDB:[/bold] https://www.imdb.com/title/tt{meta['imdb_id']}")
+        if int(meta.get('tvdb_id', '0')) != 0:
+            console.print(f"[bold]TVDB:[/bold] https://www.thetvdb.com/?id={meta['tvdb_id']}&tab=series")
+        if int(meta.get('tvmaze_id', '0')) != 0:
+            console.print(f"[bold]TVMaze:[/bold] https://www.tvmaze.com/shows/{meta['tvmaze_id']}")
+        if int(meta.get('mal_id', 0)) != 0:
+            console.print(f"[bold]MAL:[/bold] https://myanimelist.net/anime/{meta['mal_id']}")
+        console.print()
+        if int(meta.get('freeleech', '0')) != 0:
+            console.print(f"[bold]Freeleech:[/bold] {meta['freeleech']}")
+        tag = "" if meta['tag'] == "" else f" / {meta['tag'][1:]}"
+        res = meta['source'] if meta['is_disc'] == "DVD" else meta['resolution']
+        console.print(f"{res} / {meta['type']}{tag}")
+        if meta.get('personalrelease', False) is True:
+            console.print("[bold green]Personal Release![/bold green]")
+        console.print()
+        if meta.get('unattended', False) is False:
+            self.get_missing(meta)
+            ring_the_bell = "\a" if config['DEFAULT'].get("sfx_on_prompt", True) is True else ""
+            if ring_the_bell:
+                console.print(ring_the_bell)
+
+            if meta.get('is_disc', False) is True:
+                meta['keep_folder'] = False
+
+            if meta.get('keep_folder') and meta['isdir']:
+                console.print("[bold yellow]Uploading with --keep-folder[/bold yellow]")
+                kf_confirm = input("You specified --keep-folder. Uploading in folders might not be allowed. Proceed? 
[y/N]: ").strip().lower() + if kf_confirm != 'y': + console.print("[bold red]Aborting...[/bold red]") + exit() + + console.print("[bold yellow]Is this correct?[/bold yellow]") + console.print(f"[bold]Name:[/bold] {meta['name']}") + confirm = input("Correct? [y/N]: ").strip().lower() == 'y' + else: + console.print(f"[bold]Name:[/bold] {meta['name']}") + confirm = True + + return confirm + + def get_missing(self, meta): + info_notes = { + 'edition': 'Special Edition/Release', + 'description': "Please include Remux/Encode Notes if possible", + 'service': "WEB Service e.g.(AMZN, NF)", + 'region': "Disc Region", + 'imdb': 'IMDb ID (tt1234567)', + 'distributor': "Disc Distributor e.g.(BFI, Criterion)" + } + missing = [] + if meta.get('imdb_id', '0') == '0': + meta['imdb_id'] = '0' + meta['potential_missing'].append('imdb_id') + for each in meta['potential_missing']: + if str(meta.get(each, '')).strip() in ["", "None", "0"]: + missing.append(f"--{each} | {info_notes.get(each, '')}") + if missing: + cli_ui.info_section(cli_ui.yellow, "Potentially missing information:") + for each in missing: + cli_ui.info(each) + console.print() diff --git a/src/vs.py b/src/vs.py index 616b1844e..4209464f6 100644 --- a/src/vs.py +++ b/src/vs.py @@ -1,63 +1,106 @@ import vapoursynth as vs -core = vs.core -from awsmfunc import ScreenGen, DynamicTonemap, FrameInfo, zresize +from awsmfunc import ScreenGen, DynamicTonemap, zresize import random -import argparse -from typing import Union, List -from pathlib import Path -import os, sys +import os from functools import partial -# Modified version of https://git.concertos.live/AHD/ahd_utils/src/branch/master/screengn.py -def vs_screengn(source, encode, filter_b_frames, num, dir): - # prefer ffms2, fallback to lsmash for m2ts +core = vs.core + +# core.std.LoadPlugin(path="/usr/local/lib/vapoursynth/libffms2.so") +# core.std.LoadPlugin(path="/usr/local/lib/vapoursynth/libsub.so") +# core.std.LoadPlugin(path="/usr/local/lib/vapoursynth/libimwri.so") + + +def CustomFrameInfo(clip, text): + def FrameProps(n, f, clip): + # Modify the frame properties extraction here to avoid the decode issue + info = f"Frame {n} of {clip.num_frames}\nPicture type: {f.props['_PictType']}" + # Adding the frame information as text to the clip + return core.text.Text(clip, info) + + # Apply FrameProps to each frame + return core.std.FrameEval(clip, partial(FrameProps, clip=clip), prop_src=clip) + + +def optimize_images(image, config): + import platform # Ensure platform is imported here + if config.get('optimize_images', True): + if os.path.exists(image): + try: + pyver = platform.python_version_tuple() + if int(pyver[0]) == 3 and int(pyver[1]) >= 7: + import oxipng + if os.path.getsize(image) >= 16000000: + oxipng.optimize(image, level=6) + else: + oxipng.optimize(image, level=3) + except Exception as e: + print(f"Image optimization failed: {e}") + return + + +def vs_screengn(source, encode=None, filter_b_frames=False, num=5, dir=".", config=None): + if config is None: + config = {'optimize_images': True} # Default configuration + + screens_file = os.path.join(dir, "screens.txt") + + # Check if screens.txt already exists and use it if valid + if os.path.exists(screens_file): + with open(screens_file, "r") as txt: + frames = [int(line.strip()) for line in txt.readlines()] + if len(frames) == num and all(isinstance(f, int) and 0 <= f for f in frames): + print(f"Using existing frame numbers from {screens_file}") + else: + frames = [] + else: + frames = [] + + # Indexing the source using ffms2 or 
lsmash for m2ts files if str(source).endswith(".m2ts"): + print(f"Indexing {source} with LSMASHSource... This may take a while.") src = core.lsmas.LWLibavSource(source) else: - src = core.ffms2.Source(source, cachefile=f"{os.path.abspath(dir)}{os.sep}ffms2.ffms2") + cachefile = f"{os.path.abspath(dir)}{os.sep}ffms2.ffms2" + if not os.path.exists(cachefile): + print(f"Indexing {source} with ffms2... This may take a while.") + try: + src = core.ffms2.Source(source, cachefile=cachefile) + except vs.Error as e: + print(f"Error during indexing: {str(e)}") + raise + if os.path.exists(cachefile): + print(f"Indexing completed and cached at: {cachefile}") + else: + print("Indexing did not complete as expected.") - # we don't allow encodes in non-mkv containers anyway + # Check if encode is provided if encode: - enc = core.ffms2.Source(encode) + if not os.path.exists(encode): + print(f"Encode file {encode} not found. Skipping encode processing.") + encode = None + else: + enc = core.ffms2.Source(encode) - # since encodes are optional we use source length + # Use source length if encode is not provided num_frames = len(src) - # these values don't really matter, they're just to cut off intros/credits start, end = 1000, num_frames - 10000 - # filter b frames function for frameeval - def filter_ftype(n, f, clip, frame, frames, ftype="B"): - if f.props["_PictType"].decode() == ftype: - frames.append(frame) - return clip - - # generate random frame numbers, sort, and format for ScreenGen - # if filter option is on filter out non-b frames in encode - frames = [] - if filter_b_frames: - with open(os.devnull, "wb") as f: - i = 0 - while len(frames) < num: - frame = random.randint(start, end) - enc_f = enc[frame] - enc_f = enc_f.std.FrameEval(partial(filter_ftype, clip=enc_f, frame=frame, frames=frames), enc_f) - enc_f.output(f) - i += 1 - if i > num * 10: - raise ValueError("screengn: Encode doesn't seem to contain desired picture type frames.") - else: + # Generate random frame numbers for screenshots if not using existing ones + if not frames: for _ in range(num): frames.append(random.randint(start, end)) - frames = sorted(frames) - frames = [f"{x}\n" for x in frames] + frames = sorted(frames) + frames = [f"{x}\n" for x in frames] - # write to file, we might want to re-use these later - with open("screens.txt", "w") as txt: - txt.writelines(frames) + # Write the frame numbers to a file for reuse + with open(screens_file, "w") as txt: + txt.writelines(frames) + print(f"Generated and saved new frame numbers to {screens_file}") - # if an encode exists we have to crop and resize + # If an encode exists and is provided, crop and resize if encode: - if src.width != enc.width and src.height != enc.height: + if src.width != enc.width or src.height != enc.height: ref = zresize(enc, preset=src.height) crop = [(src.width - ref.width) / 2, (src.height - ref.height) / 2] src = src.std.Crop(left=crop[0], right=crop[0], top=crop[1], bottom=crop[1]) @@ -69,19 +112,25 @@ def filter_ftype(n, f, clip, frame, frames, ftype="B"): height = enc.height src = zresize(src, width=width, height=height) - # tonemap HDR + # Apply tonemapping if the source is HDR tonemapped = False if src.get_frame(0).props["_Primaries"] == 9: tonemapped = True - src = DynamicTonemap(src, src_fmt=False, libplacebo=False, adjust_gamma=True) + src = DynamicTonemap(src, src_fmt=False, libplacebo=True, adjust_gamma=True) if encode: - enc = DynamicTonemap(enc, src_fmt=False, libplacebo=False, adjust_gamma=True) + enc = DynamicTonemap(enc, src_fmt=False, 
libplacebo=True, adjust_gamma=True) - # add FrameInfo - if tonemapped == True: - src = FrameInfo(src, "Tonemapped") + # Use the custom FrameInfo function + if tonemapped: + src = CustomFrameInfo(src, "Tonemapped") + + # Generate screenshots ScreenGen(src, dir, "a") if encode: - if tonemapped == True: - enc = FrameInfo(enc, "Encode (Tonemapped)") - ScreenGen(enc, dir, "b") \ No newline at end of file + enc = CustomFrameInfo(enc, "Encode (Tonemapped)") + ScreenGen(enc, dir, "b") + + # Optimize images + for i in range(1, num + 1): + image_path = os.path.join(dir, f"{str(i).zfill(2)}a.png") + optimize_images(image_path, config) diff --git a/upload.py b/upload.py index 0c7292865..8fc5ab4be 100644 --- a/upload.py +++ b/upload.py @@ -1,113 +1,378 @@ +#!/usr/bin/env python3 + import requests from src.args import Args from src.clients import Clients -from src.prep import Prep from src.trackers.COMMON import COMMON -from src.trackers.HUNO import HUNO -from src.trackers.BLU import BLU -from src.trackers.BHD import BHD -from src.trackers.AITHER import AITHER -from src.trackers.STC import STC -from src.trackers.R4E import R4E from src.trackers.THR import THR -from src.trackers.STT import STT -from src.trackers.HP import HP from src.trackers.PTP import PTP -from src.trackers.SN import SN -from src.trackers.ACM import ACM -from src.trackers.HDB import HDB -from src.trackers.LCD import LCD -from src.trackers.TTG import TTG -from src.trackers.LST import LST -from src.trackers.FL import FL -from src.trackers.LT import LT -from src.trackers.NBL import NBL -from src.trackers.ANT import ANT -from src.trackers.PTER import PTER -from src.trackers.MTV import MTV -from src.trackers.JPTV import JPTV -from src.trackers.TL import TL -from src.trackers.TDC import TDC -from src.trackers.HDT import HDT -from src.trackers.RF import RF -from src.trackers.OE import OE -from src.trackers.BHDTV import BHDTV -from src.trackers.RTF import RTF import json from pathlib import Path import asyncio import os import sys import platform -import multiprocessing -import logging import shutil import glob import cli_ui +import traceback +import click +import re +from src.trackersetup import TRACKER_SETUP, tracker_class_map, api_trackers, other_api_trackers, http_trackers, tracker_capabilities +import time from src.console import console from rich.markdown import Markdown from rich.style import Style - -cli_ui.setup(color='always', title="L4G's Upload Assistant") -import traceback +cli_ui.setup(color='always', title="Audionut's Upload Assistant") base_dir = os.path.abspath(os.path.dirname(__file__)) try: from data.config import config -except: +except Exception: if not os.path.exists(os.path.abspath(f"{base_dir}/data/config.py")): - try: - if os.path.exists(os.path.abspath(f"{base_dir}/data/config.json")): - with open(f"{base_dir}/data/config.json", 'r', encoding='utf-8-sig') as f: - json_config = json.load(f) - f.close() - with open(f"{base_dir}/data/config.py", 'w') as f: - f.write(f"config = {json.dumps(json_config, indent=4)}") - f.close() - cli_ui.info(cli_ui.green, "Successfully updated config from .json to .py") - cli_ui.info(cli_ui.green, "It is now safe for you to delete", cli_ui.yellow, "data/config.json", "if you wish") - from data.config import config - else: - raise NotImplementedError - except: - cli_ui.info(cli_ui.red, "We have switched from .json to .py for config to have a much more lenient experience") - cli_ui.info(cli_ui.red, "Looks like the auto updater didnt work though") - cli_ui.info(cli_ui.red, "Updating is just 2 
easy steps:") - cli_ui.info(cli_ui.red, "1: Rename", cli_ui.yellow, os.path.abspath(f"{base_dir}/data/config.json"), cli_ui.red, "to", cli_ui.green, os.path.abspath(f"{base_dir}/data/config.py") ) - cli_ui.info(cli_ui.red, "2: Add", cli_ui.green, "config = ", cli_ui.red, "to the beginning of", cli_ui.green, os.path.abspath(f"{base_dir}/data/config.py")) - exit() + cli_ui.info(cli_ui.red, "Configuration file 'config.py' not found.") + cli_ui.info(cli_ui.red, "Please ensure the file is located at:", cli_ui.yellow, os.path.abspath(f"{base_dir}/data/config.py")) + cli_ui.info(cli_ui.red, "Follow the setup instructions: https://github.com/Audionut/Upload-Assistant") + exit() else: console.print(traceback.print_exc()) + +from src.prep import Prep # noqa E402 client = Clients(config=config) parser = Args(config) -async def do_the_thing(base_dir): - meta = dict() + +def get_log_file(base_dir, queue_name): + """ + Returns the path to the log file for the given base directory and queue name. + """ + safe_queue_name = queue_name.replace(" ", "_") + return os.path.join(base_dir, "tmp", f"{safe_queue_name}_processed_files.log") + + +def load_processed_files(log_file): + """ + Loads the list of processed files from the log file. + """ + if os.path.exists(log_file): + with open(log_file, "r") as f: + return set(json.load(f)) + return set() + + +def save_processed_file(log_file, file_path): + """ + Adds a processed file to the log. + """ + processed_files = load_processed_files(log_file) + processed_files.add(file_path) + with open(log_file, "w") as f: + json.dump(list(processed_files), f, indent=4) + + +def gather_files_recursive(path, allowed_extensions=None): + """ + Gather files and first-level subfolders. + Each subfolder is treated as a single unit, without exploring deeper. + """ + queue = [] + if os.path.isdir(path): + for entry in os.scandir(path): + if entry.is_dir(): + queue.append(entry.path) + elif entry.is_file() and (allowed_extensions is None or entry.name.lower().endswith(tuple(allowed_extensions))): + queue.append(entry.path) + elif os.path.isfile(path): + if allowed_extensions is None or path.lower().endswith(tuple(allowed_extensions)): + queue.append(path) + else: + console.print(f"[red]Invalid path: {path}") + return queue + + +def resolve_queue_with_glob_or_split(path, paths, allowed_extensions=None): + """ + Handle glob patterns and split path resolution. + Treat subfolders as single units and filter files by allowed_extensions. + """ + queue = [] + if os.path.exists(os.path.dirname(path)) and len(paths) <= 1: + escaped_path = path.replace('[', '[[]') + queue = [ + file for file in glob.glob(escaped_path) + if os.path.isdir(file) or (os.path.isfile(file) and (allowed_extensions is None or file.lower().endswith(tuple(allowed_extensions)))) + ] + if queue: + display_queue(queue) + elif os.path.exists(os.path.dirname(path)) and len(paths) > 1: + queue = [ + file for file in paths + if os.path.isdir(file) or (os.path.isfile(file) and (allowed_extensions is None or file.lower().endswith(tuple(allowed_extensions)))) + ] + display_queue(queue) + elif not os.path.exists(os.path.dirname(path)): + queue = [ + file for file in resolve_split_path(path) # noqa F8221 + if os.path.isdir(file) or (os.path.isfile(file) and (allowed_extensions is None or file.lower().endswith(tuple(allowed_extensions)))) + ] + display_queue(queue) + return queue + + +def extract_safe_file_locations(log_file): + """ + Parse the log file to extract file locations under the 'safe' header. 
+
+    :param log_file: Path to the log file to parse.
+    :return: List of file paths from the 'safe' section.
+    """
+    safe_section = False
+    safe_file_locations = []
+
+    with open(log_file, 'r') as f:
+        for line in f:
+            line = line.strip()
+
+            # Detect the start and end of 'safe' sections
+            if line.lower() == "safe":
+                safe_section = True
+                continue
+            elif line.lower() in {"danger", "risky"}:
+                safe_section = False
+
+            # Extract 'File Location' if in a 'safe' section
+            if safe_section and line.startswith("File Location:"):
+                match = re.search(r"File Location:\s*(.+)", line)
+                if match:
+                    safe_file_locations.append(match.group(1).strip())
+
+    return safe_file_locations
+
+
+def merge_meta(meta, saved_meta, path):
+    """Merges saved metadata into the current meta, respecting overwrite rules."""
+    # saved_meta is loaded by the caller, so there is no need to re-read meta.json here
+    overwrite_list = [
+        'trackers', 'dupe', 'debug', 'anon', 'category', 'type', 'screens', 'nohash', 'manual_edition', 'imdb', 'tmdb_manual', 'mal', 'manual',
+        'hdb', 'ptp', 'blu', 'no_season', 'no_aka', 'no_year', 'no_dub', 'no_tag', 'no_seed', 'client', 'desclink', 'descfile', 'desc', 'draft',
+        'modq', 'region', 'freeleech', 'personalrelease', 'unattended', 'manual_season', 'manual_episode', 'torrent_creation', 'qbit_tag', 'qbit_cat',
+        'skip_imghost_upload', 'imghost', 'manual_source', 'webdv', 'hardcoded-subs', 'dual_audio', 'manual_type', 'tvmaze_manual'
+    ]
+    sanitized_saved_meta = {}
+    for key, value in saved_meta.items():
+        clean_key = key.strip().strip("'").strip('"')
+        # Values supplied on the command line win over values saved in meta.json
+        if clean_key in overwrite_list and clean_key in meta and meta.get(clean_key) is not None:
+            sanitized_saved_meta[clean_key] = meta[clean_key]
+            if meta['debug']:
+                console.print(f"Overriding {clean_key} with meta value:", meta[clean_key])
+        else:
+            sanitized_saved_meta[clean_key] = value
+    meta.update(sanitized_saved_meta)
+    return sanitized_saved_meta
+
+
+def display_queue(queue, base_dir=None, queue_name=None, save_to_log=True):
+    """Displays the queued files in markdown format and optionally saves them to a log file in the tmp directory."""
+    md_text = "\n - ".join(queue)
+    console.print("\n[bold green]Queuing these files:[/bold green]", end='')
+    console.print(Markdown(f"- {md_text.rstrip()}\n\n", style=Style(color='cyan')))
+    console.print("\n\n")
+
+    # base_dir and queue_name are optional so early call sites can display without logging
+    if save_to_log and base_dir and queue_name:
+        tmp_dir = os.path.join(base_dir, "tmp")
+        os.makedirs(tmp_dir, exist_ok=True)
+        log_file = os.path.join(tmp_dir, f"{queue_name}_queue.log")
+
+        try:
+            with open(log_file, 'w') as f:
+                json.dump(queue, f, indent=4)
+            console.print(f"[bold green]Queue successfully saved to log file: {log_file}")
+        except Exception as e:
+            console.print(f"[bold red]Failed to save queue to log file: {e}")
+
+
+async def process_meta(meta, base_dir):
+    """Process the metadata for each queued path."""
+
+    if meta['imghost'] is None:
+        meta['imghost'] = config['DEFAULT']['img_host_1']
+
+    if not meta['unattended']:
+        ua = config['DEFAULT'].get('auto_mode', False)
+        if str(ua).lower() == "true":
+            meta['unattended'] = True
+            console.print("[yellow]Running in Auto Mode")
     meta['base_dir'] = base_dir
+    prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config)
+    meta = await prep.gather_prep(meta=meta, mode='cli')
+    if not meta:
+        return
+
+    meta['cutoff'] = int(config['DEFAULT'].get('cutoff_screens', 3))
+    if len(meta.get('image_list', [])) < meta.get('cutoff') and meta.get('skip_imghost_upload', False) is False:
+        if 'image_list' not in meta:
+            meta['image_list'] = []
+        return_dict = {}
+        new_images, dummy_var = prep.upload_screens(meta, meta['screens'], 1, 0, meta['screens'], [], return_dict=return_dict)
+
+        with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f:
+            json.dump(meta, f, indent=4)
+
+    elif meta.get('skip_imghost_upload', False) is True and meta.get('image_list', False) is False:
+        meta['image_list'] = []
+
+    torrent_path = os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent")
+    if not os.path.exists(torrent_path):
+        reuse_torrent = None
+        if meta.get('rehash', False) is False:
+            reuse_torrent = await client.find_existing_torrent(meta)
+            if reuse_torrent is not None:
+                prep.create_base_from_existing_torrent(reuse_torrent, meta['base_dir'], meta['uuid'])
+
+        if meta['nohash'] is False and reuse_torrent is None:
+            prep.create_torrent(meta, Path(meta['path']), "BASE")
+        if meta['nohash']:
+            meta['client'] = "none"
+
+    elif os.path.exists(torrent_path) and meta.get('rehash', False) is True and meta['nohash'] is False:
+        prep.create_torrent(meta, Path(meta['path']), "BASE")
+
+    if int(meta.get('randomized', 0)) >= 1:
+        prep.create_random_torrents(meta['base_dir'], meta['uuid'], meta['randomized'], meta['path'])
+
+
+async def do_the_thing(base_dir):
+    meta = {'base_dir': base_dir}
     paths = []
     for each in sys.argv[1:]:
         if os.path.exists(each):
             paths.append(os.path.abspath(each))
         else:
             break
+
     meta, help, before_args = parser.parse(tuple(' '.join(sys.argv[1:]).split(' ')), meta)
-    if meta['cleanup'] and os.path.exists(f"{base_dir}/tmp"):
+    if meta.get('cleanup') and os.path.exists(f"{base_dir}/tmp"):
         shutil.rmtree(f"{base_dir}/tmp")
-        console.print("[bold green]Sucessfully emptied tmp directory")
-    if not meta['path']:
+        console.print("[bold green]Successfully emptied tmp directory")
+
+    if not meta.get('path'):
         exit(0)
+
     path = meta['path']
     path = os.path.abspath(path)
     if path.endswith('"'):
         path = path[:-1]
     queue = []
-    if os.path.exists(path):
-        meta, help, before_args = parser.parse(tuple(' '.join(sys.argv[1:]).split(' ')), meta)
-        queue = [path]
+
+    log_file = os.path.join(base_dir, "tmp", f"{meta['queue']}_queue.log")
+    allowed_extensions = ['.mkv', '.mp4', '.ts']
+
+    if path.endswith('.txt') and meta.get('unit3d'):
+        console.print(f"[bold yellow]Detected a text file for queue input: {path}[/bold yellow]")
+        if os.path.exists(path):
+            safe_file_locations = extract_safe_file_locations(path)
+            if safe_file_locations:
+                console.print(f"[cyan]Extracted {len(safe_file_locations)} safe file locations from the text file.[/cyan]")
+                queue = safe_file_locations
+                meta['queue'] = "unit3d"
+
+                # Save the queue to the log file
+                try:
+                    with open(log_file, 'w') as f:
+                        json.dump(queue, f, indent=4)
+                    console.print(f"[bold green]Queue log file saved successfully: {log_file}[/bold green]")
+                except IOError as e:
+                    console.print(f"[bold red]Failed to save the queue log file: {e}[/bold red]")
+                    exit(1)
+            else:
+                console.print("[bold red]No safe file locations found in the text file. Exiting.[/bold red]")
+                exit(1)
+        else:
+            console.print(f"[bold red]Text file not found: {path}. Exiting.[/bold red]")
+            exit(1)
+
+    elif path.endswith('.log') and meta['debug']:
+        console.print(f"[bold yellow]Processing debugging queue:[/bold yellow] [bold green]{path}[/bold green]")
+        if os.path.exists(path):
+            log_file = path
+            with open(path, 'r') as f:
+                queue = json.load(f)
+            meta['queue'] = "debugging"
+
+        else:
+            console.print(f"[bold red]Log file not found: {path}. 
Exiting.[/bold red]") + exit(1) + + elif meta.get('queue'): + meta, help, before_args = parser.parse(tuple(' '.join(sys.argv[1:]).split(' ')), meta) + if os.path.exists(log_file): + with open(log_file, 'r') as f: + existing_queue = json.load(f) + console.print(f"[bold yellow]Found an existing queue log file:[/bold yellow] [green]{log_file}[/green]") + console.print(f"[cyan]The queue log contains {len(existing_queue)} items.[/cyan]") + console.print("[cyan]Do you want to edit, discard, or keep the existing queue?[/cyan]") + edit_choice = input("Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: ").strip().lower() + + if edit_choice == 'e': + edited_content = click.edit(json.dumps(existing_queue, indent=4)) + if edited_content: + try: + queue = json.loads(edited_content.strip()) + console.print("[bold green]Successfully updated the queue from the editor.") + with open(log_file, 'w') as f: + json.dump(queue, f, indent=4) + except json.JSONDecodeError as e: + console.print(f"[bold red]Failed to parse the edited content: {e}. Using the original queue.") + queue = existing_queue + else: + console.print("[bold red]No changes were made. Using the original queue.") + queue = existing_queue + elif edit_choice == 'd': + console.print("[bold yellow]Discarding the existing queue log. Creating a new queue.") + queue = [] + else: + console.print("[bold green]Keeping the existing queue as is.") + queue = existing_queue + else: + if os.path.exists(path): + queue = gather_files_recursive(path, allowed_extensions=allowed_extensions) + else: + queue = resolve_queue_with_glob_or_split(path, paths, allowed_extensions=allowed_extensions) + + console.print(f"[cyan]A new queue log file will be created:[/cyan] [green]{log_file}[/green]") + console.print(f"[cyan]The new queue will contain {len(queue)} items.[/cyan]") + console.print("[cyan]Do you want to edit the initial queue before saving?[/cyan]") + edit_choice = input("Enter 'e' to edit, or press Enter to save as is: ").strip().lower() + + if edit_choice == 'e': + edited_content = click.edit(json.dumps(queue, indent=4)) + if edited_content: + try: + queue = json.loads(edited_content.strip()) + console.print("[bold green]Successfully updated the queue from the editor.") + except json.JSONDecodeError as e: + console.print(f"[bold red]Failed to parse the edited content: {e}. Using the original queue.") + else: + console.print("[bold red]No changes were made. 
Using the original queue.") + + # Save the queue to the log file + with open(log_file, 'w') as f: + json.dump(queue, f, indent=4) + console.print(f"[bold green]Queue log file created: {log_file}[/bold green]") + + elif os.path.exists(path): + meta, help, before_args = parser.parse(tuple(' '.join(sys.argv[1:]).split(' ')), meta) + queue = [path] + else: # Search glob if dirname exists if os.path.exists(os.path.dirname(path)) and len(paths) <= 1: @@ -121,7 +386,7 @@ async def do_the_thing(base_dir): console.print("\n\n") else: console.print(f"[red]Path: [bold red]{path}[/bold red] does not exist") - + elif os.path.exists(os.path.dirname(path)) and len(paths) != 1: queue = paths md_text = "\n - ".join(queue) @@ -133,11 +398,11 @@ async def do_the_thing(base_dir): p1 = split_path[0] for i, each in enumerate(split_path): try: - if os.path.exists(p1) and not os.path.exists(f"{p1} {split_path[i+1]}"): + if os.path.exists(p1) and not os.path.exists(f"{p1} {split_path[i + 1]}"): queue.append(p1) - p1 = split_path[i+1] + p1 = split_path[i + 1] else: - p1 += f" {split_path[i+1]}" + p1 += f" {split_path[i + 1]}" except IndexError: if os.path.exists(p1): queue.append(p1) @@ -148,423 +413,234 @@ async def do_the_thing(base_dir): console.print("\n[bold green]Queuing these files:[/bold green]", end='') console.print(Markdown(f"- {md_text.rstrip()}\n\n", style=Style(color='cyan'))) console.print("\n\n") - + else: # Add Search Here - console.print(f"[red]There was an issue with your input. If you think this was not an issue, please make a report that includes the full command used.") + console.print("[red]There was an issue with your input. If you think this was not an issue, please make a report that includes the full command used.") exit() + if not queue: + console.print(f"[red]No valid files or directories found for path: {path}") + exit(1) + + if meta.get('queue'): + queue_name = meta['queue'] + log_file = get_log_file(base_dir, meta['queue']) + processed_files = load_processed_files(log_file) + queue = [file for file in queue if file not in processed_files] + if not queue: + console.print(f"[bold yellow]All files in the {meta['queue']} queue have already been processed.") + exit(0) + if meta['debug']: + display_queue(queue, base_dir, queue_name, save_to_log=False) + processed_files_count = 0 base_meta = {k: v for k, v in meta.items()} for path in queue: - meta = {k: v for k, v in base_meta.items()} - meta['path'] = path - meta['uuid'] = None + total_files = len(queue) try: - with open(f"{base_dir}/tmp/{os.path.basename(path)}/meta.json") as f: - saved_meta = json.load(f) - for key, value in saved_meta.items(): - overwrite_list = [ - 'trackers', 'dupe', 'debug', 'anon', 'category', 'type', 'screens', 'nohash', 'manual_edition', 'imdb', 'tmdb_manual', 'mal', 'manual', - 'hdb', 'ptp', 'blu', 'no_season', 'no_aka', 'no_year', 'no_dub', 'no_tag', 'no_seed', 'client', 'desclink', 'descfile', 'desc', 'draft', 'region', 'freeleech', - 'personalrelease', 'unattended', 'season', 'episode', 'torrent_creation', 'qbit_tag', 'qbit_cat', 'skip_imghost_upload', 'imghost', 'manual_source', 'webdv', 'hardcoded-subs' - ] - if meta.get(key, None) != value and key in overwrite_list: - saved_meta[key] = meta[key] - meta = saved_meta - f.close() - except FileNotFoundError: - pass + meta = base_meta.copy() + meta['path'] = path + meta['uuid'] = None + + if not path: + raise ValueError("The 'path' variable is not defined or is empty.") + + meta_file = os.path.join(base_dir, "tmp", os.path.basename(path), "meta.json") + + if 
os.path.exists(meta_file):
+                with open(meta_file, "r") as f:
+                    saved_meta = json.load(f)
+                    meta.update(merge_meta(meta, saved_meta, path))
+            else:
+                if meta['debug']:
+                    console.print(f"[yellow]No metadata file found at {meta_file}")
+
+        except Exception as e:
+            console.print(f"[red]Failed to load metadata for path '{path}': {e}")
+
+        if meta['debug']:
+            upload_start_time = time.time()
+
         console.print(f"[green]Gathering info for {os.path.basename(path)}")
-        if meta['imghost'] == None:
-            meta['imghost'] = config['DEFAULT']['img_host_1']
-        if not meta['unattended']:
-            ua = config['DEFAULT'].get('auto_mode', False)
-            if str(ua).lower() == "true":
-                meta['unattended'] = True
-                console.print("[yellow]Running in Auto Mode")
-        prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config)
-        meta = await prep.gather_prep(meta=meta, mode='cli')
-        meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await prep.get_name(meta)
-
-        if meta.get('image_list', False) in (False, []) and meta.get('skip_imghost_upload', False) == False:
-            return_dict = {}
-            meta['image_list'], dummy_var = prep.upload_screens(meta, meta['screens'], 1, 0, meta['screens'],[], return_dict)
-            if meta['debug']:
-                console.print(meta['image_list'])
-            # meta['uploaded_screens'] = True
-        elif meta.get('skip_imghost_upload', False) == True and meta.get('image_list', False) == False:
-            meta['image_list'] = []
+        await process_meta(meta, base_dir)
+        if 'we_are_uploading' not in meta:
+            console.print("[yellow]Not uploading anything for this release.[/yellow]")
+            if meta.get('queue') is not None:
+                processed_files_count += 1
+                console.print(f"[cyan]Processed {processed_files_count}/{total_files} files.")
+                if not meta['debug']:
+                    if log_file:
+                        save_processed_file(log_file, path)
-
-        if not os.path.exists(os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent")):
-            reuse_torrent = None
-            if meta.get('rehash', False) == False:
-                reuse_torrent = await client.find_existing_torrent(meta)
-                if reuse_torrent != None:
-                    prep.create_base_from_existing_torrent(reuse_torrent, meta['base_dir'], meta['uuid'])
-            if meta['nohash'] == False and reuse_torrent == None:
-                prep.create_torrent(meta, Path(meta['path']), "BASE", meta.get('piece_size_max', 0))
-            if meta['nohash']:
-                meta['client'] = "none"
-        elif os.path.exists(os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent")) and meta.get('rehash', False) == True and meta['nohash'] == False:
-            prep.create_torrent(meta, Path(meta['path']), "BASE", meta.get('piece_size_max', 0))
-        if int(meta.get('randomized', 0)) >= 1:
-            prep.create_random_torrents(meta['base_dir'], meta['uuid'], meta['randomized'], meta['path'])
-
-        if meta.get('trackers', None) != None:
-            trackers = meta['trackers']
-        else:
-            trackers = config['TRACKERS']['default_trackers']
-        if "," in trackers:
-            trackers = trackers.split(',')
-        with open (f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f:
-            json.dump(meta, f, indent=4)
-            f.close()
-        confirm = get_confirmation(meta)
-        while confirm == False:
-            # help.print_help()
-            editargs = cli_ui.ask_string("Input args that need correction e.g.(--tag NTb --category tv --tmdb 12345)")
-            editargs = (meta['path'],) + tuple(editargs.split())
-            if meta['debug']:
-                editargs = editargs + ("--debug",)
-            meta, help, before_args = parser.parse(editargs, meta)
-            # meta = await prep.tmdb_other_meta(meta)
-            meta['edit'] = True
-            meta = await prep.gather_prep(meta=meta, mode='cli')
-            meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = 
await prep.get_name(meta) - confirm = get_confirmation(meta) - - if isinstance(trackers, list) == False: - trackers = [trackers] - trackers = [s.strip().upper() for s in trackers] - if meta.get('manual', False): - trackers.insert(0, "MANUAL") - - - - #################################### - ####### Upload to Trackers ####### - #################################### - common = COMMON(config=config) - api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'STT', 'RF', 'ACM','LCD','LST','HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'TDC', 'OE', 'BHDTV', 'RTF'] - http_trackers = ['HDB', 'TTG', 'FL', 'PTER', 'HDT', 'MTV'] - tracker_class_map = { - 'BLU' : BLU, 'BHD': BHD, 'AITHER' : AITHER, 'STC' : STC, 'R4E' : R4E, 'THR' : THR, 'STT' : STT, 'HP' : HP, 'PTP' : PTP, 'RF' : RF, 'SN' : SN, - 'ACM' : ACM, 'HDB' : HDB, 'LCD': LCD, 'TTG' : TTG, 'LST' : LST, 'HUNO': HUNO, 'FL' : FL, 'LT' : LT, 'NBL' : NBL, 'ANT' : ANT, 'PTER': PTER, 'JPTV' : JPTV, - 'TL' : TL, 'TDC' : TDC, 'HDT' : HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF':RTF} - - for tracker in trackers: - if meta['name'].endswith('DUPE?'): - meta['name'] = meta['name'].replace(' DUPE?', '') - tracker = tracker.replace(" ", "").upper().strip() - if meta['debug']: - debug = "(DEBUG)" - else: - debug = "" - - if tracker in api_trackers: - tracker_class = tracker_class_map[tracker](config=config) - if meta['unattended']: - upload_to_tracker = True + prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config) + + #################################### + ####### Upload to Trackers ####### # noqa #F266 + #################################### + + common = COMMON(config=config) + tracker_setup = TRACKER_SETUP(config=config) + enabled_trackers = tracker_setup.trackers_enabled(meta) + + async def check_mod_q_and_draft(tracker_class, meta, debug, disctype): + modq, draft = None, None + + tracker_caps = tracker_capabilities.get(tracker_class.tracker, {}) + + # Handle BHD specific draft/live logic + if tracker_class.tracker == 'BHD' and tracker_caps.get('draft_live'): + draft_int = await tracker_class.get_live(meta) + draft = "Draft" if draft_int == 0 else "Live" + + # Handle mod_q and draft for other trackers else: - upload_to_tracker = cli_ui.ask_yes_no(f"Upload to {tracker_class.tracker}? {debug}", default=meta['unattended']) - if upload_to_tracker: - console.print(f"Uploading to {tracker_class.tracker}") - if check_banned_group(tracker_class.tracker, tracker_class.banned_groups, meta): - continue - dupes = await tracker_class.search_existing(meta) - dupes = await common.filter_dupes(dupes, meta) - # note BHDTV does not have search implemented. 
- meta = dupe_check(dupes, meta) - if meta['upload'] == True: - await tracker_class.upload(meta) - if tracker == 'SN': - await asyncio.sleep(16) - await client.add_to_client(meta, tracker_class.tracker) - - if tracker in http_trackers: - tracker_class = tracker_class_map[tracker](config=config) - if meta['unattended']: - upload_to_tracker = True + if tracker_caps.get('mod_q'): + modq = await tracker_class.get_flag(meta, 'modq') + modq = 'Yes' if modq else 'No' + if tracker_caps.get('draft'): + draft = await tracker_class.get_flag(meta, 'draft') + draft = 'Yes' if draft else 'No' + + return modq, draft + + for tracker in enabled_trackers: + disctype = meta.get('disctype', None) + tracker = tracker.replace(" ", "").upper().strip() + if meta['name'].endswith('DUPE?'): + meta['name'] = meta['name'].replace(' DUPE?', '') + + if meta['debug']: + debug = "(DEBUG)" else: - upload_to_tracker = cli_ui.ask_yes_no(f"Upload to {tracker_class.tracker}? {debug}", default=meta['unattended']) - if upload_to_tracker: - console.print(f"Uploading to {tracker}") - if check_banned_group(tracker_class.tracker, tracker_class.banned_groups, meta): - continue - if await tracker_class.validate_credentials(meta) == True: - dupes = await tracker_class.search_existing(meta) - dupes = await common.filter_dupes(dupes, meta) - meta = dupe_check(dupes, meta) - if meta['upload'] == True: - await tracker_class.upload(meta) + debug = "" + + if tracker in api_trackers: + tracker_class = tracker_class_map[tracker](config=config) + tracker_status = meta.get('tracker_status', {}) + upload_status = tracker_status.get(tracker, {}).get('upload', False) + console.print(f"[yellow]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[/yellow]") + + if upload_status: + modq, draft = await check_mod_q_and_draft(tracker_class, meta, debug, disctype) + + if modq is not None: + console.print(f"(modq: {modq})") + if draft is not None: + console.print(f"(draft: {draft})") + + console.print(f"Uploading to {tracker_class.tracker}") + if meta['debug']: + upload_finish_time = time.time() + console.print(f"Upload from Audionut UA processed in {upload_finish_time - upload_start_time:.2f} seconds") + await tracker_class.upload(meta, disctype) + await asyncio.sleep(0.5) + perm = config['DEFAULT'].get('get_permalink', False) + if perm: + # need a wait so we don't race the api + await asyncio.sleep(5) + await tracker_class.search_torrent_page(meta, disctype) + await asyncio.sleep(0.5) + await client.add_to_client(meta, tracker_class.tracker) + + if tracker in other_api_trackers: + tracker_class = tracker_class_map[tracker](config=config) + tracker_status = meta.get('tracker_status', {}) + upload_status = tracker_status.get(tracker, {}).get('upload', False) + console.print(f"[yellow]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[/yellow]") + + if upload_status: + console.print(f"Uploading to {tracker_class.tracker}") + + if tracker != "TL": + if tracker == "RTF": + await tracker_class.api_test(meta) + if tracker == "TL" or upload_status: + await tracker_class.upload(meta, disctype) + if tracker == 'SN': + await asyncio.sleep(16) + await asyncio.sleep(0.5) + await client.add_to_client(meta, tracker_class.tracker) + + if tracker in http_trackers: + tracker_class = tracker_class_map[tracker](config=config) + tracker_status = meta.get('tracker_status', {}) + upload_status = tracker_status.get(tracker, {}).get('upload', False) + console.print(f"[yellow]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[/yellow]") + + if 
upload_status: + console.print(f"Uploading to {tracker}") + + if await tracker_class.validate_credentials(meta) is True: + await tracker_class.upload(meta, disctype) + await asyncio.sleep(0.5) await client.add_to_client(meta, tracker_class.tracker) - if tracker == "MANUAL": - if meta['unattended']: - do_manual = True - else: - do_manual = cli_ui.ask_yes_no(f"Get files for manual upload?", default=True) - if do_manual: - for manual_tracker in trackers: - if manual_tracker != 'MANUAL': - manual_tracker = manual_tracker.replace(" ", "").upper().strip() - tracker_class = tracker_class_map[manual_tracker](config=config) - if manual_tracker in api_trackers: - await common.unit3d_edit_desc(meta, tracker_class.tracker, tracker_class.signature) - else: - await tracker_class.edit_desc(meta) - url = await prep.package(meta) - if url == False: - console.print(f"[yellow]Unable to upload prep files, they can be found at `tmp/{meta['uuid']}") + if tracker == "MANUAL": + if meta['unattended']: + do_manual = True else: - console.print(f"[green]{meta['name']}") - console.print(f"[green]Files can be found at: [yellow]{url}[/yellow]") - - if tracker == "BHD": - bhd = BHD(config=config) - draft_int = await bhd.get_live(meta) - if draft_int == 0: - draft = "Draft" - else: - draft = "Live" - if meta['unattended']: - upload_to_bhd = True - else: - upload_to_bhd = cli_ui.ask_yes_no(f"Upload to BHD? ({draft}) {debug}", default=meta['unattended']) - if upload_to_bhd: - console.print("Uploading to BHD") - if check_banned_group("BHD", bhd.banned_groups, meta): - continue - dupes = await bhd.search_existing(meta) - dupes = await common.filter_dupes(dupes, meta) - meta = dupe_check(dupes, meta) - if meta['upload'] == True: - await bhd.upload(meta) - await client.add_to_client(meta, "BHD") - - if tracker == "THR": - if meta['unattended']: - upload_to_thr = True - else: - upload_to_thr = cli_ui.ask_yes_no(f"Upload to THR? {debug}", default=meta['unattended']) - if upload_to_thr: - console.print("Uploading to THR") - #Unable to get IMDB id/Youtube Link - if meta.get('imdb_id', '0') == '0': - imdb_id = cli_ui.ask_string("Unable to find IMDB id, please enter e.g.(tt1234567)") - meta['imdb_id'] = imdb_id.replace('tt', '').zfill(7) - if meta.get('youtube', None) == None: - youtube = cli_ui.ask_string("Unable to find youtube trailer, please link one e.g.(https://www.youtube.com/watch?v=dQw4w9WgXcQ)") - meta['youtube'] = youtube - thr = THR(config=config) - try: - with requests.Session() as session: - console.print("[yellow]Logging in to THR") - session = thr.login(session) - console.print("[yellow]Searching for Dupes") - dupes = thr.search_existing(session, meta.get('imdb_id')) - dupes = await common.filter_dupes(dupes, meta) - meta = dupe_check(dupes, meta) - if meta['upload'] == True: - await thr.upload(session, meta) - await client.add_to_client(meta, "THR") - except: - console.print(traceback.print_exc()) - - if tracker == "PTP": - if meta['unattended']: - upload_to_ptp = True - else: - upload_to_ptp = cli_ui.ask_yes_no(f"Upload to {tracker}? 
{debug}", default=meta['unattended']) - if upload_to_ptp: - console.print(f"Uploading to {tracker}") - if meta.get('imdb_id', '0') == '0': - imdb_id = cli_ui.ask_string("Unable to find IMDB id, please enter e.g.(tt1234567)") - meta['imdb_id'] = imdb_id.replace('tt', '').zfill(7) - ptp = PTP(config=config) - if check_banned_group("PTP", ptp.banned_groups, meta): - continue - try: - console.print("[yellow]Searching for Group ID") - groupID = await ptp.get_group_by_imdb(meta['imdb_id']) - if groupID == None: - console.print("[yellow]No Existing Group found") - if meta.get('youtube', None) == None or "youtube" not in str(meta.get('youtube', '')): - youtube = cli_ui.ask_string("Unable to find youtube trailer, please link one e.g.(https://www.youtube.com/watch?v=dQw4w9WgXcQ)", default="") - meta['youtube'] = youtube - meta['upload'] = True + do_manual = cli_ui.ask_yes_no("Get files for manual upload?", default=True) + if do_manual: + for manual_tracker in enabled_trackers: + if manual_tracker != 'MANUAL': + manual_tracker = manual_tracker.replace(" ", "").upper().strip() + tracker_class = tracker_class_map[manual_tracker](config=config) + if manual_tracker in api_trackers: + await common.unit3d_edit_desc(meta, tracker_class.tracker, tracker_class.signature) + else: + await tracker_class.edit_desc(meta) + url = await prep.package(meta) + if url is False: + console.print(f"[yellow]Unable to upload prep files, they can be found at `tmp/{meta['uuid']}") else: - console.print("[yellow]Searching for Existing Releases") - dupes = await ptp.search_existing(groupID, meta) - dupes = await common.filter_dupes(dupes, meta) - meta = dupe_check(dupes, meta) - if meta.get('imdb_info', {}) == {}: - meta['imdb_info'] = await prep.get_imdb_info(meta['imdb_id'], meta) - if meta['upload'] == True: - ptpUrl, ptpData = await ptp.fill_upload_form(groupID, meta) - await ptp.upload(meta, ptpUrl, ptpData) - await asyncio.sleep(5) - await client.add_to_client(meta, "PTP") - except: - console.print(traceback.print_exc()) - - if tracker == "TL": - tracker_class = tracker_class_map[tracker](config=config) - if meta['unattended']: - upload_to_tracker = True - else: - upload_to_tracker = cli_ui.ask_yes_no(f"Upload to {tracker_class.tracker}? 
{debug}", default=meta['unattended']) - if upload_to_tracker: - console.print(f"Uploading to {tracker_class.tracker}") - if check_banned_group(tracker_class.tracker, tracker_class.banned_groups, meta): - continue - await tracker_class.upload(meta) - await client.add_to_client(meta, tracker_class.tracker) - - -def get_confirmation(meta): - if meta['debug'] == True: - console.print("[bold red]DEBUG: True") - console.print(f"Prep material saved to {meta['base_dir']}/tmp/{meta['uuid']}") - console.print() - cli_ui.info_section(cli_ui.yellow, "Database Info") - cli_ui.info(f"Title: {meta['title']} ({meta['year']})") - console.print() - cli_ui.info(f"Overview: {meta['overview']}") - console.print() - cli_ui.info(f"Category: {meta['category']}") - if int(meta.get('tmdb', 0)) != 0: - cli_ui.info(f"TMDB: https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}") - if int(meta.get('imdb_id', '0')) != 0: - cli_ui.info(f"IMDB: https://www.imdb.com/title/tt{meta['imdb_id']}") - if int(meta.get('tvdb_id', '0')) != 0: - cli_ui.info(f"TVDB: https://www.thetvdb.com/?id={meta['tvdb_id']}&tab=series") - if int(meta.get('mal_id', 0)) != 0: - cli_ui.info(f"MAL : https://myanimelist.net/anime/{meta['mal_id']}") - console.print() - if int(meta.get('freeleech', '0')) != 0: - cli_ui.info(f"Freeleech: {meta['freeleech']}") - if meta['tag'] == "": - tag = "" - else: - tag = f" / {meta['tag'][1:]}" - if meta['is_disc'] == "DVD": - res = meta['source'] - else: - res = meta['resolution'] - - cli_ui.info(f"{res} / {meta['type']}{tag}") - if meta.get('personalrelease', False) == True: - cli_ui.info("Personal Release!") - console.print() - if meta.get('unattended', False) == False: - get_missing(meta) - ring_the_bell = "\a" if config['DEFAULT'].get("sfx_on_prompt", True) == True else "" # \a rings the bell - cli_ui.info_section(cli_ui.yellow, f"Is this correct?{ring_the_bell}") - cli_ui.info(f"Name: {meta['name']}") - confirm = cli_ui.ask_yes_no("Correct?", default=False) - else: - cli_ui.info(f"Name: {meta['name']}") - confirm = True - return confirm - -def dupe_check(dupes, meta): - if not dupes: - console.print("[green]No dupes found") - meta['upload'] = True - return meta - else: - console.print() - dupe_text = "\n".join(dupes) - console.print() - cli_ui.info_section(cli_ui.bold, "Are these dupes?") - cli_ui.info(dupe_text) - if meta['unattended']: - if meta.get('dupe', False) == False: - console.print("[red]Found potential dupes. Aborting. If this is not a dupe, or you would like to upload anyways, pass --skip-dupe-check") - upload = False - else: - console.print("[yellow]Found potential dupes. --skip-dupe-check was passed. Uploading anyways") - upload = True - console.print() - if not meta['unattended']: - if meta.get('dupe', False) == False: - upload = cli_ui.ask_yes_no("Upload Anyways?", default=False) - else: - upload = True - if upload == False: - meta['upload'] = False - else: - meta['upload'] = True - for each in dupes: - if each == meta['name']: - meta['name'] = f"{meta['name']} DUPE?" 
+ console.print(f"[green]{meta['name']}") + console.print(f"[green]Files can be found at: [yellow]{url}[/yellow]") - return meta + if tracker == "THR": + tracker_status = meta.get('tracker_status', {}) + upload_status = tracker_status.get(tracker, {}).get('upload', False) + print(f"[yellow]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[/yellow]") + if upload_status: + thr = THR(config=config) + try: + with requests.Session() as session: + console.print("[yellow]Logging in to THR") + session = thr.login(session) + await thr.upload(session, meta, disctype) + await asyncio.sleep(0.5) + await client.add_to_client(meta, "THR") + except Exception: + console.print(traceback.format_exc()) -# Return True if banned group -def check_banned_group(tracker, banned_group_list, meta): - if meta['tag'] == "": - return False - else: - q = False - for tag in banned_group_list: - if isinstance(tag, list): - if meta['tag'][1:].lower() == tag[0].lower(): - console.print(f"[bold yellow]{meta['tag'][1:]}[/bold yellow][bold red] was found on [bold yellow]{tracker}'s[/bold yellow] list of banned groups.") - console.print(f"[bold red]NOTE: [bold yellow]{tag[1]}") - q = True - else: - if meta['tag'][1:].lower() == tag.lower(): - console.print(f"[bold yellow]{meta['tag'][1:]}[/bold yellow][bold red] was found on [bold yellow]{tracker}'s[/bold yellow] list of banned groups.") - q = True - if q: - if not cli_ui.ask_yes_no(cli_ui.red, "Upload Anyways?", default=False): - return True - return False - -def get_missing(meta): - info_notes = { - 'edition' : 'Special Edition/Release', - 'description' : "Please include Remux/Encode Notes if possible (either here or edit your upload)", - 'service' : "WEB Service e.g.(AMZN, NF)", - 'region' : "Disc Region", - 'imdb' : 'IMDb ID (tt1234567)', - 'distributor' : "Disc Distributor e.g.(BFI, Criterion, etc)" - } - missing = [] - if meta.get('imdb_id', '0') == '0': - meta['imdb_id'] = '0' - meta['potential_missing'].append('imdb_id') - if len(meta['potential_missing']) > 0: - for each in meta['potential_missing']: - if str(meta.get(each, '')).replace(' ', '') in ["", "None", "0"]: - if each == "imdb_id": - each = 'imdb' - missing.append(f"--{each} | {info_notes.get(each)}") - if missing != []: - cli_ui.info_section(cli_ui.yellow, "Potentially missing information:") - for each in missing: - if each.split('|')[0].replace('--', '').strip() in ["imdb"]: - cli_ui.info(cli_ui.red, each) - else: - cli_ui.info(each) + if tracker == "PTP": + tracker_status = meta.get('tracker_status', {}) + upload_status = tracker_status.get(tracker, {}).get('upload', False) + print(f"[yellow]Tracker: {tracker}, Upload: {'Yes' if upload_status else 'No'}[yellow]") + + if upload_status: + ptp = PTP(config=config) + groupID = meta['ptp_groupID'] + ptpUrl, ptpData = await ptp.fill_upload_form(groupID, meta) + await ptp.upload(meta, ptpUrl, ptpData, disctype) + await asyncio.sleep(5) + await client.add_to_client(meta, "PTP") + + if meta.get('queue') is not None: + processed_files_count += 1 + console.print(f"[cyan]Processed {processed_files_count}/{total_files} files.") + if not meta['debug']: + if log_file: + save_processed_file(log_file, path) - console.print() - return if __name__ == '__main__': pyver = platform.python_version_tuple() - if int(pyver[0]) != 3: - console.print("[bold red]Python2 Detected, please use python3") - exit() - else: - if int(pyver[1]) <= 6: - console.print("[bold red]Python <= 3.6 Detected, please use Python >=3.7") - loop = asyncio.get_event_loop() - 
loop.run_until_complete(do_the_thing(base_dir)) - else: - asyncio.run(do_the_thing(base_dir)) - + if int(pyver[0]) != 3 or int(pyver[1]) < 12: + console.print("[bold red]Python version is too low. Please use Python 3.12 or higher.") + sys.exit(1) + + try: + asyncio.run(do_the_thing(base_dir)) # Pass the correct base_dir value here + except (KeyboardInterrupt): + console.print("[bold red]Program interrupted. Exiting.")
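+
+
+# Example invocations (illustrative only; paths are hypothetical and the flag
+# names shown should be checked against the parser in src/args.py):
+#   python3 upload.py "/downloads/Some.Movie.2023.1080p.BluRay.x264-GRP.mkv" --tmdb 12345 --debug
+#   python3 upload.py "/downloads/packs/" --queue my_queue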