From 97999c1316c48e69a95d41ef3d0dc12143acdd93 Mon Sep 17 00:00:00 2001 From: David Maisonave <47364845+David-Maisonave@users.noreply.github.com> Date: Wed, 28 Aug 2024 09:25:34 -0400 Subject: [PATCH] Adding plugin DupFileManager, and updating RenameFile and FileMonitor. (#422) --- plugins/DupFileManager/DupFileManager.py | 463 +++++++++++++++ plugins/DupFileManager/DupFileManager.yml | 70 +++ .../DupFileManager/DupFileManager_config.py | 26 + plugins/DupFileManager/README.md | 50 ++ plugins/DupFileManager/StashPluginHelper.py | 526 ++++++++++++++++++ plugins/DupFileManager/requirements.txt | 4 + plugins/FileMonitor/README.md | 120 ++-- plugins/FileMonitor/StashPluginHelper.py | 259 +++++++-- plugins/FileMonitor/filemonitor.py | 379 ++++++++++--- plugins/FileMonitor/filemonitor.yml | 12 +- plugins/FileMonitor/filemonitor_config.py | 118 ++-- .../FileMonitor/filemonitor_self_unit_test.py | 46 ++ .../FileMonitor/filemonitor_task_examples.py | 53 ++ plugins/FileMonitor/requirements.txt | 5 +- plugins/RenameFile/README.md | 10 +- plugins/RenameFile/StashPluginHelper.py | 526 ++++++++++++++++++ plugins/RenameFile/renamefile.py | 432 ++++---------- plugins/RenameFile/renamefile.yml | 6 +- plugins/RenameFile/renamefile_settings.py | 4 +- plugins/RenameFile/requirements.txt | 3 +- 20 files changed, 2505 insertions(+), 607 deletions(-) create mode 100644 plugins/DupFileManager/DupFileManager.py create mode 100644 plugins/DupFileManager/DupFileManager.yml create mode 100644 plugins/DupFileManager/DupFileManager_config.py create mode 100644 plugins/DupFileManager/README.md create mode 100644 plugins/DupFileManager/StashPluginHelper.py create mode 100644 plugins/DupFileManager/requirements.txt create mode 100644 plugins/FileMonitor/filemonitor_self_unit_test.py create mode 100644 plugins/FileMonitor/filemonitor_task_examples.py create mode 100644 plugins/RenameFile/StashPluginHelper.py diff --git a/plugins/DupFileManager/DupFileManager.py b/plugins/DupFileManager/DupFileManager.py new file mode 100644 index 00000000..c9ef4a16 --- /dev/null +++ b/plugins/DupFileManager/DupFileManager.py @@ -0,0 +1,463 @@ +# Description: This is a Stash plugin which manages duplicate files. 
+# By David Maisonave (aka Axter) Jul-2024 (https://www.axter.com/) +# Get the latest developers version from following link: https://github.com/David-Maisonave/Axter-Stash/tree/main/plugins/DupFileManager +# Note: To call this script outside of Stash, pass argument --url +# Example: python DupFileManager.py --url http://localhost:9999 -a +import os, sys, time, pathlib, argparse, platform, shutil, logging +from StashPluginHelper import StashPluginHelper +from DupFileManager_config import config # Import config from DupFileManager_config.py + +parser = argparse.ArgumentParser() +parser.add_argument('--url', '-u', dest='stash_url', type=str, help='Add Stash URL') +parser.add_argument('--trace', '-t', dest='trace', action='store_true', help='Enables debug trace mode.') +parser.add_argument('--add_dup_tag', '-a', dest='dup_tag', action='store_true', help='Set a tag to duplicate files.') +parser.add_argument('--del_tag_dup', '-d', dest='del_tag', action='store_true', help='Only delete scenes having DuplicateMarkForDeletion tag.') +parser.add_argument('--remove_dup', '-r', dest='remove', action='store_true', help='Remove (delete) duplicate files.') +parse_args = parser.parse_args() + +settings = { + "mergeDupFilename": False, + "permanentlyDelete": False, + "whitelistDelDupInSameFolder": False, + "whitelistDoTagLowResDup": False, + "zCleanAfterDel": False, + "zSwapHighRes": False, + "zSwapLongLength": False, + "zWhitelist": "", + "zxGraylist": "", + "zyBlacklist": "", + "zyMaxDupToProcess": 0, + "zzdebugTracing": False, +} +stash = StashPluginHelper( + stash_url=parse_args.stash_url, + debugTracing=parse_args.trace, + settings=settings, + config=config, + maxbytes=10*1024*1024, + ) +if len(sys.argv) > 1: + stash.Log(f"argv = {sys.argv}") +else: + stash.Trace(f"No command line arguments. 
JSON_INPUT['args'] = {stash.JSON_INPUT['args']}") +stash.Status(logLevel=logging.DEBUG) + +# stash.Trace(f"\nStarting (__file__={__file__}) (stash.CALLED_AS_STASH_PLUGIN={stash.CALLED_AS_STASH_PLUGIN}) (stash.DEBUG_TRACING={stash.DEBUG_TRACING}) (stash.PLUGIN_TASK_NAME={stash.PLUGIN_TASK_NAME})************************************************") +# stash.encodeToUtf8 = True + + +LOG_STASH_N_PLUGIN = stash.LOG_TO_STASH if stash.CALLED_AS_STASH_PLUGIN else stash.LOG_TO_CONSOLE + stash.LOG_TO_FILE +listSeparator = stash.Setting('listSeparator', ',', notEmpty=True) +addPrimaryDupPathToDetails = stash.Setting('addPrimaryDupPathToDetails') +mergeDupFilename = stash.Setting('mergeDupFilename') +moveToTrashCan = False if stash.Setting('permanentlyDelete') else True +alternateTrashCanPath = stash.Setting('dup_path') +whitelistDelDupInSameFolder = stash.Setting('whitelistDelDupInSameFolder') +whitelistDoTagLowResDup = stash.Setting('whitelistDoTagLowResDup') +maxDupToProcess = int(stash.Setting('zyMaxDupToProcess')) +swapHighRes = stash.Setting('zSwapHighRes') +swapLongLength = stash.Setting('zSwapLongLength') +significantTimeDiff = stash.Setting('significantTimeDiff') +toRecycleBeforeSwap = stash.Setting('toRecycleBeforeSwap') +cleanAfterDel = stash.Setting('zCleanAfterDel') +duration_diff = float(stash.Setting('duration_diff')) +if duration_diff > 10: + duration_diff = 10 +elif duration_diff < 1: + duration_diff = 1 + +# significantTimeDiff can not be higher than 1 and shouldn't be lower than .5 +if significantTimeDiff > 1: + significantTimeDiff = 1 +if significantTimeDiff < .5: + significantTimeDiff = .5 + + +duplicateMarkForDeletion = stash.Setting('DupFileTag') +if duplicateMarkForDeletion == "": + duplicateMarkForDeletion = 'DuplicateMarkForDeletion' + +duplicateWhitelistTag = stash.Setting('DupWhiteListTag') +if duplicateWhitelistTag == "": + duplicateWhitelistTag = 'DuplicateWhitelistFile' + +excludeMergeTags = [duplicateMarkForDeletion, duplicateWhitelistTag] +stash.init_mergeMetadata(excludeMergeTags) + +graylist = stash.Setting('zxGraylist').split(listSeparator) +graylist = [item.lower() for item in graylist] +if graylist == [""] : graylist = [] +stash.Trace(f"graylist = {graylist}") +whitelist = stash.Setting('zWhitelist').split(listSeparator) +whitelist = [item.lower() for item in whitelist] +if whitelist == [""] : whitelist = [] +stash.Trace(f"whitelist = {whitelist}") +blacklist = stash.Setting('zyBlacklist').split(listSeparator) +blacklist = [item.lower() for item in blacklist] +if blacklist == [""] : blacklist = [] +stash.Trace(f"blacklist = {blacklist}") + +def realpath(path): + """ + get_symbolic_target for win + """ + try: + import win32file + f = win32file.CreateFile(path, win32file.GENERIC_READ, + win32file.FILE_SHARE_READ, None, + win32file.OPEN_EXISTING, + win32file.FILE_FLAG_BACKUP_SEMANTICS, None) + target = win32file.GetFinalPathNameByHandle(f, 0) + # an above gives us something like u'\\\\?\\C:\\tmp\\scalarizr\\3.3.0.7978' + return target.strip('\\\\?\\') + except ImportError: + handle = open_dir(path) + target = get_symbolic_target(handle) + check_closed(handle) + return target + +def isReparsePoint(path): + import win32api + import win32con + from parse_reparsepoint import Navigator + FinalPathname = realpath(path) + stash.Log(f"(path='{path}') (FinalPathname='{FinalPathname}')") + if FinalPathname != path: + stash.Log(f"Symbolic link '{path}'") + return True + if not os.path.isdir(path): + path = os.path.dirname(path) + return win32api.GetFileAttributes(path) & 
win32con.FILE_ATTRIBUTE_REPARSE_POINT + +def testReparsePointAndSymLink(merge=False, deleteDup=False): + stash.Trace(f"Debug Tracing (platform.system()={platform.system()})") + myTestPath1 = r"B:\V\V\Tip\POV - Holly Molly petite ginger anal slut - RedTube.mp4" # not a reparse point or symbolic link + myTestPath2 = r"B:\_\SpecialSet\Amateur Anal Attempts\BRCC test studio name.m2ts" # reparse point + myTestPath3 = r"B:\_\SpecialSet\Amateur Anal Attempts\Amateur Anal Attempts 4.mp4" #symbolic link + myTestPath4 = r"E:\Stash\plugins\RenameFile\README.md" #symbolic link + myTestPath5 = r"E:\_\David-Maisonave\Axter-Stash\plugins\RenameFile\README.md" #symbolic link + myTestPath6 = r"E:\_\David-Maisonave\Axter-Stash\plugins\DeleteMe\Renamer\README.md" # not reparse point + stash.Log(f"Testing '{myTestPath1}'") + if isReparsePoint(myTestPath1): + stash.Log(f"isSymLink '{myTestPath1}'") + else: + stash.Log(f"Not isSymLink '{myTestPath1}'") + + if isReparsePoint(myTestPath2): + stash.Log(f"isSymLink '{myTestPath2}'") + else: + stash.Log(f"Not isSymLink '{myTestPath2}'") + + if isReparsePoint(myTestPath3): + stash.Log(f"isSymLink '{myTestPath3}'") + else: + stash.Log(f"Not isSymLink '{myTestPath3}'") + + if isReparsePoint(myTestPath4): + stash.Log(f"isSymLink '{myTestPath4}'") + else: + stash.Log(f"Not isSymLink '{myTestPath4}'") + + if isReparsePoint(myTestPath5): + stash.Log(f"isSymLink '{myTestPath5}'") + else: + stash.Log(f"Not isSymLink '{myTestPath5}'") + + if isReparsePoint(myTestPath6): + stash.Log(f"isSymLink '{myTestPath6}'") + else: + stash.Log(f"Not isSymLink '{myTestPath6}'") + return + + +def createTagId(tagName, tagName_descp, deleteIfExist = False): + tagId = stash.find_tags(q=tagName) + if len(tagId): + tagId = tagId[0] + if deleteIfExist: + stash.destroy_tag(int(tagId['id'])) + else: + return tagId['id'] + tagId = stash.create_tag({"name":tagName, "description":tagName_descp, "ignore_auto_tag": True}) + stash.Log(f"Dup-tagId={tagId['id']}") + return tagId['id'] + +def setTagId(tagId, tagName, sceneDetails, DupFileToKeep): + details = "" + ORG_DATA_DICT = {'id' : sceneDetails['id']} + dataDict = ORG_DATA_DICT.copy() + doAddTag = True + if addPrimaryDupPathToDetails: + BaseDupStr = f"BaseDup={DupFileToKeep['files'][0]['path']}\n{stash.STASH_URL}/scenes/{DupFileToKeep['id']}\n" + if sceneDetails['details'] == "": + details = BaseDupStr + elif not sceneDetails['details'].startswith(BaseDupStr): + details = f"{BaseDupStr};\n{sceneDetails['details']}" + for tag in sceneDetails['tags']: + if tag['name'] == tagName: + doAddTag = False + break + if doAddTag: + dataDict.update({'tag_ids' : tagId}) + if details != "": + dataDict.update({'details' : details}) + if dataDict != ORG_DATA_DICT: + stash.update_scene(dataDict) + stash.Trace(f"[setTagId] Updated {sceneDetails['files'][0]['path']} with metadata {dataDict}", toAscii=True) + else: + stash.Trace(f"[setTagId] Nothing to update {sceneDetails['files'][0]['path']}.", toAscii=True) + + +def isInList(listToCk, pathToCk): + pathToCk = pathToCk.lower() + for item in listToCk: + if pathToCk.startswith(item): + return True + return False + +def hasSameDir(path1, path2): + if pathlib.Path(path1).resolve().parent == pathlib.Path(path2).resolve().parent: + return True + return False + +def sendToTrash(path): + if not os.path.isfile(path): + stash.Warn(f"File does not exist: {path}.", toAscii=True) + return False + try: + from send2trash import send2trash # Requirement: pip install Send2Trash + send2trash(path) + return True + except Exception as e: + 
stash.Error(f"Failed to send file {path} to recycle bin. Error: {e}", toAscii=True) + try: + if os.path.isfile(path): + os.remove(path) + return True + except Exception as e: + stash.Error(f"Failed to delete file {path}. Error: {e}", toAscii=True) + return False + +def significantLessTime(durrationToKeep, durrationOther): + timeDiff = durrationToKeep / durrationOther + if timeDiff < significantTimeDiff: + return True + return False + +def isSwapCandidate(DupFileToKeep, DupFile): + # Don't move if both are in whitelist + if isInList(whitelist, DupFileToKeep['files'][0]['path']) and isInList(whitelist, DupFile['files'][0]['path']): + return False + if swapHighRes and (int(DupFileToKeep['files'][0]['width']) > int(DupFile['files'][0]['width']) or int(DupFileToKeep['files'][0]['height']) > int(DupFile['files'][0]['height'])): + if not significantLessTime(int(DupFileToKeep['files'][0]['duration']), int(DupFile['files'][0]['duration'])): + return True + else: + stash.Warn(f"File '{DupFileToKeep['files'][0]['path']}' has a higher resolution than '{DupFile['files'][0]['path']}', but the duration is significantly shorter.", toAscii=True) + if swapLongLength and int(DupFileToKeep['files'][0]['duration']) > int(DupFile['files'][0]['duration']): + if int(DupFileToKeep['files'][0]['width']) >= int(DupFile['files'][0]['width']) or int(DupFileToKeep['files'][0]['height']) >= int(DupFile['files'][0]['height']): + return True + return False + +def mangeDupFiles(merge=False, deleteDup=False, tagDuplicates=False): + duplicateMarkForDeletion_descp = 'Tag added to duplicate scenes so-as to tag them for deletion.' + stash.Trace(f"duplicateMarkForDeletion = {duplicateMarkForDeletion}") + dupTagId = createTagId(duplicateMarkForDeletion, duplicateMarkForDeletion_descp) + stash.Trace(f"dupTagId={dupTagId} name={duplicateMarkForDeletion}") + + dupWhitelistTagId = None + if whitelistDoTagLowResDup: + stash.Trace(f"duplicateWhitelistTag = {duplicateWhitelistTag}") + duplicateWhitelistTag_descp = 'Tag added to duplicate scenes which are in the whitelist. This means there are two or more duplicates in the whitelist.' 
+ dupWhitelistTagId = createTagId(duplicateWhitelistTag, duplicateWhitelistTag_descp) + stash.Trace(f"dupWhitelistTagId={dupWhitelistTagId} name={duplicateWhitelistTag}") + + QtyDupSet = 0 + QtyDup = 0 + QtyExactDup = 0 + QtyAlmostDup = 0 + QtyRealTimeDiff = 0 + QtyTagForDel = 0 + QtySkipForDel = 0 + QtySwap = 0 + QtyMerge = 0 + QtyDeleted = 0 + stash.Log("#########################################################################") + stash.Trace("#########################################################################") + stash.Log(f"Waiting for find_duplicate_scenes_diff to return results; duration_diff={duration_diff}; significantTimeDiff={significantTimeDiff}", printTo=LOG_STASH_N_PLUGIN) + DupFileSets = stash.find_duplicate_scenes_diff(duration_diff=duration_diff) + qtyResults = len(DupFileSets) + stash.Trace("#########################################################################") + for DupFileSet in DupFileSets: + stash.Trace(f"DupFileSet={DupFileSet}") + QtyDupSet+=1 + stash.Progress(QtyDupSet, qtyResults) + SepLine = "---------------------------" + DupFileToKeep = "" + DupToCopyFrom = "" + DupFileDetailList = [] + for DupFile in DupFileSet: + QtyDup+=1 + stash.log.sl.progress(f"Scene ID = {DupFile['id']}") + time.sleep(2) + Scene = stash.find_scene(DupFile['id']) + sceneData = f"Scene = {Scene}" + stash.Trace(sceneData, toAscii=True) + DupFileDetailList = DupFileDetailList + [Scene] + if DupFileToKeep != "": + if int(DupFileToKeep['files'][0]['duration']) == int(Scene['files'][0]['duration']): # Do not count fractions of a second as a difference + QtyExactDup+=1 + else: + QtyAlmostDup+=1 + SepLine = "***************************" + if significantLessTime(int(DupFileToKeep['files'][0]['duration']), int(Scene['files'][0]['duration'])): + QtyRealTimeDiff += 1 + if int(DupFileToKeep['files'][0]['width']) < int(Scene['files'][0]['width']) or int(DupFileToKeep['files'][0]['height']) < int(Scene['files'][0]['height']): + DupFileToKeep = Scene + elif int(DupFileToKeep['files'][0]['duration']) < int(Scene['files'][0]['duration']): + DupFileToKeep = Scene + elif isInList(whitelist, Scene['files'][0]['path']) and not isInList(whitelist, DupFileToKeep['files'][0]['path']): + DupFileToKeep = Scene + elif isInList(blacklist, DupFileToKeep['files'][0]['path']) and not isInList(blacklist, Scene['files'][0]['path']): + DupFileToKeep = Scene + elif isInList(graylist, Scene['files'][0]['path']) and not isInList(graylist, DupFileToKeep['files'][0]['path']): + DupFileToKeep = Scene + elif len(DupFileToKeep['files'][0]['path']) < len(Scene['files'][0]['path']): + DupFileToKeep = Scene + elif int(DupFileToKeep['files'][0]['size']) < int(Scene['files'][0]['size']): + DupFileToKeep = Scene + else: + DupFileToKeep = Scene + # stash.Trace(f"DupFileToKeep = {DupFileToKeep}") + stash.Trace(f"KeepID={DupFileToKeep['id']}, ID={DupFile['id']} duration=({Scene['files'][0]['duration']}), Size=({Scene['files'][0]['size']}), Res=({Scene['files'][0]['width']} x {Scene['files'][0]['height']}) Name={Scene['files'][0]['path']}, KeepPath={DupFileToKeep['files'][0]['path']}", toAscii=True) + + for DupFile in DupFileDetailList: + if DupFile['id'] != DupFileToKeep['id']: + if merge: + result = stash.merge_metadata(DupFile, DupFileToKeep) + if result != "Nothing To Merge": + QtyMerge += 1 + + if isInList(whitelist, DupFile['files'][0]['path']) and (not whitelistDelDupInSameFolder or not hasSameDir(DupFile['files'][0]['path'], DupFileToKeep['files'][0]['path'])): + if isSwapCandidate(DupFileToKeep, DupFile): + if merge: + 
stash.merge_metadata(DupFileToKeep, DupFile) + if toRecycleBeforeSwap: + sendToTrash(DupFile['files'][0]['path']) + shutil.move(DupFileToKeep['files'][0]['path'], DupFile['files'][0]['path']) + stash.Log(f"Moved better file '{DupFileToKeep['files'][0]['path']}' to '{DupFile['files'][0]['path']}'", toAscii=True, printTo=LOG_STASH_N_PLUGIN) + DupFileToKeep = DupFile + QtySwap+=1 + else: + stash.Log(f"NOT processing duplicate, because it's in whitelist. '{DupFile['files'][0]['path']}'", toAscii=True) + if dupWhitelistTagId and tagDuplicates: + setTagId(dupWhitelistTagId, duplicateWhitelistTag, DupFile, DupFileToKeep) + QtySkipForDel+=1 + else: + if deleteDup: + DupFileName = DupFile['files'][0]['path'] + DupFileNameOnly = pathlib.Path(DupFileName).stem + stash.Warn(f"Deleting duplicate '{DupFileName}'", toAscii=True, printTo=LOG_STASH_N_PLUGIN) + if alternateTrashCanPath != "": + destPath = f"{alternateTrashCanPath }{os.sep}{DupFileNameOnly}" + if os.path.isfile(destPath): + destPath = f"{alternateTrashCanPath }{os.sep}_{time.time()}_{DupFileNameOnly}" + shutil.move(DupFileName, destPath) + elif moveToTrashCan: + sendToTrash(DupFileName) + stash.destroy_scene(DupFile['id'], delete_file=True) + QtyDeleted += 1 + elif tagDuplicates: + if QtyTagForDel == 0: + stash.Log(f"Tagging duplicate {DupFile['files'][0]['path']} for deletion with tag {duplicateMarkForDeletion}.", toAscii=True, printTo=LOG_STASH_N_PLUGIN) + else: + stash.Log(f"Tagging duplicate {DupFile['files'][0]['path']} for deletion.", toAscii=True, printTo=LOG_STASH_N_PLUGIN) + setTagId(dupTagId, duplicateMarkForDeletion, DupFile, DupFileToKeep) + QtyTagForDel+=1 + stash.Trace(SepLine) + if maxDupToProcess > 0 and QtyDup > maxDupToProcess: + break + + stash.Log(f"QtyDupSet={QtyDupSet}, QtyDup={QtyDup}, QtyDeleted={QtyDeleted}, QtySwap={QtySwap}, QtyTagForDel={QtyTagForDel}, QtySkipForDel={QtySkipForDel}, QtyExactDup={QtyExactDup}, QtyAlmostDup={QtyAlmostDup}, QtyMerge={QtyMerge}, QtyRealTimeDiff={QtyRealTimeDiff}", printTo=LOG_STASH_N_PLUGIN) + if cleanAfterDel: + stash.Log("Adding clean jobs to the Task Queue", printTo=LOG_STASH_N_PLUGIN) + stash.metadata_clean(paths=stash.STASH_PATHS) + stash.metadata_clean_generated() + stash.optimise_database() + +def deleteTagggedDuplicates(): + tagId = stash.find_tags(q=duplicateMarkForDeletion) + if len(tagId) > 0 and 'id' in tagId[0]: + tagId = tagId[0]['id'] + else: + stash.Warn(f"Could not find tag ID for tag '{duplicateMarkForDeletion}'.") + return + QtyDup = 0 + QtyDeleted = 0 + QtyFailedQuery = 0 + stash.Trace("#########################################################################") + sceneIDs = stash.find_scenes(f={"tags": {"value":tagId, "modifier":"INCLUDES"}}, fragment='id') + qtyResults = len(sceneIDs) + stash.Trace(f"Found {qtyResults} scenes with tag ({duplicateMarkForDeletion}): sceneIDs = {sceneIDs}") + for sceneID in sceneIDs: + # stash.Trace(f"Getting scene data for scene ID {sceneID['id']}.") + QtyDup += 1 + prgs = QtyDup / qtyResults + stash.Progress(QtyDup, qtyResults) + scene = stash.find_scene(sceneID['id']) + if scene == None or len(scene) == 0: + stash.Warn(f"Could not get scene data for scene ID {sceneID['id']}.") + QtyFailedQuery += 1 + continue + # stash.Log(f"scene={scene}") + DupFileName = scene['files'][0]['path'] + DupFileNameOnly = pathlib.Path(DupFileName).stem + stash.Warn(f"Deleting duplicate '{DupFileName}'", toAscii=True, printTo=LOG_STASH_N_PLUGIN) + if alternateTrashCanPath != "": + destPath = f"{alternateTrashCanPath }{os.sep}{DupFileNameOnly}" + if 
os.path.isfile(destPath): + destPath = f"{alternateTrashCanPath }{os.sep}_{time.time()}_{DupFileNameOnly}" + shutil.move(DupFileName, destPath) + elif moveToTrashCan: + sendToTrash(DupFileName) + result = stash.destroy_scene(scene['id'], delete_file=True) + stash.Trace(f"destroy_scene result={result} for file {DupFileName}", toAscii=True) + QtyDeleted += 1 + stash.Log(f"QtyDup={QtyDup}, QtyDeleted={QtyDeleted}, QtyFailedQuery={QtyFailedQuery}", printTo=LOG_STASH_N_PLUGIN) + return + +def testSetDupTagOnScene(sceneId): + scene = stash.find_scene(sceneId) + stash.Log(f"scene={scene}") + stash.Log(f"scene tags={scene['tags']}") + tag_ids = [dupTagId] + for tag in scene['tags']: + tag_ids = tag_ids + [tag['id']] + stash.Log(f"tag_ids={tag_ids}") + stash.update_scene({'id' : scene['id'], 'tag_ids' : tag_ids}) + +if stash.PLUGIN_TASK_NAME == "tag_duplicates_task": + mangeDupFiles(tagDuplicates=True, merge=mergeDupFilename) + stash.Trace(f"{stash.PLUGIN_TASK_NAME} EXIT") +elif stash.PLUGIN_TASK_NAME == "delete_tagged_duplicates_task": + deleteTagggedDuplicates() + stash.Trace(f"{stash.PLUGIN_TASK_NAME} EXIT") +elif stash.PLUGIN_TASK_NAME == "delete_duplicates_task": + mangeDupFiles(deleteDup=True, merge=mergeDupFilename) + stash.Trace(f"{stash.PLUGIN_TASK_NAME} EXIT") +elif parse_args.dup_tag: + mangeDupFiles(tagDuplicates=True, merge=mergeDupFilename) + stash.Trace(f"Tag duplicate EXIT") +elif parse_args.del_tag: + deleteTagggedDuplicates() + stash.Trace(f"Delete Tagged duplicates EXIT") +elif parse_args.remove: + mangeDupFiles(deleteDup=True, merge=mergeDupFilename) + stash.Trace(f"Delete duplicate EXIT") +else: + stash.Log(f"Nothing to do!!! (PLUGIN_ARGS_MODE={stash.PLUGIN_TASK_NAME})") + + + + + +stash.Trace("\n*********************************\nEXITING ***********************\n*********************************") diff --git a/plugins/DupFileManager/DupFileManager.yml b/plugins/DupFileManager/DupFileManager.yml new file mode 100644 index 00000000..c75f561f --- /dev/null +++ b/plugins/DupFileManager/DupFileManager.yml @@ -0,0 +1,70 @@ +name: DupFileManager +description: Manages duplicate files. +version: 0.1.2 +url: https://github.com/David-Maisonave/Axter-Stash/tree/main/plugins/DupFileManager +settings: + mergeDupFilename: + displayName: Merge Duplicate Tags + description: Before deletion, merge metadata from duplicate. E.g. Tag names, performers, studios, title, galleries, rating, details, etc... + type: BOOLEAN + permanentlyDelete: + displayName: Permanent Delete + description: Enable to permanently delete files, instead of moving files to trash can. + type: BOOLEAN + whitelistDelDupInSameFolder: + displayName: Whitelist Delete In Same Folder + description: Allow whitelist deletion of duplicates within the same whitelist folder. + type: BOOLEAN + whitelistDoTagLowResDup: + displayName: Whitelist Duplicate Tagging + description: Enable to tag whitelist duplicates of lower resolution or duration or same folder. + type: BOOLEAN + zCleanAfterDel: + displayName: Run Clean After Delete + description: After running a 'Delete Duplicates' task, run Clean, Clean-Generated, and Optimize-Database. + type: BOOLEAN + zSwapHighRes: + displayName: Swap High Resolution + description: If enabled, swap higher resolution duplicate files to preferred path. + type: BOOLEAN + zSwapLongLength: + displayName: Swap Longer Duration + description: If enabled, swap longer duration media files to preferred path. Longer is determine by significantLongerTime field. 
type: BOOLEAN
+  zWhitelist:
+    displayName: White List
+    description: A comma separated list of paths NOT to be deleted. E.g. C:\Favorite\,E:\MustKeep\
+    type: STRING
+  zxGraylist:
+    displayName: Gray List
+    description: List of preferential paths used to determine which duplicate should be the primary. E.g. C:\2nd_Favorite\,H:\ShouldKeep\
+    type: STRING
+  zyBlacklist:
+    displayName: Black List
+    description: List of LEAST preferential paths used to determine the primary candidates for deletion. E.g. C:\Downloads\,F:\DeleteMeFirst\
+    type: STRING
+  zyMaxDupToProcess:
+    displayName: Max Dup Process
+    description: Maximum number of duplicates to process. If 0, there is no limit.
+    type: NUMBER
+  zzdebugTracing:
+    displayName: Debug Tracing
+    description: (Default=false) [***For Advanced Users***] Enable debug tracing. When enabled, additional tracing logging is added to Stash\plugins\DupFileManager\DupFileManager.log
+    type: BOOLEAN
+exec:
+  - python
+  - "{pluginDir}/DupFileManager.py"
+interface: raw
+tasks:
+  - name: Tag Duplicates
+    description: Set tag DuplicateMarkForDeletion on the duplicates with lower resolution, duration, file name length, or a blacklist path.
+    defaultArgs:
+      mode: tag_duplicates_task
+  - name: Delete Tagged Duplicates
+    description: Only delete scenes having the DuplicateMarkForDeletion tag.
+    defaultArgs:
+      mode: delete_tagged_duplicates_task
+  - name: Delete Duplicates
+    description: Delete duplicate scenes. Performs deletion without first tagging.
+    defaultArgs:
+      mode: delete_duplicates_task
diff --git a/plugins/DupFileManager/DupFileManager_config.py b/plugins/DupFileManager/DupFileManager_config.py
new file mode 100644
index 00000000..ab5b8178
--- /dev/null
+++ b/plugins/DupFileManager/DupFileManager_config.py
@@ -0,0 +1,26 @@
+# Description: This is a Stash plugin which manages duplicate files.
+# By David Maisonave (aka Axter) Jul-2024 (https://www.axter.com/)
+# Get the latest developers version from following link: https://github.com/David-Maisonave/Axter-Stash/tree/main/plugins/DupFileManager
+config = {
+    # If enabled, adds the primary duplicate path to the scene detail.
+    "addPrimaryDupPathToDetails" : True,
+    # Alternative path to move duplicate files to.
+    "dup_path": "", #Example: "C:\\TempDeleteFolder"
+    # The percentage threshold at which a duration is considered significantly shorter.
+    "significantTimeDiff" : .90, # 90% threshold
+    # Value passed to the Stash API function FindDuplicateScenes.
+    "duration_diff" : 10, # (default=10) A value from 1 to 10.
+    # If enabled, moves the destination file to the recycle bin before swapping in the Hi-Res file.
+    "toRecycleBeforeSwap" : True,
+    # Character used to separate items on the whitelist, blacklist, and graylist
+    "listSeparator" : ",",
+    # Tag used to tag duplicates with lower resolution, duration, and file name length.
+    "DupFileTag" : "DuplicateMarkForDeletion",
+    # Tag name used to tag duplicates in the whitelist. E.g. DuplicateWhitelistFile
+    "DupWhiteListTag" : "DuplicateWhitelistFile",
+
+    # The following fields are ONLY used when running DupFileManager in script mode
+    "endpoint_Scheme" : "http", # Define endpoint to use when contacting the Stash server
+    "endpoint_Host" : "0.0.0.0", # Define endpoint to use when contacting the Stash server
+    "endpoint_Port" : 9999, # Define endpoint to use when contacting the Stash server
+}
diff --git a/plugins/DupFileManager/README.md b/plugins/DupFileManager/README.md
new file mode 100644
index 00000000..7d0cf052
--- /dev/null
+++ b/plugins/DupFileManager/README.md
@@ -0,0 +1,50 @@
+# DupFileManager: Ver 0.1.2 (By David Maisonave)
+
+DupFileManager is a [Stash](https://github.com/stashapp/stash) plugin which manages duplicate files in the Stash system.
+
+### Features
+
+- Can merge potential metadata sources found in duplicate file names, such as tag names, performers, and studios.
+  - Normally when Stash searches the file name for tag names, performers, and studios, it only does so using the primary file.
+- Delete duplicate files task with the following options:
+  - Tasks (Settings->Task->[Plugin Tasks]->DupFileManager)
+    - **Tag Duplicates** - Set tag DuplicateMarkForDeletion on the duplicates with lower resolution, duration, file name length, and/or a blacklist path.
+    - **Delete Tagged Duplicates** - Delete scenes having the DuplicateMarkForDeletion tag.
+    - **Delete Duplicates** - Deletes duplicate files. Performs deletion without first tagging.
+  - Plugin UI options (Settings->Plugins->Plugins->[DupFileManager])
+    - Has a 3-tier path selection to determine which duplicates to keep, and which should be candidates for deletion.
+      - **Whitelist** - List of paths NOT to be deleted.
+        - E.g. C:\Favorite\,E:\MustKeep\
+      - **Gray-List** - List of preferential paths used to determine which duplicate should be the primary.
+        - E.g. C:\2nd_Favorite\,H:\ShouldKeep\
+      - **Blacklist** - List of LEAST preferential paths used to determine the primary candidates for deletion.
+        - E.g. C:\Downloads\,F:\DeleteMeFirst\
+    - **Permanent Delete** - Enable to permanently delete files, instead of moving files to the trash can.
+    - **Max Dup Process** - Use to limit the maximum number of files to process. Can be used to do a limited test run.
+    - **Merge Duplicate Tags** - Before deletion, merge metadata from the duplicate. E.g. Tag names, performers, studios, title, galleries, rating, details, etc...
+    - **Swap High Resolution** - When enabled, swaps higher resolution files between whitelist and blacklist/graylist files.
+    - **Swap Longer Duration** - When enabled, swaps in the scene with the longer duration.
+  - Options available via DupFileManager_config.py
+    - **dup_path** - Alternate path to move deleted files to. Example: "C:\TempDeleteFolder"
+    - **toRecycleBeforeSwap** - When enabled, moves the destination file to the recycle bin before swapping files.
+    - **addPrimaryDupPathToDetails** - If enabled, adds the primary duplicate path to the scene detail.
+
+### Requirements
+
+`pip install --upgrade stashapp-tools`
+`pip install pyYAML`
+`pip install Send2Trash`
+
+### Installation
+
+- Follow **Requirements** instructions.
+- In the stash plugin directory (C:\Users\MyUserName\.stash\plugins), create a folder named **DupFileManager**.
+- Copy all the plugin files to this folder (**C:\Users\MyUserName\\.stash\plugins\DupFileManager**).
+- Click the **[Reload Plugins]** button in Stash->Settings->Plugins->Plugins.
+
+That's it!!!
+
+### Options
+
+- Options are accessible in the GUI via Settings->Plugins->Plugins->[DupFileManager].
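+- DupFileManager can also run in script mode, outside of Stash. A usage sketch (it assumes Stash is listening on http://localhost:9999; the flags come from the argparse options in DupFileManager.py):
+  - Tag duplicates: `python DupFileManager.py --url http://localhost:9999 --add_dup_tag`
+  - Delete scenes already tagged DuplicateMarkForDeletion: `python DupFileManager.py --url http://localhost:9999 --del_tag_dup`
+  - Remove duplicate files: `python DupFileManager.py --url http://localhost:9999 --remove_dup`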
+- More options available in DupFileManager_config.py. diff --git a/plugins/DupFileManager/StashPluginHelper.py b/plugins/DupFileManager/StashPluginHelper.py new file mode 100644 index 00000000..6f0d3d15 --- /dev/null +++ b/plugins/DupFileManager/StashPluginHelper.py @@ -0,0 +1,526 @@ +from stashapi.stashapp import StashInterface +from logging.handlers import RotatingFileHandler +import re, inspect, sys, os, pathlib, logging, json +import concurrent.futures +from stashapi.stash_types import PhashDistance +import __main__ + +_ARGUMENT_UNSPECIFIED_ = "_ARGUMENT_UNSPECIFIED_" + +# StashPluginHelper (By David Maisonave aka Axter) + # See end of this file for example usage + # Log Features: + # Can optionally log out to multiple outputs for each Log or Trace call. + # Logging includes source code line number + # Sets a maximum plugin log file size + # Stash Interface Features: + # Gets STASH_URL value from command line argument and/or from STDIN_READ + # Sets FRAGMENT_SERVER based on command line arguments or STDIN_READ + # Sets PLUGIN_ID based on the main script file name (in lower case) + # Gets PLUGIN_TASK_NAME value + # Sets pluginSettings (The plugin UI settings) + # Misc Features: + # Gets DRY_RUN value from command line argument and/or from UI and/or from config file + # Gets DEBUG_TRACING value from command line argument and/or from UI and/or from config file + # Sets RUNNING_IN_COMMAND_LINE_MODE to True if detects multiple arguments + # Sets CALLED_AS_STASH_PLUGIN to True if it's able to read from STDIN_READ +class StashPluginHelper(StashInterface): + # Primary Members for external reference + PLUGIN_TASK_NAME = None + PLUGIN_ID = None + PLUGIN_CONFIGURATION = None + PLUGINS_PATH = None + pluginSettings = None + pluginConfig = None + STASH_URL = None + STASH_CONFIGURATION = None + JSON_INPUT = None + DEBUG_TRACING = False + DRY_RUN = False + CALLED_AS_STASH_PLUGIN = False + RUNNING_IN_COMMAND_LINE_MODE = False + FRAGMENT_SERVER = None + STASHPATHSCONFIG = None + STASH_PATHS = [] + API_KEY = None + excludeMergeTags = None + + # printTo argument + LOG_TO_FILE = 1 + LOG_TO_CONSOLE = 2 # Note: Only see output when running in command line mode. In plugin mode, this output is lost. + LOG_TO_STDERR = 4 # Note: In plugin mode, output to StdErr ALWAYS gets sent to stash logging as an error. + LOG_TO_STASH = 8 + LOG_TO_WARN = 16 + LOG_TO_ERROR = 32 + LOG_TO_CRITICAL = 64 + LOG_TO_ALL = LOG_TO_FILE + LOG_TO_CONSOLE + LOG_TO_STDERR + LOG_TO_STASH + + # Misc class variables + MAIN_SCRIPT_NAME = None + LOG_LEVEL = logging.INFO + LOG_FILE_DIR = None + LOG_FILE_NAME = None + STDIN_READ = None + pluginLog = None + logLinePreviousHits = [] + thredPool = None + STASH_INTERFACE_INIT = False + _mergeMetadata = None + encodeToUtf8 = False + convertToAscii = False # If set True, it takes precedence over encodeToUtf8 + + # Prefix message value + LEV_TRACE = "TRACE: " + LEV_DBG = "DBG: " + LEV_INF = "INF: " + LEV_WRN = "WRN: " + LEV_ERR = "ERR: " + LEV_CRITICAL = "CRITICAL: " + + # Default format + LOG_FORMAT = "[%(asctime)s] %(message)s" + + # Externally modifiable variables + log_to_err_set = LOG_TO_FILE + LOG_TO_STDERR # This can be changed by the calling source in order to customize what targets get error messages + log_to_norm = LOG_TO_FILE + LOG_TO_CONSOLE # Can be change so-as to set target output for normal logging + # Warn message goes to both plugin log file and stash when sent to Stash log file. 
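+    # (log_to_wrn_set defaults to LOG_TO_STASH; the printTo values are bit flags, so targets can be combined, e.g. LOG_TO_FILE + LOG_TO_STASH.)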
+ log_to_wrn_set = LOG_TO_STASH # This can be changed by the calling source in order to customize what targets get warning messages + + def __init__(self, + debugTracing = None, # Set debugTracing to True so as to output debug and trace logging + logFormat = LOG_FORMAT, # Plugin log line format + dateFmt = "%y%m%d %H:%M:%S", # Date format when logging to plugin log file + maxbytes = 8*1024*1024, # Max size of plugin log file + backupcount = 2, # Backup counts when log file size reaches max size + logToWrnSet = 0, # Customize the target output set which will get warning logging + logToErrSet = 0, # Customize the target output set which will get error logging + logToNormSet = 0, # Customize the target output set which will get normal logging + logFilePath = "", # Plugin log file. If empty, the log file name will be set based on current python file name and path + mainScriptName = "", # The main plugin script file name (full path) + pluginID = "", + settings = None, # Default settings for UI fields + config = None, # From pluginName_config.py or pluginName_setting.py + fragmentServer = None, + stash_url = None, # Stash URL (endpoint URL) Example: http://localhost:9999 + apiKey = None, # API Key only needed when username and password set while running script via command line + DebugTraceFieldName = "zzdebugTracing", + DryRunFieldName = "zzdryRun", + setStashLoggerAsPluginLogger = False): + self.thredPool = concurrent.futures.ThreadPoolExecutor(max_workers=2) + if logToWrnSet: self.log_to_wrn_set = logToWrnSet + if logToErrSet: self.log_to_err_set = logToErrSet + if logToNormSet: self.log_to_norm = logToNormSet + if stash_url and len(stash_url): self.STASH_URL = stash_url + self.MAIN_SCRIPT_NAME = mainScriptName if mainScriptName != "" else __main__.__file__ + self.PLUGIN_ID = pluginID if pluginID != "" else pathlib.Path(self.MAIN_SCRIPT_NAME).stem + # print(f"self.MAIN_SCRIPT_NAME={self.MAIN_SCRIPT_NAME}, self.PLUGIN_ID={self.PLUGIN_ID}", file=sys.stderr) + self.LOG_FILE_NAME = logFilePath if logFilePath != "" else f"{pathlib.Path(self.MAIN_SCRIPT_NAME).resolve().parent}{os.sep}{pathlib.Path(self.MAIN_SCRIPT_NAME).stem}.log" + self.LOG_FILE_DIR = pathlib.Path(self.LOG_FILE_NAME).resolve().parent + RFH = RotatingFileHandler( + filename=self.LOG_FILE_NAME, + mode='a', + maxBytes=maxbytes, + backupCount=backupcount, + encoding=None, + delay=0 + ) + if fragmentServer: + self.FRAGMENT_SERVER = fragmentServer + else: + self.FRAGMENT_SERVER = {'Scheme': 'http', 'Host': '0.0.0.0', 'Port': '9999', 'SessionCookie': {'Name': 'session', 'Value': '', 'Path': '', 'Domain': '', 'Expires': '0001-01-01T00:00:00Z', 'RawExpires': '', 'MaxAge': 0, 'Secure': False, 'HttpOnly': False, 'SameSite': 0, 'Raw': '', 'Unparsed': None}, 'Dir': os.path.dirname(pathlib.Path(self.MAIN_SCRIPT_NAME).resolve().parent), 'PluginDir': pathlib.Path(self.MAIN_SCRIPT_NAME).resolve().parent} + + if debugTracing: self.DEBUG_TRACING = debugTracing + if config: + self.pluginConfig = config + if self.Setting('apiKey', "") != "": + self.FRAGMENT_SERVER['ApiKey'] = self.Setting('apiKey') + + + if apiKey and apiKey != "": + self.FRAGMENT_SERVER['ApiKey'] = apiKey + + if len(sys.argv) > 1: + RUNNING_IN_COMMAND_LINE_MODE = True + if not debugTracing or not stash_url: + for argValue in sys.argv[1:]: + if argValue.lower() == "--trace": + self.DEBUG_TRACING = True + elif argValue.lower() == "--dry_run" or argValue.lower() == "--dryrun": + self.DRY_RUN = True + elif ":" in argValue and not self.STASH_URL: + self.STASH_URL = argValue + if 
self.STASH_URL: + endpointUrlArr = self.STASH_URL.split(":") + if len(endpointUrlArr) == 3: + self.FRAGMENT_SERVER['Scheme'] = endpointUrlArr[0] + self.FRAGMENT_SERVER['Host'] = endpointUrlArr[1][2:] + self.FRAGMENT_SERVER['Port'] = endpointUrlArr[2] + super().__init__(self.FRAGMENT_SERVER) + self.STASH_INTERFACE_INIT = True + else: + try: + self.STDIN_READ = sys.stdin.read() + self.CALLED_AS_STASH_PLUGIN = True + except: + pass + if self.STDIN_READ: + self.JSON_INPUT = json.loads(self.STDIN_READ) + if "args" in self.JSON_INPUT and "mode" in self.JSON_INPUT["args"]: + self.PLUGIN_TASK_NAME = self.JSON_INPUT["args"]["mode"] + self.FRAGMENT_SERVER = self.JSON_INPUT["server_connection"] + self.STASH_URL = f"{self.FRAGMENT_SERVER['Scheme']}://{self.FRAGMENT_SERVER['Host']}:{self.FRAGMENT_SERVER['Port']}" + super().__init__(self.FRAGMENT_SERVER) + self.STASH_INTERFACE_INIT = True + + if self.STASH_URL.startswith("http://0.0.0.0:"): + self.STASH_URL = self.STASH_URL.replace("http://0.0.0.0:", "http://localhost:") + + if self.STASH_INTERFACE_INIT: + self.PLUGIN_CONFIGURATION = self.get_configuration()["plugins"] + self.STASH_CONFIGURATION = self.get_configuration()["general"] + self.STASHPATHSCONFIG = self.STASH_CONFIGURATION['stashes'] + if 'pluginsPath' in self.STASH_CONFIGURATION: + self.PLUGINS_PATH = self.STASH_CONFIGURATION['pluginsPath'] + for item in self.STASHPATHSCONFIG: + self.STASH_PATHS.append(item["path"]) + if settings: + self.pluginSettings = settings + if self.PLUGIN_ID in self.PLUGIN_CONFIGURATION: + self.pluginSettings.update(self.PLUGIN_CONFIGURATION[self.PLUGIN_ID]) + if 'apiKey' in self.STASH_CONFIGURATION: + self.API_KEY = self.STASH_CONFIGURATION['apiKey'] + + self.DRY_RUN = self.Setting(DryRunFieldName, self.DRY_RUN) + self.DEBUG_TRACING = self.Setting(DebugTraceFieldName, self.DEBUG_TRACING) + if self.DEBUG_TRACING: self.LOG_LEVEL = logging.DEBUG + + logging.basicConfig(level=self.LOG_LEVEL, format=logFormat, datefmt=dateFmt, handlers=[RFH]) + self.pluginLog = logging.getLogger(pathlib.Path(self.MAIN_SCRIPT_NAME).stem) + if setStashLoggerAsPluginLogger: + self.log = self.pluginLog + + def __del__(self): + self.thredPool.shutdown(wait=False) + + def Setting(self, name, default=_ARGUMENT_UNSPECIFIED_, raiseEx=True, notEmpty=False): + if self.pluginSettings != None and name in self.pluginSettings: + if notEmpty == False or self.pluginSettings[name] != "": + return self.pluginSettings[name] + if self.pluginConfig != None and name in self.pluginConfig: + if notEmpty == False or self.pluginConfig[name] != "": + return self.pluginConfig[name] + if default == _ARGUMENT_UNSPECIFIED_ and raiseEx: + raise Exception(f"Missing {name} from both UI settings and config file settings.") + return default + + def Log(self, logMsg, printTo = 0, logLevel = logging.INFO, lineNo = -1, levelStr = "", logAlways = False, toAscii = None): + if toAscii or (toAscii == None and (self.encodeToUtf8 or self.convertToAscii)): + logMsg = self.asc2(logMsg) + else: + logMsg = logMsg + if printTo == 0: + printTo = self.log_to_norm + elif printTo == self.LOG_TO_ERROR and logLevel == logging.INFO: + logLevel = logging.ERROR + printTo = self.log_to_err_set + elif printTo == self.LOG_TO_CRITICAL and logLevel == logging.INFO: + logLevel = logging.CRITICAL + printTo = self.log_to_err_set + elif printTo == self.LOG_TO_WARN and logLevel == logging.INFO: + logLevel = logging.WARN + printTo = self.log_to_wrn_set + if lineNo == -1: + lineNo = inspect.currentframe().f_back.f_lineno + LN_Str = f"[LN:{lineNo}]" + # 
print(f"{LN_Str}, {logAlways}, {self.LOG_LEVEL}, {logging.DEBUG}, {levelStr}, {logMsg}") + if logLevel == logging.DEBUG and (logAlways == False or self.LOG_LEVEL == logging.DEBUG): + if levelStr == "": levelStr = self.LEV_DBG + if printTo & self.LOG_TO_FILE: self.pluginLog.debug(f"{LN_Str} {levelStr}{logMsg}") + if printTo & self.LOG_TO_STASH: self.log.debug(f"{LN_Str} {levelStr}{logMsg}") + elif logLevel == logging.INFO or logLevel == logging.DEBUG: + if levelStr == "": levelStr = self.LEV_INF if logLevel == logging.INFO else self.LEV_DBG + if printTo & self.LOG_TO_FILE: self.pluginLog.info(f"{LN_Str} {levelStr}{logMsg}") + if printTo & self.LOG_TO_STASH: self.log.info(f"{LN_Str} {levelStr}{logMsg}") + elif logLevel == logging.WARN: + if levelStr == "": levelStr = self.LEV_WRN + if printTo & self.LOG_TO_FILE: self.pluginLog.warning(f"{LN_Str} {levelStr}{logMsg}") + if printTo & self.LOG_TO_STASH: self.log.warning(f"{LN_Str} {levelStr}{logMsg}") + elif logLevel == logging.ERROR: + if levelStr == "": levelStr = self.LEV_ERR + if printTo & self.LOG_TO_FILE: self.pluginLog.error(f"{LN_Str} {levelStr}{logMsg}") + if printTo & self.LOG_TO_STASH: self.log.error(f"{LN_Str} {levelStr}{logMsg}") + elif logLevel == logging.CRITICAL: + if levelStr == "": levelStr = self.LEV_CRITICAL + if printTo & self.LOG_TO_FILE: self.pluginLog.critical(f"{LN_Str} {levelStr}{logMsg}") + if printTo & self.LOG_TO_STASH: self.log.error(f"{LN_Str} {levelStr}{logMsg}") + if (printTo & self.LOG_TO_CONSOLE) and (logLevel != logging.DEBUG or self.DEBUG_TRACING or logAlways): + print(f"{LN_Str} {levelStr}{logMsg}") + if (printTo & self.LOG_TO_STDERR) and (logLevel != logging.DEBUG or self.DEBUG_TRACING or logAlways): + print(f"StdErr: {LN_Str} {levelStr}{logMsg}", file=sys.stderr) + + def Trace(self, logMsg = "", printTo = 0, logAlways = False, lineNo = -1, toAscii = None): + if printTo == 0: printTo = self.LOG_TO_FILE + if lineNo == -1: + lineNo = inspect.currentframe().f_back.f_lineno + logLev = logging.INFO if logAlways else logging.DEBUG + if self.DEBUG_TRACING or logAlways: + if logMsg == "": + logMsg = f"Line number {lineNo}..." + self.Log(logMsg, printTo, logLev, lineNo, self.LEV_TRACE, logAlways, toAscii=toAscii) + + # Log once per session. Only logs the first time called from a particular line number in the code. + def TraceOnce(self, logMsg = "", printTo = 0, logAlways = False, toAscii = None): + lineNo = inspect.currentframe().f_back.f_lineno + if self.DEBUG_TRACING or logAlways: + FuncAndLineNo = f"{inspect.currentframe().f_back.f_code.co_name}:{lineNo}" + if FuncAndLineNo in self.logLinePreviousHits: + return + self.logLinePreviousHits.append(FuncAndLineNo) + self.Trace(logMsg, printTo, logAlways, lineNo, toAscii=toAscii) + + # Log INFO on first call, then do Trace on remaining calls. 
+ def LogOnce(self, logMsg = "", printTo = 0, logAlways = False, traceOnRemainingCalls = True, toAscii = None): + if printTo == 0: printTo = self.LOG_TO_FILE + lineNo = inspect.currentframe().f_back.f_lineno + FuncAndLineNo = f"{inspect.currentframe().f_back.f_code.co_name}:{lineNo}" + if FuncAndLineNo in self.logLinePreviousHits: + if traceOnRemainingCalls: + self.Trace(logMsg, printTo, logAlways, lineNo, toAscii=toAscii) + else: + self.logLinePreviousHits.append(FuncAndLineNo) + self.Log(logMsg, printTo, logging.INFO, lineNo, toAscii=toAscii) + + def Warn(self, logMsg, printTo = 0, toAscii = None): + if printTo == 0: printTo = self.log_to_wrn_set + lineNo = inspect.currentframe().f_back.f_lineno + self.Log(logMsg, printTo, logging.WARN, lineNo, toAscii=toAscii) + + def Error(self, logMsg, printTo = 0, toAscii = None): + if printTo == 0: printTo = self.log_to_err_set + lineNo = inspect.currentframe().f_back.f_lineno + self.Log(logMsg, printTo, logging.ERROR, lineNo, toAscii=toAscii) + + def Status(self, printTo = 0, logLevel = logging.INFO, lineNo = -1): + if printTo == 0: printTo = self.log_to_norm + if lineNo == -1: + lineNo = inspect.currentframe().f_back.f_lineno + self.Log(f"StashPluginHelper Status: (CALLED_AS_STASH_PLUGIN={self.CALLED_AS_STASH_PLUGIN}), (RUNNING_IN_COMMAND_LINE_MODE={self.RUNNING_IN_COMMAND_LINE_MODE}), (DEBUG_TRACING={self.DEBUG_TRACING}), (DRY_RUN={self.DRY_RUN}), (PLUGIN_ID={self.PLUGIN_ID}), (PLUGIN_TASK_NAME={self.PLUGIN_TASK_NAME}), (STASH_URL={self.STASH_URL}), (MAIN_SCRIPT_NAME={self.MAIN_SCRIPT_NAME})", + printTo, logLevel, lineNo) + + def ExecuteProcess(self, args, ExecDetach=False): + import platform, subprocess + is_windows = any(platform.win32_ver()) + pid = None + self.Trace(f"is_windows={is_windows} args={args}") + if is_windows: + if ExecDetach: + self.Trace("Executing process using Windows DETACHED_PROCESS") + DETACHED_PROCESS = 0x00000008 + pid = subprocess.Popen(args,creationflags=DETACHED_PROCESS, shell=True).pid + else: + pid = subprocess.Popen(args, shell=True).pid + else: + self.Trace("Executing process using normal Popen") + pid = subprocess.Popen(args).pid + self.Trace(f"pid={pid}") + return pid + + def ExecutePythonScript(self, args, ExecDetach=True): + PythonExe = f"{sys.executable}" + argsWithPython = [f"{PythonExe}"] + args + return self.ExecuteProcess(argsWithPython,ExecDetach=ExecDetach) + + def Submit(self, *args, **kwargs): + return self.thredPool.submit(*args, **kwargs) + + def asc2(self, data, convertToAscii=None): + if convertToAscii or (convertToAscii == None and self.convertToAscii): + return ascii(data) + return str(str(data).encode('utf-8'))[2:-1] # This works better for logging than ascii function + # data = str(data).encode('ascii','ignore') # This works better for logging than ascii function + # return str(data)[2:-1] # strip out b'str' + + def init_mergeMetadata(self, excludeMergeTags=None): + self.excludeMergeTags = excludeMergeTags + self._mergeMetadata = mergeMetadata(self, self.excludeMergeTags) + + # Must call init_mergeMetadata, before calling merge_metadata + def merge_metadata(self, SrcData, DestData): # Input arguments can be scene ID or scene metadata + if type(SrcData) is int: + SrcData = self.find_scene(SrcData) + DestData = self.find_scene(DestData) + return self._mergeMetadata.merge(SrcData, DestData) + + def Progress(self, currentIndex, maxCount): + progress = (currentIndex / maxCount) if currentIndex < maxCount else (maxCount / currentIndex) + self.log.progress(progress) + + def run_plugin(self, plugin_id, 
task_mode=None, args:dict={}, asyn=False): + """Runs a plugin operation. + The operation is run immediately and does not use the job queue. + Args: + plugin_id (ID): plugin_id + task_name (str, optional): Plugin task to perform + args (dict, optional): Arguments to pass to plugin. Plugin access via JSON_INPUT['args'] + Returns: + A map of the result. + """ + query = """mutation RunPluginOperation($plugin_id: ID!, $args: Map!) { + runPluginOperation(plugin_id: $plugin_id, args: $args) + }""" + if task_mode != None: + args.update({"mode" : task_mode}) + variables = { + "plugin_id": plugin_id, + "args": args, + } + if asyn: + self.Submit(self.call_GQL, query, variables) + return f"Made asynchronous call for plugin {plugin_id}" + else: + return self.call_GQL(query, variables) + + def find_duplicate_scenes_diff(self, distance: PhashDistance=PhashDistance.EXACT, fragment='id', duration_diff: float=10.00 ): + query = """ + query FindDuplicateScenes($distance: Int, $duration_diff: Float) { + findDuplicateScenes(distance: $distance, duration_diff: $duration_diff) { + ...SceneSlim + } + } + """ + if fragment: + query = re.sub(r'\.\.\.SceneSlim', fragment, query) + else: + query += "fragment SceneSlim on Scene { id }" + + variables = { "distance": distance, "duration_diff": duration_diff } + result = self.call_GQL(query, variables) + return result['findDuplicateScenes'] + + # ################################################################################################# + # The below functions extends class StashInterface with functions which are not yet in the class + def get_all_scenes(self): + query_all_scenes = """ + query AllScenes { + allScenes { + id + updated_at + } + } + """ + return self.call_GQL(query_all_scenes) + + def metadata_autotag(self, paths:list=[], performers:list=[], studios:list=[], tags:list=[]): + query = """ + mutation MetadataAutoTag($input:AutoTagMetadataInput!) { + metadataAutoTag(input: $input) + } + """ + metadata_autotag_input = { + "paths":paths, + "performers": performers, + "studios":studios, + "tags":tags, + } + result = self.call_GQL(query, {"input": metadata_autotag_input}) + return result + + def backup_database(self): + return self.call_GQL("mutation { backupDatabase(input: {download: false})}") + + def optimise_database(self): + return self.call_GQL("mutation OptimiseDatabase { optimiseDatabase }") + + def metadata_clean_generated(self, blobFiles=True, dryRun=False, imageThumbnails=True, markers=True, screenshots=True, sprites=True, transcodes=True): + query = """ + mutation MetadataCleanGenerated($input: CleanGeneratedInput!) 
{ + metadataCleanGenerated(input: $input) + } + """ + clean_metadata_input = { + "blobFiles": blobFiles, + "dryRun": dryRun, + "imageThumbnails": imageThumbnails, + "markers": markers, + "screenshots": screenshots, + "sprites": sprites, + "transcodes": transcodes, + } + result = self.call_GQL(query, {"input": clean_metadata_input}) + return result + + def rename_generated_files(self): + return self.call_GQL("mutation MigrateHashNaming {migrateHashNaming}") + +class mergeMetadata: # A class to merge scene metadata from source scene to destination scene + srcData = None + destData = None + stash = None + excludeMergeTags = None + dataDict = None + result = "Nothing To Merge" + def __init__(self, stash, excludeMergeTags=None): + self.stash = stash + self.excludeMergeTags = excludeMergeTags + + def merge(self, SrcData, DestData): + self.srcData = SrcData + self.destData = DestData + ORG_DATA_DICT = {'id' : self.destData['id']} + self.dataDict = ORG_DATA_DICT.copy() + self.mergeItems('tags', 'tag_ids', [], excludeName=self.excludeMergeTags) + self.mergeItems('performers', 'performer_ids', []) + self.mergeItems('galleries', 'gallery_ids', []) + self.mergeItems('movies', 'movies', []) + self.mergeItems('urls', listToAdd=self.destData['urls'], NotStartWith=self.stash.STASH_URL) + self.mergeItem('studio', 'studio_id', 'id') + self.mergeItem('title') + self.mergeItem('director') + self.mergeItem('date') + self.mergeItem('details') + self.mergeItem('rating100') + self.mergeItem('code') + if self.dataDict != ORG_DATA_DICT: + self.stash.Trace(f"Updating scene ID({self.destData['id']}) with {self.dataDict}; path={self.destData['files'][0]['path']}", toAscii=True) + self.result = self.stash.update_scene(self.dataDict) + return self.result + + def Nothing(self, Data): + if not Data or Data == "" or (type(Data) is str and Data.strip() == ""): + return True + return False + + def mergeItem(self,fieldName, updateFieldName=None, subField=None): + if updateFieldName == None: + updateFieldName = fieldName + if self.Nothing(self.destData[fieldName]) and not self.Nothing(self.srcData[fieldName]): + if subField == None: + self.dataDict.update({ updateFieldName : self.srcData[fieldName]}) + else: + self.dataDict.update({ updateFieldName : self.srcData[fieldName][subField]}) + def mergeItems(self, fieldName, updateFieldName=None, listToAdd=[], NotStartWith=None, excludeName=None): + dataAdded = "" + for item in self.srcData[fieldName]: + if item not in self.destData[fieldName]: + if NotStartWith == None or not item.startswith(NotStartWith): + if excludeName == None or item['name'] not in excludeName: + if fieldName == 'movies': + listToAdd += [{"movie_id" : item['movie']['id'], "scene_index" : item['scene_index']}] + dataAdded += f"{item['movie']['id']} " + elif updateFieldName == None: + listToAdd += [item] + dataAdded += f"{item} " + else: + listToAdd += [item['id']] + dataAdded += f"{item['id']} " + if dataAdded != "": + if updateFieldName == None: + updateFieldName = fieldName + else: + for item in self.destData[fieldName]: + if fieldName == 'movies': + listToAdd += [{"movie_id" : item['movie']['id'], "scene_index" : item['scene_index']}] + else: + listToAdd += [item['id']] + self.dataDict.update({ updateFieldName : listToAdd}) + # self.stash.Trace(f"Added {fieldName} ({dataAdded}) to scene ID({self.destData['id']})", toAscii=True) diff --git a/plugins/DupFileManager/requirements.txt b/plugins/DupFileManager/requirements.txt new file mode 100644 index 00000000..d503550d --- /dev/null +++ 
b/plugins/DupFileManager/requirements.txt @@ -0,0 +1,4 @@ +stashapp-tools >= 0.2.50 +pyYAML +watchdog +Send2Trash \ No newline at end of file diff --git a/plugins/FileMonitor/README.md b/plugins/FileMonitor/README.md index a196509c..c801ee28 100644 --- a/plugins/FileMonitor/README.md +++ b/plugins/FileMonitor/README.md @@ -1,19 +1,24 @@ -# FileMonitor: Ver 0.8.2 (By David Maisonave) +# FileMonitor: Ver 0.9.0 (By David Maisonave) + FileMonitor is a [Stash](https://github.com/stashapp/stash) plugin with the following two main features: + - Updates Stash when any file changes occurs in the Stash library. - **Task Scheduler**: Runs scheduled task based on the scheduler configuration in **filemonitor_config.py**. ## Starting FileMonitor from the UI + From the GUI, FileMonitor can be started as a service or as a plugin. The recommended method is to start it as a service. When started as a service, it will jump on the Task Queue momentarily, and then disappear as it starts running in the background. + - To start monitoring file changes, go to **Stash->Settings->Task->[Plugin Tasks]->FileMonitor**, and click on the [Start Library Monitor Service] button. - ![FileMonitorService](https://github.com/user-attachments/assets/b12aeca9-37a8-447f-90da-26e9440735ad) - **Important Note**: At first, this will show up as a plugin in the Task Queue momentarily. It will then disappear from the Task Queue and run in the background as a service. - To stop FileMonitor click on [Stop Library Monitor] button. - The **[Monitor as a Plugin]** option is mainly available for backwards compatibility and for test purposes. - ## Using FileMonitor as a script + **FileMonitor** can be called as a standalone script. + - To start monitoring call the script and pass --url and the Stash URL. - python filemonitor.py --url http://localhost:9999 - To stop **FileMonitor**, pass argument **--stop**. @@ -24,6 +29,7 @@ From the GUI, FileMonitor can be started as a service or as a plugin. The recomm - The restart command restarts FileMonitor as a Task in Stash. # Task Scheduler + To enable the scheduler go to **Stash->Settings->Plugins->Plugins->FileMonitor** and enable the **Scheduler** option. ![ReoccurringTaskScheduler](https://github.com/user-attachments/assets/5a7bf6a4-3bd6-4692-a6c3-e9f8f4664f14) @@ -35,80 +41,72 @@ To enable the scheduler go to **Stash->Settings->Plugins->Plugins->FileMonitor** - Generated Content-> [Generate] (Every Sunday at 7AM) - Library -> [Scan] (Weekly) (Every Sunday at 3AM) - Backup -> [Backup] 2nd sunday of the month at 1AM -- The example task are disabled by default because they have a zero frequency value. +- The example tasks are disabled by default because they either have a zero frequency value or the time field is set to **DISABLED**. To configure the schedule or to add new task, edit the **task_scheduler** section in the **filemonitor_config.py** file. -```` python + +```python "task_scheduler": [ - # To create a daily task, include each day of the week for the weekday field. - {"task" : "Auto Tag", "weekday" : "monday,tuesday,wednesday,thursday,friday,saturday,sunday", "time" : "06:00"}, # Auto Tag -> [Auto Tag] (Daily at 6AM) - {"task" : "Optimise Database", "weekday" : "monday,tuesday,wednesday,thursday,friday,saturday,sunday", "time" : "07:00"}, # Maintenance -> [Optimise Database] (Daily at 7AM) - - # The following tasks are scheduled for 3 days out of the week. 
- {"task" : "Clean", "weekday" : "monday,wednesday,friday", "time" : "08:00"}, # Maintenance -> [Clean] (3 days per week at 8AM) - {"task" : "Clean Generated Files", "weekday" : "tuesday,thursday,saturday", "time" : "08:00"}, # Maintenance -> [Clean Generated Files] (3 days per week at 8AM) - + # To create a daily task, include each day of the week for the weekday field or "every" + # Optional field for task "Auto Tag" is 'paths'. For detail usage, see example #A3: in filemonitor_task_examples.py + {"task" : "Auto Tag", "weekday" : "monday,tuesday,wednesday,thursday,friday,saturday,sunday", "time" : "05:00"}, # Auto Tag -> [Auto Tag] (Daily at 6AM) + # Task "Create Tags" is a plugin task. Optional fields are taskName and validateDir field. For detail usage, see examples #B1, #B2, #B3, and #B4 in filemonitor_task_examples.py + {"task" : "pathParser", "taskName" : "Create Tags", "validateDir" : "pathParser", + "weekday" : "every", "time" : "05:30"}, # [Plugin Tasks] - > [Path Parser] -> [Create Tags] (Daily at 5AM) : This task requires plugin [Path Parser] + # The following task runs plugin DupFileManager (tag_duplicates_task) if the plugin is installed. The task runs in the background because of "taskQue" : False + {"task" : "DupFileManager", "taskMode" : "tag_duplicates_task", "validateDir" : "DupFileManager", "taskQue" : False, + "weekday" : "every", "time" : "02:30"}, # [Plugin Tasks] -> DupFileManager -> [Delete Duplicates] (Daily at 2:30AM) + {"task" : "Optimise Database", "weekday" : "monday,tuesday,wednesday,thursday,friday", "time" : "07:00"}, # Maintenance -> [Optimise Database] (Every weekday at 7AM) + # The following tasks are scheduled weekly - {"task" : "Generate", "weekday" : "sunday", "time" : "07:00"}, # Generated Content-> [Generate] (Every Sunday at 7AM) - {"task" : "Scan", "weekday" : "sunday", "time" : "03:00"}, # Library -> [Scan] (Weekly) (Every Sunday at 3AM) - + # Optional field for task "Scan", "Auto Tag", and "Clean" is 'paths'. For detail usage, see examples #A3: in filemonitor_task_examples.py + {"task" : "Scan", "weekday" : "saturday", "time" : "03:00"}, # Library -> [Scan] (Weekly) (Every saturday at 3AM) + {"task" : "Auto Tag", "weekday" : "saturday", "time" : "03:30"}, # Auto Tag -> [Auto Tag] (Weekly) (Every saturday at 3:30AM) + {"task" : "Generate", "weekday" : "saturday", "time" : "04:00"}, # Generated Content-> [Generate] (Every saturday at 4AM) + {"task" : "Clean", "weekday" : "saturday", "time" : "04:30"}, # Maintenance -> [Clean] (Every saturday at 4:30AM) + {"task" : "Clean Generated Files", "weekday" : "saturday", "time" : "05:00"}, # Maintenance -> [Clean Generated Files] (Every saturday at 5AM) + {"task" : "Optimise Database", "weekday" : "saturday", "time" : "05:30"}, # Maintenance -> [Optimise Database] (Every saturday at 5:30AM) + # To perform a task monthly, specify the day of the month as in the weekly schedule format, and add a monthly field. # The monthly field value must be 1, 2, 3, or 4. # 1 = 1st specified weekday of the month. Example 1st monday. # 2 = 2nd specified weekday of the month. Example 2nd monday of the month. # 3 = 3rd specified weekday of the month. # 4 = 4th specified weekday of the month. - # The following task is scheduled monthly - {"task" : "Backup", "weekday" : "sunday", "time" : "01:00", "monthly" : 2}, # Backup -> [Backup] 2nd sunday of the month at 1AM (01:00) - - # The following task is the syntax used for a plugins. 
A plugin task requires the plugin name for the [task] field, and the plugin-ID for the [pluginId] field. - # This task requires plugin [Path Parser], and it's disabled by default. - {"task" : "Create Tags", "pluginId" : "pathParser", "weekday" : "monday,tuesday,wednesday,thursday,friday,saturday,sunday", "time" : "DISABLED"}, # To enable this task change time "DISABLED" to a valid time. - - # Example#A1: Task to call call_GQL API with custom input - {"task" : "GQL", "input" : "mutation OptimiseDatabase { optimiseDatabase }", "weekday" : "sunday", "time" : "DISABLED"}, # To enable, change "DISABLED" to valid time - - # Example#A2: Task to call a python script. When this task is executed, the keyword is replaced by filemonitor.py current directory. - # The args field is NOT required. - {"task" : "python", "script" : "test_script_hello_world.py", "args" : "--MyArguments Hello", "weekday" : "monday", "time" : "DISABLED"}, # change "DISABLED" to valid time - - # Example#A3: The following task types can optionally take a [paths] field. If the paths field does not exists, the paths in the Stash library is used. - {"task" : "Scan", "paths" : ["E:\\MyVideos\\downloads", "V:\\MyOtherVideos"], "weekday" : "sunday", "time" : "DISABLED"}, # Library -> [Scan] - {"task" : "Auto Tag", "paths" : [r"E:\MyVideos\downloads", r"V:\MyOtherVideos"], "weekday" : "monday,tuesday,wednesday,thursday,friday,saturday,sunday", "time" : "DISABLED"}, # Auto Tag -> [Auto Tag] - {"task" : "Clean", "paths" : [r"E:\MyVideos\downloads", r"V:\MyOtherVideos"], "weekday" : "sunday", "time" : "DISABLED"}, # Generated Content-> [Generate] - - # Example#A4: Task which calls Migrations -> [Rename generated files] - {"task" : "RenameGeneratedFiles", "weekday" : "tuesday,thursday", "time" : "DISABLED"}, # (bi-weekly) example - - # The above weekday method is the more reliable method to schedule task, because it doesn't rely on FileMonitor running continuously (non-stop). - - # The below examples use frequency field method which can work with minutes and hours. A zero frequency value disables the task. - # Note: Both seconds and days are also supported for the frequency field. - # However, seconds is mainly used for test purposes. - # And days usage is discourage, because it only works if FileMonitor is running for X many days non-stop. - # The below example tasks are done using hours and minutes, however any of these task types can be converted to a daily, weekly, or monthly syntax. - - # Example#B1: Task for calling another Stash plugin, which needs plugin name and plugin ID. - {"task" : "PluginButtonName_Here", "pluginId" : "PluginId_Here", "hours" : 0}, # The zero frequency value makes this task disabled. - - # Example#B2: Task to execute a command - {"task" : "execute", "command" : "C:\\MyPath\\HelloWorld.bat", "hours" : 0}, - - # Example#B3: Task to execute a command with optional args field, and using keyword , which gets replaced with filemonitor.py current directory. - {"task" : "execute", "command" : "HelloWorld.cmd", "args" : "--name David", "minutes" : 0}, + # The Backup task is scheduled monthly + # Optional field for task "Backup" is maxBackup. For detail usage, see example #A5 in filemonitor_task_examples.py + {"task" : "Backup", "weekday" : "sunday", "time" : "01:00", "monthly" : 2}, # Backup -> [Backup] 2nd sunday of the month at 1AM (01:00) + # The following task requires plugin DupFileManager and UI option [Delete Duplicate Scheduler] enabled. 
+ {"task" : "DupFileManager", "taskName" : "Delete Duplicates", "validateDir" : "DupFileManager", + "weekday" : "sunday", "time" : "02:00", "monthly" : 2}, # [Plugin Tasks] -> DupFileManager -> [Delete Duplicates] 2nd sunday of the month at 2AM (02:00) + + # The [CheckStashIsRunning] task checks if Stash is running. If not running, it will start up stash. + # This task only works if FileMonitor is started as a service or in command line mode. + # Optional fields are 'command' and 'RunAfter'. For detail usage, see examples #C1 and #C2 in filemonitor_task_examples.py + {"task" : "CheckStashIsRunning", "minutes" :5}, # Checks every 5 minutes ], -```` -- To add plugins to the task list, both the Plugin-ID and the plugin name is required. The plugin ID is usually the file name of the script without the extension. +``` + +- To add plugins to the task list, use the Plugin-ID in the "task" field. The plugin ID is usually the file name of the script without the extension. + - Plugin task have the following optional fields: taskName, taskMode, validateDir, and taskQue + - The **validateDir** field can be used to define the plugin sub directory, which is checked to see if it exist before running the task. + - **taskName** field is used to name the task to call for the associated plugin. It can not be used with "taskQue":False + - **taskQue** field is used to call the plugin without using the Task Queue. I.E. "taskQue":False. When this field is set to False, the taskName field can NOT be used. Instead use taskMode to identify the task to call. + - **taskMode** field is used in order to run the plugin without using the Task Queue. The plugin runs immediatly. Be careful not to confuse taskMode with taskName. Look in the plugin \*.yml file under the **tasks** section where it defines both the task-name and the task-mode. - Task can be scheduled to run monthly, weekly, hourly, and by minutes. - The scheduler list uses two types of syntax. One is **weekday** based, and the other is **frequency** based. + - **weekday Based** + - Use the weekday based syntax for daily, weekly, and monthly schedules. - All the weekday based methods must have a **weekday** field and a **time** field, which specifies the day(s) of the week and the time to start the task. - **Daily**: - - A daily task populates the weekday field with all the days of the week. + - A daily task populates the weekday field with all the days of the week or with keyword **every**. - **Daily Example**: - Starts a task daily at 6AM. - `{"task" : "Optimise Database", "weekday" : "monday,tuesday,wednesday,thursday,friday,saturday,sunday", "time" : "06:00"},` + - Starts a task daily at 2PM. + - `{"task" : "Optimise Database", "weekday" : "every", "time" : "14:00"},` - **Weekly**: - **Weekly Example**: - Starts a task weekly every monday and 9AM. @@ -140,6 +138,7 @@ To configure the schedule or to add new task, edit the **task_scheduler** sectio - For best results use the scheduler with FileMonitor running as a service. ## Requirements + - pip install -r requirements.txt - Or manually install each requirement: - `pip install stashapp-tools --upgrade` @@ -148,6 +147,7 @@ To configure the schedule or to add new task, edit the **task_scheduler** sectio - `pip install schedule` ## Installation + - Follow **Requirements** instructions. - In the stash plugin directory (C:\Users\MyUserName\.stash\plugins), create a folder named **FileMonitor**. - Copy all the plugin files to this folder.(**C:\Users\MyUserName\\.stash\plugins\FileMonitor**). 
@@ -156,9 +156,17 @@ To configure the schedule or to add new task, edit the **task_scheduler** sectio That's it!!! ## Options + - Main options are accessible in the GUI via Settings->Plugins->Plugins->[FileMonitor]. - When the UI option [Max DB Backups] is set to a value greater than 1, and when the scheduler is enabled, the quantity of database backup files is trimmed down to the set [**Max DB Backups**] value after the scheduler executes the Backup task. - The other options are self-explanatory from the UI. - Additional options are available in filemonitor_config.py. The options are well documented in the commented code. +## Bugs and Feature Requests + +Please use the following link to report FileMonitor bugs: +[FileMonitor Bug Report](https://github.com/David-Maisonave/Axter-Stash/issues/new?assignees=&labels=Plugin_Bug&projects=&template=bug_report_plugin.yml&title=%F0%9F%AA%B2%5BFileMonitor%5D+Your_Short_title) + +Please use the following link to submit a FileMonitor feature request: [FileMonitor Feature Request](https://github.com/David-Maisonave/Axter-Stash/issues/new?assignees=&labels=Enhancement&projects=&template=feature_request_plugin.yml&title=%F0%9F%92%A1%EF%B8%8F%5BEnhancement%5D%3A%5BFileMonitor%5D+Your_Short_title) +Please do **NOT** use the feature request form to report errors; use the bug report for error issues instead. diff --git a/plugins/FileMonitor/StashPluginHelper.py b/plugins/FileMonitor/StashPluginHelper.py index 218e055c..6f0d3d15 100644 --- a/plugins/FileMonitor/StashPluginHelper.py +++ b/plugins/FileMonitor/StashPluginHelper.py @@ -1,10 +1,12 @@ from stashapi.stashapp import StashInterface from logging.handlers import RotatingFileHandler -import inspect, sys, os, pathlib, logging, json +import re, inspect, sys, os, pathlib, logging, json import concurrent.futures from stashapi.stash_types import PhashDistance import __main__ +_ARGUMENT_UNSPECIFIED_ = "_ARGUMENT_UNSPECIFIED_" + # StashPluginHelper (By David Maisonave aka Axter) # See end of this file for example usage # Log Features: @@ -27,9 +29,9 @@ class StashPluginHelper(StashInterface): PLUGIN_TASK_NAME = None PLUGIN_ID = None PLUGIN_CONFIGURATION = None + PLUGINS_PATH = None pluginSettings = None pluginConfig = None - STASH_INTERFACE_INIT = False STASH_URL = None STASH_CONFIGURATION = None JSON_INPUT = None @@ -40,6 +42,8 @@ class StashPluginHelper(StashInterface): FRAGMENT_SERVER = None STASHPATHSCONFIG = None STASH_PATHS = [] + API_KEY = None + excludeMergeTags = None # printTo argument LOG_TO_FILE = 1 @@ -60,6 +64,10 @@ class StashPluginHelper(StashInterface): pluginLog = None logLinePreviousHits = [] thredPool = None + STASH_INTERFACE_INIT = False + _mergeMetadata = None + encodeToUtf8 = False + convertToAscii = False # If set True, it takes precedence over encodeToUtf8 # Prefix message value LEV_TRACE = "TRACE: " @@ -82,7 +90,7 @@ def __init__(self, debugTracing = None, # Set debugTracing to True so as to output debug and trace logging logFormat = LOG_FORMAT, # Plugin log line format dateFmt = "%y%m%d %H:%M:%S", # Date format when logging to plugin log file - maxbytes = 2*1024*1024, # Max size of plugin log file + maxbytes = 8*1024*1024, # Max size of plugin log file backupcount = 2, # Backup counts when log file size reaches max size logToWrnSet = 0, # Customize the target output set which will get warning logging logToErrSet = 0, # Customize the target output set which will get error logging @@ -94,6 +102,7 @@ def __init__(self, config = None, # From pluginName_config.py or
pluginName_setting.py fragmentServer = None, stash_url = None, # Stash URL (endpoint URL) Example: http://localhost:9999 + apiKey = None, # API Key only needed when username and password set while running script via command line DebugTraceFieldName = "zzdebugTracing", DryRunFieldName = "zzdryRun", setStashLoggerAsPluginLogger = False): @@ -103,7 +112,7 @@ def __init__(self, if logToNormSet: self.log_to_norm = logToNormSet if stash_url and len(stash_url): self.STASH_URL = stash_url self.MAIN_SCRIPT_NAME = mainScriptName if mainScriptName != "" else __main__.__file__ - self.PLUGIN_ID = pluginID if pluginID != "" else pathlib.Path(self.MAIN_SCRIPT_NAME).stem.lower() + self.PLUGIN_ID = pluginID if pluginID != "" else pathlib.Path(self.MAIN_SCRIPT_NAME).stem # print(f"self.MAIN_SCRIPT_NAME={self.MAIN_SCRIPT_NAME}, self.PLUGIN_ID={self.PLUGIN_ID}", file=sys.stderr) self.LOG_FILE_NAME = logFilePath if logFilePath != "" else f"{pathlib.Path(self.MAIN_SCRIPT_NAME).resolve().parent}{os.sep}{pathlib.Path(self.MAIN_SCRIPT_NAME).stem}.log" self.LOG_FILE_DIR = pathlib.Path(self.LOG_FILE_NAME).resolve().parent @@ -123,11 +132,13 @@ def __init__(self, if debugTracing: self.DEBUG_TRACING = debugTracing if config: self.pluginConfig = config - if DebugTraceFieldName in self.pluginConfig: - self.DEBUG_TRACING = self.pluginConfig[DebugTraceFieldName] - if DryRunFieldName in self.pluginConfig: - self.DRY_RUN = self.pluginConfig[DryRunFieldName] - + if self.Setting('apiKey', "") != "": + self.FRAGMENT_SERVER['ApiKey'] = self.Setting('apiKey') + + + if apiKey and apiKey != "": + self.FRAGMENT_SERVER['ApiKey'] = apiKey + if len(sys.argv) > 1: RUNNING_IN_COMMAND_LINE_MODE = True if not debugTracing or not stash_url: @@ -161,20 +172,26 @@ def __init__(self, super().__init__(self.FRAGMENT_SERVER) self.STASH_INTERFACE_INIT = True + if self.STASH_URL.startswith("http://0.0.0.0:"): + self.STASH_URL = self.STASH_URL.replace("http://0.0.0.0:", "http://localhost:") + if self.STASH_INTERFACE_INIT: self.PLUGIN_CONFIGURATION = self.get_configuration()["plugins"] self.STASH_CONFIGURATION = self.get_configuration()["general"] self.STASHPATHSCONFIG = self.STASH_CONFIGURATION['stashes'] + if 'pluginsPath' in self.STASH_CONFIGURATION: + self.PLUGINS_PATH = self.STASH_CONFIGURATION['pluginsPath'] for item in self.STASHPATHSCONFIG: self.STASH_PATHS.append(item["path"]) if settings: self.pluginSettings = settings if self.PLUGIN_ID in self.PLUGIN_CONFIGURATION: self.pluginSettings.update(self.PLUGIN_CONFIGURATION[self.PLUGIN_ID]) - if DebugTraceFieldName in self.pluginSettings: - self.DEBUG_TRACING = self.pluginSettings[DebugTraceFieldName] - if DryRunFieldName in self.pluginSettings: - self.DRY_RUN = self.pluginSettings[DryRunFieldName] + if 'apiKey' in self.STASH_CONFIGURATION: + self.API_KEY = self.STASH_CONFIGURATION['apiKey'] + + self.DRY_RUN = self.Setting(DryRunFieldName, self.DRY_RUN) + self.DEBUG_TRACING = self.Setting(DebugTraceFieldName, self.DEBUG_TRACING) if self.DEBUG_TRACING: self.LOG_LEVEL = logging.DEBUG logging.basicConfig(level=self.LOG_LEVEL, format=logFormat, datefmt=dateFmt, handlers=[RFH]) @@ -185,7 +202,22 @@ def __init__(self, def __del__(self): self.thredPool.shutdown(wait=False) - def Log(self, logMsg, printTo = 0, logLevel = logging.INFO, lineNo = -1, levelStr = "", logAlways = False): + def Setting(self, name, default=_ARGUMENT_UNSPECIFIED_, raiseEx=True, notEmpty=False): + if self.pluginSettings != None and name in self.pluginSettings: + if notEmpty == False or self.pluginSettings[name] != "": + return 
self.pluginSettings[name] + if self.pluginConfig != None and name in self.pluginConfig: + if notEmpty == False or self.pluginConfig[name] != "": + return self.pluginConfig[name] + if default == _ARGUMENT_UNSPECIFIED_ and raiseEx: + raise Exception(f"Missing {name} from both UI settings and config file settings.") + return default + + def Log(self, logMsg, printTo = 0, logLevel = logging.INFO, lineNo = -1, levelStr = "", logAlways = False, toAscii = None): + if toAscii or (toAscii == None and (self.encodeToUtf8 or self.convertToAscii)): + logMsg = self.asc2(logMsg) + else: + logMsg = logMsg if printTo == 0: printTo = self.log_to_norm elif printTo == self.LOG_TO_ERROR and logLevel == logging.INFO: @@ -226,7 +258,7 @@ def Log(self, logMsg, printTo = 0, logLevel = logging.INFO, lineNo = -1, levelSt if (printTo & self.LOG_TO_STDERR) and (logLevel != logging.DEBUG or self.DEBUG_TRACING or logAlways): print(f"StdErr: {LN_Str} {levelStr}{logMsg}", file=sys.stderr) - def Trace(self, logMsg = "", printTo = 0, logAlways = False, lineNo = -1): + def Trace(self, logMsg = "", printTo = 0, logAlways = False, lineNo = -1, toAscii = None): if printTo == 0: printTo = self.LOG_TO_FILE if lineNo == -1: lineNo = inspect.currentframe().f_back.f_lineno @@ -234,40 +266,40 @@ def Trace(self, logMsg = "", printTo = 0, logAlways = False, lineNo = -1): if self.DEBUG_TRACING or logAlways: if logMsg == "": logMsg = f"Line number {lineNo}..." - self.Log(logMsg, printTo, logLev, lineNo, self.LEV_TRACE, logAlways) + self.Log(logMsg, printTo, logLev, lineNo, self.LEV_TRACE, logAlways, toAscii=toAscii) # Log once per session. Only logs the first time called from a particular line number in the code. - def TraceOnce(self, logMsg = "", printTo = 0, logAlways = False): + def TraceOnce(self, logMsg = "", printTo = 0, logAlways = False, toAscii = None): lineNo = inspect.currentframe().f_back.f_lineno if self.DEBUG_TRACING or logAlways: FuncAndLineNo = f"{inspect.currentframe().f_back.f_code.co_name}:{lineNo}" if FuncAndLineNo in self.logLinePreviousHits: return self.logLinePreviousHits.append(FuncAndLineNo) - self.Trace(logMsg, printTo, logAlways, lineNo) + self.Trace(logMsg, printTo, logAlways, lineNo, toAscii=toAscii) # Log INFO on first call, then do Trace on remaining calls. 
- def LogOnce(self, logMsg = "", printTo = 0, logAlways = False, traceOnRemainingCalls = True): + def LogOnce(self, logMsg = "", printTo = 0, logAlways = False, traceOnRemainingCalls = True, toAscii = None): if printTo == 0: printTo = self.LOG_TO_FILE lineNo = inspect.currentframe().f_back.f_lineno FuncAndLineNo = f"{inspect.currentframe().f_back.f_code.co_name}:{lineNo}" if FuncAndLineNo in self.logLinePreviousHits: if traceOnRemainingCalls: - self.Trace(logMsg, printTo, logAlways, lineNo) + self.Trace(logMsg, printTo, logAlways, lineNo, toAscii=toAscii) else: self.logLinePreviousHits.append(FuncAndLineNo) - self.Log(logMsg, printTo, logging.INFO, lineNo) + self.Log(logMsg, printTo, logging.INFO, lineNo, toAscii=toAscii) - def Warn(self, logMsg, printTo = 0): + def Warn(self, logMsg, printTo = 0, toAscii = None): if printTo == 0: printTo = self.log_to_wrn_set lineNo = inspect.currentframe().f_back.f_lineno - self.Log(logMsg, printTo, logging.WARN, lineNo) + self.Log(logMsg, printTo, logging.WARN, lineNo, toAscii=toAscii) - def Error(self, logMsg, printTo = 0): + def Error(self, logMsg, printTo = 0, toAscii = None): if printTo == 0: printTo = self.log_to_err_set lineNo = inspect.currentframe().f_back.f_lineno - self.Log(logMsg, printTo, logging.ERROR, lineNo) - + self.Log(logMsg, printTo, logging.ERROR, lineNo, toAscii=toAscii) + def Status(self, printTo = 0, logLevel = logging.INFO, lineNo = -1): if printTo == 0: printTo = self.log_to_norm if lineNo == -1: @@ -298,7 +330,86 @@ def ExecutePythonScript(self, args, ExecDetach=True): argsWithPython = [f"{PythonExe}"] + args return self.ExecuteProcess(argsWithPython,ExecDetach=ExecDetach) - # Extends class StashInterface with functions which are not yet in the class + def Submit(self, *args, **kwargs): + return self.thredPool.submit(*args, **kwargs) + + def asc2(self, data, convertToAscii=None): + if convertToAscii or (convertToAscii == None and self.convertToAscii): + return ascii(data) + return str(str(data).encode('utf-8'))[2:-1] # This works better for logging than ascii function + # data = str(data).encode('ascii','ignore') # This works better for logging than ascii function + # return str(data)[2:-1] # strip out b'str' + + def init_mergeMetadata(self, excludeMergeTags=None): + self.excludeMergeTags = excludeMergeTags + self._mergeMetadata = mergeMetadata(self, self.excludeMergeTags) + + # Must call init_mergeMetadata, before calling merge_metadata + def merge_metadata(self, SrcData, DestData): # Input arguments can be scene ID or scene metadata + if type(SrcData) is int: + SrcData = self.find_scene(SrcData) + DestData = self.find_scene(DestData) + return self._mergeMetadata.merge(SrcData, DestData) + + def Progress(self, currentIndex, maxCount): + progress = (currentIndex / maxCount) if currentIndex < maxCount else (maxCount / currentIndex) + self.log.progress(progress) + + def run_plugin(self, plugin_id, task_mode=None, args:dict={}, asyn=False): + """Runs a plugin operation. + The operation is run immediately and does not use the job queue. + Args: + plugin_id (ID): plugin_id + task_name (str, optional): Plugin task to perform + args (dict, optional): Arguments to pass to plugin. Plugin access via JSON_INPUT['args'] + Returns: + A map of the result. + """ + query = """mutation RunPluginOperation($plugin_id: ID!, $args: Map!) 
{ + runPluginOperation(plugin_id: $plugin_id, args: $args) + }""" + if task_mode != None: + args.update({"mode" : task_mode}) + variables = { + "plugin_id": plugin_id, + "args": args, + } + if asyn: + self.Submit(self.call_GQL, query, variables) + return f"Made asynchronous call for plugin {plugin_id}" + else: + return self.call_GQL(query, variables) + + def find_duplicate_scenes_diff(self, distance: PhashDistance=PhashDistance.EXACT, fragment='id', duration_diff: float=10.00 ): + query = """ + query FindDuplicateScenes($distance: Int, $duration_diff: Float) { + findDuplicateScenes(distance: $distance, duration_diff: $duration_diff) { + ...SceneSlim + } + } + """ + if fragment: + query = re.sub(r'\.\.\.SceneSlim', fragment, query) + else: + query += "fragment SceneSlim on Scene { id }" + + variables = { "distance": distance, "duration_diff": duration_diff } + result = self.call_GQL(query, variables) + return result['findDuplicateScenes'] + + # ################################################################################################# + # The below functions extends class StashInterface with functions which are not yet in the class + def get_all_scenes(self): + query_all_scenes = """ + query AllScenes { + allScenes { + id + updated_at + } + } + """ + return self.call_GQL(query_all_scenes) + def metadata_autotag(self, paths:list=[], performers:list=[], studios:list=[], tags:list=[]): query = """ mutation MetadataAutoTag($input:AutoTagMetadataInput!) { @@ -340,24 +451,76 @@ def metadata_clean_generated(self, blobFiles=True, dryRun=False, imageThumbnails def rename_generated_files(self): return self.call_GQL("mutation MigrateHashNaming {migrateHashNaming}") - # def find_duplicate_scenes(self, distance: PhashDistance=PhashDistance.EXACT, fragment=None): - # query = """ - # query FindDuplicateScenes($distance: Int) { - # findDuplicateScenes(distance: $distance) { - # ...SceneSlim - # } - # } - # """ - # if fragment: - # query = re.sub(r'\.\.\.SceneSlim', fragment, query) - # else: - # query = """ - # query FindDuplicateScenes($distance: Int) { - # findDuplicateScenes(distance: $distance) - # } - # """ - # variables = { - # "distance": distance - # } - # result = self.call_GQL(query, variables) - # return result['findDuplicateScenes'] \ No newline at end of file + +class mergeMetadata: # A class to merge scene metadata from source scene to destination scene + srcData = None + destData = None + stash = None + excludeMergeTags = None + dataDict = None + result = "Nothing To Merge" + def __init__(self, stash, excludeMergeTags=None): + self.stash = stash + self.excludeMergeTags = excludeMergeTags + + def merge(self, SrcData, DestData): + self.srcData = SrcData + self.destData = DestData + ORG_DATA_DICT = {'id' : self.destData['id']} + self.dataDict = ORG_DATA_DICT.copy() + self.mergeItems('tags', 'tag_ids', [], excludeName=self.excludeMergeTags) + self.mergeItems('performers', 'performer_ids', []) + self.mergeItems('galleries', 'gallery_ids', []) + self.mergeItems('movies', 'movies', []) + self.mergeItems('urls', listToAdd=self.destData['urls'], NotStartWith=self.stash.STASH_URL) + self.mergeItem('studio', 'studio_id', 'id') + self.mergeItem('title') + self.mergeItem('director') + self.mergeItem('date') + self.mergeItem('details') + self.mergeItem('rating100') + self.mergeItem('code') + if self.dataDict != ORG_DATA_DICT: + self.stash.Trace(f"Updating scene ID({self.destData['id']}) with {self.dataDict}; path={self.destData['files'][0]['path']}", toAscii=True) + self.result = 
self.stash.update_scene(self.dataDict) + return self.result + + def Nothing(self, Data): + if not Data or Data == "" or (type(Data) is str and Data.strip() == ""): + return True + return False + + def mergeItem(self,fieldName, updateFieldName=None, subField=None): + if updateFieldName == None: + updateFieldName = fieldName + if self.Nothing(self.destData[fieldName]) and not self.Nothing(self.srcData[fieldName]): + if subField == None: + self.dataDict.update({ updateFieldName : self.srcData[fieldName]}) + else: + self.dataDict.update({ updateFieldName : self.srcData[fieldName][subField]}) + def mergeItems(self, fieldName, updateFieldName=None, listToAdd=[], NotStartWith=None, excludeName=None): + dataAdded = "" + for item in self.srcData[fieldName]: + if item not in self.destData[fieldName]: + if NotStartWith == None or not item.startswith(NotStartWith): + if excludeName == None or item['name'] not in excludeName: + if fieldName == 'movies': + listToAdd += [{"movie_id" : item['movie']['id'], "scene_index" : item['scene_index']}] + dataAdded += f"{item['movie']['id']} " + elif updateFieldName == None: + listToAdd += [item] + dataAdded += f"{item} " + else: + listToAdd += [item['id']] + dataAdded += f"{item['id']} " + if dataAdded != "": + if updateFieldName == None: + updateFieldName = fieldName + else: + for item in self.destData[fieldName]: + if fieldName == 'movies': + listToAdd += [{"movie_id" : item['movie']['id'], "scene_index" : item['scene_index']}] + else: + listToAdd += [item['id']] + self.dataDict.update({ updateFieldName : listToAdd}) + # self.stash.Trace(f"Added {fieldName} ({dataAdded}) to scene ID({self.destData['id']})", toAscii=True) diff --git a/plugins/FileMonitor/filemonitor.py b/plugins/FileMonitor/filemonitor.py index 6d6752d1..03575b3f 100644 --- a/plugins/FileMonitor/filemonitor.py +++ b/plugins/FileMonitor/filemonitor.py @@ -3,13 +3,21 @@ # Get the latest developers version from following link: https://github.com/David-Maisonave/Axter-Stash/tree/main/plugins/FileMonitor # Note: To call this script outside of Stash, pass argument --url and the Stash URL. 
# Example: python filemonitor.py --url http://localhost:9999 -import os, sys, time, pathlib, argparse +import os, sys, time, pathlib, argparse, platform, traceback, logging from StashPluginHelper import StashPluginHelper import watchdog # pip install watchdog # https://pythonhosted.org/watchdog/ from watchdog.observers import Observer # This is also needed for event attributes from threading import Lock, Condition from multiprocessing import shared_memory -from filemonitor_config import config # Import settings from filemonitor_config.py +from filemonitor_config import config +from filemonitor_task_examples import task_examples +from filemonitor_self_unit_test import self_unit_test + +config['task_scheduler'] = config['task_scheduler'] + task_examples['task_scheduler'] +if self_unit_test['selfUnitTest_repeat']: + config['task_scheduler'] = config['task_scheduler'] + self_unit_test['task_scheduler_repeat'] +if self_unit_test['selfUnitTest_set_time']: + config['task_scheduler'] = config['task_scheduler'] + self_unit_test['task_scheduler_set_time'] CONTINUE_RUNNING_SIG = 99 STOP_RUNNING_SIG = 32 @@ -20,6 +28,7 @@ parser.add_argument('--stop', '-s', dest='stop', action='store_true', help='Stop (kill) a running FileMonitor task.') parser.add_argument('--restart', '-r', dest='restart', action='store_true', help='Restart FileMonitor.') parser.add_argument('--silent', '--quit', '-q', dest='quit', action='store_true', help='Run in silent mode. No output to console or stderr. Use this when running from pythonw.exe') +parser.add_argument('--apikey', '-a', dest='apikey', type=str, help='API Key') parse_args = parser.parse_args() logToErrSet = 0 @@ -31,7 +40,8 @@ settings = { "recursiveDisabled": False, "turnOnScheduler": False, - "zmaximumBackups": 0, + "turnOnSchedulerDeleteDup": False, + "zmaximumBackups": 1, "zzdebugTracing": False } stash = StashPluginHelper( @@ -40,13 +50,13 @@ settings=settings, config=config, logToErrSet=logToErrSet, - logToNormSet=logToNormSet + logToNormSet=logToNormSet, + maxbytes=5*1024*1024, + apiKey=parse_args.apikey ) -stash.Status() +stash.Status(logLevel=logging.DEBUG) stash.Log(f"\nStarting (__file__={__file__}) (stash.CALLED_AS_STASH_PLUGIN={stash.CALLED_AS_STASH_PLUGIN}) (stash.DEBUG_TRACING={stash.DEBUG_TRACING}) (stash.DRY_RUN={stash.DRY_RUN}) (stash.PLUGIN_TASK_NAME={stash.PLUGIN_TASK_NAME})************************************************") -# stash.Log(f"{stash.find_duplicate_scenes()}") - exitMsg = "Change success!!" 
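A side note on the Setting() helper that the code above relies on (defined earlier in StashPluginHelper.py): it resolves a value from the UI plugin settings first, then from the *_config.py file, and only then falls back to the caller's default, raising an exception when no default was supplied. Below is a simplified sketch of that lookup order; resolve_setting, ui_settings, and config_file are hypothetical stand-ins for the real method and its self.pluginSettings/self.pluginConfig sources, and the real helper additionally offers raiseEx and notEmpty switches:

```python
# Simplified sketch of the Setting() resolution order (hypothetical standalone version).
def resolve_setting(ui_settings, config_file, name, default=None):
    if ui_settings is not None and name in ui_settings:
        return ui_settings[name]   # 1) UI settings (Settings->Plugins->Plugins) take precedence
    if config_file is not None and name in config_file:
        return config_file[name]   # 2) then the pluginName_config.py values
    if default is None:
        raise Exception(f"Missing {name} from both UI settings and config file settings.")
    return default                 # 3) finally the caller's default

# Example: mirrors how the helper looks up 'apiKey' with a default of "".
print(resolve_setting({"zzdebugTracing": False}, {"apiKey": "xyz"}, "apiKey", default=""))  # -> xyz
```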
mutex = Lock() signal = Condition(mutex) @@ -59,6 +69,8 @@ RUN_GENERATE_CONTENT = stash.pluginConfig['runGenerateContent'] SCAN_ON_ANY_EVENT = stash.pluginConfig['onAnyEvent'] SIGNAL_TIMEOUT = stash.pluginConfig['timeOut'] if stash.pluginConfig['timeOut'] > 0 else 1 +MAX_TIMEOUT_FOR_DELAY_PATH_PROCESS = stash.pluginConfig['timeOutDelayProcess'] +MAX_SECONDS_WAIT_SCANJOB_COMPLETE = stash.pluginConfig['maxWaitTimeJobFinish'] CREATE_SPECIAL_FILE_TO_EXIT = stash.pluginConfig['createSpecFileToExit'] DELETE_SPECIAL_FILE_ON_STOP = stash.pluginConfig['deleteSpecFileInStop'] @@ -73,8 +85,7 @@ fileExtTypes = stash.pluginConfig['fileExtTypes'].split(",") if stash.pluginConfig['fileExtTypes'] != "" else [] includePathChanges = stash.pluginConfig['includePathChanges'] if len(stash.pluginConfig['includePathChanges']) > 0 else stash.STASH_PATHS excludePathChanges = stash.pluginConfig['excludePathChanges'] - -stash.Trace(f"(includePathChanges={includePathChanges})") +turnOnSchedulerDeleteDup = stash.pluginSettings['turnOnSchedulerDeleteDup'] if stash.DRY_RUN: stash.Log("Dry run mode is enabled.") @@ -85,7 +96,6 @@ StartFileMonitorAsAPluginTaskID = "start_library_monitor" StartFileMonitorAsAServiceTaskID = "start_library_monitor_service" - FileMonitorPluginIsOnTaskQue = stash.CALLED_AS_STASH_PLUGIN StopLibraryMonitorWaitingInTaskQueue = False JobIdInTheQue = 0 @@ -151,30 +161,32 @@ def __init__(self): else: weekDays = task['weekday'].lower() if 'monthly' in task: - stash.Log(f"Adding to scheduler task '{task['task']}' monthly on number {task['monthly']} {task['weekday']} at {task['time']}") + stash.Log(f"Adding to scheduler task '{self.taskName(task)}' monthly on number {task['monthly']} {task['weekday']} at {task['time']}") + elif task['weekday'] == "every": + stash.Log(f"Adding to scheduler task '{self.taskName(task)}' (weekly) every day at {task['time']}") else: - stash.Log(f"Adding to scheduler task '{task['task']}' (weekly) every {task['weekday']} at {task['time']}") + stash.Log(f"Adding to scheduler task '{self.taskName(task)}' (weekly) every {task['weekday']} at {task['time']}") hasValidDay = False - if "monday" in weekDays: + if "monday" in weekDays or "every" in weekDays: schedule.every().monday.at(task['time']).do(self.runTask, task) hasValidDay = True - if "tuesday" in weekDays: + if "tuesday" in weekDays or "every" in weekDays: schedule.every().tuesday.at(task['time']).do(self.runTask, task) hasValidDay = True - if "wednesday" in weekDays: + if "wednesday" in weekDays or "every" in weekDays: schedule.every().wednesday.at(task['time']).do(self.runTask, task) hasValidDay = True - if "thursday" in weekDays: + if "thursday" in weekDays or "every" in weekDays: schedule.every().thursday.at(task['time']).do(self.runTask, task) hasValidDay = True - if "friday" in weekDays: + if "friday" in weekDays or "every" in weekDays: schedule.every().friday.at(task['time']).do(self.runTask, task) hasValidDay = True - if "saturday" in weekDays: + if "saturday" in weekDays or "every" in weekDays or "weekend" in weekDays: schedule.every().saturday.at(task['time']).do(self.runTask, task) hasValidDay = True - if "sunday" in weekDays: + if "sunday" in weekDays or "every" in weekDays or "weekend" in weekDays: schedule.every().sunday.at(task['time']).do(self.runTask, task) hasValidDay = True @@ -184,6 +196,16 @@ def __init__(self): stash.Error(f"Task '{task['task']}' is missing fields.") self.checkSchedulePending() + def taskName(self, task): + pluginTask = None + if 'taskName' in task: + pluginTask = task['taskName'] + 
elif 'taskMode' in task: + pluginTask = task['taskMode'] + if pluginTask == None or pluginTask == "": + return task['task'] + return f"{task['task']}->{pluginTask}" + # ToDo: Add asynchronous threading logic to running task. def runTask(self, task): import datetime @@ -202,77 +224,202 @@ def runTask(self, task): result = None if task['task'] == "Clean": - result = stash.metadata_clean(paths=targetPaths, dry_run=stash.DRY_RUN) + result = self.jobIdOutput(stash.metadata_clean(paths=targetPaths, dry_run=stash.DRY_RUN)) elif task['task'] == "Clean Generated Files": - result = stash.metadata_clean_generated() + result = self.jobIdOutput(stash.metadata_clean_generated()) elif task['task'] == "Generate": - result = stash.metadata_generate() + result = self.jobIdOutput(stash.metadata_generate()) elif task['task'] == "Backup": - stash.LogOnce("Note: Backup task does not get listed in the Task Queue, but user can verify that it started by looking in the Stash log file as an INFO level log line.") - result = stash.backup_database() - maximumBackup = stash.pluginSettings['zmaximumBackups'] - if "maxBackups" in task: - maximumBackup = task['maxBackups'] - if maximumBackup < 2: - stash.TraceOnce(f"Skipping DB backup file trim because zmaximumBackups={maximumBackup}. Value has to be greater than 1.") - elif 'backupDirectoryPath' in stash.STASH_CONFIGURATION: - if len(stash.STASH_CONFIGURATION['backupDirectoryPath']) < 5: - stash.TraceOnce(f"Skipping DB backup file trim because backupDirectoryPath length is to short. Len={len(stash.STASH_CONFIGURATION['backupDirectoryPath'])}. Only support length greater than 4 characters.") - elif os.path.exists(stash.STASH_CONFIGURATION['backupDirectoryPath']): - stash.LogOnce(f"Checking quantity of DB backups if path {stash.STASH_CONFIGURATION['backupDirectoryPath']} exceeds {maximumBackup} backup files.") - self.trimDbFiles(stash.STASH_CONFIGURATION['backupDirectoryPath'], maximumBackup) - else: - stash.TraceOnce(f"Skipping DB backup file trim because backupDirectoryPath does NOT exist. backupDirectoryPath={stash.STASH_CONFIGURATION['backupDirectoryPath']}") + result = self.jobIdOutput(self.runBackupTask(task)) elif task['task'] == "Scan": - result = stash.metadata_scan(paths=targetPaths) + result = self.jobIdOutput(stash.metadata_scan(paths=targetPaths)) elif task['task'] == "Auto Tag": - result = stash.metadata_autotag(paths=targetPaths) + result = self.jobIdOutput(stash.metadata_autotag(paths=targetPaths)) elif task['task'] == "Optimise Database": - result = stash.optimise_database() + result = self.jobIdOutput(stash.optimise_database()) elif task['task'] == "RenameGeneratedFiles": - result = stash.rename_generated_files() + result = self.jobIdOutput(stash.rename_generated_files()) elif task['task'] == "GQL": - result = stash.call_GQL(task['input']) + result = self.jobIdOutput(stash.call_GQL(task['input'])) + elif task['task'] == "Log": + Msg = "Scheduled Logging (INFO)." + if 'msg' in task and task['msg'] != "": + Msg = task['msg'] + result = stash.Log(Msg) + elif task['task'] == "Trace": + Msg = "Scheduled Logging (DBG)." + if 'msg' in task and task['msg'] != "": + Msg = task['msg'] + result = stash.Trace(Msg) + elif task['task'] == "LogOnce": + Msg = "Scheduled LogOnce." + if 'msg' in task and task['msg'] != "": + Msg = task['msg'] + result = stash.LogOnce(Msg) + elif task['task'] == "TraceOnce": + Msg = "Scheduled TraceOnce." 
+ if 'msg' in task and task['msg'] != "": + Msg = task['msg'] + result = stash.TraceOnce(Msg) + elif task['task'] == "CheckStashIsRunning": + result = self.checkStashIsRunning(task) elif task['task'] == "python": - if 'script' in task and task['script'] != "": - script = task['script'].replace("", f"{pathlib.Path(__file__).resolve().parent}{os.sep}") - stash.Log(f"Executing python script {script}.") - args = [script] - if 'args' in task and len(task['args']) > 0: - args = args + [task['args']] - detached = True - if 'detach' in task: - detached = task['detach'] - result = f"Python process PID = {stash.ExecutePythonScript(args, ExecDetach=detached)}" - else: - stash.Error(f"Can not run task '{task['task']}', because it's missing 'script' field.") + result = self.runPythonScript(task) elif task['task'] == "execute": - if 'command' in task and task['command'] != "": - cmd = task['command'].replace("", f"{pathlib.Path(__file__).resolve().parent}{os.sep}") - args = [cmd] - if 'args' in task and len(task['args']) > 0: - args = args + [task['args']] - stash.Log(f"Executing command arguments {args}.") - result = f"Execute process PID = {stash.ExecuteProcess(args)}" - else: - stash.Error(f"Can not run task '{task['task']}', because it's missing 'command' field.") + result = self.runExecuteProcessTask(task) else: - # ToDo: Add code to check if plugin is installed. - try: - if 'pluginId' in task and task['pluginId'] != "": - stash.Trace(f"Running plugin task pluginID={task['pluginId']}, task name = {task['task']}") - stash.run_plugin_task(plugin_id=task['pluginId'], task_name=task['task']) - else: - stash.Error(f"Can not run task '{task['task']}', because it's an invalid task.") - stash.LogOnce(f"If task '{task['task']}' is supposed to be a built-in task, check for correct task name spelling.") - stash.LogOnce(f"If task '{task['task']}' is supposed to be a plugin, make sure to include the pluginId field in the task. task={task}") - except Exception as e: - stash.LogOnce(f"Failed to call plugin {task['task']} with plugin-ID {task['pluginId']}. 
Error: {e}") - pass + result = self.jobIdOutput(self.runPluginTask(task)) if result: stash.Trace(f"Task '{task['task']}' result={result}") + def jobIdOutput(self, result): + if result == None or result == "": + return result + jobId = None + if type(result) is int: + jobId = result + elif str(result).isnumeric(): + jobId = int(result) + else: + return result + return f"Task started with Job-ID#({jobId})" + + def runExecuteProcessTask(self, task): + if 'command' in task and task['command'] != "": + cmd = task['command'].replace("", f"{pathlib.Path(__file__).resolve().parent}{os.sep}") + args = [cmd] + if 'args' in task and len(task['args']) > 0: + args = args + [task['args']] + stash.Log(f"Executing command arguments {args}.") + return f"Execute process PID = {stash.ExecuteProcess(args)}" + else: + stash.Error(f"Can not run task '{task['task']}', because it's missing 'command' field.") + return None + + def runPythonScript(self, task): + if 'script' in task and task['script'] != "": + script = task['script'].replace("", f"{pathlib.Path(__file__).resolve().parent}{os.sep}") + stash.Log(f"Executing python script {script}.") + args = [script] + if 'args' in task and len(task['args']) > 0: + args = args + [task['args']] + detached = True + if 'detach' in task: + detached = task['detach'] + return f"Python process PID = {stash.ExecutePythonScript(args, ExecDetach=detached)}" + else: + stash.Error(f"Can not run task '{task['task']}', because it's missing 'script' field.") + return None + + def runPluginTask(self, task): + try: + invalidDir = False + validDirMsg = "" + if 'validateDir' in task and task['validateDir'] != "": + invalidDir = True + communityPluginPath = f"{stash.PLUGINS_PATH}{os.sep}community{os.sep}{task['validateDir']}" + basePluginPath = f"{stash.PLUGINS_PATH}{os.sep}{task['validateDir']}" + if os.path.exists(communityPluginPath): + invalidDir = False + validDirMsg = f"Valid path in {communityPluginPath}" + elif os.path.exists(basePluginPath): + invalidDir = False + validDirMsg = f"Valid path in {basePluginPath}" + if invalidDir: + stash.Error(f"Could not run task '{task['task']}' because sub directory '{task['validateDir']}' does not exist under path '{stash.PLUGINS_PATH}'") + return None + if not turnOnSchedulerDeleteDup and (task['task'] == "Delete Duplicates" or ('taskName' in task and (task['taskName'] == "Delete Duplicates" or task['taskName'] == "Delete Tagged Duplicates")) or ('taskMode' in task and task['taskMode'] == "delete_duplicates_task")): + stash.Warn(f"Not running task {task['task']}, because [Delete Duplicate Scheduler] is NOT enabled. See Stash UI option Settings->Plugins->Plugins->FileMonitor->[Delete Duplicate Scheduler]") + return None + # The pluginId field is only here for backward compatibility, and should not be used in future scheduler configurations + if 'pluginId' in task and task['pluginId'] != "": # Obsolete method + stash.Trace(f"Adding to Task Queue plugin task pluginID={task['pluginId']}, task name = {task['task']}. {validDirMsg}") + return stash.run_plugin_task(plugin_id=task['pluginId'], task_name=task['task']) + else: + taskName = None + taskMode = None + if 'taskName' in task: + taskName = task['taskName'] + if 'taskMode' in task: + taskMode = task['taskMode'] + if ('taskQue' in task and task['taskQue'] == False) or taskName == None: + stash.Log(f"Running plugin task pluginID={task['task']}, task mode = {taskMode}. {validDirMsg}") + # Asynchronous threading logic to call run_plugin, because it's a blocking call. 
+ stash.run_plugin(plugin_id=task['task'], task_mode=taskMode, asyn=True) + return None + else: + stash.Trace(f"Adding to Task Queue plugin task pluginID={task['task']}, task name = {taskName}. {validDirMsg}") + return stash.run_plugin_task(plugin_id=task['task'], task_name=taskName) + except Exception as e: + stash.LogOnce(f"Failed to call plugin {task['task']} with plugin-ID {task.get('pluginId')}. Error: {e}") + return None + + def checkStashIsRunning(self, task = {}, sleepAfterStart = 10): + try: + result = stash.stash_version() + except: + stash.Error("Failed to get response from Stash.") + if platform.system() == "Windows": + execPath = f"{pathlib.Path(stash.PLUGINS_PATH).resolve().parent}{os.sep}stash-win.exe" + elif platform.system() == "Darwin": # MacOS + execPath = f"{pathlib.Path(stash.PLUGINS_PATH).resolve().parent}{os.sep}stash-macos" + elif platform.system().lower().startswith("linux"): + # ToDo: Need to verify this method will work for (stash-linux-arm32v6, stash-linux-arm32v7, and stash-linux-arm64v8) + if platform.system().lower().find("32v6") > -1: + execPath = f"{pathlib.Path(stash.PLUGINS_PATH).resolve().parent}{os.sep}stash-linux-arm32v6" + elif platform.system().lower().find("32v7") > -1: + execPath = f"{pathlib.Path(stash.PLUGINS_PATH).resolve().parent}{os.sep}stash-linux-arm32v7" + elif platform.system().lower().find("64v8") > -1: + execPath = f"{pathlib.Path(stash.PLUGINS_PATH).resolve().parent}{os.sep}stash-linux-arm64v8" + else: + execPath = f"{pathlib.Path(stash.PLUGINS_PATH).resolve().parent}{os.sep}stash-linux" + elif platform.system().lower().startswith("freebsd"): + execPath = f"{pathlib.Path(stash.PLUGINS_PATH).resolve().parent}{os.sep}stash-freebsd" + elif 'command' not in task or task['command'] == "": + stash.Error("Cannot start Stash, because the platform OS could not be determined. As a workaround, add a 'command' field to this task.") + return None + + if 'command' in task and task['command'] != "": + cmd = task['command'].replace("", f"{pathlib.Path(stash.PLUGINS_PATH).resolve().parent}{os.sep}") + args = [cmd] + else: + if os.path.isfile(execPath): + args = [execPath] + else: + stash.Error(f"Could not start Stash, because could not find executable Stash file '{execPath}'") + return None + result = f"Execute process PID = {stash.ExecuteProcess(args)}" + time.sleep(sleepAfterStart) + if "RunAfter" in task and len(task['RunAfter']) > 0: + for runAfterTask in task['RunAfter']: + self.runTask(runAfterTask) + return result + + def runBackupTask(self, task): + stash.LogOnce("Note: Backup task does not get listed in the Task Queue, but the user can verify that it started by looking in the Stash log file for an INFO level log line.") + result = stash.backup_database() + maximumBackup = stash.pluginSettings['zmaximumBackups'] + stash.Trace(f"maximumBackup={maximumBackup}") + if "maxBackups" in task: + maximumBackup = task['maxBackups'] + stash.Trace(f"maximumBackup={maximumBackup}") + if isinstance(maximumBackup, str): + maximumBackup = int(maximumBackup) + if maximumBackup < 2: + stash.TraceOnce(f"Skipping DB backup file trim because zmaximumBackups={maximumBackup}. Value has to be greater than 1.") + elif 'backupDirectoryPath' in stash.STASH_CONFIGURATION: + if len(stash.STASH_CONFIGURATION['backupDirectoryPath']) < 5: + stash.TraceOnce(f"Skipping DB backup file trim because backupDirectoryPath length is too short. Len={len(stash.STASH_CONFIGURATION['backupDirectoryPath'])}.
Only support length greater than 4 characters.") + elif os.path.exists(stash.STASH_CONFIGURATION['backupDirectoryPath']): + stash.LogOnce(f"Checking quantity of DB backups if path {stash.STASH_CONFIGURATION['backupDirectoryPath']} exceeds {maximumBackup} backup files.") + self.trimDbFiles(stash.STASH_CONFIGURATION['backupDirectoryPath'], maximumBackup) + else: + stash.TraceOnce(f"Skipping DB backup file trim because backupDirectoryPath does NOT exist. backupDirectoryPath={stash.STASH_CONFIGURATION['backupDirectoryPath']}") + return result + def trimDbFiles(self, dbPath, maxFiles): if not os.path.exists(dbPath): stash.LogOnce(f"Exiting trimDbFiles, because path {dbPath} does not exists.") @@ -297,10 +444,21 @@ def checkSchedulePending(self): schedule.run_pending() stash.TraceOnce("Pending check complete.") -TargetPaths = [] +TargetPaths = [] +lastScanJob = { + "id": -1, + "TargetPaths": [], + "DelayedProcessTargetPaths": [], + "timeAddedToTaskQueue": None, + "timeOutDelayProcess": 1, + "lastStatus" : "" +} +JOB_ENDED_STATUSES = ["FINISHED", "CANCELLED"] + def start_library_monitor(): global shouldUpdate global TargetPaths + global lastScanJob try: # Create shared memory buffer which can be used as singleton logic or to get a signal to quit task from external script shm_a = shared_memory.SharedMemory(name=SHAREDMEMORY_NAME, create=True, size=4) @@ -427,9 +585,21 @@ def on_any_event(event): break if stash.pluginSettings['turnOnScheduler']: stashScheduler.checkSchedulePending() - stash.LogOnce("Waiting for a file change-trigger.") - signal.wait(timeout=SIGNAL_TIMEOUT) - if stash.pluginSettings['turnOnScheduler'] and not shouldUpdate: + timeOutInSeconds = SIGNAL_TIMEOUT + if lastScanJob['DelayedProcessTargetPaths'] != [] and timeOutInSeconds > lastScanJob['timeOutDelayProcess']: + if lastScanJob['timeOutDelayProcess'] < MAX_TIMEOUT_FOR_DELAY_PATH_PROCESS: + lastScanJob['timeOutDelayProcess'] = lastScanJob['timeOutDelayProcess'] * 2 + if lastScanJob['timeOutDelayProcess'] > MAX_TIMEOUT_FOR_DELAY_PATH_PROCESS: + lastScanJob['timeOutDelayProcess'] = MAX_TIMEOUT_FOR_DELAY_PATH_PROCESS + timeOutInSeconds = lastScanJob['timeOutDelayProcess'] + stash.LogOnce(f"Awaiting file change-trigger, with a short timeout ({timeOutInSeconds} seconds), because of active delay path processing.") + else: + stash.LogOnce(f"Waiting for a file change-trigger. Timeout = {timeOutInSeconds} seconds.") + signal.wait(timeout=timeOutInSeconds) + if lastScanJob['DelayedProcessTargetPaths'] != []: + stash.TraceOnce(f"Processing delay scan for path(s) {lastScanJob['DelayedProcessTargetPaths']}") + break + elif stash.pluginSettings['turnOnScheduler'] and not shouldUpdate: stash.TraceOnce("Checking the scheduler.") elif shouldUpdate: stash.LogOnce("File change trigger occurred.") @@ -453,12 +623,45 @@ def on_any_event(event): stash.Log(f"[SpFl]Detected trigger file to kill FileMonitor. 
{SPECIAL_FILE_NAME}", printTo = stash.LOG_TO_FILE + stash.LOG_TO_CONSOLE + stash.LOG_TO_STASH) TargetPaths = [] TmpTargetPaths = list(set(TmpTargetPaths)) - if TmpTargetPaths != []: + if TmpTargetPaths != [] or lastScanJob['DelayedProcessTargetPaths'] != []: stash.Log(f"Triggering Stash scan for path(s) {TmpTargetPaths}") - if len(TmpTargetPaths) > 1 or TmpTargetPaths[0] != SPECIAL_FILE_DIR: + if lastScanJob['DelayedProcessTargetPaths'] != [] or len(TmpTargetPaths) > 1 or TmpTargetPaths[0] != SPECIAL_FILE_DIR: if not stash.DRY_RUN: - # ToDo: Consider using create_scene, update_scene, and destroy_scene over general method metadata_scan - stash.metadata_scan(paths=TmpTargetPaths) + if lastScanJob['id'] > -1: + if stashScheduler: + stashScheduler.checkStashIsRunning() + lastScanJob['lastStatus'] = stash.find_job(lastScanJob['id']) + elapsedTime = time.time() - lastScanJob['timeAddedToTaskQueue'] + if lastScanJob['lastStatus'] == None or lastScanJob['lastStatus'] == "" or 'status' not in lastScanJob['lastStatus']: + stash.Warn(f"Could not get a status from scan job {lastScanJob['id']}; result = {lastScanJob['lastStatus']}; Elapse-Time = {elapsedTime}") + else: + stash.Trace(f"Last Scan Job ({lastScanJob['id']}); Status = {lastScanJob['lastStatus']['status']}; result = {lastScanJob['lastStatus']}; Elapse-Time = {elapsedTime}") + if lastScanJob['lastStatus'] == None or lastScanJob['lastStatus'] == "" or 'status' not in lastScanJob['lastStatus'] or lastScanJob['lastStatus']['status'] in JOB_ENDED_STATUSES or elapsedTime > MAX_SECONDS_WAIT_SCANJOB_COMPLETE: + if elapsedTime > MAX_SECONDS_WAIT_SCANJOB_COMPLETE: + stash.Warn(f"Timeout occurred waiting for scan job {lastScanJob['id']} to complete. Elapse-Time = {elapsedTime}; Max-Time={MAX_SECONDS_WAIT_SCANJOB_COMPLETE}; Scan-Path(s) = {lastScanJob['TargetPaths']}") + lastScanJob['id'] = -1 + lastScanJob['timeOutDelayProcess'] = 1 + if len(lastScanJob['DelayedProcessTargetPaths']) > 0: + stash.Trace(f"Adding {lastScanJob['DelayedProcessTargetPaths']} to {TmpTargetPaths}") + for path in lastScanJob['DelayedProcessTargetPaths']: + if path not in TmpTargetPaths: + TmpTargetPaths.append(path) + # TmpTargetPaths += [lastScanJob['DelayedProcessTargetPaths']] + stash.Trace(f"TmpTargetPaths = {TmpTargetPaths}") + lastScanJob['DelayedProcessTargetPaths'] = [] + else: + if TmpTargetPaths != []: + stash.Trace(f"Adding {TmpTargetPaths} to {lastScanJob['DelayedProcessTargetPaths']}") + for path in TmpTargetPaths: + if path not in lastScanJob['DelayedProcessTargetPaths']: + lastScanJob['DelayedProcessTargetPaths'].append(path) + stash.Trace(f"lastScanJob['DelayedProcessTargetPaths'] = {lastScanJob['DelayedProcessTargetPaths']}") + if lastScanJob['id'] == -1: + stash.Trace(f"Calling metadata_scan for paths '{TmpTargetPaths}'") + lastScanJob['id'] = int(stash.metadata_scan(paths=TmpTargetPaths)) + lastScanJob['TargetPaths'] = TmpTargetPaths + lastScanJob['timeAddedToTaskQueue'] = time.time() + stash.Trace(f"metadata_scan JobId = {lastScanJob['id']}, Start-Time = {lastScanJob['timeAddedToTaskQueue']}, paths = {lastScanJob['TargetPaths']}") if RUN_CLEAN_AFTER_DELETE and RunCleanMetadata: stash.metadata_clean(paths=TmpTargetPaths, dry_run=stash.DRY_RUN) if RUN_GENERATE_CONTENT: @@ -520,8 +723,10 @@ def start_library_monitor_service(): pass stash.Trace("FileMonitor is not running, so it's safe to start it as a service.") args = [f"{pathlib.Path(__file__).resolve().parent}{os.sep}filemonitor.py", '--url', f"{stash.STASH_URL}"] + if stash.API_KEY: + args = args + ["-a", 
stash.API_KEY] stash.ExecutePythonScript(args) - + if parse_args.stop or parse_args.restart or stash.PLUGIN_TASK_NAME == "stop_library_monitor": stop_library_monitor() if parse_args.restart: @@ -539,9 +744,11 @@ def start_library_monitor_service(): elif not stash.CALLED_AS_STASH_PLUGIN: try: start_library_monitor() - stash.Trace(f"Command line FileMonitor EXIT") + stash.Trace("Command line FileMonitor EXIT") except Exception as e: - stash.Error(f"Exception while running FileMonitor from the command line. Error: {e}") + tb = traceback.format_exc() + stash.Error(f"Exception while running FileMonitor from the command line. Error: {e}\nTraceBack={tb}") + stash.log.exception('Got exception on main handler') else: stash.Log(f"Nothing to do!!! (stash.PLUGIN_TASK_NAME={stash.PLUGIN_TASK_NAME})") diff --git a/plugins/FileMonitor/filemonitor.yml b/plugins/FileMonitor/filemonitor.yml index 4d2adff4..19f448cd 100644 --- a/plugins/FileMonitor/filemonitor.yml +++ b/plugins/FileMonitor/filemonitor.yml @@ -1,6 +1,6 @@ name: FileMonitor description: Monitors the Stash library folders, and updates Stash if any changes occurs in the Stash library paths. -version: 0.8.2 +version: 0.9.0 url: https://github.com/David-Maisonave/Axter-Stash/tree/main/plugins/FileMonitor settings: recursiveDisabled: @@ -11,9 +11,13 @@ settings: displayName: Scheduler description: Enable to turn on the scheduler. See filemonitor_config.py for more details. type: BOOLEAN + turnOnSchedulerDeleteDup: + displayName: Delete Duplicate Scheduler + description: Turn on scheduler for deleting duplicates in Stash library. (Requires plugin DupFileManager and [Scheduler] enabled) + type: BOOLEAN zmaximumBackups: displayName: Max DB Backups - description: When value greater than 1, will trim the number of database backup files to set value. Requires [Scheduler] enabled and backupDirectoryPath populated with path length longer than 4. + description: Trim database backup files to set value. Requires [Scheduler] enabled and backupDirectoryPath path length longer than 4. type: NUMBER zzdebugTracing: displayName: Debug Tracing @@ -29,10 +33,10 @@ tasks: defaultArgs: mode: start_library_monitor_service - name: Stop Library Monitor - description: Stops library monitoring within 2 minute. + description: Stops library monitoring within 2 minutes. defaultArgs: mode: stop_library_monitor - name: Monitor as a Plugin - description: Run [Library Monitor] as a plugin (*not recommended method*) + description: Run [Library Monitor] as a plugin (*Not recommended*) defaultArgs: mode: start_library_monitor diff --git a/plugins/FileMonitor/filemonitor_config.py b/plugins/FileMonitor/filemonitor_config.py index a5f6f00a..a2456471 100644 --- a/plugins/FileMonitor/filemonitor_config.py +++ b/plugins/FileMonitor/filemonitor_config.py @@ -8,19 +8,31 @@ # The [Auto Tag] task is an example of a daily scheduled task. # The [Generate] task is an example of a weekly scheduled task. # The [Backup] task is an example of a monthly scheduled task. - # Note: The hour section in time MUST be a two digit number, and use military time format. Example: 1PM = "13:00" and 1AM = "01:00" + # The hour section in time MUST be a two digit number, and use military time format. Example: 1PM = "13:00" and 1AM = "01:00" + # Note: Look at filemonitor_task_examples.py for many example task having more detailed usage. "task_scheduler": [ - # To create a daily task, include each day of the week for the weekday field. 
- {"task" : "Auto Tag", "weekday" : "monday,tuesday,wednesday,thursday,friday,saturday,sunday", "time" : "06:00"}, # Auto Tag -> [Auto Tag] (Daily at 6AM) - {"task" : "Optimise Database", "weekday" : "monday,tuesday,wednesday,thursday,friday,saturday,sunday", "time" : "07:00"}, # Maintenance -> [Optimise Database] (Daily at 7AM) - - # The following tasks are scheduled for 3 days out of the week. - {"task" : "Clean", "weekday" : "monday,wednesday,friday", "time" : "08:00"}, # Maintenance -> [Clean] (3 days per week at 8AM) - {"task" : "Clean Generated Files", "weekday" : "tuesday,thursday,saturday", "time" : "08:00"}, # Maintenance -> [Clean Generated Files] (3 days per week at 8AM) + # To create a daily task, include each day of the week for the weekday field or "every" + # Optional field for task "Auto Tag" is 'paths'. For detail usage, see example #A3: in filemonitor_task_examples.py + {"task" : "Auto Tag", "weekday" : "monday,tuesday,wednesday,thursday,friday,saturday,sunday", "time" : "05:00"}, # Auto Tag -> [Auto Tag] (Daily at 6AM) + # Task "Create Tags" is a plugin task. Optional fields are taskName and validateDir field. For detail usage, see examples #B1, #B2, #B3, and #B4 in filemonitor_task_examples.py + {"task" : "pathParser", "taskName" : "Create Tags", "validateDir" : "pathParser", + "weekday" : "every", "time" : "05:30"}, # [Plugin Tasks] - > [Path Parser] -> [Create Tags] (Daily at 5AM) : This task requires plugin [Path Parser] + {"task" : "Optimise Database", "weekday" : "monday,tuesday,wednesday,thursday,friday", "time" : "07:00"}, # Maintenance -> [Optimise Database] (Every weekday at 7AM) # The following tasks are scheduled weekly - {"task" : "Generate", "weekday" : "sunday", "time" : "07:00"}, # Generated Content-> [Generate] (Every Sunday at 7AM) - {"task" : "Scan", "weekday" : "sunday", "time" : "03:00"}, # Library -> [Scan] (Weekly) (Every Sunday at 3AM) + # Optional field for task "Scan", "Auto Tag", and "Clean" is 'paths'. For detail usage, see examples #A3: in filemonitor_task_examples.py + {"task" : "Scan", "weekday" : "saturday", "time" : "03:00"}, # Library -> [Scan] (Weekly) (Every saturday at 3AM) + {"task" : "Auto Tag", "weekday" : "saturday", "time" : "03:30"}, # Auto Tag -> [Auto Tag] (Weekly) (Every saturday at 3:30AM) + {"task" : "Generate", "weekday" : "saturday", "time" : "04:00"}, # Generated Content-> [Generate] (Every saturday at 4AM) + {"task" : "Clean", "weekday" : "saturday", "time" : "04:30"}, # Maintenance -> [Clean] (Every saturday at 4:30AM) + {"task" : "Clean Generated Files", "weekday" : "saturday", "time" : "05:00"}, # Maintenance -> [Clean Generated Files] (Every saturday at 5AM) + {"task" : "Optimise Database", "weekday" : "saturday", "time" : "05:30"}, # Maintenance -> [Optimise Database] (Every saturday at 5:30AM) + # The following task runs plugin DupFileManager (tag_duplicates_task) if the plugin is installed. The task runs in the background because of "taskQue" : False + {"task" : "DupFileManager", "taskMode" : "tag_duplicates_task", "validateDir" : "DupFileManager", "taskQue" : False, + "weekday" : "sunday", "time" : "02:30"}, # [Plugin Tasks] -> DupFileManager -> [Tag Duplicates] (Sunday at 2:30AM) + # The following task requires plugin DupFileManager and UI option [Delete Duplicate Scheduler] enabled. 
+ {"task" : "DupFileManager", "taskName" : "Delete Tagged Duplicates", "validateDir" : "DupFileManager", + "weekday" : "saturday", "time" : "02:30"}, # [Plugin Tasks] -> DupFileManager -> [Delete Tagged Duplicates] 6 days after tagging at 2:30AM # To perform a task monthly, specify the day of the month as in the weekly schedule format, and add a monthly field. # The monthly field value must be 1, 2, 3, or 4. @@ -28,84 +40,30 @@ # 2 = 2nd specified weekday of the month. Example 2nd monday of the month. # 3 = 3rd specified weekday of the month. # 4 = 4th specified weekday of the month. - # The following task is scheduled monthly - {"task" : "Backup", "weekday" : "sunday", "time" : "01:00", "monthly" : 2}, # Backup -> [Backup] 2nd sunday of the month at 1AM (01:00) - - # The following task is the syntax used for a plugins. A plugin task requires the plugin name for the [task] field, and the plugin-ID for the [pluginId] field. - # This task requires plugin [Path Parser], and it's disabled by default. - {"task" : "Create Tags", "pluginId" : "pathParser", "weekday" : "monday,tuesday,wednesday,thursday,friday,saturday,sunday", "time" : "DISABLED"}, # To enable this task change time "DISABLED" to a valid time. - - # Example#A1: Task to call call_GQL API with custom input - {"task" : "GQL", "input" : "mutation OptimiseDatabase { optimiseDatabase }", "weekday" : "sunday", "time" : "DISABLED"}, # To enable, change "DISABLED" to valid time - - # Example#A2: Task to call a python script. When this task is executed, the keyword is replaced by filemonitor.py current directory. - # The args field is NOT required. - {"task" : "python", "script" : "test_script_hello_world.py", "args" : "--MyArguments Hello", "weekday" : "monday", "time" : "DISABLED"}, # change "DISABLED" to valid time - - # Example#A3: The following task types can optionally take a [paths] field. If the paths field does not exists, the paths in the Stash library is used. - {"task" : "Scan", "paths" : [r"E:\MyVideos\downloads", r"V:\MyOtherVideos"], "weekday" : "sunday", "time" : "DISABLED"}, # Library -> [Scan] - {"task" : "Auto Tag", "paths" : [r"E:\MyVideos\downloads", r"V:\MyOtherVideos"], "weekday" : "monday,tuesday,wednesday,thursday,friday,saturday,sunday", "time" : "DISABLED"}, # Auto Tag -> [Auto Tag] - {"task" : "Clean", "paths" : ["E:\\MyVideos\\downloads", "V:\\MyOtherVideos"], "weekday" : "sunday", "time" : "DISABLED"}, # Generated Content-> [Generate] - - # Example#A4: Task which calls Migrations -> [Rename generated files] - {"task" : "RenameGeneratedFiles", "weekday" : "tuesday,thursday", "time" : "DISABLED"}, # (bi-weekly) example - - # Example#A5: The Backup task using optional field maxBackup, which overrides the UI [Max DB Backups] value - {"task" : "Backup", "maxBackup" : 12, "weekday" : "sunday", "time" : "DISABLED"}, # Trim the DB backup files down to 12 backup files. - {"task" : "Backup", "maxBackup" : 0, "weekday" : "sunday", "time" : "DISABLED"}, # When used with a zero value, it will make sure no file trimming will occur no matter the value of the UI [Max DB Backups] + # The Backup task is scheduled monthly + # Optional field for task "Backup" is maxBackup. For detail usage, see example #A5 in filemonitor_task_examples.py + {"task" : "Backup", "weekday" : "sunday", "time" : "01:00", "monthly" : 2}, # Backup -> [Backup] 2nd sunday of the month at 1AM (01:00) - # The above weekday method is the more reliable method to schedule task, because it doesn't rely on FileMonitor running continuously (non-stop). 
- - # The below examples use frequency field method which can work with minutes and hours. A zero frequency value disables the task. - # Note: Both seconds and days are also supported for the frequency field. - # However, seconds is mainly used for test purposes. - # And days usage is discourage, because it only works if FileMonitor is running for X many days non-stop. - # The below example tasks are done using hours and minutes, however any of these task types can be converted to a daily, weekly, or monthly syntax. - - # Example#B1: Task for calling another Stash plugin, which needs plugin name and plugin ID. - {"task" : "PluginButtonName_Here", "pluginId" : "PluginId_Here", "hours" : 0}, # The zero frequency value makes this task disabled. - - # Example#B2: Task to execute a command - {"task" : "execute", "command" : "C:\\MyPath\\HelloWorld.bat", "hours" : 0}, - - # Example#B3: Task to execute a command with optional args field, and using keyword , which gets replaced with filemonitor.py current directory. - {"task" : "execute", "command" : "HelloWorld.cmd", "args" : "--name David", "minutes" : 0}, - - # Comment out **test** tasks. - # To run test, enable all task, and start FileMonitor as a service. - # When executed, these task should be seen in the Task Queue unless otherwise stated in comments. - # These tasks are usually executed before updating major releases on https://github.com/David-Maisonave/Axter-Stash/blob/main/plugins/FileMonitor - # These tasks are ALWAYS executed before updating to https://github.com/stashapp/CommunityScripts - # MUST ToDo: Always comment out below test task before checking in this code!!! - # {"task" : "TestBadTaskNameError", "minutes" : 1}, # Test invalid task name - # {"task" : "execute", "minutes" : 1}, # Test invalid task (missing command) - # {"task" : "python", "minutes" : 1}, # Test invalid task (missing scripts) - # {"task" : "PluginWithOutID", "minutes" : 1}, # Test invalid task (missing pluginId) - # {"task" : "execute", "command" : "", "minutes" : 1}, # Test invalid task (missing command) - # {"task" : "python", "script" : "", "minutes" : 1}, # Test invalid task (missing scripts) - # {"task" : "PluginWithOutID", "pluginId" : "", "minutes" : 1}, # Test invalid task (missing pluginId) - # {"task" : "Generate", "weekday" : "friday", "time" : "00:00"}, - # {"task" : "Clean", "weekday" : "friday", "time" : "00:00"}, - # {"task" : "Auto Tag", "weekday" : "friday", "time" : "00:00"}, - # {"task" : "Optimise Database", "weekday" : "friday", "time" : "00:00"}, - # {"task" : "Create Tags", "pluginId" : "pathParser", "weekday" : "friday", "time" : "00:00"}, # In task queue as -> Running plugin task: Create Tags - # {"task" : "Scan","paths": [r"B:\_\SpecialSet", r"C:\foo"], "weekday" : "friday", "time" : "00:00"}, - # {"task" : "GQL", "input" : "mutation OptimiseDatabase { optimiseDatabase }", "weekday" : "friday", "time" : "00:00"}, # In task queue as -> Optimising database... - # {"task" : "Clean Generated Files", "weekday" : "friday", "time" : "00:00"}, - # {"task" : "RenameGeneratedFiles", "weekday" : "friday", "time" : "00:00"}, # In task queue as -> Migrating scene hashes... - # {"task" : "Backup", "maxBackups" : 0, "weekday" : "friday", "time" : "00:00"}, # Does NOT show up in the Task Queue. Must check STASH log file to verify run. - # {"task" : "python", "script" : "test_hello_world2.py", "weekday" : "friday", "time" : "00:00"}, # Does NOT show up in the Task Queue. Check FileMonitor log file, and look for -> Task 'python' result=??? 
- # {"task" : "python", "script" : "test_hello_world.py", "detach" : False, "weekday" : "friday", "time" : "00:00"}, # Does NOT show up in the Task Queue. Check FileMonitor log file, and look for -> Task 'python' result=??? - # {"task" : "execute", "command" : "test_hello_world2.cmd", "weekday" : "friday", "time" : "00:00"}, # Does NOT show up in the Task Queue. Check FileMonitor log file, and look for -> Task 'execute' result=??? - # {"task" : "execute", "command" : "test_hello_world.bat", "args" : "--name David", "weekday" : "friday", "time" : "00:00"}, # Does NOT show up in the Task Queue. Check FileMonitor log file, and look for -> Task 'execute' result=??? + # The [CheckStashIsRunning] task checks if Stash is running. If not running, it will start up stash. + # This task only works if FileMonitor is started as a service or in command line mode. + # Optional fields are 'command' and 'RunAfter'. For detail usage, see examples #C1 and #C2 in filemonitor_task_examples.py + {"task" : "CheckStashIsRunning", "minutes" :5}, # Checks every 5 minutes ], # Timeout in seconds. This is how often FileMonitor will check the scheduler and (in-plugin mode) check if another job (Task) is in the queue. "timeOut": 60, + # Timeout in seconds for delay processing of path scan jobs. This value should always be smaller than timeOut + "timeOutDelayProcess": 32, + # Maximum time to wait for a scan job to complete. Need this incase Stash gets restarted in the middle of a scan job. + "maxWaitTimeJobFinish": 30 * 60, # Wait 30 minutes max + + # ApiKey only needed when Stash credentials are set and while calling FileMonitor via command line. + "apiKey" : "", # Example: "eyJabccideJIUfg1NigRInD345I6dfpXVCfd.eyJ1abcDEfGheHRlHJiJklMonPQ32FsVewtsfSIsImlhdCI6MTcyMzg2NzkwOH0.5bkHU6sfs3532dsryu1ki3iFBwnd_4AHs325yHljsPw" # Enable to run metadata clean task after file deletion. "runCleanAfterDelete": False, # Enable to run metadata_generate (Generate Content) after metadata scan. "runGenerateContent": False, + # When populated (comma separated list [lower-case]), only scan for changes for specified file extension "fileExtTypes" : "", # Example: "mp4,mpg,mpeg,m2ts,wmv,avi,m4v,flv,mov,asf,mkv,divx,webm,ts,mp2t" # When populated, only include file changes in specified paths. @@ -113,7 +71,7 @@ # When populated, exclude file changes in paths that start with specified entries. "excludePathChanges" :[], # Example: ["C:\\MyVideos\\SomeSubFolder\\", "C:\\MyImages\\folder\\Sub\\"] - # The following fields are ONLY used when running FileMonitor in script mode. + # The following fields are ONLY used when running FileMonitor in command line mode. "endpoint_Scheme" : "http", # Define endpoint to use when contacting the Stash server "endpoint_Host" : "0.0.0.0", # Define endpoint to use when contacting the Stash server "endpoint_Port" : 9999, # Define endpoint to use when contacting the Stash server diff --git a/plugins/FileMonitor/filemonitor_self_unit_test.py b/plugins/FileMonitor/filemonitor_self_unit_test.py new file mode 100644 index 00000000..135a1eba --- /dev/null +++ b/plugins/FileMonitor/filemonitor_self_unit_test.py @@ -0,0 +1,46 @@ +# **test** tasks which are disabled by default. To enable test tasks, set selfUnitTest to True. +# To run test, enable all task, and start FileMonitor as a service. +# When executed, these task should be seen in the Task Queue unless otherwise stated in comments. 
+# These tasks are usually executed before updating major releases on https://github.com/David-Maisonave/Axter-Stash/blob/main/plugins/FileMonitor
+# These tasks are ALWAYS executed before updating to https://github.com/stashapp/CommunityScripts
+self_unit_test = {
+    "task_scheduler_repeat": [
+        {"task" : "TestBadTaskNameError", "minutes" : 1}, # Test invalid task name
+        {"task" : "execute", "minutes" : 1}, # Test invalid task (missing command)
+        {"task" : "python", "minutes" : 1}, # Test invalid task (missing script)
+        {"task" : "execute", "command" : "", "minutes" : 1}, # Test invalid task (missing command)
+        {"task" : "python", "script" : "", "minutes" : 1}, # Test invalid task (missing script)
+        {"task" : "Foo","taskName":"foo","validateDir":"foo", "minutes" : 1}, # Test invalid task (missing plugin directory)
+        {"task" : "Log", "msg" : "Testing Scheduled Log", "minutes" : 1}, # Test plugin log file
+        {"task" : "Trace", "minutes" : 1}, # Test plugin trace logging
+        {"task" : "LogOnce", "seconds" : 15}, # Test LogOnce
+        {"task" : "TraceOnce", "seconds" : 5}, # Test TraceOnce
+        {"task" : "CheckStashIsRunning", "RunAfter" : [{"task" : "Scan"},{"task" : "Backup", "maxBackup" : 0},{"task" : "Clean"}], "seconds" : 15}, # Test RunAfter
+        {"task" : "CheckStashIsRunning", "command" : "stash-win.exe", "seconds" : 10}, # Check if Stash is running. If not running, start up Stash.
+        # {"task" : "CheckStashIsRunning", "RunAfter" : [{"task" : "Scan"}], "seconds" : 15}, # To test CheckStashIsRunning, kill Stash after starting the FileMonitor service via the following command: taskkill /F /IM "stash-win.exe"
+    ],
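The repeat entries above are driven purely by their seconds/minutes/hours frequency fields. A minimal sketch of that dispatch model follows (hypothetical code, standard library only; FileMonitor's actual loop also services the Stash task queue):

```python
import time

# Hypothetical sketch of driving frequency-based entries without an external
# scheduler package; a zero (or missing) frequency disables the task.
UNITS = {"seconds": 1, "minutes": 60, "hours": 3600, "days": 86400}

def frequency_seconds(task: dict) -> int:
    return sum(task.get(unit, 0) * scale for unit, scale in UNITS.items())

def run_repeat_tasks(tasks: list, handler, tick: float = 1.0):
    freqs = {i: frequency_seconds(t) for i, t in enumerate(tasks)}
    next_run = {i: time.time() + f for i, f in freqs.items() if f > 0}
    while next_run:
        now = time.time()
        for i, due in next_run.items():
            if now >= due:
                handler(tasks[i])               # dispatch on tasks[i]["task"]
                next_run[i] = now + freqs[i]
        time.sleep(tick)
```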
+ {"task" : "python", "script" : "test_hello_world2.py", "weekday" : "every", "time" : "06:17"}, # Does NOT show up in the Task Queue. Check FileMonitor log file, and look for -> Task 'python' result=??? + {"task" : "python", "script" : "test_hello_world.py", "detach" : False, "weekday" : "every", "time" : "06:17"}, # Does NOT show up in the Task Queue. Check FileMonitor log file, and look for -> Task 'python' result=??? + {"task" : "execute", "command" : "test_hello_world2.cmd", "weekday" : "every", "time" : "06:17"}, # Does NOT show up in the Task Queue. Check FileMonitor log file, and look for -> Task 'execute' result=??? + {"task" : "execute", "command" : "test_hello_world.bat", "args" : "--name David", "weekday" : "every", "time" : "06:17"}, # Does NOT show up in the Task Queue. Check FileMonitor log file, and look for -> Task 'execute' result=??? + ], + # MUST ToDo: Always set selfUnitTest to False before checking in this code!!! + "selfUnitTest_repeat" : False , # Enable to turn on self unit test. + "selfUnitTest_set_time" : False , # Enable to turn on self unit test. +} diff --git a/plugins/FileMonitor/filemonitor_task_examples.py b/plugins/FileMonitor/filemonitor_task_examples.py new file mode 100644 index 00000000..2bd58126 --- /dev/null +++ b/plugins/FileMonitor/filemonitor_task_examples.py @@ -0,0 +1,53 @@ +# Below are example tasks. +# They are all disabled by default, by having zero value for time frequency, or by having "DISABLED" set for the time field. +# To enable these tasks, set the frequency or the time value to a valid frequency or time stamp. +task_examples = { + "task_scheduler": [ + # Example#A1: Task to call call_GQL API with custom input + {"task" : "GQL", "input" : "mutation OptimiseDatabase { optimiseDatabase }", "weekday" : "sunday", "time" : "DISABLED"}, # To enable, change "DISABLED" to valid time + + # Example#A2: Task to call a python script. When this task is executed, the keyword is replaced by filemonitor.py current directory. + # The args field is NOT required. + {"task" : "python", "script" : "test_script_hello_world.py", "args" : "--MyArguments Hello", "weekday" : "monday", "time" : "DISABLED"}, # change "DISABLED" to valid time + + # Example#A3: The following task types can optionally take a [paths] field. If the paths field does not exists, the paths in the Stash library is used. + {"task" : "Scan", "paths" : [r"E:\MyVideos\downloads", r"V:\MyOtherVideos"], "weekday" : "sunday", "time" : "DISABLED"}, # Library -> [Scan] + {"task" : "Auto Tag", "paths" : [r"E:\MyVideos\downloads", r"V:\MyOtherVideos"], "weekday" : "monday,tuesday,wednesday,thursday,friday,saturday,sunday", "time" : "DISABLED"}, # Auto Tag -> [Auto Tag] + {"task" : "Clean", "paths" : ["E:\\MyVideos\\downloads", "V:\\MyOtherVideos"], "weekday" : "sunday", "time" : "DISABLED"}, # Generated Content-> [Generate] + + # Example#A4: Task which calls Migrations -> [Rename generated files] + {"task" : "RenameGeneratedFiles", "weekday" : "tuesday,thursday", "time" : "DISABLED"}, # (bi-weekly) example + + # Example#A5: The Backup task using optional field maxBackup, which overrides the UI [Max DB Backups] value + {"task" : "Backup", "maxBackup" : 12, "weekday" : "sunday", "time" : "DISABLED"}, # Trim the DB backup files down to 12 backup files. 
+ {"task" : "Backup", "maxBackup" : 0, "weekday" : "sunday", "time" : "DISABLED"}, # When used with a zero value, it will make sure no file trimming will occur no matter the value of the UI [Max DB Backups] + + # The above weekday method is the more reliable method to schedule task, because it doesn't rely on FileMonitor running continuously (non-stop). + + # The below examples use frequency field method which can work with minutes and hours. A zero frequency value disables the task. + # Note: Both seconds and days are also supported for the frequency field. + # However, seconds is mainly used for test purposes. + # And days usage is discourage, because it only works if FileMonitor is running for X many days non-stop. + # The below example tasks are done using hours and minutes, however any of these task types can be converted to a daily, weekly, or monthly syntax. + + # Example#B1: The following task is the syntax used for a plugin. A plugin task requires the plugin-ID for the [task] field. Optional fields are taskName, taskMode, validateDir, and taskQue. + {"task" : "PluginId_Here", "taskName" : "Task Name or Plugin Button Name Here", "hours" : 0}, # The zero frequency value makes this task disabled. + # Example#B2: Optionally, the validateDir field can be included which is used to validate that the plugin is installed either under the plugins folder or under the plugins-community folder. + {"task" : "PluginId_Here", "taskName" : "Task Name or Plugin Button Name Here", "validateDir" : "UsuallySameAsPluginID", "hours" : 0}, + # Example#B3: To run a plugin WITHOUT using the Task Queue, use taskMode instead of taskName and/or add field "taskQue":False. The plugin will run immediately + {"task" : "PluginId_Here", "taskMode" : "Plugin_Task_MODE", "taskQue" : False, "hours" : 0}, # Do NOT use taskName when including "taskQue":False + # Example#B4: When taskName field is missing, it will always run the task without using the Task Queue. The plugin will run immediately + {"task" : "PluginId_Here", "hours" : 0}, + + # Example#C1: Task to execute a command + {"task" : "execute", "command" : "C:\\MyPath\\HelloWorld.bat", "hours" : 0}, + + # Example#C2: Task to execute a command with optional args field, and using keyword , which gets replaced with filemonitor.py current directory. + {"task" : "execute", "command" : "HelloWorld.cmd", "args" : "--name David", "minutes" : 0}, + + # Example#D1 Some OS may need the "command" field, which specifies the binary path. 
+ {"task" : "CheckStashIsRunning", "command" : "stash-linux-arm64v8", "minutes" :0}, + # Example#D2 RunAfter field can be used to specify task to run after starting Stash + {"task" : "CheckStashIsRunning", "RunAfter" : [{"task" : "Scan"},{"task" : "Backup", "maxBackup" : 0},{"task" : "Clean"}], "minutes" :0}, + ], +} diff --git a/plugins/FileMonitor/requirements.txt b/plugins/FileMonitor/requirements.txt index b219ea1e..19a1174d 100644 --- a/plugins/FileMonitor/requirements.txt +++ b/plugins/FileMonitor/requirements.txt @@ -1,4 +1,3 @@ -stashapp-tools >= 0.2.49 +stashapp-tools >= 0.2.50 pyYAML -watchdog -schedule \ No newline at end of file +watchdog \ No newline at end of file diff --git a/plugins/RenameFile/README.md b/plugins/RenameFile/README.md index acf06b9a..c2f7041e 100644 --- a/plugins/RenameFile/README.md +++ b/plugins/RenameFile/README.md @@ -1,4 +1,4 @@ -# RenameFile: Ver 0.4.1 (By David Maisonave) +# RenameFile: Ver 0.4.6 (By David Maisonave) RenameFile is a [Stash](https://github.com/stashapp/stash) plugin which performs the following tasks. - **Rename Scene File Name** (On-The-Fly) - **Append tag names** to file name @@ -57,3 +57,11 @@ That's it!!! - Main options are accessible in the GUI via Settings->Plugins->Plugins->[RenameFile]. - Advanced options are avialable in the **renamefile_settings.py** file. After making changes, go to http://localhost:9999/settings?tab=plugins, and click [Reload Plugins]. +## Bugs and Feature Request +Please use the following link to report RenameFile bugs: +[RenameFile Bug Report](https://github.com/David-Maisonave/Axter-Stash/issues/new?assignees=&labels=Plugin_Bug&projects=&template=bug_report_plugin.yml&title=%F0%9F%AA%B2%5BRenameFile%5D+Your_Short_title) + +Please use the following link to report RenameFile Feature Request:[RenameFile Feature Reques](https://github.com/David-Maisonave/Axter-Stash/issues/new?assignees=&labels=Enhancement&projects=&template=feature_request_plugin.yml&title=%F0%9F%92%A1%EF%B8%8F%5BEnhancement%5D%3A%5BRenameFile%5D+Your_Short_title) + +Please do **NOT** use the feature request to include any problems associated with errors. Instead use the bug report for error issues. + diff --git a/plugins/RenameFile/StashPluginHelper.py b/plugins/RenameFile/StashPluginHelper.py new file mode 100644 index 00000000..6f0d3d15 --- /dev/null +++ b/plugins/RenameFile/StashPluginHelper.py @@ -0,0 +1,526 @@ +from stashapi.stashapp import StashInterface +from logging.handlers import RotatingFileHandler +import re, inspect, sys, os, pathlib, logging, json +import concurrent.futures +from stashapi.stash_types import PhashDistance +import __main__ + +_ARGUMENT_UNSPECIFIED_ = "_ARGUMENT_UNSPECIFIED_" + +# StashPluginHelper (By David Maisonave aka Axter) + # See end of this file for example usage + # Log Features: + # Can optionally log out to multiple outputs for each Log or Trace call. 
diff --git a/plugins/FileMonitor/requirements.txt b/plugins/FileMonitor/requirements.txt
index b219ea1e..19a1174d 100644
--- a/plugins/FileMonitor/requirements.txt
+++ b/plugins/FileMonitor/requirements.txt
@@ -1,4 +1,3 @@
-stashapp-tools >= 0.2.49
+stashapp-tools >= 0.2.50
 pyYAML
-watchdog
-schedule
\ No newline at end of file
+watchdog
\ No newline at end of file
diff --git a/plugins/RenameFile/README.md b/plugins/RenameFile/README.md
index acf06b9a..c2f7041e 100644
--- a/plugins/RenameFile/README.md
+++ b/plugins/RenameFile/README.md
@@ -1,4 +1,4 @@
-# RenameFile: Ver 0.4.1 (By David Maisonave)
+# RenameFile: Ver 0.4.6 (By David Maisonave)
 RenameFile is a [Stash](https://github.com/stashapp/stash) plugin which performs the following tasks.
 - **Rename Scene File Name** (On-The-Fly)
 - **Append tag names** to file name
@@ -57,3 +57,11 @@ That's it!!!
 - Main options are accessible in the GUI via Settings->Plugins->Plugins->[RenameFile].
 - Advanced options are available in the **renamefile_settings.py** file. After making changes, go to http://localhost:9999/settings?tab=plugins, and click [Reload Plugins].
+## Bugs and Feature Requests
+Please use the following link to report RenameFile bugs:
+[RenameFile Bug Report](https://github.com/David-Maisonave/Axter-Stash/issues/new?assignees=&labels=Plugin_Bug&projects=&template=bug_report_plugin.yml&title=%F0%9F%AA%B2%5BRenameFile%5D+Your_Short_title)
+
+Please use the following link to submit a RenameFile feature request:
+[RenameFile Feature Request](https://github.com/David-Maisonave/Axter-Stash/issues/new?assignees=&labels=Enhancement&projects=&template=feature_request_plugin.yml&title=%F0%9F%92%A1%EF%B8%8F%5BEnhancement%5D%3A%5BRenameFile%5D+Your_Short_title)
+
+Please do **NOT** use the feature request link to report errors; use the bug report link instead.
+
diff --git a/plugins/RenameFile/StashPluginHelper.py b/plugins/RenameFile/StashPluginHelper.py
new file mode 100644
index 00000000..6f0d3d15
--- /dev/null
+++ b/plugins/RenameFile/StashPluginHelper.py
@@ -0,0 +1,526 @@
+from stashapi.stashapp import StashInterface
+from logging.handlers import RotatingFileHandler
+import re, inspect, sys, os, pathlib, logging, json
+import concurrent.futures
+from stashapi.stash_types import PhashDistance
+import __main__
+
+_ARGUMENT_UNSPECIFIED_ = "_ARGUMENT_UNSPECIFIED_"
+
+# StashPluginHelper (By David Maisonave aka Axter)
+    # See end of this file for example usage
+    # Log Features:
+        # Can optionally log out to multiple outputs for each Log or Trace call.
+        # Logging includes the source code line number
+        # Sets a maximum plugin log file size
+    # Stash Interface Features:
+        # Gets STASH_URL value from command line argument and/or from STDIN_READ
+        # Sets FRAGMENT_SERVER based on command line arguments or STDIN_READ
+        # Sets PLUGIN_ID based on the main script file name (in lower case)
+        # Gets PLUGIN_TASK_NAME value
+        # Sets pluginSettings (The plugin UI settings)
+    # Misc Features:
+        # Gets DRY_RUN value from command line argument and/or from UI and/or from config file
+        # Gets DEBUG_TRACING value from command line argument and/or from UI and/or from config file
+        # Sets RUNNING_IN_COMMAND_LINE_MODE to True if it detects multiple arguments
+        # Sets CALLED_AS_STASH_PLUGIN to True if it's able to read from STDIN_READ
+class StashPluginHelper(StashInterface):
+    # Primary Members for external reference
+    PLUGIN_TASK_NAME = None
+    PLUGIN_ID = None
+    PLUGIN_CONFIGURATION = None
+    PLUGINS_PATH = None
+    pluginSettings = None
+    pluginConfig = None
+    STASH_URL = None
+    STASH_CONFIGURATION = None
+    JSON_INPUT = None
+    DEBUG_TRACING = False
+    DRY_RUN = False
+    CALLED_AS_STASH_PLUGIN = False
+    RUNNING_IN_COMMAND_LINE_MODE = False
+    FRAGMENT_SERVER = None
+    STASHPATHSCONFIG = None
+    STASH_PATHS = []
+    API_KEY = None
+    excludeMergeTags = None
+
+    # printTo argument
+    LOG_TO_FILE = 1
+    LOG_TO_CONSOLE = 2 # Note: Output is only visible when running in command line mode. In plugin mode, this output is lost.
+    LOG_TO_STDERR = 4 # Note: In plugin mode, output to StdErr ALWAYS gets sent to stash logging as an error.
+    LOG_TO_STASH = 8
+    LOG_TO_WARN = 16
+    LOG_TO_ERROR = 32
+    LOG_TO_CRITICAL = 64
+    LOG_TO_ALL = LOG_TO_FILE + LOG_TO_CONSOLE + LOG_TO_STDERR + LOG_TO_STASH
+
+    # Misc class variables
+    MAIN_SCRIPT_NAME = None
+    LOG_LEVEL = logging.INFO
+    LOG_FILE_DIR = None
+    LOG_FILE_NAME = None
+    STDIN_READ = None
+    pluginLog = None
+    logLinePreviousHits = []
+    thredPool = None
+    STASH_INTERFACE_INIT = False
+    _mergeMetadata = None
+    encodeToUtf8 = False
+    convertToAscii = False # If set True, it takes precedence over encodeToUtf8
+
+    # Prefix message value
+    LEV_TRACE = "TRACE: "
+    LEV_DBG = "DBG: "
+    LEV_INF = "INF: "
+    LEV_WRN = "WRN: "
+    LEV_ERR = "ERR: "
+    LEV_CRITICAL = "CRITICAL: "
+
+    # Default format
+    LOG_FORMAT = "[%(asctime)s] %(message)s"
+
+    # Externally modifiable variables
+    log_to_err_set = LOG_TO_FILE + LOG_TO_STDERR # This can be changed by the calling source in order to customize what targets get error messages
+    log_to_norm = LOG_TO_FILE + LOG_TO_CONSOLE # Can be changed so as to set the target output for normal logging
+    # Warn message goes to both plugin log file and stash when sent to Stash log file.
+    log_to_wrn_set = LOG_TO_STASH # This can be changed by the calling source in order to customize what targets get warning messages
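The printTo constants above form a bitmask that each logging call can combine. A short hypothetical usage sketch (it assumes an initialized StashPluginHelper instance named stash; the Log/Warn/Error methods appear later in this class):

```python
# Hypothetical usage of the printTo bitmask defined above:
stash.Log("goes to plugin log file and console")            # default log_to_norm
stash.Log("goes everywhere", printTo=stash.LOG_TO_ALL)      # file + console + stderr + stash
stash.Error("goes to plugin log file and stderr")           # default log_to_err_set
stash.Warn("goes to the Stash log", printTo=stash.LOG_TO_STASH)
```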
+    def __init__(self,
+                debugTracing = None,            # Set debugTracing to True so as to output debug and trace logging
+                logFormat = LOG_FORMAT,         # Plugin log line format
+                dateFmt = "%y%m%d %H:%M:%S",    # Date format when logging to plugin log file
+                maxbytes = 8*1024*1024,         # Max size of plugin log file
+                backupcount = 2,                # Backup counts when log file size reaches max size
+                logToWrnSet = 0,                # Customize the target output set which will get warning logging
+                logToErrSet = 0,                # Customize the target output set which will get error logging
+                logToNormSet = 0,               # Customize the target output set which will get normal logging
+                logFilePath = "",               # Plugin log file. If empty, the log file name will be set based on the current python file name and path
+                mainScriptName = "",            # The main plugin script file name (full path)
+                pluginID = "",
+                settings = None,                # Default settings for UI fields
+                config = None,                  # From pluginName_config.py or pluginName_setting.py
+                fragmentServer = None,
+                stash_url = None,               # Stash URL (endpoint URL) Example: http://localhost:9999
+                apiKey = None,                  # API Key only needed when username and password are set while running the script via command line
+                DebugTraceFieldName = "zzdebugTracing",
+                DryRunFieldName = "zzdryRun",
+                setStashLoggerAsPluginLogger = False):
+        self.thredPool = concurrent.futures.ThreadPoolExecutor(max_workers=2)
+        if logToWrnSet: self.log_to_wrn_set = logToWrnSet
+        if logToErrSet: self.log_to_err_set = logToErrSet
+        if logToNormSet: self.log_to_norm = logToNormSet
+        if stash_url and len(stash_url): self.STASH_URL = stash_url
+        self.MAIN_SCRIPT_NAME = mainScriptName if mainScriptName != "" else __main__.__file__
+        self.PLUGIN_ID = pluginID if pluginID != "" else pathlib.Path(self.MAIN_SCRIPT_NAME).stem
+        # print(f"self.MAIN_SCRIPT_NAME={self.MAIN_SCRIPT_NAME}, self.PLUGIN_ID={self.PLUGIN_ID}", file=sys.stderr)
+        self.LOG_FILE_NAME = logFilePath if logFilePath != "" else f"{pathlib.Path(self.MAIN_SCRIPT_NAME).resolve().parent}{os.sep}{pathlib.Path(self.MAIN_SCRIPT_NAME).stem}.log"
+        self.LOG_FILE_DIR = pathlib.Path(self.LOG_FILE_NAME).resolve().parent
+        RFH = RotatingFileHandler(
+            filename=self.LOG_FILE_NAME,
+            mode='a',
+            maxBytes=maxbytes,
+            backupCount=backupcount,
+            encoding=None,
+            delay=0
+        )
+        if fragmentServer:
+            self.FRAGMENT_SERVER = fragmentServer
+        else:
+            self.FRAGMENT_SERVER = {'Scheme': 'http', 'Host': '0.0.0.0', 'Port': '9999', 'SessionCookie': {'Name': 'session', 'Value': '', 'Path': '', 'Domain': '', 'Expires': '0001-01-01T00:00:00Z', 'RawExpires': '', 'MaxAge': 0, 'Secure': False, 'HttpOnly': False, 'SameSite': 0, 'Raw': '', 'Unparsed': None}, 'Dir': os.path.dirname(pathlib.Path(self.MAIN_SCRIPT_NAME).resolve().parent), 'PluginDir': pathlib.Path(self.MAIN_SCRIPT_NAME).resolve().parent}
+
+        if debugTracing: self.DEBUG_TRACING = debugTracing
+        if config:
+            self.pluginConfig = config
+            if self.Setting('apiKey', "") != "":
+                self.FRAGMENT_SERVER['ApiKey'] = self.Setting('apiKey')
+
+        if apiKey and apiKey != "":
+            self.FRAGMENT_SERVER['ApiKey'] = apiKey
+
+        if len(sys.argv) > 1:
+            self.RUNNING_IN_COMMAND_LINE_MODE = True
+            if not debugTracing or not stash_url:
+                for argValue in sys.argv[1:]:
+                    if argValue.lower() == "--trace":
+                        self.DEBUG_TRACING = True
+                    elif argValue.lower() == "--dry_run" or argValue.lower() == "--dryrun":
+                        self.DRY_RUN = True
+                    elif ":" in argValue and not self.STASH_URL:
+                        self.STASH_URL = argValue
+            if self.STASH_URL:
+                endpointUrlArr = self.STASH_URL.split(":")
+                if len(endpointUrlArr) == 3:
+                    self.FRAGMENT_SERVER['Scheme'] = endpointUrlArr[0]
+                    self.FRAGMENT_SERVER['Host'] = endpointUrlArr[1][2:]
+                    self.FRAGMENT_SERVER['Port'] = endpointUrlArr[2]
+            super().__init__(self.FRAGMENT_SERVER)
+            self.STASH_INTERFACE_INIT = True
+        else:
+            try:
+                self.STDIN_READ = sys.stdin.read()
+                self.CALLED_AS_STASH_PLUGIN = True
+            except:
+                pass
+        if self.STDIN_READ:
+            self.JSON_INPUT = json.loads(self.STDIN_READ)
+            if "args" in self.JSON_INPUT and "mode" in self.JSON_INPUT["args"]:
+                self.PLUGIN_TASK_NAME = self.JSON_INPUT["args"]["mode"]
+            self.FRAGMENT_SERVER = self.JSON_INPUT["server_connection"]
+            self.STASH_URL = 
f"{self.FRAGMENT_SERVER['Scheme']}://{self.FRAGMENT_SERVER['Host']}:{self.FRAGMENT_SERVER['Port']}" + super().__init__(self.FRAGMENT_SERVER) + self.STASH_INTERFACE_INIT = True + + if self.STASH_URL.startswith("http://0.0.0.0:"): + self.STASH_URL = self.STASH_URL.replace("http://0.0.0.0:", "http://localhost:") + + if self.STASH_INTERFACE_INIT: + self.PLUGIN_CONFIGURATION = self.get_configuration()["plugins"] + self.STASH_CONFIGURATION = self.get_configuration()["general"] + self.STASHPATHSCONFIG = self.STASH_CONFIGURATION['stashes'] + if 'pluginsPath' in self.STASH_CONFIGURATION: + self.PLUGINS_PATH = self.STASH_CONFIGURATION['pluginsPath'] + for item in self.STASHPATHSCONFIG: + self.STASH_PATHS.append(item["path"]) + if settings: + self.pluginSettings = settings + if self.PLUGIN_ID in self.PLUGIN_CONFIGURATION: + self.pluginSettings.update(self.PLUGIN_CONFIGURATION[self.PLUGIN_ID]) + if 'apiKey' in self.STASH_CONFIGURATION: + self.API_KEY = self.STASH_CONFIGURATION['apiKey'] + + self.DRY_RUN = self.Setting(DryRunFieldName, self.DRY_RUN) + self.DEBUG_TRACING = self.Setting(DebugTraceFieldName, self.DEBUG_TRACING) + if self.DEBUG_TRACING: self.LOG_LEVEL = logging.DEBUG + + logging.basicConfig(level=self.LOG_LEVEL, format=logFormat, datefmt=dateFmt, handlers=[RFH]) + self.pluginLog = logging.getLogger(pathlib.Path(self.MAIN_SCRIPT_NAME).stem) + if setStashLoggerAsPluginLogger: + self.log = self.pluginLog + + def __del__(self): + self.thredPool.shutdown(wait=False) + + def Setting(self, name, default=_ARGUMENT_UNSPECIFIED_, raiseEx=True, notEmpty=False): + if self.pluginSettings != None and name in self.pluginSettings: + if notEmpty == False or self.pluginSettings[name] != "": + return self.pluginSettings[name] + if self.pluginConfig != None and name in self.pluginConfig: + if notEmpty == False or self.pluginConfig[name] != "": + return self.pluginConfig[name] + if default == _ARGUMENT_UNSPECIFIED_ and raiseEx: + raise Exception(f"Missing {name} from both UI settings and config file settings.") + return default + + def Log(self, logMsg, printTo = 0, logLevel = logging.INFO, lineNo = -1, levelStr = "", logAlways = False, toAscii = None): + if toAscii or (toAscii == None and (self.encodeToUtf8 or self.convertToAscii)): + logMsg = self.asc2(logMsg) + else: + logMsg = logMsg + if printTo == 0: + printTo = self.log_to_norm + elif printTo == self.LOG_TO_ERROR and logLevel == logging.INFO: + logLevel = logging.ERROR + printTo = self.log_to_err_set + elif printTo == self.LOG_TO_CRITICAL and logLevel == logging.INFO: + logLevel = logging.CRITICAL + printTo = self.log_to_err_set + elif printTo == self.LOG_TO_WARN and logLevel == logging.INFO: + logLevel = logging.WARN + printTo = self.log_to_wrn_set + if lineNo == -1: + lineNo = inspect.currentframe().f_back.f_lineno + LN_Str = f"[LN:{lineNo}]" + # print(f"{LN_Str}, {logAlways}, {self.LOG_LEVEL}, {logging.DEBUG}, {levelStr}, {logMsg}") + if logLevel == logging.DEBUG and (logAlways == False or self.LOG_LEVEL == logging.DEBUG): + if levelStr == "": levelStr = self.LEV_DBG + if printTo & self.LOG_TO_FILE: self.pluginLog.debug(f"{LN_Str} {levelStr}{logMsg}") + if printTo & self.LOG_TO_STASH: self.log.debug(f"{LN_Str} {levelStr}{logMsg}") + elif logLevel == logging.INFO or logLevel == logging.DEBUG: + if levelStr == "": levelStr = self.LEV_INF if logLevel == logging.INFO else self.LEV_DBG + if printTo & self.LOG_TO_FILE: self.pluginLog.info(f"{LN_Str} {levelStr}{logMsg}") + if printTo & self.LOG_TO_STASH: self.log.info(f"{LN_Str} {levelStr}{logMsg}") + elif 
logLevel == logging.WARN: + if levelStr == "": levelStr = self.LEV_WRN + if printTo & self.LOG_TO_FILE: self.pluginLog.warning(f"{LN_Str} {levelStr}{logMsg}") + if printTo & self.LOG_TO_STASH: self.log.warning(f"{LN_Str} {levelStr}{logMsg}") + elif logLevel == logging.ERROR: + if levelStr == "": levelStr = self.LEV_ERR + if printTo & self.LOG_TO_FILE: self.pluginLog.error(f"{LN_Str} {levelStr}{logMsg}") + if printTo & self.LOG_TO_STASH: self.log.error(f"{LN_Str} {levelStr}{logMsg}") + elif logLevel == logging.CRITICAL: + if levelStr == "": levelStr = self.LEV_CRITICAL + if printTo & self.LOG_TO_FILE: self.pluginLog.critical(f"{LN_Str} {levelStr}{logMsg}") + if printTo & self.LOG_TO_STASH: self.log.error(f"{LN_Str} {levelStr}{logMsg}") + if (printTo & self.LOG_TO_CONSOLE) and (logLevel != logging.DEBUG or self.DEBUG_TRACING or logAlways): + print(f"{LN_Str} {levelStr}{logMsg}") + if (printTo & self.LOG_TO_STDERR) and (logLevel != logging.DEBUG or self.DEBUG_TRACING or logAlways): + print(f"StdErr: {LN_Str} {levelStr}{logMsg}", file=sys.stderr) + + def Trace(self, logMsg = "", printTo = 0, logAlways = False, lineNo = -1, toAscii = None): + if printTo == 0: printTo = self.LOG_TO_FILE + if lineNo == -1: + lineNo = inspect.currentframe().f_back.f_lineno + logLev = logging.INFO if logAlways else logging.DEBUG + if self.DEBUG_TRACING or logAlways: + if logMsg == "": + logMsg = f"Line number {lineNo}..." + self.Log(logMsg, printTo, logLev, lineNo, self.LEV_TRACE, logAlways, toAscii=toAscii) + + # Log once per session. Only logs the first time called from a particular line number in the code. + def TraceOnce(self, logMsg = "", printTo = 0, logAlways = False, toAscii = None): + lineNo = inspect.currentframe().f_back.f_lineno + if self.DEBUG_TRACING or logAlways: + FuncAndLineNo = f"{inspect.currentframe().f_back.f_code.co_name}:{lineNo}" + if FuncAndLineNo in self.logLinePreviousHits: + return + self.logLinePreviousHits.append(FuncAndLineNo) + self.Trace(logMsg, printTo, logAlways, lineNo, toAscii=toAscii) + + # Log INFO on first call, then do Trace on remaining calls. 
+ def LogOnce(self, logMsg = "", printTo = 0, logAlways = False, traceOnRemainingCalls = True, toAscii = None): + if printTo == 0: printTo = self.LOG_TO_FILE + lineNo = inspect.currentframe().f_back.f_lineno + FuncAndLineNo = f"{inspect.currentframe().f_back.f_code.co_name}:{lineNo}" + if FuncAndLineNo in self.logLinePreviousHits: + if traceOnRemainingCalls: + self.Trace(logMsg, printTo, logAlways, lineNo, toAscii=toAscii) + else: + self.logLinePreviousHits.append(FuncAndLineNo) + self.Log(logMsg, printTo, logging.INFO, lineNo, toAscii=toAscii) + + def Warn(self, logMsg, printTo = 0, toAscii = None): + if printTo == 0: printTo = self.log_to_wrn_set + lineNo = inspect.currentframe().f_back.f_lineno + self.Log(logMsg, printTo, logging.WARN, lineNo, toAscii=toAscii) + + def Error(self, logMsg, printTo = 0, toAscii = None): + if printTo == 0: printTo = self.log_to_err_set + lineNo = inspect.currentframe().f_back.f_lineno + self.Log(logMsg, printTo, logging.ERROR, lineNo, toAscii=toAscii) + + def Status(self, printTo = 0, logLevel = logging.INFO, lineNo = -1): + if printTo == 0: printTo = self.log_to_norm + if lineNo == -1: + lineNo = inspect.currentframe().f_back.f_lineno + self.Log(f"StashPluginHelper Status: (CALLED_AS_STASH_PLUGIN={self.CALLED_AS_STASH_PLUGIN}), (RUNNING_IN_COMMAND_LINE_MODE={self.RUNNING_IN_COMMAND_LINE_MODE}), (DEBUG_TRACING={self.DEBUG_TRACING}), (DRY_RUN={self.DRY_RUN}), (PLUGIN_ID={self.PLUGIN_ID}), (PLUGIN_TASK_NAME={self.PLUGIN_TASK_NAME}), (STASH_URL={self.STASH_URL}), (MAIN_SCRIPT_NAME={self.MAIN_SCRIPT_NAME})", + printTo, logLevel, lineNo) + + def ExecuteProcess(self, args, ExecDetach=False): + import platform, subprocess + is_windows = any(platform.win32_ver()) + pid = None + self.Trace(f"is_windows={is_windows} args={args}") + if is_windows: + if ExecDetach: + self.Trace("Executing process using Windows DETACHED_PROCESS") + DETACHED_PROCESS = 0x00000008 + pid = subprocess.Popen(args,creationflags=DETACHED_PROCESS, shell=True).pid + else: + pid = subprocess.Popen(args, shell=True).pid + else: + self.Trace("Executing process using normal Popen") + pid = subprocess.Popen(args).pid + self.Trace(f"pid={pid}") + return pid + + def ExecutePythonScript(self, args, ExecDetach=True): + PythonExe = f"{sys.executable}" + argsWithPython = [f"{PythonExe}"] + args + return self.ExecuteProcess(argsWithPython,ExecDetach=ExecDetach) + + def Submit(self, *args, **kwargs): + return self.thredPool.submit(*args, **kwargs) + + def asc2(self, data, convertToAscii=None): + if convertToAscii or (convertToAscii == None and self.convertToAscii): + return ascii(data) + return str(str(data).encode('utf-8'))[2:-1] # This works better for logging than ascii function + # data = str(data).encode('ascii','ignore') # This works better for logging than ascii function + # return str(data)[2:-1] # strip out b'str' + + def init_mergeMetadata(self, excludeMergeTags=None): + self.excludeMergeTags = excludeMergeTags + self._mergeMetadata = mergeMetadata(self, self.excludeMergeTags) + + # Must call init_mergeMetadata, before calling merge_metadata + def merge_metadata(self, SrcData, DestData): # Input arguments can be scene ID or scene metadata + if type(SrcData) is int: + SrcData = self.find_scene(SrcData) + DestData = self.find_scene(DestData) + return self._mergeMetadata.merge(SrcData, DestData) + + def Progress(self, currentIndex, maxCount): + progress = (currentIndex / maxCount) if currentIndex < maxCount else (maxCount / currentIndex) + self.log.progress(progress) + + def run_plugin(self, plugin_id, 
task_mode=None, args:dict={}, asyn=False):
+        """Runs a plugin operation.
+           The operation is run immediately and does not use the job queue.
+        Args:
+            plugin_id (ID):             plugin_id
+            task_mode (str, optional):  Plugin task mode to perform
+            args (dict, optional):      Arguments to pass to plugin. Plugin access via JSON_INPUT['args']
+            asyn (bool, optional):      When True, the call is submitted to the thread pool and this function returns immediately
+        Returns:
+            A map of the result.
+        """
+        query = """mutation RunPluginOperation($plugin_id: ID!, $args: Map!) {
+            runPluginOperation(plugin_id: $plugin_id, args: $args)
+            }"""
+        if task_mode != None:
+            args.update({"mode" : task_mode})
+        variables = {
+            "plugin_id": plugin_id,
+            "args": args,
+        }
+        if asyn:
+            self.Submit(self.call_GQL, query, variables)
+            return f"Made asynchronous call for plugin {plugin_id}"
+        else:
+            return self.call_GQL(query, variables)
+
+    def find_duplicate_scenes_diff(self, distance: PhashDistance=PhashDistance.EXACT, fragment='id', duration_diff: float=10.00 ):
+        query = """
+            query FindDuplicateScenes($distance: Int, $duration_diff: Float) {
+                findDuplicateScenes(distance: $distance, duration_diff: $duration_diff) {
+                    ...SceneSlim
+                }
+            }
+        """
+        if fragment:
+            query = re.sub(r'\.\.\.SceneSlim', fragment, query)
+        else:
+            query += "fragment SceneSlim on Scene { id }"
+
+        variables = { "distance": distance, "duration_diff": duration_diff }
+        result = self.call_GQL(query, variables)
+        return result['findDuplicateScenes']
+
+    # #################################################################################################
+    # The below functions extend class StashInterface with functions which are not yet in the class
+    def get_all_scenes(self):
+        query_all_scenes = """
+            query AllScenes {
+                allScenes {
+                    id
+                    updated_at
+                }
+            }
+        """
+        return self.call_GQL(query_all_scenes)
+
+    def metadata_autotag(self, paths:list=[], performers:list=[], studios:list=[], tags:list=[]):
+        query = """
+        mutation MetadataAutoTag($input:AutoTagMetadataInput!) {
+            metadataAutoTag(input: $input)
+        }
+        """
+        metadata_autotag_input = {
+            "paths":paths,
+            "performers": performers,
+            "studios":studios,
+            "tags":tags,
+        }
+        result = self.call_GQL(query, {"input": metadata_autotag_input})
+        return result
+
+    def backup_database(self):
+        return self.call_GQL("mutation { backupDatabase(input: {download: false})}")
+
+    def optimise_database(self):
+        return self.call_GQL("mutation OptimiseDatabase { optimiseDatabase }")
+
+    def metadata_clean_generated(self, blobFiles=True, dryRun=False, imageThumbnails=True, markers=True, screenshots=True, sprites=True, transcodes=True):
+        query = """
+        mutation MetadataCleanGenerated($input: CleanGeneratedInput!)
{ + metadataCleanGenerated(input: $input) + } + """ + clean_metadata_input = { + "blobFiles": blobFiles, + "dryRun": dryRun, + "imageThumbnails": imageThumbnails, + "markers": markers, + "screenshots": screenshots, + "sprites": sprites, + "transcodes": transcodes, + } + result = self.call_GQL(query, {"input": clean_metadata_input}) + return result + + def rename_generated_files(self): + return self.call_GQL("mutation MigrateHashNaming {migrateHashNaming}") + +class mergeMetadata: # A class to merge scene metadata from source scene to destination scene + srcData = None + destData = None + stash = None + excludeMergeTags = None + dataDict = None + result = "Nothing To Merge" + def __init__(self, stash, excludeMergeTags=None): + self.stash = stash + self.excludeMergeTags = excludeMergeTags + + def merge(self, SrcData, DestData): + self.srcData = SrcData + self.destData = DestData + ORG_DATA_DICT = {'id' : self.destData['id']} + self.dataDict = ORG_DATA_DICT.copy() + self.mergeItems('tags', 'tag_ids', [], excludeName=self.excludeMergeTags) + self.mergeItems('performers', 'performer_ids', []) + self.mergeItems('galleries', 'gallery_ids', []) + self.mergeItems('movies', 'movies', []) + self.mergeItems('urls', listToAdd=self.destData['urls'], NotStartWith=self.stash.STASH_URL) + self.mergeItem('studio', 'studio_id', 'id') + self.mergeItem('title') + self.mergeItem('director') + self.mergeItem('date') + self.mergeItem('details') + self.mergeItem('rating100') + self.mergeItem('code') + if self.dataDict != ORG_DATA_DICT: + self.stash.Trace(f"Updating scene ID({self.destData['id']}) with {self.dataDict}; path={self.destData['files'][0]['path']}", toAscii=True) + self.result = self.stash.update_scene(self.dataDict) + return self.result + + def Nothing(self, Data): + if not Data or Data == "" or (type(Data) is str and Data.strip() == ""): + return True + return False + + def mergeItem(self,fieldName, updateFieldName=None, subField=None): + if updateFieldName == None: + updateFieldName = fieldName + if self.Nothing(self.destData[fieldName]) and not self.Nothing(self.srcData[fieldName]): + if subField == None: + self.dataDict.update({ updateFieldName : self.srcData[fieldName]}) + else: + self.dataDict.update({ updateFieldName : self.srcData[fieldName][subField]}) + def mergeItems(self, fieldName, updateFieldName=None, listToAdd=[], NotStartWith=None, excludeName=None): + dataAdded = "" + for item in self.srcData[fieldName]: + if item not in self.destData[fieldName]: + if NotStartWith == None or not item.startswith(NotStartWith): + if excludeName == None or item['name'] not in excludeName: + if fieldName == 'movies': + listToAdd += [{"movie_id" : item['movie']['id'], "scene_index" : item['scene_index']}] + dataAdded += f"{item['movie']['id']} " + elif updateFieldName == None: + listToAdd += [item] + dataAdded += f"{item} " + else: + listToAdd += [item['id']] + dataAdded += f"{item['id']} " + if dataAdded != "": + if updateFieldName == None: + updateFieldName = fieldName + else: + for item in self.destData[fieldName]: + if fieldName == 'movies': + listToAdd += [{"movie_id" : item['movie']['id'], "scene_index" : item['scene_index']}] + else: + listToAdd += [item['id']] + self.dataDict.update({ updateFieldName : listToAdd}) + # self.stash.Trace(f"Added {fieldName} ({dataAdded}) to scene ID({self.destData['id']})", toAscii=True) diff --git a/plugins/RenameFile/renamefile.py b/plugins/RenameFile/renamefile.py index 884eaa86..4a00d84c 100644 --- a/plugins/RenameFile/renamefile.py +++ 
b/plugins/RenameFile/renamefile.py @@ -2,30 +2,17 @@ # By David Maisonave (aka Axter) Jul-2024 (https://www.axter.com/) # Get the latest developers version from following link: https://github.com/David-Maisonave/Axter-Stash/tree/main/plugins/RenameFile # Based on source code from https://github.com/Serechops/Serechops-Stash/tree/main/plugins/Renamer -import os -import sys -import shutil -import hashlib -import json +import os, sys, shutil, json, requests, hashlib, pathlib, logging from pathlib import Path -import requests -import logging -from logging.handlers import RotatingFileHandler import stashapi.log as log # Importing stashapi.log as log for critical events ONLY from stashapi.stashapp import StashInterface +from StashPluginHelper import StashPluginHelper from renamefile_settings import config # Import settings from renamefile_settings.py # ********************************************************************** # Constant global variables -------------------------------------------- -LOG_FILE_PATH = f"{Path(__file__).resolve().parent}\\{Path(__file__).stem}.log" -FORMAT = "[%(asctime)s - LN:%(lineno)s] %(message)s" DEFAULT_FIELD_KEY_LIST = "title,performers,studio,tags" # Default Field Key List with the desired order -PLUGIN_ID = Path(__file__).stem.lower() DEFAULT_SEPERATOR = "-" -PLUGIN_ARGS = False -PLUGIN_ARGS_MODE = False -WRAPPER_STYLES = config["wrapper_styles"] -POSTFIX_STYLES = config["postfix_styles"] # GraphQL query to fetch all scenes QUERY_ALL_SCENES = """ query AllScenes { @@ -35,32 +22,13 @@ } } """ -RFH = RotatingFileHandler( - filename=LOG_FILE_PATH, - mode='a', - maxBytes=2*1024*1024, # Configure logging for this script with max log file size of 2000K - backupCount=2, - encoding=None, - delay=0 -) - # ********************************************************************** # Global variables -------------------------------------------- inputToUpdateScenePost = False exitMsg = "Change success!!" 
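The QUERY_ALL_SCENES document above is plain GraphQL. A minimal sketch of posting it to Stash's /graphql endpoint with requests (the localhost URL is an assumption; the plugin itself derives the real endpoint from the server_connection fragment, as shown further below):

```python
import requests

# Hypothetical standalone call; requests is already among this file's imports.
response = requests.post(
    "http://localhost:9999/graphql",
    json={"query": QUERY_ALL_SCENES},   # QUERY_ALL_SCENES as defined above
)
scenes = response.json()["data"]["allScenes"]
print(f"Found {len(scenes)} scenes")
```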
-# Configure local log file for plugin within plugin folder having a limited max log file size -logging.basicConfig(level=logging.INFO, format=FORMAT, datefmt="%y%m%d %H:%M:%S", handlers=[RFH]) -logger = logging.getLogger(PLUGIN_ID) - # ********************************************************************** # ---------------------------------------------------------------------- -# Code section to fetch variables from Plugin UI and from renamefile_settings.py -json_input = json.loads(sys.stdin.read()) -FRAGMENT_SERVER = json_input['server_connection'] -stash = StashInterface(FRAGMENT_SERVER) -pluginConfiguration = stash.get_configuration()["plugins"] - settings = { "performerAppend": False, "studioAppend": False, @@ -73,94 +41,62 @@ "zzdebugTracing": False, "zzdryRun": False, } -if PLUGIN_ID in pluginConfiguration: - settings.update(pluginConfiguration[PLUGIN_ID]) +stash = StashPluginHelper( + settings=settings, + config=config, + maxbytes=10*1024*1024, + ) +stash.Status(logLevel=logging.DEBUG) +if stash.PLUGIN_ID in stash.PLUGIN_CONFIGURATION: + stash.pluginSettings.update(stash.PLUGIN_CONFIGURATION[stash.PLUGIN_ID]) # ---------------------------------------------------------------------- -debugTracing = settings["zzdebugTracing"] +WRAPPER_STYLES = config["wrapper_styles"] +POSTFIX_STYLES = config["postfix_styles"] # Extract dry_run setting from settings -dry_run = settings["zzdryRun"] +dry_run = stash.pluginSettings["zzdryRun"] dry_run_prefix = '' try: - PLUGIN_ARGS = json_input['args'] - PLUGIN_ARGS_MODE = json_input['args']["mode"] + if stash.JSON_INPUT['args']['hookContext']['input']: inputToUpdateScenePost = True # This avoids calling rename logic twice except: pass -try: - if json_input['args']['hookContext']['input']: inputToUpdateScenePost = True # This avoids calling rename logic twice -except: - pass -logger.info(f"\nStarting (debugTracing={debugTracing}) (dry_run={dry_run}) (PLUGIN_ARGS_MODE={PLUGIN_ARGS_MODE}) (inputToUpdateScenePost={inputToUpdateScenePost})************************************************") -if debugTracing: logger.info("settings: %s " % (settings,)) - -if PLUGIN_ID in pluginConfiguration: - if debugTracing: logger.info(f"Debug Tracing (pluginConfiguration[PLUGIN_ID]={pluginConfiguration[PLUGIN_ID]})................") - # if 'zmaximumTagKeys' not in pluginConfiguration[PLUGIN_ID]: - # if debugTracing: logger.info("Debug Tracing................") - # try: - # stash.configure_plugin(PLUGIN_ID, settings) - # stash.configure_plugin("renamefile", {"zmaximumTagKeys": 12}) - # except Exception as e: - # logger.error(f"configure_plugin failed!!! 
Error: {e}") - # logger.exception('Got exception on main handler') - # pass - # # stash.configure_plugin(PLUGIN_ID, settings) # , init_defaults=True - # if debugTracing: logger.info("Debug Tracing................") +stash.Trace("settings: %s " % (stash.pluginSettings,)) if dry_run: - logger.info("Dry run mode is enabled.") + stash.Log("Dry run mode is enabled.") dry_run_prefix = "Would've " -if debugTracing: logger.info("Debug Tracing................") -max_tag_keys = settings["zmaximumTagKeys"] if settings["zmaximumTagKeys"] != 0 else 12 # Need this incase use explicitly sets value to zero in UI -if debugTracing: logger.info("Debug Tracing................") +max_tag_keys = stash.pluginSettings["zmaximumTagKeys"] if stash.pluginSettings["zmaximumTagKeys"] != 0 else 12 # Need this incase use explicitly sets value to zero in UI # ToDo: Add split logic here to slpit possible string array into an array exclude_paths = config["pathToExclude"] exclude_paths = exclude_paths.split() -if debugTracing: logger.info(f"Debug Tracing (exclude_paths={exclude_paths})................") +stash.Trace(f"(exclude_paths={exclude_paths})") +excluded_tags = config["excludeTags"] # Extract tag whitelist from settings tag_whitelist = config["tagWhitelist"] -if debugTracing: logger.info("Debug Tracing................") if not tag_whitelist: tag_whitelist = "" -if debugTracing: logger.info(f"Debug Tracing (tag_whitelist={tag_whitelist})................") +stash.Trace(f"(tag_whitelist={tag_whitelist})") -endpointHost = json_input['server_connection']['Host'] +endpointHost = stash.JSON_INPUT['server_connection']['Host'] if endpointHost == "0.0.0.0": endpointHost = "localhost" -endpoint = f"{json_input['server_connection']['Scheme']}://{endpointHost}:{json_input['server_connection']['Port']}/graphql" +endpoint = f"{stash.JSON_INPUT['server_connection']['Scheme']}://{endpointHost}:{stash.JSON_INPUT['server_connection']['Port']}/graphql" -if debugTracing: logger.info(f"Debug Tracing (endpoint={endpoint})................") -# Extract rename_files and move_files settings from renamefile_settings.py -rename_files = config["rename_files"] -move_files = settings["zafileRenameViaMove"] -if debugTracing: logger.info("Debug Tracing................") -fieldKeyList = settings["zfieldKeyList"] # Default Field Key List with the desired order +stash.Trace(f"(endpoint={endpoint})") +move_files = stash.pluginSettings["zafileRenameViaMove"] +fieldKeyList = stash.pluginSettings["zfieldKeyList"] # Default Field Key List with the desired order if not fieldKeyList or fieldKeyList == "": fieldKeyList = DEFAULT_FIELD_KEY_LIST fieldKeyList = fieldKeyList.replace(" ", "") fieldKeyList = fieldKeyList.replace(";", ",") fieldKeyList = fieldKeyList.split(",") -if debugTracing: logger.info(f"Debug Tracing (fieldKeyList={fieldKeyList})................") -separator = settings["zseparators"] +stash.Trace(f"(fieldKeyList={fieldKeyList})") +separator = stash.pluginSettings["zseparators"] # ---------------------------------------------------------------------- # ********************************************************************** double_separator = separator + separator -if debugTracing: logger.info(f"Debug Tracing (PLUGIN_ARGS={PLUGIN_ARGS}) (WRAPPER_STYLES={WRAPPER_STYLES}) (POSTFIX_STYLES={POSTFIX_STYLES})................") -if debugTracing: logger.info(f"Debug Tracing (PLUGIN_ID=\"{PLUGIN_ID}\")................") -if debugTracing: logger.info("Debug Tracing................") - -# Function to make GraphQL requests -def graphql_request(query, 
variables=None): - if debugTracing: logger.info("Debug Tracing................%s", query) - data = {'query': query} - if variables: - data['variables'] = variables - if debugTracing: logger.info("Debug Tracing................") - if debugTracing: logger.info("Debug Tracing................") - response = requests.post(endpoint, json=data) - if debugTracing: logger.info("Debug Tracing................") - return response.json() +stash.Trace(f"(WRAPPER_STYLES={WRAPPER_STYLES}) (POSTFIX_STYLES={POSTFIX_STYLES})") # Function to replace illegal characters in filenames def replace_illegal_characters(filename): @@ -178,12 +114,11 @@ def should_exclude_path(scene_details): # Function to form the new filename based on scene details and user settings def form_filename(original_file_stem, scene_details): - if debugTracing: logger.info("Debug Tracing................") filename_parts = [] tag_keys_added = 0 default_title = '' if_notitle_use_org_filename = config["if_notitle_use_org_filename"] - include_keyField_if_in_name = settings["z_keyFIeldsIncludeInFileName"] + include_keyField_if_in_name = stash.pluginSettings["z_keyFIeldsIncludeInFileName"] if if_notitle_use_org_filename: default_title = original_file_stem # ................... @@ -194,42 +129,39 @@ def form_filename(original_file_stem, scene_details): title = default_title # ................... - if debugTracing: logger.info(f"Debug Tracing (title=\"{title}\")................") + stash.Trace(f"(title=\"{title}\")") # Function to add tag to filename def add_tag(tag_name): nonlocal tag_keys_added nonlocal filename_parts - if debugTracing: logger.info(f"Debug Tracing (tag_name={tag_name})................") + stash.Trace(f"(tag_name={tag_name})") if max_tag_keys == -1 or (max_tag_keys is not None and tag_keys_added >= int(max_tag_keys)): return # Skip adding more tags if the maximum limit is reached - + if tag_name in excluded_tags: + stash.Trace(f"EXCLUDING (tag_name={tag_name})") + return # Check if the tag name is in the whitelist if tag_whitelist == "" or tag_whitelist == None or (tag_whitelist and tag_name in tag_whitelist): if WRAPPER_STYLES.get('tag'): filename_parts.append(f"{WRAPPER_STYLES['tag'][0]}{tag_name}{WRAPPER_STYLES['tag'][1]}") - if debugTracing: logger.info("Debug Tracing................") else: filename_parts.append(tag_name) - if debugTracing: logger.info("Debug Tracing................") tag_keys_added += 1 - if debugTracing: logger.info("Debug Tracing................") else: - logger.info(f"Skipping tag not in whitelist: {tag_name}") - if debugTracing: logger.info(f"Debug Tracing (tag_keys_added={tag_keys_added})................") + stash.Log(f"Skipping tag not in whitelist: {tag_name}") + stash.Trace(f"(tag_keys_added={tag_keys_added})") for key in fieldKeyList: if key == 'studio': - if settings["studioAppend"]: - if debugTracing: logger.info("Debug Tracing................") + if stash.pluginSettings["studioAppend"]: studio_name = scene_details.get('studio', {}) - if debugTracing: logger.info(f"Debug Tracing (studio_name={studio_name})................") + stash.Trace(f"(studio_name={studio_name})") if studio_name: studio_name = scene_details.get('studio', {}).get('name', '') - if debugTracing: logger.info(f"Debug Tracing (studio_name={studio_name})................") + stash.Trace(f"(studio_name={studio_name})") if studio_name: studio_name += POSTFIX_STYLES.get('studio') - if debugTracing: logger.info("Debug Tracing................") if include_keyField_if_in_name or studio_name.lower() not in title.lower(): if 
WRAPPER_STYLES.get('studio'):
                         filename_parts.append(f"{WRAPPER_STYLES['studio'][0]}{studio_name}{WRAPPER_STYLES['studio'][1]}")
@@ -243,26 +175,24 @@ def add_tag(tag_name):
             else:
                 filename_parts.append(title)
         elif key == 'performers':
-            if settings["performerAppend"]:
+            if stash.pluginSettings["performerAppend"]:
                 performers = '-'.join([performer.get('name', '') for performer in scene_details.get('performers', [])])
                 if performers:
                     performers += POSTFIX_STYLES.get('performers')
-                    if debugTracing: logger.info(f"Debug Tracing (include_keyField_if_in_name={include_keyField_if_in_name})................")
+                    stash.Trace(f"(include_keyField_if_in_name={include_keyField_if_in_name})")
                     if include_keyField_if_in_name or performers.lower() not in title.lower():
-                        if debugTracing: logger.info(f"Debug Tracing (performers={performers})................")
+                        stash.Trace(f"(performers={performers})")
                         if WRAPPER_STYLES.get('performers'):
                             filename_parts.append(f"{WRAPPER_STYLES['performers'][0]}{performers}{WRAPPER_STYLES['performers'][1]}")
                         else:
                             filename_parts.append(performers)
         elif key == 'date':
             scene_date = scene_details.get('date', '')
-            if debugTracing: logger.info("Debug Tracing................")
             if scene_date:
                 scene_date += POSTFIX_STYLES.get('date')
-                if debugTracing: logger.info("Debug Tracing................")
                 if WRAPPER_STYLES.get('date'):
-                    filename_parts.append(f"{WRAPPER_STYLES['date'][0]}{scene_date}{WRAPPER_STYLES['date'][1]}")
-                else:
+                    scene_date = f"{WRAPPER_STYLES['date'][0]}{scene_date}{WRAPPER_STYLES['date'][1]}"
+                if scene_date not in title:
                     filename_parts.append(scene_date)
         elif key == 'resolution':
             width = str(scene_details.get('files', [{}])[0].get('width', '')) # Convert width to string
@@ -270,234 +200,90 @@ def add_tag(tag_name):
             if width and height:
                 resolution = width + POSTFIX_STYLES.get('width_height_seperator') + height + POSTFIX_STYLES.get('resolution')
                 if WRAPPER_STYLES.get('resolution'):
-                    filename_parts.append(f"{WRAPPER_STYLES['resolution'][0]}{resolution}{WRAPPER_STYLES['width'][1]}")
-                else:
+                    resolution = f"{WRAPPER_STYLES['resolution'][0]}{resolution}{WRAPPER_STYLES['resolution'][1]}"
+                if resolution not in title:
                    filename_parts.append(resolution)
         elif key == 'width':
             width = str(scene_details.get('files', [{}])[0].get('width', '')) # Convert width to string
             if width:
                 width += POSTFIX_STYLES.get('width')
                 if WRAPPER_STYLES.get('width'):
-                    filename_parts.append(f"{WRAPPER_STYLES['width'][0]}{width}{WRAPPER_STYLES['width'][1]}")
-                else:
+                    width = f"{WRAPPER_STYLES['width'][0]}{width}{WRAPPER_STYLES['width'][1]}"
+                if width not in title:
                     filename_parts.append(width)
         elif key == 'height':
             height = str(scene_details.get('files', [{}])[0].get('height', '')) # Convert height to string
             if height:
                 height += POSTFIX_STYLES.get('height')
                 if WRAPPER_STYLES.get('height'):
-                    filename_parts.append(f"{WRAPPER_STYLES['height'][0]}{height}{WRAPPER_STYLES['height'][1]}")
-                else:
+                    height = f"{WRAPPER_STYLES['height'][0]}{height}{WRAPPER_STYLES['height'][1]}"
+                if height not in title:
                     filename_parts.append(height)
         elif key == 'video_codec':
             video_codec = scene_details.get('files', [{}])[0].get('video_codec', '').upper() # Convert to uppercase
             if video_codec:
                 video_codec += POSTFIX_STYLES.get('video_codec')
                 if WRAPPER_STYLES.get('video_codec'):
-                    filename_parts.append(f"{WRAPPER_STYLES['video_codec'][0]}{video_codec}{WRAPPER_STYLES['video_codec'][1]}")
-                else:
+                    video_codec = f"{WRAPPER_STYLES['video_codec'][0]}{video_codec}{WRAPPER_STYLES['video_codec'][1]}"
+                if video_codec not in title:
                    filename_parts.append(video_codec)
         elif key == 'frame_rate':
             frame_rate = str(scene_details.get('files', [{}])[0].get('frame_rate', '')) + 'FPS'  # Convert to string and append ' FPS'
             if frame_rate:
                 frame_rate += POSTFIX_STYLES.get('frame_rate')
                 if WRAPPER_STYLES.get('frame_rate'):
-                    filename_parts.append(f"{WRAPPER_STYLES['frame_rate'][0]}{frame_rate}{WRAPPER_STYLES['frame_rate'][1]}")
-                else:
+                    frame_rate = f"{WRAPPER_STYLES['frame_rate'][0]}{frame_rate}{WRAPPER_STYLES['frame_rate'][1]}"
+                if frame_rate not in title:
                     filename_parts.append(frame_rate)
         elif key == 'galleries':
             galleries = [gallery.get('title', '') for gallery in scene_details.get('galleries', [])]
-            if debugTracing: logger.info("Debug Tracing................")
             for gallery_name in galleries:
-                if debugTracing: logger.info(f"Debug Tracing (include_keyField_if_in_name={include_keyField_if_in_name}) (gallery_name={gallery_name})................")
+                stash.Trace(f"(include_keyField_if_in_name={include_keyField_if_in_name}) (gallery_name={gallery_name})")
                 if include_keyField_if_in_name or gallery_name.lower() not in title.lower():
                     gallery_name += POSTFIX_STYLES.get('galleries')
                     if WRAPPER_STYLES.get('galleries'):
                         filename_parts.append(f"{WRAPPER_STYLES['galleries'][0]}{gallery_name}{WRAPPER_STYLES['galleries'][1]}")
-                        if debugTracing: logger.info("Debug Tracing................")
                     else:
                         filename_parts.append(gallery_name)
-                        if debugTracing: logger.info("Debug Tracing................")
-                    if debugTracing: logger.info(f"Debug Tracing (gallery_name={gallery_name})................")
-            if debugTracing: logger.info("Debug Tracing................")
+                    stash.Trace(f"(gallery_name={gallery_name})")
         elif key == 'tags':
-            if settings["tagAppend"]:
+            if stash.pluginSettings["tagAppend"]:
                 tags = [tag.get('name', '') for tag in scene_details.get('tags', [])]
-                if debugTracing: logger.info("Debug Tracing................")
                 for tag_name in tags:
-                    if debugTracing: logger.info(f"Debug Tracing (include_keyField_if_in_name={include_keyField_if_in_name}) (tag_name={tag_name})................")
+                    stash.Trace(f"(include_keyField_if_in_name={include_keyField_if_in_name}) (tag_name={tag_name})")
                     if include_keyField_if_in_name or tag_name.lower() not in title.lower():
                         add_tag(tag_name + POSTFIX_STYLES.get('tag'))
-                        if debugTracing: logger.info(f"Debug Tracing (tag_name={tag_name})................")
-                if debugTracing: logger.info("Debug Tracing................")
+                        stash.Trace(f"(tag_name={tag_name})")
 
-    if debugTracing: logger.info(f"Debug Tracing (filename_parts={filename_parts})................")
+    stash.Trace(f"(filename_parts={filename_parts})")
     new_filename = separator.join(filename_parts).replace(double_separator, separator)
-    if debugTracing: logger.info(f"Debug Tracing (new_filename={new_filename})................")
+    stash.Trace(f"(new_filename={new_filename})")
 
     # Check if the scene's path matches any of the excluded paths
     if exclude_paths and should_exclude_path(scene_details):
-        logger.info(f"Scene belongs to an excluded path. Skipping filename modification.")
+        stash.Log(f"Scene belongs to an excluded path. Skipping filename modification.")
         return Path(scene_details['files'][0]['path']).name  # Return the original filename
 
     return replace_illegal_characters(new_filename)
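Note: every key-field branch above now follows the same pattern: apply the postfix, wrap the value if a wrapper style is configured, and append it to filename_parts only when the decorated value is not already present in the title. A minimal, self-contained sketch of that pattern (the style values below are illustrative, not the plugin's defaults):

    # Illustrative sketch of the append-if-not-in-title pattern used above.
    WRAPPER_STYLES = {'resolution': ('[', ']')}  # assumed example style
    POSTFIX_STYLES = {'resolution': 'P'}         # assumed example postfix

    def append_part(key, value, title, filename_parts):
        value += POSTFIX_STYLES.get(key, '')
        if WRAPPER_STYLES.get(key):
            value = f"{WRAPPER_STYLES[key][0]}{value}{WRAPPER_STYLES[key][1]}"
        if value not in title:  # skip values the title already contains
            filename_parts.append(value)

    parts = []
    append_part('resolution', '1920x1080', 'Some Title', parts)
    print(parts)  # -> ['[1920x1080P]']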
-def find_scene_by_id(scene_id):
-    query_find_scene = """
-    query FindScene($scene_id: ID!) {
-        findScene(id: $scene_id) {
-            id
-            title
-            date
-            files {
-                path
-                width
-                height
-                video_codec
-                frame_rate
-            }
-            galleries {
-                title
-            }
-            studio {
-                name
-            }
-            performers {
-                name
-            }
-            tags {
-                name
-            }
-        }
-    }
-"""
-    scene_result = graphql_request(query_find_scene, variables={"scene_id": scene_id})
-    return scene_result.get('data', {}).get('findScene')
-
-def move_or_rename_files(scene_details, new_filename, original_parent_directory):
+def rename_scene(scene_id):
     global exitMsg
-    studio_directory = None
-    for file_info in scene_details['files']:
-        path = file_info['path']
-        original_path = Path(path)
-
-        # Check if the file's path matches any of the excluded paths
-        if exclude_paths and any(original_path.match(exclude_path) for exclude_path in exclude_paths):
-            logger.info(f"File {path} belongs to an excluded path. Skipping modification.")
-            continue
-
-        new_path = original_parent_directory if not move_files else original_parent_directory / scene_details['studio']['name']
-        if rename_files:
-            new_path = new_path / (new_filename + original_path.suffix)
-        try:
-            if move_files:
-                if studio_directory is None:
-                    studio_directory = original_parent_directory / scene_details['studio']['name']
-                    studio_directory.mkdir(parents=True, exist_ok=True)
-                if rename_files:  # Check if rename_files is True
-                    if not dry_run:
-                        shutil.move(original_path, new_path)
-                    logger.info(f"{dry_run_prefix}Moved and renamed file: {path} -> {new_path}")
-                else:
-                    if not dry_run:
-                        shutil.move(original_path, new_path)
-                    logger.info(f"{dry_run_prefix}Moved file: {path} -> {new_path}")
-            else:
-                if rename_files:  # Check if rename_files is True
-                    if not dry_run:
-                        original_path.rename(new_path)
-                    logger.info(f"{dry_run_prefix}Renamed file: {path} -> {new_path}")
-                else:
-                    if not dry_run:
-                        shutil.move(original_path, new_path)
-                    logger.info(f"{dry_run_prefix}Moved file: {path} -> {new_path}")
-        except FileNotFoundError:
-            log.error(f"File not found: {path}. Skipping...")
-            logger.error(f"File not found: {path}. Skipping...")
-            exitMsg = "File not found"
-            continue
-        except OSError as e:
-            log.error(f"Failed to move or rename file: {path}. Error: {e}")
-            logger.error(f"Failed to move or rename file: {path}. Error: {e}")
Error: {e}") - exitMsg = "Failed to move or rename file" - continue - return new_path # Return the new_path variable after the loop - -def perform_metadata_scan(metadata_scan_path): - metadata_scan_path_windows = metadata_scan_path.resolve().as_posix() - mutation_metadata_scan = """ - mutation { - metadataScan(input: { paths: "%s" }) - } - """ % metadata_scan_path_windows - if debugTracing: - logger.info(f"Attempting metadata scan mutation with path: {metadata_scan_path_windows}") - logger.info(f"Mutation string: {mutation_metadata_scan}") - graphql_request(mutation_metadata_scan) - -def rename_scene(scene_id, stash_directory): - global exitMsg - scene_details = find_scene_by_id(scene_id) - if debugTracing: logger.info(f"Debug Tracing (scene_details={scene_details})................") + scene_details = stash.find_scene(scene_id) + stash.Trace(f"(scene_details1={scene_details})") if not scene_details: - log.error(f"Scene with ID {scene_id} not found.") - logger.error(f"Scene with ID {scene_id} not found.") - return - - if debugTracing: logger.info(f"Debug Tracing................") - + stash.Error(f"Scene with ID {scene_id} not found.") + return None original_file_path = scene_details['files'][0]['path'] original_parent_directory = Path(original_file_path).parent - if debugTracing: logger.info(f"Debug Tracing (original_file_path={original_file_path})................") - + stash.Trace(f"(original_file_path={original_file_path})") # Check if the scene's path matches any of the excluded paths if exclude_paths and any(Path(original_file_path).match(exclude_path) for exclude_path in exclude_paths): - logger.info(f"Scene with ID {scene_id} belongs to an excluded path. Skipping modifications.") - return - - if debugTracing: logger.info(f"Debug Tracing................") - original_path_info = {'original_file_path': original_file_path, - 'original_parent_directory': original_parent_directory} - - new_path_info = None - - original_file_stem = Path(original_file_path).stem - original_file_name = Path(original_file_path).name - new_filename = form_filename(original_file_stem, scene_details) - newFilenameWithExt = new_filename + Path(original_file_path).suffix - if debugTracing: logger.info(f"Debug Tracing (original_file_name={original_file_name})(newFilenameWithExt={newFilenameWithExt})................") - if original_file_name == newFilenameWithExt: - logger.info(f"Nothing to do, because new file name matches original file name: (newFilenameWithExt={newFilenameWithExt})") - return - if debugTracing: logger.info(f"Debug Tracing................") - - if rename_files: - new_path = original_parent_directory / (newFilenameWithExt) - new_path_info = {'new_file_path': new_path} - if debugTracing: logger.info(f"{dry_run_prefix}New filename: {new_path}") - - if move_files and original_parent_directory.name != scene_details['studio']['name']: - new_path = original_parent_directory / scene_details['studio']['name'] / (new_filename + Path(original_file_path).suffix) - new_path_info = {'new_file_path': new_path} - move_or_rename_files(scene_details, new_filename, original_parent_directory) - logger.info(f"{dry_run_prefix}Moved to directory: '{new_path}'") - - # If rename_files is True, attempt renaming even if move_files is False - if rename_files: - new_file_path = original_parent_directory / (new_filename + Path(original_file_name).suffix) - if original_file_name != new_filename: - try: - if not dry_run: - os.rename(original_file_path, new_file_path) - logger.info(f"{dry_run_prefix}Renamed file: 
-            except Exception as e:
-                exitMsg = "Failed to rename file"
-                log.error(f"Failed to rename file: {original_file_path}. Error: {e}")
-                logger.error(f"Failed to rename file: {original_file_path}. Error: {e}")
-
-    metadata_scan_path = original_parent_directory
-    perform_metadata_scan(metadata_scan_path)
+        stash.Log(f"Scene with ID {scene_id} belongs to an excluded path. Skipping modifications.")
+        return None
+    original_file_stem = Path(original_file_path).stem
+    original_file_name = Path(original_file_path).name
+    new_filename = form_filename(original_file_stem, scene_details)
     max_filename_length = int(config["max_filename_length"])
     if len(new_filename) > max_filename_length:
         extension_length = len(Path(original_file_path).suffix)
@@ -505,61 +291,61 @@ def rename_scene(scene_id, stash_directory):
         truncated_filename = new_filename[:max_base_filename_length]
         hash_suffix = hashlib.md5(new_filename.encode()).hexdigest()
         new_filename = truncated_filename + '_' + hash_suffix + Path(original_file_path).suffix
+    newFilenameWithExt = new_filename + Path(original_file_path).suffix
+    new_file_path = f"{original_parent_directory}{os.sep}{new_filename}{Path(original_file_name).suffix}"
+    stash.Trace(f"(original_file_name={original_file_name})(new_file_path={new_file_path})")
+    if original_file_name == newFilenameWithExt or original_file_name == new_filename:
+        stash.Log(f"Nothing to do, because new file name matches original file name: (newFilenameWithExt={newFilenameWithExt})")
+        return None
+    targetDidExist = os.path.isfile(new_file_path)  # Snapshot before the move/rename attempt
+    try:
+        if move_files:
+            if not dry_run:
+                shutil.move(original_file_path, new_file_path)
+            exitMsg = f"{dry_run_prefix}Moved file to '{new_file_path}' from '{original_file_path}'"
+        else:
+            if not dry_run:
+                os.rename(original_file_path, new_file_path)
+            exitMsg = f"{dry_run_prefix}Renamed file to '{new_file_path}' from '{original_file_path}'"
+    except OSError as e:
Error: {e}" + stash.Error(exitMsg) + if not targetDidExist and os.path.isfile(new_file_path): + if os.path.isfile(original_file_path): + os.remove(original_file_path) + pass + else: + raise - if debugTracing: logger.info(f"Debug Tracing (exitMsg={exitMsg})................") - return new_filename, original_path_info, new_path_info + stash.metadata_scan(paths=[original_parent_directory.resolve().as_posix()]) + stash.Log(exitMsg) + return new_filename -# Main default function for rename scene def rename_files_task(): - if debugTracing: logger.info("Debug Tracing................") - # Execute the GraphQL query to fetch all scenes - scene_result = graphql_request(QUERY_ALL_SCENES) - if debugTracing: logger.info("Debug Tracing................") - all_scenes = scene_result.get('data', {}).get('allScenes', []) - if debugTracing: logger.info("Debug Tracing................") + scene_result = stash.get_all_scenes() + all_scenes = scene_result['allScenes'] if not all_scenes: - if debugTracing: logger.info("Debug Tracing................") - log.error("No scenes found.") - logger.error("No scenes found.") + stash.Error("No scenes found.") exit() - if debugTracing: logger.info("Debug Tracing................") - # Find the scene with the latest updated_at timestamp latest_scene = max(all_scenes, key=lambda scene: scene['updated_at']) - # Extract the ID of the latest scene latest_scene_id = latest_scene.get('id') - - # Read stash directory from renamefile_settings.py - stash_directory = config.get('stash_directory', '') - if debugTracing: logger.info("Debug Tracing................") - # Rename the latest scene and trigger metadata scan - new_filename = rename_scene(latest_scene_id, stash_directory) - if debugTracing: logger.info(f"Debug Tracing (exitMsg={exitMsg})................") - + new_filename = rename_scene(latest_scene_id) # Log dry run state and indicate if no changes were made if dry_run: - log.info("Dry run: Script executed in dry run mode. No changes were made.") - logger.info("Dry run: Script executed in dry run mode. No changes were made.") + stash.Log("Dry run: Script executed in dry run mode. No changes were made.") elif not new_filename: - logger.info("No changes were made.") - else: - logger.info(f"{exitMsg}") - return - -def fetch_dup_filename_tags(): # Place holder for new implementation + stash.Log("No changes were made.") return -if PLUGIN_ARGS_MODE == "fetch_dup_filename_tags": - fetch_dup_filename_tags() -elif PLUGIN_ARGS_MODE == "rename_files_task": +if stash.PLUGIN_TASK_NAME == "rename_files_task": rename_files_task() elif inputToUpdateScenePost: rename_files_task() -if debugTracing: logger.info("\n*********************************\nEXITING ***********************\n*********************************") +stash.Trace("\n*********************************\nEXITING ***********************\n*********************************") # ToDo: Wish List - # Add logic to update Sqlite DB on file name change, instead of perform_metadata_scan. # Add code to get tags from duplicate filenames \ No newline at end of file diff --git a/plugins/RenameFile/renamefile.yml b/plugins/RenameFile/renamefile.yml index 20778b34..d2bcf1a3 100644 --- a/plugins/RenameFile/renamefile.yml +++ b/plugins/RenameFile/renamefile.yml @@ -1,6 +1,6 @@ name: RenameFile description: Renames video (scene) file names when the user edits the [Title] field located in the scene [Edit] tab. 
diff --git a/plugins/RenameFile/renamefile.yml b/plugins/RenameFile/renamefile.yml
index 20778b34..d2bcf1a3 100644
--- a/plugins/RenameFile/renamefile.yml
+++ b/plugins/RenameFile/renamefile.yml
@@ -1,6 +1,6 @@
 name: RenameFile
 description: Renames video (scene) file names when the user edits the [Title] field located in the scene [Edit] tab. 
-version: 0.4.1
+version: 0.4.6
 url: https://github.com/David-Maisonave/Axter-Stash/tree/main/plugins/RenameFile
 settings:
   performerAppend:
@@ -20,8 +20,8 @@ settings:
     description: Enable to append performer, tags, studios, & galleries even if name already exists in the original file name.
     type: BOOLEAN
   zafileRenameViaMove:
-    displayName: Rename Using Move
-    description: Enable to have file moved when renaming file.
+    displayName: Move Instead of Rename
+    description: Enable to move the file instead of renaming it. (Not recommended on Windows)
     type: BOOLEAN
   zfieldKeyList:
     displayName: Key Fields
diff --git a/plugins/RenameFile/renamefile_settings.py b/plugins/RenameFile/renamefile_settings.py
index 24052f8a..a84aef41 100644
--- a/plugins/RenameFile/renamefile_settings.py
+++ b/plugins/RenameFile/renamefile_settings.py
@@ -37,12 +37,12 @@
         "frame_rate": 'FR',
         "date": '',
     },
+    # Add tags to exclude from RenameFile.
+    "excludeTags": ["DuplicateMarkForDeletion", "DuplicateMarkForSwap", "DuplicateWhitelistFile", "_DuplicateMarkForDeletion", "_DuplicateMarkForSwap", "_DuplicateWhitelistFile"],
     # Add path(s) to exclude from RenameFile. Example Usage: r"/path/to/exclude1" When entering multiple paths, use space. Example: r"/path_1_to/exclude" r"/someOtherPath2Exclude" r"/yetAnotherPath"
     "pathToExclude": "",
     # Define a whitelist of allowed tags or EMPTY to allow all tags. Example Usage: "tag1", "tag2", "tag3"
     "tagWhitelist": "",
-    # Define whether files should be renamed when moved
-    "rename_files": True,
     # Define whether the original file name should be used if title is empty
     "if_notitle_use_org_filename": True, # Warning: Do not recommend setting this to False.
     # Current Stash DB schema only allows maximum base file name length to be 255
diff --git a/plugins/RenameFile/requirements.txt b/plugins/RenameFile/requirements.txt
index 14af1d68..d4e029a9 100644
--- a/plugins/RenameFile/requirements.txt
+++ b/plugins/RenameFile/requirements.txt
@@ -1,2 +1,3 @@
-stashapp-tools >= 0.2.49
+stashapp-tools >= 0.2.50
+pyYAML
 requests
\ No newline at end of file
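The new excludeTags setting above pairs with the add_tag change in renamefile.py: tags on the exclusion list are skipped before the whitelist check, which keeps DupFileManager's marker tags (DuplicateMarkForDeletion and friends) out of generated filenames. A rough sketch of that filtering step (excluded_tags is assumed to be loaded from the "excludeTags" entry):

    excluded_tags = ["DuplicateMarkForDeletion", "DuplicateMarkForSwap"]  # subset, for illustration

    def filter_tags(tag_names):
        # Drop marker tags before they reach the filename builder.
        return [t for t in tag_names if t not in excluded_tags]

    print(filter_tags(["4K", "DuplicateMarkForSwap"]))  # -> ['4K']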