",
+ # HTML report table header
+ "htmlReportTableHeader" : "",
+ # HTML report table data
+ "htmlReportTableData" : " | ",
+ # HTML report video preview
+ "htmlReportVideoPreview" : "width='160' height='120' controls", # Alternative option "autoplay loop controls" or "autoplay controls"
+ # The number off seconds in time difference for supper highlight on htmlReport
+ "htmlHighlightTimeDiff" : 3,
+ # Super highlight for details with higher resolution or duration
+ "htmlSupperHighlight" : "yellow",
+ # Lower highlight for details with slightly higher duration
+ "htmlLowerHighlight" : "nyanza",
+ # Text color for details with different resolution, duration, size, bitrate, codec, or framerate
+ "htmlDetailDiffTextColor" : "red",
+ # If enabled, create an HTML report when tagging duplicate files
+ "createHtmlReport" : True,
+ # If enabled, report displays stream instead of preview for video
+ "streamOverPreview" : False, # This option works in Chrome, but does not work very well on firefox.
+}
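+# Example (a minimal sketch using the alternative value noted in the comment above):
+# to autoplay looped previews in the report, set
+#   "htmlReportVideoPreview" : "autoplay loop controls"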
diff --git a/plugins/DupFileManager/ModulesValidate.py b/plugins/DupFileManager/ModulesValidate.py
new file mode 100644
index 00000000..4de2f3a4
--- /dev/null
+++ b/plugins/DupFileManager/ModulesValidate.py
@@ -0,0 +1,126 @@
+# ModulesValidate (By David Maisonave aka Axter)
+# Description:
+# Checks if packages are installed, and optionally installs packages if missing.
+# The example usage code below should be placed at the very top of the source code, before any other imports.
+# Example Usage:
+# import ModulesValidate
+# ModulesValidate.modulesInstalled(["watchdog", "schedule", "requests"])
+# Testing:
+# To test, uninstall packages via command line: pip uninstall -y watchdog schedule requests
+import sys, os, pathlib, platform, traceback
+# ToDo: Add logic to optionally pull package requirements from requirements.txt file.
+
+def modulesInstalled(moduleNames, install=True, silent=False):
+ returnValue = True
+ for moduleName in moduleNames:
+ try: # Try the Python >= 3.3 way (importlib)
+ import importlib
+ import importlib.util
+ if moduleName in sys.modules:
+ if not silent: print(f"{moduleName!r} already in sys.modules")
+ elif isModuleInstalled(moduleName):
+ if not silent: print(f"Module {moduleName!r} is available.")
+ else:
+ if install and (results:=installModule(moduleName)) > 0:
+ if results == 1:
+ print(f"Module {moduleName!r} has been installed")
+ else:
+ if not silent: print(f"Module {moduleName!r} is already installed")
+ continue
+ else:
+ if install:
+ print(f"Can't find the {moduleName!r} module")
+ returnValue = False
+ except Exception as e:
+ try:
+ i = importlib.import_module(moduleName)
+ except ImportError as e:
+ if install and (results:=installModule(moduleName)) > 0:
+ if results == 1:
+ print(f"Module {moduleName!r} has been installed")
+ else:
+ if not silent: print(f"Module {moduleName!r} is already installed")
+ continue
+ else:
+ if install:
+ tb = traceback.format_exc()
+ print(f"Can't find the {moduleName!r} module! Error: {e}\nTraceBack={tb}")
+ returnValue = False
+ return returnValue
+
+def isModuleInstalled(moduleName):
+ try:
+ __import__(moduleName)
+ return True
+ except Exception as e:
+ pass
+ return False
+
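+# Return codes for installModule (inferred from the code below): 1 = newly installed,
+# 2 = requirement already satisfied, 0 = install failed, -1 = pip unavailable on Linux.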
+def installModule(moduleName):
+ try:
+ if isLinux():
+ # Note: Linux may first need : sudo apt install python3-pip
+ # if error starts with "Command 'pip' not found"
+ # or includes "No module named pip"
+ results = os.popen(f"pip --disable-pip-version-check --version").read()
+ if results.find("Command 'pip' not found") != -1 or results.find("No module named pip") != -1:
+ results = os.popen(f"sudo apt install python3-pip").read()
+ results = os.popen(f"pip --disable-pip-version-check --version").read()
+ if results.find("Command 'pip' not found") != -1 or results.find("No module named pip") != -1:
+ return -1
+ if isFreeBSD():
+ print("Warning: installModule may NOT work on freebsd")
+ pipArg = " --disable-pip-version-check"
+ if isDocker():
+ pipArg += " --break-system-packages"
+ results = os.popen(f"{sys.executable} -m pip install {moduleName}{pipArg}").read() # May need to be f"{sys.executable} -m pip install {moduleName}"
+ results = results.strip("\n")
+ if results.find("Requirement already satisfied:") > -1:
+ return 2
+ elif results.find("Successfully installed") > -1:
+ return 1
+ elif modulesInstalled(moduleNames=[moduleName], install=False):
+ return 1
+ except Exception as e:
+ pass
+ return 0
+
+def installPackage(package): # Should delete this. It doesn't work consistently
+ try:
+ import pip
+ if hasattr(pip, 'main'):
+ pip.main(['install', package])
+ else:
+ pip._internal.main(['install', package])
+ except Exception as e:
+ return False
+ return True
+
+def isDocker():
+ cgroup = pathlib.Path('/proc/self/cgroup')
+ return pathlib.Path('/.dockerenv').is_file() or (cgroup.is_file() and 'docker' in cgroup.read_text())
+
+def isWindows():
+ if any(platform.win32_ver()):
+ return True
+ return False
+
+def isLinux():
+ if platform.system().lower().startswith("linux"):
+ return True
+ return False
+
+def isFreeBSD():
+ if platform.system().lower().startswith("freebsd"):
+ return True
+ return False
+
+def isMacOS():
+ if sys.platform == "darwin":
+ return True
+ return False
+
diff --git a/plugins/DupFileManager/README.md b/plugins/DupFileManager/README.md
index 7d0cf052..4e76a7f0 100644
--- a/plugins/DupFileManager/README.md
+++ b/plugins/DupFileManager/README.md
@@ -1,11 +1,40 @@
-# DupFileManager: Ver 0.1.2 (By David Maisonave)
+# DupFileManager: Ver 0.1.9 (By David Maisonave)
-DupFileManager is a [Stash](https://github.com/stashapp/stash) plugin which manages duplicate file in the Stash system.
+DupFileManager is a [Stash](https://github.com/stashapp/stash) plugin which manages duplicate files in the Stash system.
+It has both **task** and **tools-UI** components.
### Features
+- Creates a duplicate file report which can be accessed from the Settings->Tools menu options. The report is created as an HTML file and stored locally under plugins\DupFileManager\report\DuplicateTagScenes.html.
+ - See screenshot at the bottom of this page for example report.
+ - Items on the left side of the report are the primary duplicates designated for deletion. By default, these duplicates are given a special _duplicate tag.
+ - Items on the right side of the report are the duplicates designated to keep. They usually have higher resolution, longer duration, and/or preferred paths.
+ - The report has the following options:
+ - Delete: Delete file and remove from Stash library.
+ - Remove: Remove from Stash library.
+ - Rename: Rename file.
+ - Copy: Copy file from left (source) to right (to-keep).
+ - Move: Copy file and metadata left to right.
+ - Cpy-Name: Copy file name left to right.
+ - Add-Exclude: Add exclude tag to scene, so that the scene is excluded from deletion.
+ - Remove-Tag: Remove duplicate tag from scene.
+ - Flag-Scene: Flag (mark) scene in report as reviewed (or as requiring further review). Optional flags (yellow, green, orange, cyan, pink, red, strike-through, & disable-scene)
+ - Merge: Copy metadata (tags, performers, & studios) from left to right.
- Can merge potential source in the duplicate file names for tag names, performers, and studios.
- Normally when Stash searches the file name for tag names, performers, and studios, it only does so using the primary file.
+- Advanced menu (for specially tagged duplicates)
+ ![Screenshot 2024-11-22 145139](https://github.com/user-attachments/assets/d76646f0-c5a8-4069-ad0f-a6e5e96e7ed0)
+ - Delete only specially tagged duplicates in blacklist path.
+ - Delete duplicates with specified file path.
+ - Delete duplicates with a specific string in the file name.
+ - Delete duplicates with specified file size range.
+ - Delete duplicates with specified duration range.
+ - Delete duplicates with specified resolution range.
+ - Delete duplicates having specified tags.
+ - Delete duplicates with specified rating.
+ - Delete duplicates with any combination of the above.
+- Bottom extended portion of the Advanced Menu screen.
+ - ![Screenshot 2024-11-22 232005](https://github.com/user-attachments/assets/9a0d2e9d-783b-4ea2-8fa5-3805b40af4eb)
- Delete duplicate file task with the following options:
- Tasks (Settings->Task->[Plugin Tasks]->DupFileManager)
- **Tag Duplicates** - Set tag DuplicateMarkForDeletion to the duplicates with lower resolution, duration, file name length, and/or black list path.
@@ -28,12 +57,14 @@ DupFileManager is a [Stash](https://github.com/stashapp/stash) plugin which mana
- **dup_path** - Alternate path to move deleted files to. Example: "C:\TempDeleteFolder"
- **toRecycleBeforeSwap** - When enabled, moves destination file to recycle bin before swapping files.
- **addPrimaryDupPathToDetails** - If enabled, adds the primary duplicate path to the scene detail.
-
+- Tools UI Menu
+![Screenshot 2024-11-22 145512](https://github.com/user-attachments/assets/03e166eb-ddaa-4eb8-8160-4c9180ca1323)
+ - Can access either **Duplicate File Report (DupFileManager)** or **DupFileManager Tools and Utilities** menu options.
### Requirements
-`pip install --upgrade stashapp-tools`
-`pip install pyYAML`
-`pip install Send2Trash`
+- `pip install --upgrade stashapp-tools`
+- `pip install requests`
+- `pip install Send2Trash`
### Installation
@@ -48,3 +79,33 @@ That's it!!!
- Options are accessible in the GUI via Settings->Plugins->Plugins->[DupFileManager].
- More options available in DupFileManager_config.py.
+
+### Screenshots
+
+- Example DupFileManager duplicate report. (file names have been edited to PG).
+ - The report displays playable preview videos, which play a few-second sample of each video. This requires the scan setting **[Generate animated image previews]** to be enabled when scanning all files.
+ - ![Screenshot 2024-11-22 225359](https://github.com/user-attachments/assets/dc705b24-e2d7-4663-92fd-1516aa7aacf5)
+ - If there's a scene on the left side that has a higher resolution or duration, it gets a yellow highlight on the report.
+ - There's an optional setting that allows both preview videos and preview images to be displayed on the report. See setting **htmlIncludeImagePreview** in the **DupFileManager_report_config.py** file.
+ - There are many more options available for how the report is created, targeted at more advanced users. They are all in the **DupFileManager_report_config.py** file (in the DupFileManager plugin folder), each with a commented description preceding it.
+- Tools UI Menu
+![Screenshot 2024-11-22 145512](https://github.com/user-attachments/assets/03e166eb-ddaa-4eb8-8160-4c9180ca1323)
+ - Can access either **Duplicate File Report (DupFileManager)** or **DupFileManager Tools and Utilities** menu options.
+- DupFileManager Report Menu
+ - ![Screenshot 2024-11-22 151630](https://github.com/user-attachments/assets/834ee60f-1a4a-4a3e-bbf7-23aeca2bda1f)
+- DupFileManager Tools and Utilities
+ - ![Screenshot 2024-11-22 152023](https://github.com/user-attachments/assets/4daaea9e-f603-4619-b536-e6609135bab1)
+- Full bottom extended portion of the Advanced Menu screen.
+ - ![Screenshot 2024-11-22 232208](https://github.com/user-attachments/assets/bf1f3021-3a8c-4875-9737-60ee3d7fe675)
+
+### Future Planned Features
+- Currently, the report and advanced menu do not work with Stash setups that require a password. Additional logic will be added to have them use the API Key. Planned for 1.0.0 Version.
+- Add an advanced menu that will work with non-tagged reports. It will iterate through the existing report file(s) to apply deletions, instead of searching the Stash DB for tagged files. Planned for 1.1.0 Version.
+- Greylist deletion option will be added to the advanced menu. Planned for 1.0.5 Version.
+- Add advanced menu directly to the Settings->Tools menu. Planned for 1.5.0 Version.
+- Add report directly to the Settings->Tools menu. Planned for 1.5.0 Version.
+- Remove all flags from all scenes option. Planned for 1.0.5 Version.
+- Transfer option settings **[Disable Complete Confirmation]** and **[Disable Delete Confirmation]** when paginating. Planned for 1.0.5 Version.
+
diff --git a/plugins/DupFileManager/StashPluginHelper.py b/plugins/DupFileManager/StashPluginHelper.py
index 6f0d3d15..a9be414e 100644
--- a/plugins/DupFileManager/StashPluginHelper.py
+++ b/plugins/DupFileManager/StashPluginHelper.py
@@ -1,12 +1,3 @@
-from stashapi.stashapp import StashInterface
-from logging.handlers import RotatingFileHandler
-import re, inspect, sys, os, pathlib, logging, json
-import concurrent.futures
-from stashapi.stash_types import PhashDistance
-import __main__
-
-_ARGUMENT_UNSPECIFIED_ = "_ARGUMENT_UNSPECIFIED_"
-
# StashPluginHelper (By David Maisonave aka Axter)
# See end of this file for example usage
# Log Features:
@@ -24,6 +15,14 @@
# Gets DEBUG_TRACING value from command line argument and/or from UI and/or from config file
# Sets RUNNING_IN_COMMAND_LINE_MODE to True if detects multiple arguments
# Sets CALLED_AS_STASH_PLUGIN to True if it's able to read from STDIN_READ
+from stashapi.stashapp import StashInterface
+from logging.handlers import RotatingFileHandler
+import re, inspect, sys, os, pathlib, logging, json, platform, subprocess, traceback, time
+import concurrent.futures
+from stashapi.stash_types import PhashDistance
+from enum import Enum, IntEnum
+import __main__
+
class StashPluginHelper(StashInterface):
# Primary Members for external reference
PLUGIN_TASK_NAME = None
@@ -45,15 +44,44 @@ class StashPluginHelper(StashInterface):
API_KEY = None
excludeMergeTags = None
+ # class EnumInt(IntEnum):
+ # def __repr__(self) -> str:
+ # return f"{self.__class__.__name__}.{self.name}"
+ # def __str__(self) -> str:
+ # return str(self.value)
+ # def serialize(self):
+ # return self.value
+
+ class EnumValue(Enum):
+ def __repr__(self) -> str:
+ return f"{self.__class__.__name__}.{self.name}"
+ def __str__(self) -> str:
+ return str(self.value)
+ def __add__(self, other):
+ return self.value + other.value
+ def serialize(self):
+ return self.value
+
# printTo argument
- LOG_TO_FILE = 1
- LOG_TO_CONSOLE = 2 # Note: Only see output when running in command line mode. In plugin mode, this output is lost.
- LOG_TO_STDERR = 4 # Note: In plugin mode, output to StdErr ALWAYS gets sent to stash logging as an error.
- LOG_TO_STASH = 8
- LOG_TO_WARN = 16
- LOG_TO_ERROR = 32
- LOG_TO_CRITICAL = 64
- LOG_TO_ALL = LOG_TO_FILE + LOG_TO_CONSOLE + LOG_TO_STDERR + LOG_TO_STASH
+ class LogTo(IntEnum):
+ FILE = 1
+ CONSOLE = 2 # Note: Only see output when running in command line mode. In plugin mode, this output is lost.
+ STDERR = 4 # Note: In plugin mode, output to StdErr ALWAYS gets sent to stash logging as an error.
+ STASH = 8
+ WARN = 16
+ ERROR = 32
+ CRITICAL = 64
+ ALL = FILE + CONSOLE + STDERR + STASH
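+        # Example (assumed usage): printTo = LogTo.FILE + LogTo.STASH sends output to both
+        # the plugin log file and the Stash log; members combine as bit flags in Log().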
+
+ class DbgLevel(IntEnum):
+ TRACE = 1
+ DBG = 2
+ INF = 3
+ WRN = 4
+ ERR = 5
+ CRITICAL = 6
+
+ DBG_LEVEL = DbgLevel.INF
# Misc class variables
MAIN_SCRIPT_NAME = None
@@ -61,6 +89,25 @@ class StashPluginHelper(StashInterface):
LOG_FILE_DIR = None
LOG_FILE_NAME = None
STDIN_READ = None
+ stopProcessBarSpin = True
+ updateProgressbarOnIter = 0
+ currentProgressbarIteration = 0
+
+ class OS_Type(IntEnum):
+ WINDOWS = 1
+ LINUX = 2
+ MAC_OS = 3
+ FREEBSD = 4
+ UNKNOWN_OS = 5
+
+ OS_TYPE = OS_Type.UNKNOWN_OS
+
+ IS_DOCKER = False
+ IS_WINDOWS = False
+ IS_LINUX = False
+ IS_FREEBSD = False
+ IS_MAC_OS = False
+
pluginLog = None
logLinePreviousHits = []
thredPool = None
@@ -68,45 +115,76 @@ class StashPluginHelper(StashInterface):
_mergeMetadata = None
encodeToUtf8 = False
convertToAscii = False # If set True, it takes precedence over encodeToUtf8
+ progressBarIsEnabled = True
# Prefix message value
- LEV_TRACE = "TRACE: "
- LEV_DBG = "DBG: "
- LEV_INF = "INF: "
- LEV_WRN = "WRN: "
- LEV_ERR = "ERR: "
- LEV_CRITICAL = "CRITICAL: "
-
- # Default format
- LOG_FORMAT = "[%(asctime)s] %(message)s"
+ class Level(EnumValue):
+ TRACE = "TRACE: "
+ DBG = "DBG: "
+ INF = "INF: "
+ WRN = "WRN: "
+ ERR = "ERR: "
+ CRITICAL = "CRITICAL: "
+ class Constant(EnumValue):
+ # Default format
+ LOG_FORMAT = "[%(asctime)s] %(message)s"
+ ARGUMENT_UNSPECIFIED = "_ARGUMENT_UNSPECIFIED_"
+ NOT_IN_LIST = 2147483646
+
# Externally modifiable variables
- log_to_err_set = LOG_TO_FILE + LOG_TO_STDERR # This can be changed by the calling source in order to customize what targets get error messages
- log_to_norm = LOG_TO_FILE + LOG_TO_CONSOLE # Can be change so-as to set target output for normal logging
+ log_to_err_set = LogTo.FILE + LogTo.STDERR # This can be changed by the calling source in order to customize what targets get error messages
+ log_to_norm = LogTo.FILE + LogTo.CONSOLE # Can be changed so as to set the target output for normal logging
# Warn message goes to both plugin log file and stash when sent to Stash log file.
- log_to_wrn_set = LOG_TO_STASH # This can be changed by the calling source in order to customize what targets get warning messages
+ log_to_wrn_set = LogTo.STASH # This can be changed by the calling source in order to customize what targets get warning messages
def __init__(self,
- debugTracing = None, # Set debugTracing to True so as to output debug and trace logging
- logFormat = LOG_FORMAT, # Plugin log line format
- dateFmt = "%y%m%d %H:%M:%S", # Date format when logging to plugin log file
- maxbytes = 8*1024*1024, # Max size of plugin log file
- backupcount = 2, # Backup counts when log file size reaches max size
- logToWrnSet = 0, # Customize the target output set which will get warning logging
- logToErrSet = 0, # Customize the target output set which will get error logging
- logToNormSet = 0, # Customize the target output set which will get normal logging
- logFilePath = "", # Plugin log file. If empty, the log file name will be set based on current python file name and path
- mainScriptName = "", # The main plugin script file name (full path)
- pluginID = "",
- settings = None, # Default settings for UI fields
- config = None, # From pluginName_config.py or pluginName_setting.py
- fragmentServer = None,
- stash_url = None, # Stash URL (endpoint URL) Example: http://localhost:9999
- apiKey = None, # API Key only needed when username and password set while running script via command line
+ debugTracing = None, # Set debugTracing to True so as to output debug and trace logging
+ logFormat = Constant.LOG_FORMAT.value, # Plugin log line format
+ dateFmt = "%y%m%d %H:%M:%S", # Date format when logging to plugin log file
+ maxbytes = 8*1024*1024, # Max size of plugin log file
+ backupcount = 2, # Backup counts when log file size reaches max size
+ logToWrnSet = 0, # Customize the target output set which will get warning logging
+ logToErrSet = 0, # Customize the target output set which will get error logging
+ logToNormSet = 0, # Customize the target output set which will get normal logging
+ logFilePath = "", # Plugin log file. If empty, the log file name will be set based on current python file name and path
+ mainScriptName = "", # The main plugin script file name (full path)
+ pluginID = "",
+ settings = None, # Default settings for UI fields
+ config = None, # From pluginName_config.py or pluginName_setting.py
+ fragmentServer = None,
+ stash_url = None, # Stash URL (endpoint URL) Example: http://localhost:9999
+ apiKey = None, # API Key only needed when username and password set while running script via command line
DebugTraceFieldName = "zzdebugTracing",
+ DebugFieldName = "zzDebug",
DryRunFieldName = "zzdryRun",
- setStashLoggerAsPluginLogger = False):
+ setStashLoggerAsPluginLogger = False,
+ DBG_LEVEL = DbgLevel.INF):
+ if DBG_LEVEL in list(self.DbgLevel):
+ self.DBG_LEVEL = DBG_LEVEL
+ if debugTracing:
+ self.DEBUG_TRACING = debugTracing
+ if self.DBG_LEVEL > self.DbgLevel.DBG:
+ self.DBG_LEVEL = self.DbgLevel.TRACE
+ elif self.DBG_LEVEL < self.DbgLevel.INF:
+ self.DEBUG_TRACING = True
self.thredPool = concurrent.futures.ThreadPoolExecutor(max_workers=2)
+ if self.isWindows():
+ self.IS_WINDOWS = True
+ self.OS_TYPE = self.OS_Type.WINDOWS
+ elif self.isLinux():
+ self.IS_LINUX = True
+ self.OS_TYPE = self.OS_Type.LINUX
+ if self.isDocker():
+ self.IS_DOCKER = True
+ elif self.isFreeBSD():
+ self.IS_FREEBSD = True
+ self.OS_TYPE = self.OS_Type.FREEBSD
+ if self.isDocker():
+ self.IS_DOCKER = True
+ elif self.isMacOS():
+ self.IS_MAC_OS = True
+ self.OS_TYPE = self.OS_Type.MAC_OS
if logToWrnSet: self.log_to_wrn_set = logToWrnSet
if logToErrSet: self.log_to_err_set = logToErrSet
if logToNormSet: self.log_to_norm = logToNormSet
@@ -129,7 +207,6 @@ def __init__(self,
else:
self.FRAGMENT_SERVER = {'Scheme': 'http', 'Host': '0.0.0.0', 'Port': '9999', 'SessionCookie': {'Name': 'session', 'Value': '', 'Path': '', 'Domain': '', 'Expires': '0001-01-01T00:00:00Z', 'RawExpires': '', 'MaxAge': 0, 'Secure': False, 'HttpOnly': False, 'SameSite': 0, 'Raw': '', 'Unparsed': None}, 'Dir': os.path.dirname(pathlib.Path(self.MAIN_SCRIPT_NAME).resolve().parent), 'PluginDir': pathlib.Path(self.MAIN_SCRIPT_NAME).resolve().parent}
- if debugTracing: self.DEBUG_TRACING = debugTracing
if config:
self.pluginConfig = config
if self.Setting('apiKey', "") != "":
@@ -191,8 +268,14 @@ def __init__(self,
self.API_KEY = self.STASH_CONFIGURATION['apiKey']
self.DRY_RUN = self.Setting(DryRunFieldName, self.DRY_RUN)
- self.DEBUG_TRACING = self.Setting(DebugTraceFieldName, self.DEBUG_TRACING)
- if self.DEBUG_TRACING: self.LOG_LEVEL = logging.DEBUG
+ if self.Setting(DebugTraceFieldName, self.DEBUG_TRACING):
+ self.DEBUG_TRACING = True
+ self.LOG_LEVEL = logging.TRACE
+ self.DBG_LEVEL = self.DbgLevel.TRACE
+ elif self.Setting(DebugFieldName, self.DEBUG_TRACING):
+ self.DEBUG_TRACING = True
+ self.LOG_LEVEL = logging.DEBUG
+ self.DBG_LEVEL = self.DbgLevel.DBG
logging.basicConfig(level=self.LOG_LEVEL, format=logFormat, datefmt=dateFmt, handlers=[RFH])
self.pluginLog = logging.getLogger(pathlib.Path(self.MAIN_SCRIPT_NAME).stem)
@@ -202,74 +285,104 @@ def __init__(self,
def __del__(self):
self.thredPool.shutdown(wait=False)
- def Setting(self, name, default=_ARGUMENT_UNSPECIFIED_, raiseEx=True, notEmpty=False):
+ def Setting(self, name, default=Constant.ARGUMENT_UNSPECIFIED.value, raiseEx=True, notEmpty=False):
if self.pluginSettings != None and name in self.pluginSettings:
if notEmpty == False or self.pluginSettings[name] != "":
return self.pluginSettings[name]
if self.pluginConfig != None and name in self.pluginConfig:
if notEmpty == False or self.pluginConfig[name] != "":
return self.pluginConfig[name]
- if default == _ARGUMENT_UNSPECIFIED_ and raiseEx:
+ if default == self.Constant.ARGUMENT_UNSPECIFIED.value and raiseEx:
raise Exception(f"Missing {name} from both UI settings and config file settings.")
return default
- def Log(self, logMsg, printTo = 0, logLevel = logging.INFO, lineNo = -1, levelStr = "", logAlways = False, toAscii = None):
- if toAscii or (toAscii == None and (self.encodeToUtf8 or self.convertToAscii)):
- logMsg = self.asc2(logMsg)
- else:
- logMsg = logMsg
- if printTo == 0:
- printTo = self.log_to_norm
- elif printTo == self.LOG_TO_ERROR and logLevel == logging.INFO:
- logLevel = logging.ERROR
- printTo = self.log_to_err_set
- elif printTo == self.LOG_TO_CRITICAL and logLevel == logging.INFO:
- logLevel = logging.CRITICAL
- printTo = self.log_to_err_set
- elif printTo == self.LOG_TO_WARN and logLevel == logging.INFO:
- logLevel = logging.WARN
- printTo = self.log_to_wrn_set
+ def Log(self, logMsg, printTo = 0, logLevel = logging.INFO, lineNo = -1, levelStr = "", logAlways = False, toAscii = None, printLogException = False):
+ try:
+ if toAscii or (toAscii == None and (self.encodeToUtf8 or self.convertToAscii)):
+ logMsg = self.asc2(logMsg)
+ else:
+ logMsg = logMsg
+ if printTo == 0:
+ printTo = self.log_to_norm
+ elif printTo == self.LogTo.ERROR and logLevel == logging.INFO:
+ logLevel = logging.ERROR
+ printTo = self.log_to_err_set
+ elif printTo == self.LogTo.CRITICAL and logLevel == logging.INFO:
+ logLevel = logging.CRITICAL
+ printTo = self.log_to_err_set
+ elif printTo == self.LogTo.WARN and logLevel == logging.INFO:
+ logLevel = logging.WARN
+ printTo = self.log_to_wrn_set
+ if lineNo == -1:
+ lineNo = inspect.currentframe().f_back.f_lineno
+ LN_Str = f"[LN:{lineNo}]"
+ # print(f"{LN_Str}, {logAlways}, {self.LOG_LEVEL}, {logging.DEBUG}, {levelStr}, {logMsg}")
+ if logLevel == logging.TRACE and (logAlways == False or self.LOG_LEVEL == logging.TRACE):
+ if levelStr == "": levelStr = self.Level.DBG
+ if printTo & self.LogTo.FILE: self.pluginLog.trace(f"{LN_Str} {levelStr}{logMsg}")
+ if printTo & self.LogTo.STASH: self.log.trace(f"{LN_Str} {levelStr}{logMsg}")
+ elif logLevel == logging.DEBUG and (logAlways == False or self.LOG_LEVEL == logging.DEBUG or self.LOG_LEVEL == logging.TRACE):
+ if levelStr == "": levelStr = self.Level.DBG
+ if printTo & self.LogTo.FILE: self.pluginLog.debug(f"{LN_Str} {levelStr}{logMsg}")
+ if printTo & self.LogTo.STASH: self.log.debug(f"{LN_Str} {levelStr}{logMsg}")
+ elif logLevel == logging.INFO or logLevel == logging.DEBUG:
+ if levelStr == "": levelStr = self.Level.INF if logLevel == logging.INFO else self.Level.DBG
+ if printTo & self.LogTo.FILE: self.pluginLog.info(f"{LN_Str} {levelStr}{logMsg}")
+ if printTo & self.LogTo.STASH: self.log.info(f"{LN_Str} {levelStr}{logMsg}")
+ elif logLevel == logging.WARN:
+ if levelStr == "": levelStr = self.Level.WRN
+ if printTo & self.LogTo.FILE: self.pluginLog.warning(f"{LN_Str} {levelStr}{logMsg}")
+ if printTo & self.LogTo.STASH: self.log.warning(f"{LN_Str} {levelStr}{logMsg}")
+ elif logLevel == logging.ERROR:
+ if levelStr == "": levelStr = self.Level.ERR
+ if printTo & self.LogTo.FILE: self.pluginLog.error(f"{LN_Str} {levelStr}{logMsg}")
+ if printTo & self.LogTo.STASH: self.log.error(f"{LN_Str} {levelStr}{logMsg}")
+ elif logLevel == logging.CRITICAL:
+ if levelStr == "": levelStr = self.Level.CRITICAL
+ if printTo & self.LogTo.FILE: self.pluginLog.critical(f"{LN_Str} {levelStr}{logMsg}")
+ if printTo & self.LogTo.STASH: self.log.error(f"{LN_Str} {levelStr}{logMsg}")
+ if (printTo & self.LogTo.CONSOLE) and (logLevel != logging.DEBUG or self.DEBUG_TRACING or logAlways):
+ print(f"{LN_Str} {levelStr}{logMsg}")
+ if (printTo & self.LogTo.STDERR) and (logLevel != logging.DEBUG or self.DEBUG_TRACING or logAlways):
+ print(f"StdErr: {LN_Str} {levelStr}{logMsg}", file=sys.stderr)
+ except Exception as e:
+ if printLogException:
+ tb = traceback.format_exc()
+ print(f"Exception calling [Log]; Error: {e}\nTraceBack={tb}")
+ pass
+
+ def Trace(self, logMsg = "", printTo = 0, logAlways = False, lineNo = -1, toAscii = None):
+ if printTo == 0: printTo = self.LogTo.FILE
if lineNo == -1:
lineNo = inspect.currentframe().f_back.f_lineno
- LN_Str = f"[LN:{lineNo}]"
- # print(f"{LN_Str}, {logAlways}, {self.LOG_LEVEL}, {logging.DEBUG}, {levelStr}, {logMsg}")
- if logLevel == logging.DEBUG and (logAlways == False or self.LOG_LEVEL == logging.DEBUG):
- if levelStr == "": levelStr = self.LEV_DBG
- if printTo & self.LOG_TO_FILE: self.pluginLog.debug(f"{LN_Str} {levelStr}{logMsg}")
- if printTo & self.LOG_TO_STASH: self.log.debug(f"{LN_Str} {levelStr}{logMsg}")
- elif logLevel == logging.INFO or logLevel == logging.DEBUG:
- if levelStr == "": levelStr = self.LEV_INF if logLevel == logging.INFO else self.LEV_DBG
- if printTo & self.LOG_TO_FILE: self.pluginLog.info(f"{LN_Str} {levelStr}{logMsg}")
- if printTo & self.LOG_TO_STASH: self.log.info(f"{LN_Str} {levelStr}{logMsg}")
- elif logLevel == logging.WARN:
- if levelStr == "": levelStr = self.LEV_WRN
- if printTo & self.LOG_TO_FILE: self.pluginLog.warning(f"{LN_Str} {levelStr}{logMsg}")
- if printTo & self.LOG_TO_STASH: self.log.warning(f"{LN_Str} {levelStr}{logMsg}")
- elif logLevel == logging.ERROR:
- if levelStr == "": levelStr = self.LEV_ERR
- if printTo & self.LOG_TO_FILE: self.pluginLog.error(f"{LN_Str} {levelStr}{logMsg}")
- if printTo & self.LOG_TO_STASH: self.log.error(f"{LN_Str} {levelStr}{logMsg}")
- elif logLevel == logging.CRITICAL:
- if levelStr == "": levelStr = self.LEV_CRITICAL
- if printTo & self.LOG_TO_FILE: self.pluginLog.critical(f"{LN_Str} {levelStr}{logMsg}")
- if printTo & self.LOG_TO_STASH: self.log.error(f"{LN_Str} {levelStr}{logMsg}")
- if (printTo & self.LOG_TO_CONSOLE) and (logLevel != logging.DEBUG or self.DEBUG_TRACING or logAlways):
- print(f"{LN_Str} {levelStr}{logMsg}")
- if (printTo & self.LOG_TO_STDERR) and (logLevel != logging.DEBUG or self.DEBUG_TRACING or logAlways):
- print(f"StdErr: {LN_Str} {levelStr}{logMsg}", file=sys.stderr)
+ logLev = logging.INFO if logAlways else logging.TRACE
+ if self.DBG_LEVEL == self.DbgLevel.TRACE or logAlways:
+ if logMsg == "":
+ logMsg = f"Line number {lineNo}..."
+ self.Log(logMsg, printTo, logLev, lineNo, self.Level.TRACE, logAlways, toAscii=toAscii)
- def Trace(self, logMsg = "", printTo = 0, logAlways = False, lineNo = -1, toAscii = None):
- if printTo == 0: printTo = self.LOG_TO_FILE
+ # Log once per session. Only logs the first time called from a particular line number in the code.
+ def TraceOnce(self, logMsg = "", printTo = 0, logAlways = False, toAscii = None):
+ lineNo = inspect.currentframe().f_back.f_lineno
+ if self.DBG_LEVEL == self.DbgLevel.TRACE or logAlways:
+ FuncAndLineNo = f"{inspect.currentframe().f_back.f_code.co_name}:{lineNo}"
+ if FuncAndLineNo in self.logLinePreviousHits:
+ return
+ self.logLinePreviousHits.append(FuncAndLineNo)
+ self.Trace(logMsg, printTo, logAlways, lineNo, toAscii=toAscii)
+
+ def Debug(self, logMsg = "", printTo = 0, logAlways = False, lineNo = -1, toAscii = None):
+ if printTo == 0: printTo = self.LogTo.FILE
if lineNo == -1:
lineNo = inspect.currentframe().f_back.f_lineno
logLev = logging.INFO if logAlways else logging.DEBUG
if self.DEBUG_TRACING or logAlways:
if logMsg == "":
logMsg = f"Line number {lineNo}..."
- self.Log(logMsg, printTo, logLev, lineNo, self.LEV_TRACE, logAlways, toAscii=toAscii)
+ self.Log(logMsg, printTo, logLev, lineNo, self.Level.DBG, logAlways, toAscii=toAscii)
# Log once per session. Only logs the first time called from a particular line number in the code.
- def TraceOnce(self, logMsg = "", printTo = 0, logAlways = False, toAscii = None):
+ def DebugOnce(self, logMsg = "", printTo = 0, logAlways = False, toAscii = None):
lineNo = inspect.currentframe().f_back.f_lineno
if self.DEBUG_TRACING or logAlways:
FuncAndLineNo = f"{inspect.currentframe().f_back.f_code.co_name}:{lineNo}"
@@ -279,8 +392,8 @@ def TraceOnce(self, logMsg = "", printTo = 0, logAlways = False, toAscii = None)
self.Trace(logMsg, printTo, logAlways, lineNo, toAscii=toAscii)
# Log INFO on first call, then do Trace on remaining calls.
- def LogOnce(self, logMsg = "", printTo = 0, logAlways = False, traceOnRemainingCalls = True, toAscii = None):
- if printTo == 0: printTo = self.LOG_TO_FILE
+ def LogOnce(self, logMsg = "", printTo = 0, logAlways = False, traceOnRemainingCalls = True, toAscii = None, printLogException = False):
+ if printTo == 0: printTo = self.LogTo.FILE
lineNo = inspect.currentframe().f_back.f_lineno
FuncAndLineNo = f"{inspect.currentframe().f_back.f_code.co_name}:{lineNo}"
if FuncAndLineNo in self.logLinePreviousHits:
@@ -288,49 +401,97 @@ def LogOnce(self, logMsg = "", printTo = 0, logAlways = False, traceOnRemainingC
self.Trace(logMsg, printTo, logAlways, lineNo, toAscii=toAscii)
else:
self.logLinePreviousHits.append(FuncAndLineNo)
- self.Log(logMsg, printTo, logging.INFO, lineNo, toAscii=toAscii)
+ self.Log(logMsg, printTo, logging.INFO, lineNo, toAscii=toAscii, printLogException=printLogException)
- def Warn(self, logMsg, printTo = 0, toAscii = None):
+ def Warn(self, logMsg, printTo = 0, toAscii = None, printLogException = False):
if printTo == 0: printTo = self.log_to_wrn_set
lineNo = inspect.currentframe().f_back.f_lineno
- self.Log(logMsg, printTo, logging.WARN, lineNo, toAscii=toAscii)
+ self.Log(logMsg, printTo, logging.WARN, lineNo, toAscii=toAscii, printLogException=printLogException)
- def Error(self, logMsg, printTo = 0, toAscii = None):
+ def Error(self, logMsg, printTo = 0, toAscii = None, printLogException = False):
if printTo == 0: printTo = self.log_to_err_set
lineNo = inspect.currentframe().f_back.f_lineno
- self.Log(logMsg, printTo, logging.ERROR, lineNo, toAscii=toAscii)
+ self.Log(logMsg, printTo, logging.ERROR, lineNo, toAscii=toAscii, printLogException=printLogException)
- def Status(self, printTo = 0, logLevel = logging.INFO, lineNo = -1):
+ # The above logging functions all use the UpperCamelCase naming convention to avoid conflicts with parent class logging function names.
+ # The below non-logging functions use the (lower) camelCase naming convention.
+ def status(self, printTo = 0, logLevel = logging.INFO, lineNo = -1):
if printTo == 0: printTo = self.log_to_norm
if lineNo == -1:
lineNo = inspect.currentframe().f_back.f_lineno
self.Log(f"StashPluginHelper Status: (CALLED_AS_STASH_PLUGIN={self.CALLED_AS_STASH_PLUGIN}), (RUNNING_IN_COMMAND_LINE_MODE={self.RUNNING_IN_COMMAND_LINE_MODE}), (DEBUG_TRACING={self.DEBUG_TRACING}), (DRY_RUN={self.DRY_RUN}), (PLUGIN_ID={self.PLUGIN_ID}), (PLUGIN_TASK_NAME={self.PLUGIN_TASK_NAME}), (STASH_URL={self.STASH_URL}), (MAIN_SCRIPT_NAME={self.MAIN_SCRIPT_NAME})",
printTo, logLevel, lineNo)
- def ExecuteProcess(self, args, ExecDetach=False):
- import platform, subprocess
- is_windows = any(platform.win32_ver())
+ # Replaces obsolete UI settings variable with new name. Only use this with strings, numbers, and booleans.
+ # Example usage:
+ # obsoleteSettingsToConvert = {"OldVariableName" : "NewVariableName", "AnotherOldVarName" : "NewName2"}
+ # stash.replaceObsoleteSettings(obsoleteSettingsToConvert, "ObsoleteSettingsCheckVer2")
+ def replaceObsoleteSettings(self, settingSet:dict, SettingToCheckFirst="", init_defaults=False):
+ if SettingToCheckFirst == "" or self.Setting(SettingToCheckFirst) == False:
+ for key in settingSet:
+ obsoleteVar = self.Setting(key)
+ if isinstance(obsoleteVar, bool):
+ if obsoleteVar:
+ if self.Setting(settingSet[key]) == False:
+ self.Log(f"Detected obsolete (bool) settings ({key}). Moving obsolete settings to new setting name {settingSet[key]}.")
+ results = self.configure_plugin(self.PLUGIN_ID, {settingSet[key]:self.Setting(key), key : False}, init_defaults)
+ self.Debug(f"configure_plugin = {results}")
+ else:
+ self.Log(f"Detected obsolete (bool) settings ({key}), and deleting it's content because new setting name ({settingSet[key]}) is already populated.")
+ results = self.configure_plugin(self.PLUGIN_ID, {key : False}, init_defaults)
+ self.Debug(f"configure_plugin = {results}")
+ elif isinstance(obsoleteVar, int): # Note: isinstance(x, int) is also True for bool, which is why bool is checked first
+ if obsoleteVar > 0:
+ if self.Setting(settingSet[key]) > 0:
+ self.Log(f"Detected obsolete (int) settings ({key}), and deleting it's content because new setting name ({settingSet[key]}) is already populated.")
+ results = self.configure_plugin(self.PLUGIN_ID, {key : 0}, init_defaults)
+ self.Debug(f"configure_plugin = {results}")
+ else:
+ self.Log(f"Detected obsolete (int) settings ({key}). Moving obsolete settings to new setting name {settingSet[key]}.")
+ results = self.configure_plugin(self.PLUGIN_ID, {settingSet[key]:self.Setting(key), key : 0}, init_defaults)
+ self.Debug(f"configure_plugin = {results}")
+ elif obsoleteVar != "":
+ if self.Setting(settingSet[key]) == "":
+ self.Log(f"Detected obsolete (str) settings ({key}). Moving obsolete settings to new setting name {settingSet[key]}.")
+ results = self.configure_plugin(self.PLUGIN_ID, {settingSet[key]:self.Setting(key), key : ""}, init_defaults)
+ self.Debug(f"configure_plugin = {results}")
+ else:
+ self.Log(f"Detected obsolete (str) settings ({key}), and deleting it's content because new setting name ({settingSet[key]}) is already populated.")
+ results = self.configure_plugin(self.PLUGIN_ID, {key : ""}, init_defaults)
+ self.Debug(f"configure_plugin = {results}")
+ if SettingToCheckFirst != "":
+ results = self.configure_plugin(self.PLUGIN_ID, {SettingToCheckFirst : True}, init_defaults)
+ self.Debug(f"configure_plugin = {results}")
+
+
+ def executeProcess(self, args, ExecDetach=False):
pid = None
- self.Trace(f"is_windows={is_windows} args={args}")
- if is_windows:
+ self.Trace(f"self.IS_WINDOWS={self.IS_WINDOWS} args={args}")
+ if self.IS_WINDOWS:
if ExecDetach:
- self.Trace("Executing process using Windows DETACHED_PROCESS")
+ self.Trace(f"Executing process using Windows DETACHED_PROCESS; args=({args})")
DETACHED_PROCESS = 0x00000008
pid = subprocess.Popen(args,creationflags=DETACHED_PROCESS, shell=True).pid
else:
pid = subprocess.Popen(args, shell=True).pid
else:
- self.Trace("Executing process using normal Popen")
- pid = subprocess.Popen(args).pid
+ if ExecDetach:
+ # For Linux detached processes, use nohup, e.g. subprocess.Popen(["nohup", "python", "test.py"])
+ if self.IS_LINUX:
+ args = ["nohup"] + args
+ self.Trace(f"Executing detached process using Popen({args})")
+ else:
+ self.Trace(f"Executing process using normal Popen({args})")
+ pid = subprocess.Popen(args).pid # On detach, may need the following for MAC OS subprocess.Popen(args, shell=True, start_new_session=True)
self.Trace(f"pid={pid}")
return pid
- def ExecutePythonScript(self, args, ExecDetach=True):
+ def executePythonScript(self, args, ExecDetach=True):
PythonExe = f"{sys.executable}"
argsWithPython = [f"{PythonExe}"] + args
- return self.ExecuteProcess(argsWithPython,ExecDetach=ExecDetach)
+ return self.executeProcess(argsWithPython,ExecDetach=ExecDetach)
- def Submit(self, *args, **kwargs):
+ def submit(self, *args, **kwargs):
return self.thredPool.submit(*args, **kwargs)
def asc2(self, data, convertToAscii=None):
@@ -340,24 +501,282 @@ def asc2(self, data, convertToAscii=None):
# data = str(data).encode('ascii','ignore') # This works better for logging than ascii function
# return str(data)[2:-1] # strip out b'str'
- def init_mergeMetadata(self, excludeMergeTags=None):
+ def initMergeMetadata(self, excludeMergeTags=None):
self.excludeMergeTags = excludeMergeTags
self._mergeMetadata = mergeMetadata(self, self.excludeMergeTags)
- # Must call init_mergeMetadata, before calling merge_metadata
- def merge_metadata(self, SrcData, DestData): # Input arguments can be scene ID or scene metadata
- if type(SrcData) is int:
- SrcData = self.find_scene(SrcData)
- DestData = self.find_scene(DestData)
- return self._mergeMetadata.merge(SrcData, DestData)
+ def mergeMetadata(self, SrcData, DestData, retryCount = 12, sleepSecondsBetweenRetry = 5, excludeMergeTags=None): # Input arguments can be scene ID or scene metadata
+ import requests
+ if self._mergeMetadata == None:
+ self.initMergeMetadata(excludeMergeTags)
+ errMsg = None
+ for i in range(0, retryCount):
+ try:
+ if errMsg != None:
+ self.Warn(errMsg)
+ if type(SrcData) is int:
+ SrcData = self.find_scene(SrcData)
+ DestData = self.find_scene(DestData)
+ return self._mergeMetadata.merge(SrcData, DestData)
+ except (requests.exceptions.ConnectionError, ConnectionResetError) as e:
+ tb = traceback.format_exc()
+ errMsg = f"Exception calling [mergeMetadata]. Will retry; count({i}); Error: {e}\nTraceBack={tb}"
+ except Exception as e:
+ tb = traceback.format_exc()
+ errMsg = f"Exception calling [mergeMetadata]. Will retry; count({i}); Error: {e}\nTraceBack={tb}"
+ time.sleep(sleepSecondsBetweenRetry)
+
+ def getUpdateProgressBarIter(self, qtyResults):
+ if qtyResults > 40000:
+ return 100
+ if qtyResults > 20000:
+ return 80
+ if qtyResults > 10000:
+ return 40
+ if qtyResults > 5000:
+ return 20
+ if qtyResults > 2000:
+ return 10
+ if qtyResults > 1000:
+ return 5
+ if qtyResults > 500:
+ return 3
+ if qtyResults > 200:
+ return 2
+ return 1
+
+ def enableProgressBar(self, enable=True):
+ self.progressBarIsEnabled = enable
+
+ # Use setProgressBarIter to reduce traffic to the server by only updating the progressBar every X(updateProgressbarOnIter) iteration.
+ def setProgressBarIter(self, qtyResults):
+ if self.progressBarIsEnabled:
+ self.updateProgressbarOnIter = self.getUpdateProgressBarIter(qtyResults)
+ self.currentProgressbarIteration = 0
+
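+ # Example (hypothetical usage, a minimal sketch): throttle progress updates over a large scene list.
+ #   stash.setProgressBarIter(len(scenes))      # pick an update interval based on result qty
+ #   for i, scene in enumerate(scenes):
+ #       stash.progressBar(i, len(scenes))      # only every Nth call actually updates the server
+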
+ def progressBar(self, currentIndex, maxCount):
+ if self.progressBarIsEnabled:
+ if self.updateProgressbarOnIter > 0:
+ self.currentProgressbarIteration+=1
+ if self.currentProgressbarIteration > self.updateProgressbarOnIter:
+ self.currentProgressbarIteration = 0
+ else:
+ return
+ progress = (currentIndex / maxCount) if currentIndex < maxCount else (maxCount / currentIndex)
+ try:
+ self.log.progress(progress)
+ except Exception as e:
+ pass
+
+ def isDocker(self):
+ cgroup = pathlib.Path('/proc/self/cgroup')
+ return pathlib.Path('/.dockerenv').is_file() or cgroup.is_file() and 'docker' in cgroup.read_text()
+
+ def isWindows(self):
+ if any(platform.win32_ver()):
+ return True
+ return False
+
+ def isLinux(self):
+ if platform.system().lower().startswith("linux"):
+ return True
+ return False
+
+ def isFreeBSD(self):
+ if platform.system().lower().startswith("freebsd"):
+ return True
+ return False
+
+ def isMacOS(self):
+ if sys.platform == "darwin":
+ return True
+ return False
- def Progress(self, currentIndex, maxCount):
- progress = (currentIndex / maxCount) if currentIndex < maxCount else (maxCount / currentIndex)
- self.log.progress(progress)
+
+ def spinProcessBar(self, sleepSeconds = 1, maxPos = 30, trace = False):
+ if trace:
+ self.Trace(f"Starting spinProcessBar loop; sleepSeconds={sleepSeconds}, maxPos={maxPos}")
+ pos = 1
+ while self.stopProcessBarSpin == False:
+ if trace:
+ self.Trace(f"progressBar({pos}, {maxPos})")
+ self.progressBar(pos, maxPos)
+ pos +=1
+ if pos > maxPos:
+ pos = 1
+ time.sleep(sleepSeconds)
+
+ def startSpinningProcessBar(self, sleepSeconds = 1, maxPos = 30, trace = False):
+ self.stopProcessBarSpin = False
+ if trace:
+ self.Trace(f"submitting spinProcessBar; sleepSeconds={sleepSeconds}, maxPos={maxPos}, trace={trace}")
+ self.submit(self.spinProcessBar, sleepSeconds, maxPos, trace)
+
+ def stopSpinningProcessBar(self, sleepSeconds = 1):
+ self.stopProcessBarSpin = True
+ time.sleep(sleepSeconds)
+
+ def startsWithInList(self, listToCk, itemToCk):
+ itemToCk = itemToCk.lower()
+ for listItem in listToCk:
+ if itemToCk.startswith(listItem.lower()):
+ return True
+ return False
+
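+ # Example (hypothetical values): indexStartsWithInList(["C:\\Media", "C:\\Media\\4K"], "C:\\Media\\4K\\a.mp4")
+ # returns 1, because the longest matching prefix wins; returns Constant.NOT_IN_LIST.value when nothing matches.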
+ def indexStartsWithInList(self, listToCk, itemToCk):
+ itemToCk = itemToCk.lower()
+ index = -1
+ lenItemMatch = 0
+ returnValue = self.Constant.NOT_IN_LIST.value
+ for listItem in listToCk:
+ index += 1
+ if itemToCk.startswith(listItem.lower()):
+ if len(listItem) > lenItemMatch: # Make sure the best match is selected by getting match with longest string.
+ lenItemMatch = len(listItem)
+ returnValue = index
+ return returnValue
+
+ def checkIfTagInlist(self, somelist, tagName, trace=False):
+ tagId = self.find_tags(q=tagName)
+ if len(tagId) > 0 and 'id' in tagId[0]:
+ tagId = tagId[0]['id']
+ else:
+ self.Warn(f"Could not find tag ID for tag '{tagName}'.")
+ return
+ somelist = somelist.split(",")
+ if trace:
+ self.Trace("#########################################################################")
+ scenes = self.find_scenes(f={"tags": {"value":tagId, "modifier":"INCLUDES"}}, fragment='id tags {id name} files {path width height duration size video_codec bit_rate frame_rate} details')
+ qtyResults = len(scenes)
+ self.Log(f"Found {qtyResults} scenes with tag ({tagName})")
+ Qty = 0
+ for scene in scenes:
+ Qty+=1
+ if self.startsWithInList(somelist, scene['files'][0]['path']):
+ self.Log(f"Found scene part of list; {scene['files'][0]['path']}")
+ elif trace:
+ self.Trace(f"Not part of list; {scene['files'][0]['path']}")
- def run_plugin(self, plugin_id, task_mode=None, args:dict={}, asyn=False):
+ def createTagId(self, tagName, tagName_descp = "", deleteIfExist = False, ignoreAutoTag = False):
+ tagId = self.find_tags(q=tagName)
+ if len(tagId):
+ tagId = tagId[0]
+ if deleteIfExist:
+ self.destroy_tag(int(tagId['id']))
+ else:
+ return tagId['id']
+ tagId = self.create_tag({"name":tagName, "description":tagName_descp, "ignore_auto_tag": ignoreAutoTag})
+ self.Log(f"Dup-tagId={tagId['id']}")
+ return tagId['id']
+
+ def removeTag(self, scene, tagName): # scene can be scene ID or scene metadata
+ scene_details = scene
+ if isinstance(scene, int) or 'id' not in scene:
+ scene_details = self.find_scene(scene)
+ tagIds = []
+ doesHaveTagName = False
+ for tag in scene_details['tags']:
+ if tag['name'] != tagName:
+ tagIds += [tag['id']]
+ else:
+ doesHaveTagName = True
+ if doesHaveTagName:
+ dataDict = {'id' : scene_details['id']}
+ dataDict.update({'tag_ids' : tagIds})
+ self.update_scene(dataDict)
+ return doesHaveTagName
+
+ def addTag(self, scene, tagName, tagName_descp = "", ignoreAutoTag=False, retryCount = 12, sleepSecondsBetweenRetry = 5): # scene can be scene ID or scene metadata
+ errMsg = None
+ for i in range(0, retryCount):
+ try:
+ if errMsg != None:
+ self.Warn(errMsg)
+ scene_details = scene
+ if isinstance(scene, int) or 'id' not in scene:
+ scene_details = self.find_scene(scene)
+ tagIds = [self.createTagId(tagName, tagName_descp=tagName_descp, ignoreAutoTag=ignoreAutoTag)]
+ for tag in scene_details['tags']:
+ if tag['name'] == tagName:
+ return False
+ else:
+ tagIds += [tag['id']]
+ dataDict = {'id' : scene_details['id']}
+ dataDict.update({'tag_ids' : tagIds})
+ self.update_scene(dataDict)
+ return True
+ except ConnectionResetError as e:
+ tb = traceback.format_exc()
+ errMsg = f"Exception calling [addTag]. Will retry; count({i}); Error: {e}\nTraceBack={tb}"
+ except Exception as e:
+ tb = traceback.format_exc()
+ errMsg = f"Exception calling [addTag]. Will retry; count({i}); Error: {e}\nTraceBack={tb}"
+ time.sleep(sleepSecondsBetweenRetry)
+
+ def copyFields(self, srcData, fieldsToCpy):
+ destData = {}
+ for key in srcData:
+ if key in fieldsToCpy:
+ destData.update({key : srcData[key]})
+ return destData
+
+ def renameTag(self,oldTagName, newTagName):
+ tagMetadata = self.find_tags(q=oldTagName)
+ if len(tagMetadata) > 0 and 'id' in tagMetadata[0]:
+ if tagMetadata[0]['name'] == newTagName:
+ return False
+ tagMetadata[0]['name'] = newTagName
+ fieldsToCpy = ["id", "name", "description", "aliases", "ignore_auto_tag", "favorite", "image", "parent_ids", "child_ids"]
+ tagUpdateInput = self.copyFields(tagMetadata[0], fieldsToCpy)
+ self.Trace(f"Renaming tag using tagUpdateInput = {tagUpdateInput}")
+ self.update_tag(tagUpdateInput)
+ return True
+ return False
+
+ def updateScene(self, update_input, create=False, retryCount = 24, sleepSecondsBetweenRetry = 5):
+ errMsg = None
+ for i in range(0, retryCount):
+ try:
+ if errMsg != None:
+ self.Warn(errMsg)
+ return self.update_scene(update_input, create)
+ except ConnectionResetError as e:
+ tb = traceback.format_exc()
+ errMsg = f"Exception calling [updateScene]. Will retry; count({i}); Error: {e}\nTraceBack={tb}"
+ except Exception as e:
+ tb = traceback.format_exc()
+ errMsg = f"Exception calling [updateScene]. Will retry; count({i}); Error: {e}\nTraceBack={tb}"
+ time.sleep(sleepSecondsBetweenRetry)
+
+ def destroyScene(self, scene_id, delete_file=False, retryCount = 12, sleepSecondsBetweenRetry = 5):
+ errMsg = None
+ for i in range(0, retryCount):
+ try:
+ if errMsg != None:
+ self.Warn(errMsg)
+ if i > 0:
+ # Check if file still exist
+ scene = self.find_scene(scene_id)
+ if scene == None or len(scene) == 0:
+ self.Warn(f"Scene {scene_id} not found in Stash.")
+ return False
+ return self.destroy_scene(scene_id, delete_file)
+ except ConnectionResetError as e:
+ tb = traceback.format_exc()
+ errMsg = f"Exception calling [destroyScene]. Will retry; count({i}); Error: {e}\nTraceBack={tb}"
+ except Exception as e:
+ tb = traceback.format_exc()
+ errMsg = f"Exception calling [updateScene]. Will retry; count({i}); Error: {e}\nTraceBack={tb}"
+ time.sleep(sleepSecondsBetweenRetry)
+
+ def runPlugin(self, plugin_id, task_mode=None, args:dict={}, asyn=False):
"""Runs a plugin operation.
The operation is run immediately and does not use the job queue.
+ Unless asyn is True, this is a blocking call which does not return until the plugin completes.
Args:
plugin_id (ID): plugin_id
task_name (str, optional): Plugin task to perform
@@ -375,30 +794,73 @@ def run_plugin(self, plugin_id, task_mode=None, args:dict={}, asyn=False):
"args": args,
}
if asyn:
- self.Submit(self.call_GQL, query, variables)
+ self.submit(self.call_GQL, query, variables)
return f"Made asynchronous call for plugin {plugin_id}"
else:
return self.call_GQL(query, variables)
-
- def find_duplicate_scenes_diff(self, distance: PhashDistance=PhashDistance.EXACT, fragment='id', duration_diff: float=10.00 ):
- query = """
- query FindDuplicateScenes($distance: Int, $duration_diff: Float) {
- findDuplicateScenes(distance: $distance, duration_diff: $duration_diff) {
- ...SceneSlim
- }
- }
- """
- if fragment:
- query = re.sub(r'\.\.\.SceneSlim', fragment, query)
- else:
- query += "fragment SceneSlim on Scene { id }"
-
- variables = { "distance": distance, "duration_diff": duration_diff }
- result = self.call_GQL(query, variables)
- return result['findDuplicateScenes']
- # #################################################################################################
- # The below functions extends class StashInterface with functions which are not yet in the class
+ def stopJobs(self, startPos = 0, startsWith = ""):
+ taskQue = self.job_queue()
+ if taskQue != None:
+ count = 0
+ for jobDetails in taskQue:
+ count+=1
+ if count > startPos:
+ if startsWith == "" or jobDetails['description'].startswith(startsWith):
+ self.Log(f"Killing Job ID({jobDetails['id']}); description={jobDetails['description']}")
+ self.stop_job(jobDetails['id'])
+ else:
+ self.Log(f"Excluding Job ID({jobDetails['id']}); description={jobDetails['description']}; {jobDetails})")
+ else:
+ self.Log(f"Skipping Job ID({jobDetails['id']}); description={jobDetails['description']}; {jobDetails})")
+
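+ # Example (assumed input): toJson("{'id': '42'}", replaceSingleQuote=True) returns the dict {"id": "42"}.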
+ def toJson(self, data, replaceSingleQuote=False):
+ if replaceSingleQuote:
+ data = data.replace("'", '"')
+ data = data.replace("\\", "\\\\")
+ data = data.replace("\\\\\\\\", "\\\\")
+ return json.loads(data)
+
+ def isCorrectDbVersion(self, verNumber = 68):
+ results = self.sql_query("select version from schema_migrations")
+ # self.Log(results)
+ if len(results['rows']) == 0 or len(results['rows'][0]) == 0:
+ return False
+ return int(results['rows'][0][0]) == verNumber
+
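+ # Example (hypothetical IDs and names): renameFileNameInDB(42, "old.mp4", "new.mp4")
+ # returns True only when the DB schema version matches and exactly one row was updated.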
+ def renameFileNameInDB(self, fileId, oldName, newName, UpdateUsingIdOnly = False):
+ if self.isCorrectDbVersion():
+ query = f'update files set basename = "{newName}" where basename = "{oldName}" and id = {fileId};'
+ if UpdateUsingIdOnly:
+ query = f'update files set basename = "{newName}" where id = {fileId};'
+ self.Trace(f"Executing query ({query})")
+ results = self.sql_commit(query)
+ if 'rows_affected' in results and results['rows_affected'] == 1:
+ return True
+ return False
+
+ def getFileNameFromDB(self, id):
+ results = self.sql_query(f'select basename from files where id = {id};')
+ self.Trace(f"results = ({results})")
+ if len(results['rows']) == 0 or len(results['rows'][0]) == 0:
+ return None
+ return results['rows'][0][0]
+
+ # ############################################################################################################
+ # Functions which are candidates to be added to parent class use snake_case naming convention.
+ # ############################################################################################################
+ # The below functions extend class StashInterface with functions which are not yet in the class, or
+ # fixes for functions which have not yet made it into the official class.
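+ # Example (hypothetical path): metadata_scan(paths=["/media/videos"]) scans that path,
+ # applying the library's default scan flags when no explicit flags are given.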
+ def metadata_scan(self, paths:list=[], flags={}): # ToDo: Add option to add path to library if path not included when calling metadata_scan
+ query = "mutation MetadataScan($input:ScanMetadataInput!) { metadataScan(input: $input) }"
+ scan_metadata_input = {"paths": paths}
+ if flags:
+ scan_metadata_input.update(flags)
+ elif scan_config := self.get_configuration_defaults("scan { ...ScanMetadataOptions }").get("scan"):
+ scan_metadata_input.update(scan_config)
+ result = self.call_GQL(query, {"input": scan_metadata_input})
+ return result["metadataScan"]
+
def get_all_scenes(self):
query_all_scenes = """
query AllScenes {
@@ -451,6 +913,43 @@ def metadata_clean_generated(self, blobFiles=True, dryRun=False, imageThumbnails
def rename_generated_files(self):
return self.call_GQL("mutation MigrateHashNaming {migrateHashNaming}")
+
+ def find_duplicate_scenes_diff(self, distance: PhashDistance=PhashDistance.EXACT, fragment='id', duration_diff: float=10.00 ):
+ query = """
+ query FindDuplicateScenes($distance: Int, $duration_diff: Float) {
+ findDuplicateScenes(distance: $distance, duration_diff: $duration_diff) {
+ ...SceneSlim
+ }
+ }
+ """
+ if fragment:
+ query = re.sub(r'\.\.\.SceneSlim', fragment, query)
+ else:
+ query += "fragment SceneSlim on Scene { id }"
+
+ variables = { "distance": distance, "duration_diff": duration_diff }
+ result = self.call_GQL(query, variables)
+ return result['findDuplicateScenes']
+
+ # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ # Direct SQL associated functions
+ def get_file_metadata(self, data, raw_data = False): # data is either file ID or scene metadata
+ results = None
+ if data == None:
+ return results
+ if 'files' in data and len(data['files']) > 0 and 'id' in data['files'][0]:
+ results = self.sql_query(f"select * from files where id = {data['files'][0]['id']}")
+ else:
+ results = self.sql_query(f"select * from files where id = {data}")
+ if raw_data:
+ return results
+ if 'rows' in results:
+ return results['rows'][0]
+ self.Error(f"Unknown error while SQL query with data='{data}'; Results='{results}'.")
+ return None
+
+ def set_file_basename(self, id, basename):
+ return self.sql_commit(f"update files set basename = '{basename}' where id = {id}")
class mergeMetadata: # A class to merge scene metadata from source scene to destination scene
srcData = None
@@ -471,7 +970,8 @@ def merge(self, SrcData, DestData):
self.mergeItems('tags', 'tag_ids', [], excludeName=self.excludeMergeTags)
self.mergeItems('performers', 'performer_ids', [])
self.mergeItems('galleries', 'gallery_ids', [])
- self.mergeItems('movies', 'movies', [])
+ # It looks like the movies field has been removed in newer Stash versions
+ # self.mergeItems('movies', 'movies', [])
self.mergeItems('urls', listToAdd=self.destData['urls'], NotStartWith=self.stash.STASH_URL)
self.mergeItem('studio', 'studio_id', 'id')
self.mergeItem('title')
@@ -524,3 +1024,54 @@ def mergeItems(self, fieldName, updateFieldName=None, listToAdd=[], NotStartWith
listToAdd += [item['id']]
self.dataDict.update({ updateFieldName : listToAdd})
# self.stash.Trace(f"Added {fieldName} ({dataAdded}) to scene ID({self.destData['id']})", toAscii=True)
+
+class taskQueue:
+ taskqueue = None
+ def __init__(self, taskqueue):
+ self.taskqueue = taskqueue
+
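+ # Example (assumed usage): taskQueue(stash.job_queue()).tooManyScanOnTaskQueue()
+ # returns True when 5 or more "Scanning..." jobs are on the queue.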
+ def tooManyScanOnTaskQueue(self, tooManyQty = 5):
+ count = 0
+ if self.taskqueue == None:
+ return False
+ for jobDetails in self.taskqueue:
+ if jobDetails['description'] == "Scanning...":
+ count += 1
+ if count < tooManyQty:
+ return False
+ return True
+
+ def cleanJobOnTaskQueue(self):
+ for jobDetails in self.taskqueue:
+ if jobDetails['description'] == "Cleaning...":
+ return True
+ return False
+
+ def cleanGeneratedJobOnTaskQueue(self):
+ for jobDetails in self.taskqueue:
+ if jobDetails['description'] == "Cleaning generated files...":
+ return True
+ return False
+
+ def isRunningPluginTaskJobOnTaskQueue(self, taskName):
+ for jobDetails in self.taskqueue:
+ if jobDetails['description'] == "Running plugin task: {taskName}":
+ return True
+ return False
+
+ def tagDuplicatesJobOnTaskQueue(self):
+ return self.isRunningPluginTaskJobOnTaskQueue("Tag Duplicates")
+
+ def clearDupTagsJobOnTaskQueue(self):
+ return self.isRunningPluginTaskJobOnTaskQueue("Clear Tags")
+
+ def generatePhashMatchingJobOnTaskQueue(self):
+ return self.isRunningPluginTaskJobOnTaskQueue("Generate PHASH Matching")
+
+ def deleteDuplicatesJobOnTaskQueue(self):
+ return self.isRunningPluginTaskJobOnTaskQueue("Delete Duplicates")
+
+ def deleteTaggedScenesJobOnTaskQueue(self):
+ return self.isRunningPluginTaskJobOnTaskQueue("Delete Tagged Scenes")
+
diff --git a/plugins/DupFileManager/advance_options.html b/plugins/DupFileManager/advance_options.html
new file mode 100644
index 00000000..262c078d
--- /dev/null
+++ b/plugins/DupFileManager/advance_options.html
@@ -0,0 +1,1902 @@
+<!-- The HTML markup of this 1902-line file was stripped from this excerpt; the recoverable text content follows. -->
+DupFileManager Advance Menus
+
+Create report with different [Match Duplicate Distance] options
+Overrides user [Match Duplicate Distance] and [significantTimeDiff] settings
+
+Create Report with Tagging
+Create Report without Tagging
+
+ Details:
+
+ - Match Duplicate Distance Number Details
+
+ - Exact Match
+
+ - Safest and most reliable option
+ - Uses tag name _DuplicateMarkForDeletion_0
+ - Has the fewest results, and it's very rare to have false matches.
+
+ - High Match
+
+ - Recommended Setting
+ - Safe and usually reliable
+ - Uses tag name _DuplicateMarkForDeletion_1
+ - Scenes tagged by Exact Match will have both tags (_DuplicateMarkForDeletion_0 and _DuplicateMarkForDeletion_1)
+
+ - Medium Match
+
+ - Not so safe. Some false matches
+ - To reduce false matches use a time difference of .96 or higher.
+ - Uses tag name _DuplicateMarkForDeletion_2
+ - Scenes tagged by 0 and 1 will have three tags.
+
+ - Low Match
+
+ - Unsafe, and many false matches
+ - To reduce false matches use a time difference of .98 or higher.
+ - Uses tag name _DuplicateMarkForDeletion_3
+ - Scenes tagged by 0, 1, and 2 will have four tags.
+ - Has the most results, but with many false matches.
+
+
+ - Time Difference
+
+ - Significant time difference setting, where 1 equals 100% and (.9) equals 90%.
+ - This setting overrides the setting in DupFileManager_config.py.
+
+ - See setting significantTimeDiff in DupFileManager_config.py
+
+ - This setting is generally not useful for [Exact Match] reports.
+ - This is an important setting when creating Low or Medium match reports. It will reduce false matches.
+
+ - Report with tagging
+
+ - Reports with tagging will work with the above DupFileManager Advance Menu.
+ - The report can take several minutes to complete.
+ - It takes much more time to produce a report with tagging compared to creating a report without tagging.
+
+ - Report WITHOUT tagging
+
+ - Reports with no tagging can NOT be used with the above DupFileManager Advance Menu.
+ - The report is created much faster. It usually takes a few seconds to complete.
+ - This is the recommended report type to create if the DupFileManager Advance Menu is not needed or desired.
+
+
+ |
+
+
+
+
+
diff --git a/plugins/DupFileManager/requirements.txt b/plugins/DupFileManager/requirements.txt
index d503550d..19069845 100644
--- a/plugins/DupFileManager/requirements.txt
+++ b/plugins/DupFileManager/requirements.txt
@@ -1,4 +1,3 @@
stashapp-tools >= 0.2.50
-pyYAML
-watchdog
+requests
Send2Trash
\ No newline at end of file
|