From eafb1a5a8279882528b011d979d9527d3517194a Mon Sep 17 00:00:00 2001 From: Stefan Davis Date: Fri, 30 Aug 2024 04:39:44 +0000 Subject: [PATCH 01/14] clean up & make --dump-tags more readable --- libtkldet/apt_file.py | 5 +++-- libtkldet/classifier.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/libtkldet/apt_file.py b/libtkldet/apt_file.py index 4ef0bb2..fa9d8bc 100644 --- a/libtkldet/apt_file.py +++ b/libtkldet/apt_file.py @@ -31,8 +31,9 @@ def is_in_path(name: str) -> bool: def is_installed(package_name: str) -> bool: """check if a given package is installed on the HOST system (tkldev)""" pkg_installed = subprocess.run( - ["dpkg-query", "-W", "--showformat='${Status}'", package_name] - ) + ["dpkg-query", "-W", "--showformat='${Status}'", package_name], + capture_output=True + ) return pkg_installed.returncode != 0 diff --git a/libtkldet/classifier.py b/libtkldet/classifier.py index 0071ee1..830aabd 100644 --- a/libtkldet/classifier.py +++ b/libtkldet/classifier.py @@ -61,7 +61,7 @@ def pretty_print(self): """show item value as well as tags""" print(f"{self.value}") for src in self._tags: - print(src, self._tags[src]) + print("\t", src, self._tags[src]) @dataclass(frozen=True) From b1e3bb4709720d8d2c5dff67dea56dc39b827ed7 Mon Sep 17 00:00:00 2001 From: Stefan Davis Date: Fri, 30 Aug 2024 04:40:48 +0000 Subject: [PATCH 02/14] more cleanup --- tkldet_modules/appliance_confd.py | 2 +- tkldet_modules/appliance_makefile.py | 2 +- tkldet_modules/missing_module_filter.py | 2 +- tkldet_modules/ruff.py | 1 - tkldet_modules/yaml_check.py | 2 +- 5 files changed, 4 insertions(+), 5 deletions(-) diff --git a/tkldet_modules/appliance_confd.py b/tkldet_modules/appliance_confd.py index e8f14b5..0d2a072 100644 --- a/tkldet_modules/appliance_confd.py +++ b/tkldet_modules/appliance_confd.py @@ -37,7 +37,7 @@ def check(self, item: FileItem) -> Generator[Report, None, None]: line=None, column=None, location_metadata=None, - message=f"conf.d script isn't 
executable", + message="conf.d script isn't executable", fix=f"`chmod +x {item.abspath}`", source="confd linter", level=ReportLevel.ERROR, diff --git a/tkldet_modules/appliance_makefile.py b/tkldet_modules/appliance_makefile.py index 1d4c1a6..713c040 100644 --- a/tkldet_modules/appliance_makefile.py +++ b/tkldet_modules/appliance_makefile.py @@ -57,7 +57,7 @@ def check(self, item: FileItem) -> Generator[Report, None, None]: var = line.split("+=", 1)[0].strip() else: var = line.split("=", 1)[0].strip() - if not var in MK_CONFVARS: + if var not in MK_CONFVARS: suggested_var = fuzzy_suggest(var, MK_CONFVARS) if suggested_var: fix = f"did you mean {suggested_var!r} instead of {var!r} ?" diff --git a/tkldet_modules/missing_module_filter.py b/tkldet_modules/missing_module_filter.py index b08e98b..8dfb458 100644 --- a/tkldet_modules/missing_module_filter.py +++ b/tkldet_modules/missing_module_filter.py @@ -73,7 +73,7 @@ class MissingModuleFilter(ReportFilter): def filter(self, report: Report) -> Generator[Report, None, None]: if ( report.source == "pylint" - and report.raw != None + and report.raw is not None and report.raw["symbol"] == "import-error" ): match = MISSING_MODULE_RE.match(report.raw["message"]) diff --git a/tkldet_modules/ruff.py b/tkldet_modules/ruff.py index 9e23c1e..a98420a 100644 --- a/tkldet_modules/ruff.py +++ b/tkldet_modules/ruff.py @@ -17,7 +17,6 @@ import json from typing import Generator import subprocess -from os.path import join, dirname, abspath from libtkldet.linter import FileLinter, FileItem, register_linter from libtkldet.report import Report, FileReport, parse_report_level diff --git a/tkldet_modules/yaml_check.py b/tkldet_modules/yaml_check.py index 5102cd5..930640d 100644 --- a/tkldet_modules/yaml_check.py +++ b/tkldet_modules/yaml_check.py @@ -33,7 +33,7 @@ def check(self, item: FileItem) -> Generator[Report, None, None]: with open(item.abspath, "r") as fob: try: yaml.safe_load(fob) - except yaml.constructor.ConstructorError as e: + 
except yaml.constructor.ConstructorError: # ignore tags and other fancy stuff we can't easily check pass except yaml.parser.ParserError as e: From 73d51de37f2cc3d984b280e092504f95666708db Mon Sep 17 00:00:00 2001 From: Stefan Davis Date: Fri, 30 Aug 2024 05:36:33 +0000 Subject: [PATCH 03/14] Fixes & Additions - fix shebang module - add basic ability to lint arbitrary files --- libtkldet/__init__.py | 14 +++++++++++--- libtkldet/locator.py | 4 ++-- tkldet_modules/shebang.py | 2 +- tkldev-detective | 25 ++++++++++++++++++++----- 4 files changed, 34 insertions(+), 11 deletions(-) diff --git a/libtkldet/__init__.py b/libtkldet/__init__.py index 9512169..74b3ceb 100644 --- a/libtkldet/__init__.py +++ b/libtkldet/__init__.py @@ -19,12 +19,20 @@ from os.path import relpath, abspath from . import locator, common_data, classifier from .common_data import APPLIANCE_ROOT +from .error import ApplianceNotFound -def initialize(path: str): +def initialize(path: str, ignore_non_appliance: bool): """initialize everything, involves scraping makefiles, parsing plans, etc.""" - root = locator.get_appliance_root(path) - common_data.initialize_common_data(root) + try: + root = locator.get_appliance_root(path) + except ApplianceNotFound: + if not ignore_non_appliance: + raise + else: + root = path + else: + common_data.initialize_common_data(root) def yield_appliance_items() -> Generator[classifier.Item, None, None]: diff --git a/libtkldet/locator.py b/libtkldet/locator.py index 1a59c5b..ee4848c 100644 --- a/libtkldet/locator.py +++ b/libtkldet/locator.py @@ -72,14 +72,14 @@ def get_appliance_root(path: str) -> str: return root -def locator(root: str) -> Generator[str, None, None]: +def locator(root: str, ignore_non_appliance: bool) -> Generator[str, None, None]: """yields (pretty much) every file in an appliance of potential concern or a specific file only if given a path to a file inside an appliance""" if is_appliance_name(root): yield from full_appliance_locator(join(PRODUCTS_DIR, 
root)) elif is_appliance_path(root): yield from full_appliance_locator(root) - elif is_inside_appliance(root): + elif is_inside_appliance(root) or ignore_non_appliance: yield root else: raise ApplianceNotFound( diff --git a/tkldet_modules/shebang.py b/tkldet_modules/shebang.py index da863a0..365c2a6 100644 --- a/tkldet_modules/shebang.py +++ b/tkldet_modules/shebang.py @@ -32,7 +32,7 @@ def classify(self, item: FileItem): shebang = head.split(b"\n")[0].strip() if shebang: shebang = shebang.split()[0].strip() - shebang = str(shebang) + shebang = shebang.decode() if shebang.startswith("#!"): item.add_tags(self, [f"shebang:{shebang[2:]}"]) diff --git a/tkldev-detective b/tkldev-detective index a2edb7a..d93184a 100755 --- a/tkldev-detective +++ b/tkldev-detective @@ -23,18 +23,26 @@ import sys from libtkldet import locator, modman, report, colors import libtkldet import libtkldet.error +from libtkldet.error import ApplianceNotFound import libtkldet.classifier import libtkldet.linter def perform_lint( - path: str, dump_tags: bool, skip_lint: bool + path: str, dump_tags: bool, skip_lint: bool, ignore_non_appliance: bool ) -> Generator[report.Report, None, None]: - libtkldet.initialize(path) - root = locator.get_appliance_root(path) + libtkldet.initialize(path, ignore_non_appliance) + try: + root = locator.get_appliance_root(path) + except ApplianceNotFound: + if not ignore_non_appliance: + raise e + else: + root = path - for path in locator.locator(path): + + for path in locator.locator(path, ignore_non_appliance): item = libtkldet.classifier.FileItem( value=path, _tags={}, @@ -72,6 +80,12 @@ if __name__ == "__main__": action="store_true", help="don't actually perform lint, only classification", ) + lint_parser.add_argument( + "-i", + "--ignore-non-appliance", + action="store_true", + help="if no appliance found, just try to lint target anyway", + ) lint_parser.add_argument( "target", help="appliance name, path to appliance or path to file inside appliance", @@ -103,7 
+117,8 @@ if __name__ == "__main__": elif args.action == "lint": try: for report in report.filter_all_reports( - perform_lint(args.target, args.dump_tags, args.skip_lint) + perform_lint(args.target, args.dump_tags, args.skip_lint, + args.ignore_non_appliance) ): print("\n| ".join(report.format().split("\n"))) print() From 192b1acff90c5f24e434b3b8129aa9a55573ff75 Mon Sep 17 00:00:00 2001 From: Stefan Davis Date: Fri, 30 Aug 2024 05:39:54 +0000 Subject: [PATCH 04/14] fix some incorrect codes --- tkldet_modules/ruff.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tkldet_modules/ruff.py b/tkldet_modules/ruff.py index a98420a..45fd9a7 100644 --- a/tkldet_modules/ruff.py +++ b/tkldet_modules/ruff.py @@ -439,9 +439,9 @@ BLE001 = 'WARN', # blind `except` ), flake8_boolean_trap = dict( - FTB001 = None, # boolean typed positional arg in function def - FTB002 = None, # boolean default positional argument in func def - FTB003 = 'REFACTOR', # boolean positional value in func call + FBT001 = None, # boolean typed positional arg in function def + FBT002 = None, # boolean default positional argument in func def + FBT003 = 'REFACTOR', # boolean positional value in func call ), flake8_bugbear = dict( B002 = 'ERROR', # unary prefix increment/decrement From 124728b2a114cbca2d769cdbe593d99733061931 Mon Sep 17 00:00:00 2001 From: Stefan Davis Date: Fri, 30 Aug 2024 05:41:17 +0000 Subject: [PATCH 05/14] fix some ruff lints --- tkldev-detective | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/tkldev-detective b/tkldev-detective index d93184a..c8f67c0 100755 --- a/tkldev-detective +++ b/tkldev-detective @@ -20,7 +20,8 @@ from os.path import relpath, abspath from typing import Generator import sys -from libtkldet import locator, modman, report, colors +from libtkldet import locator, modman, colors +from libtkldet.report import Report, filter_all_reports import libtkldet import libtkldet.error from libtkldet.error 
import ApplianceNotFound @@ -29,20 +30,18 @@ import libtkldet.linter def perform_lint( - path: str, dump_tags: bool, skip_lint: bool, ignore_non_appliance: bool -) -> Generator[report.Report, None, None]: - - libtkldet.initialize(path, ignore_non_appliance) + root_path: str, dump_tags: bool, skip_lint: bool, ignore_non_appliance: bool +) -> Generator[Report, None, None]: + libtkldet.initialize(root_path, ignore_non_appliance) try: - root = locator.get_appliance_root(path) + root = locator.get_appliance_root(root_path) except ApplianceNotFound: if not ignore_non_appliance: - raise e + raise else: - root = path - + root = root_path - for path in locator.locator(path, ignore_non_appliance): + for path in locator.locator(root_path, ignore_non_appliance): item = libtkldet.classifier.FileItem( value=path, _tags={}, @@ -116,9 +115,13 @@ if __name__ == "__main__": elif args.action == "lint": try: - for report in report.filter_all_reports( - perform_lint(args.target, args.dump_tags, args.skip_lint, - args.ignore_non_appliance) + for report in filter_all_reports( + perform_lint( + args.target, + args.dump_tags, + args.skip_lint, + args.ignore_non_appliance, + ) ): print("\n| ".join(report.format().split("\n"))) print() From 5c364e1f8dccfaddfef5d26de133d501edb184ff Mon Sep 17 00:00:00 2001 From: Stefan Davis Date: Wed, 4 Sep 2024 04:16:25 +0000 Subject: [PATCH 06/14] Self check & Fixes run tkldev-detector over itself, fixing a LOT of failed lints in the process and a few bugs as well --- libtkldet/__init__.py | 16 +-- libtkldet/apt_file.py | 25 +++-- libtkldet/classifier.py | 56 ++++++----- libtkldet/colors.py | 48 ++++----- libtkldet/common_data.py | 41 ++++---- libtkldet/error.py | 32 +++--- libtkldet/file_util.py | 26 ++--- libtkldet/fuzzy.py | 19 ++-- libtkldet/hint_extract.py | 54 +++++----- libtkldet/linter.py | 34 ++++--- libtkldet/locator.py | 46 ++++++--- libtkldet/mkparser.py | 84 ++++++++++------ libtkldet/modman.py | 25 +++-- libtkldet/plan_resolve.py | 66 
++++++------ libtkldet/report.py | 127 ++++++++++++++---------- setup.py | 0 tkldet_modules/appliance_confd.py | 9 +- tkldet_modules/appliance_files.py | 62 ++++++++---- tkldet_modules/appliance_makefile.py | 23 +++-- tkldet_modules/filetype.py | 41 +++++++- tkldet_modules/json_check.py | 11 +- tkldet_modules/missing_module_filter.py | 6 +- tkldet_modules/pylint.py | 11 +- tkldet_modules/ruff.py | 12 ++- tkldet_modules/shebang.py | 38 ------- tkldet_modules/shellcheck.py | 5 +- tkldev-detective | 4 +- 27 files changed, 531 insertions(+), 390 deletions(-) mode change 100644 => 100755 setup.py delete mode 100644 tkldet_modules/shebang.py diff --git a/libtkldet/__init__.py b/libtkldet/__init__.py index 74b3ceb..032c7e2 100644 --- a/libtkldet/__init__.py +++ b/libtkldet/__init__.py @@ -19,24 +19,26 @@ from os.path import relpath, abspath from . import locator, common_data, classifier from .common_data import APPLIANCE_ROOT -from .error import ApplianceNotFound +from .error import ApplianceNotFoundError -def initialize(path: str, ignore_non_appliance: bool): - """initialize everything, involves scraping makefiles, parsing plans, etc.""" +def initialize(path: str, ignore_non_appliance: bool) -> None: + """Initialize everything + + Involves scraping makefiles, parsing plans, etc. 
+ """ try: root = locator.get_appliance_root(path) - except ApplianceNotFound: + except ApplianceNotFoundError: if not ignore_non_appliance: raise - else: - root = path + root = path else: common_data.initialize_common_data(root) def yield_appliance_items() -> Generator[classifier.Item, None, None]: - '''generator that yields everything "lintable"''' + """Yield everything 'lintable'""" yield from common_data.iter_packages() for path in locator.locator(APPLIANCE_ROOT): diff --git a/libtkldet/apt_file.py b/libtkldet/apt_file.py index fa9d8bc..4a203d1 100644 --- a/libtkldet/apt_file.py +++ b/libtkldet/apt_file.py @@ -15,23 +15,26 @@ # You should have received a copy of the GNU General Public License along with # tkldev-detective. If not, see . -"""relates to finding packages based on files they provide, including those not -installed""" +"""Utilities for finding packages + +Finds packages based on files they provide, including those not +installed +""" import subprocess def is_in_path(name: str) -> bool: - """check if a given name is in the path""" + """Check if a given name is in the path""" in_path = subprocess.run( - ["which", name], + ["/usr/bin/which", name], capture_output=True ) return in_path.returncode == 0 def is_installed(package_name: str) -> bool: - """check if a given package is installed on the HOST system (tkldev)""" + """Check if a given package is installed on the HOST system (tkldev)""" pkg_installed = subprocess.run( - ["dpkg-query", "-W", "--showformat='${Status}'", package_name], + ["/usr/bin/dpkg-query", "-W", "--showformat='${Status}'", package_name], capture_output=True ) return pkg_installed.returncode != 0 @@ -41,11 +44,11 @@ def is_installed(package_name: str) -> bool: def find_package_by_file(path: str) -> list[str]: - """return a list of packages that provide a file at a given path""" + """Return a list of packages that provide a file at a given path""" ret = subprocess.run( [ - "apt-file", + "/usr/bin/apt-file", "search", 
"--package-only", "-x", @@ -60,14 +63,16 @@ def find_package_by_file(path: str) -> list[str]: def find_python_package(package_name: str) -> list[str]: - """return a list of packages that provide a given python module""" + """Return a list of packages that provide a given python module""" return find_package_by_file( f"/usr/lib/python3/dist-packages/{package_name}(\\.py)?" ) def find_python_package_from_import(module_str: str) -> list[str]: - """return a list of packages that provide a given python import module, may + """Find python package from import name + + Return a list of packages that provide a given python import module, may be several modules deep (e.g. `foo.bar.baz`), attempts to find most specific python package provider """ diff --git a/libtkldet/classifier.py b/libtkldet/classifier.py index 830aabd..c471379 100644 --- a/libtkldet/classifier.py +++ b/libtkldet/classifier.py @@ -22,7 +22,7 @@ """ from dataclasses import dataclass -from typing import Generator, Iterable, Type, cast +from typing import Generator, Iterable, cast from os.path import dirname @@ -46,19 +46,19 @@ class Item: @property def tags(self) -> Generator[str, None, None]: - """ yields all tags, may contain duplicates """ + """Yields all tags, may contain duplicates""" for tags in self._tags.values(): yield from tags - def add_tags(self, classifier: "Classifier", tags: Iterable[str]): - """convenience method for adding tags to an item""" + def add_tags(self, classifier: "Classifier", tags: Iterable[str]) -> None: + """Add tags to an item""" name = classifier.__class__.__name__ if name not in self._tags: self._tags[name] = set() self._tags[name].update(tags) - def pretty_print(self): - """show item value as well as tags""" + def pretty_print(self) -> None: + """Show item value as well as tags""" print(f"{self.value}") for src in self._tags: print("\t", src, self._tags[src]) @@ -111,29 +111,32 @@ class Classifier: classifier can leverage information provided (or omitted) by previous 
classifiers""" - ItemType: Type[Item] = Item + ItemType: type[Item] = Item - def do_classify(self, item: Item): - """actually perform a classification so long as the concrete item type - is compatible with this classifier""" + def do_classify(self, item: Item) -> None: + """Perform classification + + Perform a classification so long as the concrete item type + is compatible with this classifier + """ if isinstance(item, self.ItemType): self.classify(item) - def classify(self, item: Item): - """abstract method to be implemented by subclass""" - raise NotImplementedError() + def classify(self, item: Item) -> None: + """Classify exact item type""" + raise NotImplementedError class FileClassifier(Classifier): """Specialized classifer which operates on "FileItem"s""" - ItemType: Type[Item] = FileItem + ItemType: type[Item] = FileItem class PackageClassifier(Classifier): """Specialized classifier which operates on "PackageItem"s""" - ItemType: Type[Item] = PackageItem + ItemType: type[Item] = PackageItem class ExactPathClassifier(FileClassifier): @@ -145,7 +148,7 @@ class ExactPathClassifier(FileClassifier): tags: list[str] "exact tags to add to matched item" - def classify(self, item: Item): + def classify(self, item: Item) -> None: item = cast(FileItem, item) # item will definitely be subclass of # cls.ItemType, just need to convince the type checker @@ -166,7 +169,7 @@ class SubdirClassifier(FileClassifier): tags: list[str] "exact tags to add to matched item" - def classify(self, item: Item): + def classify(self, item: Item) -> None: item = cast(FileItem, item) # item will definitely be subclass of # cls.ItemType, just need to convince the type checker @@ -175,23 +178,24 @@ def classify(self, item: Item): if item.relpath.startswith(self.path): # XXX doesn't handle any `..` in path, hopefully doesn't matter item.add_tags(self, self.tags[:]) - else: - if dirname(item.relpath) == self.path: - item.add_tags(self, self.tags[:]) + elif dirname(item.relpath) == self.path: + 
item.add_tags(self, self.tags[:]) -_CLASSIFIERS: list[Type[Classifier]] = [] +_CLASSIFIERS: list[type[Classifier]] = [] -def register_classifier(classifier: Type[Classifier]): - """registers a classifier for use in tkldev-detective, must be called on all - classifiers added""" +def register_classifier(classifier: type[Classifier]) -> type[Classifier]: + """Register a classifier + + This must be called on classifiers added + """ _CLASSIFIERS.append(classifier) return classifier def get_weighted_classifiers() -> list[Classifier]: - """returns instances of registered classifiers in order of weight""" + """Return instances of registered classifiers in order of weight""" return sorted( - map(lambda x: x(), _CLASSIFIERS), key=lambda x: (x.WEIGHT, x.__class__.__name__) + (c() for c in _CLASSIFIERS), key=lambda x: (x.WEIGHT, x.__class__.__name__) ) diff --git a/libtkldet/colors.py b/libtkldet/colors.py index 9bf0f5e..963c001 100644 --- a/libtkldet/colors.py +++ b/libtkldet/colors.py @@ -38,26 +38,26 @@ "RESET", ] -_COLOR_ASCII_CODES = dict( - BLACK="\x1b[30m", - RED="\x1b[31m", - GREEN="\x1b[32m", - YELLOW="\x1b[33m", - BLUE="\x1b[34m", - MAGENTA="\x1b[35m", - CYAN="\x1b[36m", - WHITE="\x1b[37m", - BRIGHT_BLACK="\x1b[90m", - BRIGHT_RED="\x1b[91m", - BRIGHT_GREEN="\x1b[92m", - BRIGHT_YELLOW="\x1b[93m", - BRIGHT_BLUE="\x1b[94m", - BRIGHT_MAGENTA="\x1b[95m", - BRIGHT_CYAN="\x1b[96m", - BRIGHT_WHITE="\x1b[97m", - RESET="\x1b[0m", - BOLD="\x1b[1m", -) +_COLOR_ASCII_CODES = { + "BLACK": "\x1b[30m", + "RED": "\x1b[31m", + "GREEN": "\x1b[32m", + "YELLOW": "\x1b[33m", + "BLUE": "\x1b[34m", + "MAGENTA": "\x1b[35m", + "CYAN": "\x1b[36m", + "WHITE": "\x1b[37m", + "BRIGHT_BLACK": "\x1b[90m", + "BRIGHT_RED": "\x1b[91m", + "BRIGHT_GREEN": "\x1b[92m", + "BRIGHT_YELLOW": "\x1b[93m", + "BRIGHT_BLUE": "\x1b[94m", + "BRIGHT_MAGENTA": "\x1b[95m", + "BRIGHT_CYAN": "\x1b[96m", + "BRIGHT_WHITE": "\x1b[97m", + "RESET": "\x1b[0m", + "BOLD": "\x1b[1m", +} _COLOR_GLOBALS = globals() @@ -81,9 +81,11 
@@ BOLD: str -def set_colors_enabled(enabled: bool): - """sets color globals to ANSI color codes if `enabled` otherwise sets them - to empty strings""" +def set_colors_enabled(enabled: bool) -> None: + """Set color globals to ANSI color codes + + If not enabled, sets them to empty strings + """ for color in _COLORS: if enabled: _COLOR_GLOBALS[color] = _COLOR_ASCII_CODES[color] diff --git a/libtkldet/common_data.py b/libtkldet/common_data.py index c9d9a13..c938945 100644 --- a/libtkldet/common_data.py +++ b/libtkldet/common_data.py @@ -18,7 +18,7 @@ import os from os.path import join, isfile -from typing import Generator, Optional +from typing import Generator from .plan_resolve import parse_plan, PlanEntry from .locator import iter_plan from .classifier import PackageItem @@ -29,10 +29,9 @@ _INCLUDED_PLAN_CACHE: set[str] = set() _FAB_DATA: CommonFabBuildData - -def initialize_common_data(appliance_root: str): - """parse plan & makefile and initialize data which utilizes it""" - global APPLIANCE_ROOT, _PLAN_RESOLVE_CACHE, _INCLUDED_PLAN_CACHE, _FAB_DATA +def initialize_common_data(appliance_root: str) -> None: + """Parse plan & makefile and initialize data which utilizes it""" + global APPLIANCE_ROOT, _FAB_DATA APPLIANCE_ROOT = appliance_root for plan_path in iter_plan(appliance_root): @@ -47,20 +46,19 @@ def initialize_common_data(appliance_root: str): def is_package_to_be_installed(package_name: str) -> bool: - """check if an apt package will be installed via plan""" - for entry in _PLAN_RESOLVE_CACHE: - if entry.package_name == package_name: - return True - return False - + """Check if an apt package will be installed via plan""" + return any( + entry.package_name == package_name + for entry in _PLAN_RESOLVE_CACHE + ) def is_common_plan_included(plan_name: str) -> bool: - """check if a common plan (by file name) is included in appliance build """ + """Check if a common plan (by file name) is included in appliance build""" return join("/turnkey/fab/common/plans", 
plan_name) in _INCLUDED_PLAN_CACHE def iter_packages() -> Generator[PackageItem, None, None]: - """ iterate over all packages which will be installed """ + """Iterate over all packages which will be installed""" for entry in _PLAN_RESOLVE_CACHE: yield PackageItem( value=entry.package_name, _tags={}, plan_stack=entry.include_stack[:] @@ -68,29 +66,32 @@ def iter_packages() -> Generator[PackageItem, None, None]: def get_common_overlays() -> list[str]: - """ return a list of all common overlays included in this appliance """ + """Return a list of all common overlays included in this appliance""" return _FAB_DATA.overlays[:] def get_common_conf() -> list[str]: - """ return a list of all common conf scripts included in this appliance """ + """Return a list of all common conf scripts included in this appliance""" return _FAB_DATA.conf[:] def get_common_removelists() -> list[str]: - """ return a list of all common removelists included in this appliance """ + """Return a list of all common removelists included in this appliance""" return _FAB_DATA.removelists[:] def get_common_removelists_final() -> list[str]: - """ return a list of all common final removelists included in this appliance """ + """Return a list of all common final removelists included in this appliance""" return _FAB_DATA.removelists_final[:] -def get_path_in_common_overlay(path: str) -> Optional[str]: - """check if a given path (expressed as an absolute path, where it would be +def get_path_in_common_overlay(path: str) -> str | None: + """Get overlay path from absolute path + + Check if a given path (expressed as an absolute path, where it would be placed in a build) is included in build, if so the path to the file/dir IN - the common overlay is returned. Otherwise None is returned.""" + the common overlay is returned. Otherwise None is returned. 
+ """ path = path.lstrip("/") for common in _FAB_DATA.overlays: common_path = join( diff --git a/libtkldet/error.py b/libtkldet/error.py index 70b8727..976b9b4 100644 --- a/libtkldet/error.py +++ b/libtkldet/error.py @@ -18,31 +18,29 @@ class TKLDevDetectiveError(Exception): - '''Base class for tkldev-detective specific errors''' - ... + """Base class for tkldev-detective specific errors""" -class ApplianceNotFound(TKLDevDetectiveError): - """appliance was not found for some reason (likely path/app name was - incorrect)""" +class ApplianceNotFoundError(TKLDevDetectiveError): + """Appliance was not found - ... + Likely path/app name was incorrect + """ -class PlanNotFound(TKLDevDetectiveError): - """a plan could not be included (likely include name is incorrect)""" +class PlanNotFoundError(TKLDevDetectiveError): + """A plan could not be included - ... + Likely include name is incorrect + """ -class UnknownPlanDirective(TKLDevDetectiveError): - """encountered some unexpected cpp directive in plan""" +class UnknownPlanDirectiveError(TKLDevDetectiveError): + """Encountered some unexpected CPP directive in plan""" - ... +class InvalidPlanError(TKLDevDetectiveError): + """Plan appears to not be valid -class InvalidPlan(TKLDevDetectiveError): - """plan appears to not be valid (mismatched #if* and #endif directives - likely)""" - - ... + Mismatched #if* and #endif directives likely + """ diff --git a/libtkldet/file_util.py b/libtkldet/file_util.py index ce7e30e..468280e 100644 --- a/libtkldet/file_util.py +++ b/libtkldet/file_util.py @@ -15,16 +15,15 @@ # You should have received a copy of the GNU General Public License along with # tkldev-detective. If not, see . 
-""" -Utilities relating to classification/linting files -""" -from typing import Optional +"""Utilities relating to classification/linting files""" +def position_from_char_offset(path: str, offset: int) -> tuple[int, int] | None: + """Get column/line from offset into file -def position_from_char_offset(path: str, offset: int) -> Optional[tuple[int, int]]: - """given an offset into a file (decodes), returns the line and column numbers - respectively, expressed as a tuple. If offset is invalid (such as too large - for file) None is returned""" + Given an offset into a file, returns the line and column numbers + respectively, expressed as a tuple. If offset is invalid (such as too + large for file) None is returned + """ line = 0 col = 0 with open(path, "r") as fob: @@ -40,10 +39,13 @@ def position_from_char_offset(path: str, offset: int) -> Optional[tuple[int, int return None -def position_from_byte_offset(path: str, offset: int) -> Optional[tuple[int, int]]: - """given an offset into a file (raw), returns the line and column numbers - respectively, expressed as a tuple. If offset is invalid (such as too large - for file) None is returned""" +def position_from_byte_offset(path: str, offset: int) -> tuple[int, int] | None: + """Get column/line from offset into file in binary mode + + Given an offset into a file (in binary mode), returns the line and column + numbers respectively, expressed as a tuple. If offset is invalid (such as + too large for file) None is returned + """ line = 0 col = 0 with open(path, "rb") as fob: diff --git a/libtkldet/fuzzy.py b/libtkldet/fuzzy.py index ab43256..47560d0 100644 --- a/libtkldet/fuzzy.py +++ b/libtkldet/fuzzy.py @@ -15,17 +15,17 @@ # You should have received a copy of the GNU General Public License along with # tkldev-detective. If not, see . 
-"""very basic fuzzy search""" - -from typing import Optional +"""Very basic fuzzy search""" MAX_DIFF = 3 "words that differ more than MAX_DIFF will not be suggested" def fuzzy_diff(x: str, y: str) -> int: - """given 2 string values, calculate a 'difference' between them, expressed - as an integer""" + """Calculate difference between two strings + + Return value has no objective meaning, only for comparison + """ diff = 0 for i in range(max(len(x), len(y))): if len(x) <= i or len(y) <= i: @@ -35,8 +35,13 @@ def fuzzy_diff(x: str, y: str) -> int: return diff -def fuzzy_suggest(check: str, options: list[str], max_diff=MAX_DIFF) -> Optional[str]: - """given a 'check' value, and a list of valid options, find the option +def fuzzy_suggest( + check: str, + options: list[str], + max_diff: int=MAX_DIFF) -> str | None: + """Suggest a string from given options + + Given a 'check' value, and a list of valid options, find the option closest to the 'check' value, given that it's 'difference' (calculated by 'fuzzy_diff' is less than or equal to max_diff """ diff --git a/libtkldet/hint_extract.py b/libtkldet/hint_extract.py index 00b26d8..5bb3dd3 100644 --- a/libtkldet/hint_extract.py +++ b/libtkldet/hint_extract.py @@ -15,32 +15,34 @@ # You should have received a copy of the GNU General Public License along with # tkldev-detective. If not, see . -""" -Code for extracting parts of files and annotating them for visualizing lints -""" -from typing import Union +"""Utilities for annotating parts of files""" from . 
import colors as co H_PAD = 6 # padding (for hint lines to account for line numbers) def extract_line(path: str, row: int) -> str: - """extract a single line from a file""" + """Extract a single line from a file""" with open(path, "r") as fob: for (i, line) in enumerate(fob): - line = line.rstrip() if i == row: - return str(i + 1).rjust(4) + ": " + co.GREEN + line + co.RESET + return ( + str(i + 1).rjust(4) + + ": " + + co.GREEN + + line.rstrip() + + co.RESET + ) return "" def extract_line_col(path: str, row: int, col: int) -> list[str]: - """extract a single line from a file, but also point at a specific column""" + """Annotate line with specific column""" return [extract_line(path, row), co.RED + "^".rjust(col + H_PAD + 1) + co.RESET] def extract_line_cols(path: str, row: int, col_span: tuple[int, int]) -> list[str]: - """extract a single line from a file, but also point at a span of columns""" + """Annotate line with span of columns""" min_col, max_col = col_span return [ extract_line(path, row), @@ -53,12 +55,11 @@ def extract_line_cols(path: str, row: int, col_span: tuple[int, int]) -> list[st def extract_lines(path: str, row_span: tuple[int, int]) -> list[str]: - """extract a multiple lines from a file""" + """Extract a multiple lines from a file""" min_row, max_row = row_span out = [] with open(path, "r") as fob: for (i, line) in enumerate(fob): - line = line.rstrip() if i in (min_row, max_row): out.append( co.RED @@ -66,7 +67,7 @@ def extract_lines(path: str, row_span: tuple[int, int]) -> list[str]: + str(i + 1).rjust(4) + ":" + co.GREEN - + line + + line.rstrip() + co.RESET ) elif min_row < i < max_row: @@ -76,7 +77,7 @@ def extract_lines(path: str, row_span: tuple[int, int]) -> list[str]: + str(i + 1).rjust(4) + ":" + co.GREEN - + line + + line.rstrip() + co.RESET ) return out @@ -85,15 +86,20 @@ def extract_lines(path: str, row_span: tuple[int, int]) -> list[str]: def extract_lines_cols( path: str, row_span: tuple[int, int], col_span: tuple[int, int] ) 
-> list[str]: - """extract a span of characters in a file over multiple lines""" + """Extract a span of characters in a file over multiple lines""" min_row, max_row = row_span min_col, max_col = col_span out = [] with open(path, "r") as fob: for (i, line) in enumerate(fob): - line = line.rstrip() if min_row <= i <= max_row: - out.append(str(i + 1).rjust(4) + ":" + co.GREEN + line + co.RESET) + out.append( + str(i + 1).rjust(4) + + ":" + + co.GREEN + + line.rstrip() + + co.RESET + ) if i == min_row: out.append(co.RED + "^".rjust(min_col + H_PAD) + co.RESET) elif i > min_row: @@ -112,11 +118,14 @@ def extract_lines_cols( def format_extract( path: str, - row_span: Union[tuple[int, int], int], - col_span: Union[tuple[int, int], int, None], + row_span: tuple[int, int] | int, + col_span: tuple[int, int] | int | None, ) -> list[str]: - """given a row or span of rows and optionally a column or span of columns - return an annotated segment of the specified file""" + """Annotate segment of file + + Given a row or span of rows and optionally a column or span of columns + return an annotated segment of the specified file + """ if isinstance(row_span, tuple) and row_span[0] == row_span[1]: row_span = row_span[0] @@ -140,6 +149,5 @@ def format_extract( if isinstance(col_span, int): return extract_lines(path, row_span) print(row_span, col_span) - raise NotImplementedError( - "some combination of 0/1/more rows/cols is not supported!" - ) + error_message = "some combination of 0/1/more rows/cols is not supported!" 
+ raise NotImplementedError(error_message) diff --git a/libtkldet/linter.py b/libtkldet/linter.py index 3346935..79d4e60 100644 --- a/libtkldet/linter.py +++ b/libtkldet/linter.py @@ -20,7 +20,7 @@ code here provides interface for modules to provide linting """ -from typing import Generator, Type, Optional +from typing import Generator from .classifier import Item, FileItem from .report import Report @@ -40,10 +40,10 @@ class Linter: WEIGHT: int = 100 - ItemType: Type[Item] = Item + ItemType: type[Item] = Item def should_check(self, item: Item) -> bool: - """actually performs check to see if the linter should run on this item. + """Actually performs check to see if the linter should run on this item if `ENABLE_TAGS` is empty, run lint on all items except those that have tags in `DISABLE_TAGS` @@ -71,38 +71,40 @@ def should_check(self, item: Item) -> bool: return False return True - def do_check(self, item: Item) -> Optional[Generator[Report, None, None]]: - """runs lint, if `should_check` returns True, used internally""" + def do_check(self, item: Item) -> Generator[Report, None, None] | None: + """Run lint, if `should_check` returns True, used internally""" if isinstance(item, self.ItemType) and self.should_check(item): return self.check(item) return None def check(self, item: Item) -> Generator[Report, None, None]: - """abstract method, actually runs lint, to be implemented by subclass""" - raise NotImplementedError() + """Actually run lint""" + raise NotImplementedError class FileLinter(Linter): - """ Specific linter that operates only on FileItems """ + """Specific linter that operates only on FileItems""" - ItemType: Type[Item] = FileItem + ItemType: type[Item] = FileItem def check(self, item: Item) -> Generator[Report, None, None]: - raise NotImplementedError() + raise NotImplementedError -_LINTERS: list[Type[Linter]] = [] +_LINTERS: list[type[Linter]] = [] -def register_linter(linter: Type[Linter]): - """registers a linter for use in tkldev-detective, must 
be called on all - linters added""" +def register_linter(linter: type[Linter]) -> type[Linter]: + """Register a linter + + Must be called on all linters added + """ _LINTERS.append(linter) return linter def get_weighted_linters() -> list[Linter]: - """returnss instances of registered classifiers in order of weight""" + """Return instances of registered classifiers in order of weight""" return sorted( - map(lambda x: x(), _LINTERS), key=lambda x: (x.WEIGHT, x.__class__.__name__) + (x() for x in _LINTERS), key=lambda x: (x.WEIGHT, x.__class__.__name__) ) diff --git a/libtkldet/locator.py b/libtkldet/locator.py index ee4848c..e85f2e8 100644 --- a/libtkldet/locator.py +++ b/libtkldet/locator.py @@ -20,14 +20,14 @@ from os.path import join, normpath, basename, isdir, isfile from glob import iglob -from typing import Generator, Optional +from typing import Generator -from .error import ApplianceNotFound +from .error import ApplianceNotFoundError PRODUCTS_DIR = "/turnkey/fab/products" -def is_appliance_path(path: str): +def is_appliance_path(path: str) -> bool: """ is path, a path to an appliance? """ path = normpath(path) if path == join(PRODUCTS_DIR, basename(path)): @@ -35,12 +35,12 @@ def is_appliance_path(path: str): return False -def is_appliance_name(name: str): +def is_appliance_name(name: str) -> bool: """ is name, the name of an existing appliance on tkldev? 
""" return "/" not in name and isdir(join(PRODUCTS_DIR, name)) -def is_inside_appliance(path: str): +def is_inside_appliance(path: str) -> bool: """ is path, a path to a file inside an appliance """ path = normpath(path) if not path.startswith(PRODUCTS_DIR + "/"): @@ -50,10 +50,13 @@ def is_inside_appliance(path: str): def get_appliance_root(path: str) -> str: - """Given a path to appliance, file inside appliance or appliance name, - return absolute path to the appliance""" + """Get appliance root from path - root: Optional[str] = None + Given a path to appliance, file inside appliance or appliance name, + return absolute path to the appliance + """ + + root: str | None = None if is_appliance_name(path): root = join(PRODUCTS_DIR, path) @@ -65,33 +68,44 @@ def get_appliance_root(path: str) -> str: root = join(PRODUCTS_DIR, appliance_name) if root is None or not isfile(join(root, "Makefile")): - raise ApplianceNotFound( + error_message = ( "input does not appear to be an appliance name, path to an appliance" " or path to a file inside of an appliance" ) + raise ApplianceNotFoundError(error_message) return root def locator(root: str, ignore_non_appliance: bool) -> Generator[str, None, None]: - """yields (pretty much) every file in an appliance of potential concern - or a specific file only if given a path to a file inside an appliance""" + """Yield most files inside appliance + + Yields almost every file in an appliance of potential concern + or a specific file only if given a path to a file inside an appliance + """ if is_appliance_name(root): yield from full_appliance_locator(join(PRODUCTS_DIR, root)) elif is_appliance_path(root): yield from full_appliance_locator(root) elif is_inside_appliance(root) or ignore_non_appliance: - yield root + yield from everything_locator(root) else: - raise ApplianceNotFound( + error_message = ( "input does not appear to be an appliance name, path to an" " appliance or path to a file inside of an appliance" ) + raise 
ApplianceNotFoundError(error_message) +def everything_locator(root: str) -> Generator[str, None, None]: + """Yield everything, appliance or not""" + if isfile(root): + yield root + else: + yield from iglob(join(root, '**')) def full_appliance_locator(root: str) -> Generator[str, None, None]: - """yields (pretty much) every file in an appliance of potential concern""" - yield from map( - lambda x: join(root, x), ["Makefile", "changelog", "README.rst", "removelist"] + """Yield (pretty much) every file in an appliance of potential concern""" + yield from ( + join(root, x) for x in ["Makefile", "changelog", "README.rst", "removelist"] ) yield from iter_conf(root) yield from iter_plan(root) diff --git a/libtkldet/mkparser.py b/libtkldet/mkparser.py index 505e390..cd9f472 100644 --- a/libtkldet/mkparser.py +++ b/libtkldet/mkparser.py @@ -15,21 +15,27 @@ # You should have received a copy of the GNU General Public License along with # tkldev-detective. If not, see . -"""tools to extract variable definitions from makefiles, purpose built for +"""Utilities for parsing and extracting information from makefiles + +Extracts variable definitions from makefiles, purpose built for fab tool-chain on tkldev, so ignores tests & definitions, butchers most -functions, doesn't understand targets, makes more unspoken assumptions and probably -produces a lot of other erroneous output (if used for general makefile parsing) +functions, doesn't understand targets, makes more unspoken assumptions and +probably produces a lot of other erroneous output (if used for general +makefile parsing) """ -from typing import Optional, Union import typing from dataclasses import dataclass import os ASSIGNMENT_OPERATORS = ["?=", ":=", "+=", "="] CHECKS = ["ifeq", "ifneq", "ifdef", "ifndef"] -MAKEFILE_ENV = {"FAB_PATH": os.environ.get("FAB_PATH", '/turnkey/fab'), "FAB_SHARE_PATH": "/usr/share/fab"} +MAKEFILE_ENV = { + "FAB_PATH": os.environ.get("FAB_PATH", '/turnkey/fab'), + "FAB_SHARE_PATH": 
"/usr/share/fab" +} def split_value(raw: str) -> list[str]: + """Split value by space""" chunks = [''] bracket_depth = 0 for c in raw: @@ -48,8 +54,10 @@ def split_value(raw: str) -> list[str]: return chunks -def parse_assignment(line: str) -> Optional[tuple[str, str, str]]: - """attempt to parse a makefile assignment operation, +def parse_assignment(line: str) -> tuple[str, str, str] | None: + """Parse assignment line + + Attempt to parse a makefile assignment operation, if successful return tuple of (variable_name, operator, variable_value) """ for operator in ASSIGNMENT_OPERATORS: @@ -63,7 +71,7 @@ def parse_assignment(line: str) -> Optional[tuple[str, str, str]]: @dataclass class CommonFabBuildData: - "holds lists of paths of each component type included from common" + """Hold lists of paths of each component type included from common""" overlays: list[str] conf: list[str] @@ -72,20 +80,24 @@ class CommonFabBuildData: @dataclass class LazyVar: - "a value referencing a variable we havn't resolved yet" + """A value referencing a variable we haven't resolved yet""" + name: str -ValueList = list[Union[str, LazyVar]] +ValueList = list[str | LazyVar] @dataclass class MutMakefileData: - """holds variables set by makefiles""" + """Hold variables set by makefiles""" variables: dict[str, ValueList] included: list[str] def resolve_var(self, value: str) -> ValueList: - """expand make variables, env variables and split into multiple values""" + """Expand variables + + Expands makefile and env variables, then split into multiple values + """ out_var: list[str | LazyVar] = [] if value.startswith("$(") and value.endswith(")"): @@ -99,8 +111,8 @@ def resolve_var(self, value: str) -> ValueList: out_var.extend(split_value(value)) return out_var - def assign_var(self, name: str, operator: str, values: str): - """process a variable assignment""" + def assign_var(self, name: str, operator: str, values: str) -> None: + """Process a variable assignment""" if operator == "+=": # add to 
existing definition if name not in self.variables: @@ -120,18 +132,22 @@ def assign_var(self, name: str, operator: str, values: str): for value in split_value(values): self.variables[name].extend(self.resolve_var(value)) else: - raise ValueError(f"unknown operator {operator!r}") + error_message = f"unknown operator {operator!r}" + raise ValueError(error_message) + + def finish(self) -> 'MakefileData': + """Return concrete class - def finish(self): - ''' resolve unresolved variables and return a concrete version of this - class with simpler typing ''' + Resolve unresolved variables and return a concrete version of this + class with simpler typing + """ # variables in make are often not resolved immediately, and such the # actual value of a variable may not be available until parsing has # finished # # furthermore values may resolve to other variables that also have not # yet been resolved and so on. - # + # # smart ways of handling this include chains of dependent variables or # handling the semantic difference between `=`, `:=` and similar # operations. 
@@ -160,21 +176,27 @@ class with simpler typing ''' new_variables = {key: list(values) for key, values in self.variables.items()} new_included = list(self.included) - return MakefileData(typing.cast(dict[str, list[str]], new_variables), new_included) + return MakefileData( + typing.cast(dict[str, list[str]], new_variables), + new_included + ) @dataclass class MakefileData(MutMakefileData): - """holds variables set by makefiles""" + """Holds variables set by makefiles""" variables: dict[str, list[str]] included: list[str] def __getitem__(self, key: str) -> list[str]: + """Get a variable by name""" return self.variables[key] def to_fab_data(self) -> CommonFabBuildData: - """return just the high level data relating to included overlays, conf - and removelists""" + """Return high level appliance data + + Returns included overlays, conf and removelists + """ return CommonFabBuildData( overlays=[*self["COMMON_OVERLAYS"]], conf=[*self["COMMON_CONF"]], @@ -183,18 +205,24 @@ def to_fab_data(self) -> CommonFabBuildData: ) def to_dict(self) -> dict: + """Return contents as a dictionary""" return { 'variables': self.variables, 'included': self.included } -def parse_makefile( - path: str, makefile_data: Optional[MakefileData] = None -) -> MakefileData: - """attempts to naively get all variables defined in makefile tree. This +# ignore warnings about complexity, this is just a complex job and breaking it +# down further would only obfuscate what it's doing. +def parse_makefile( # noqa: C901, PLR0912 + path: str, makefile_data: MakefileData | None = None + ) -> MakefileData: + """Get all variables in makefile including included makefiles + + Attempts to naively get all variables defined in makefile tree. 
This function is recursive and makefile_data is used when including other - makefiles""" + makefiles + """ if makefile_data is None: makefile_data = MakefileData({}, []) diff --git a/libtkldet/modman.py b/libtkldet/modman.py index db650a8..947995f 100644 --- a/libtkldet/modman.py +++ b/libtkldet/modman.py @@ -30,9 +30,14 @@ MOD_PATH = [ dirname(dirname(abspath(__file__))), '/usr/share/tkldev-detective'] -def _load_all_modules_from_dir(root: str): +def _load_all_modules_from_dir(root: str) -> None: print( - co.BRIGHT_BLACK + co.BOLD + "load all modules from", root + co.RESET, file=sys.stderr + co.BRIGHT_BLACK + + co.BOLD + + "load all modules from", + root + + co.RESET, + file=sys.stderr ) root = abspath(root) for filename in listdir(root): @@ -45,13 +50,21 @@ def _load_all_modules_from_dir(root: str): assert spec.loader is not None spec.loader.exec_module(module) - print(co.BRIGHT_BLACK + co.BOLD + "loaded", spec.name + co.RESET, file=sys.stderr) + print( + co.BRIGHT_BLACK + + co.BOLD + + "loaded", + spec.name + + co.RESET, + file=sys.stderr + ) -def load_modules(): - """load all tkldev-detective modules""" +def load_modules() -> None: + """Load all tkldev-detective modules""" for _path in (join(x, "tkldet_modules") for x in MOD_PATH): if exists(_path): _load_all_modules_from_dir(_path) return - raise TKLDevDetectiveError(f"Mod path 'tkldet_modules' not found - tried {MOD_PATH}") + error_message = f"Mod path 'tkldet_modules' not found - tried {MOD_PATH}" + raise TKLDevDetectiveError(error_message) diff --git a/libtkldet/plan_resolve.py b/libtkldet/plan_resolve.py index 1dff88f..e533e05 100644 --- a/libtkldet/plan_resolve.py +++ b/libtkldet/plan_resolve.py @@ -19,8 +19,9 @@ from os.path import join, isfile from dataclasses import dataclass -from typing import Optional -from .error import PlanNotFound, UnknownPlanDirective, InvalidPlan +from .error import ( + PlanNotFoundError, UnknownPlanDirectiveError, InvalidPlanError +) static_vars = {"KERNEL": "", "DEBIAN": "", 
"AMD64": ""} @@ -45,7 +46,7 @@ class PlanEntry: """ def get_plan_path(self) -> str: - """path to plan file which contains this package""" + """Path to plan file which contains this package""" return self.include_stack[-1] @@ -55,13 +56,11 @@ def _include_plan( for path in include_paths: if isfile(join(path, name)): return _parse_plan(join(path, name), include_paths, plan_stack) - raise PlanNotFound(name) + raise PlanNotFoundError(name) def _remove_multiline_comments(raw: str) -> str: - """ - removes multiline cpp comments (in the form /* I'm a comment */) - """ + """Remove multiline cpp comments (in the form /* I'm a comment */)""" out = "" comment_depth = 0 @@ -76,28 +75,35 @@ def _remove_multiline_comments(raw: str) -> str: elif char == "*": comment_begun = True - else: - if comment_begun: - if char == "*": - comment_depth += 1 - else: - out += "/" + char - comment_begun = False - elif char == "/": - comment_begun = True + elif comment_begun: + if char == "*": + comment_depth += 1 else: - out += char + out += "/" + char + comment_begun = False + elif char == "/": + comment_begun = True + else: + out += char return out -def _parse_plan( - path: str, include_paths: list[str], plan_stack: Optional[list[str]] = None +# ignoring lints in this function: +# - C901 (too complex), breaking this down further +# would obfuscate what it does +# - PLW0912 (too many branches), as above +# - PLW2901 (iteration variable overwritten), variable's meaning does not +# change with overwrite. + +def _parse_plan( # noqa: C901, PLR0912 + path: str, include_paths: list[str], plan_stack: list[str] | None = None ) -> list[PlanEntry]: - """Parse a plan (uses cpp, but notably does not use *most* cpp - functionality). + """Parse a plan - This code will not work on *most* cpp related projects""" + (uses cpp, but notably does not use *most* cpp functionality). 
+ This code will not work on *most* cpp related projects + """ if plan_stack is None: plan_stack = [path] @@ -122,22 +128,22 @@ def _parse_plan( for line in data.splitlines(): # remove single line comment if "//" in line: - line = line.split("//", 1)[0] + line = line.split("//", 1)[0] # noqa: PLW2901 # honestly would've thought hashes in cpp code wouldn't work like this, # but apparently it does if not line.startswith("#") and "#" in line: - line = line.split("#", 1)[0] + line = line.split("#", 1)[0] # noqa: PLW2901 - line = line.strip() + line = line.strip() # noqa: PLW2901 if not line: continue if line.startswith("#endif"): if not cond_stack: - raise InvalidPlan( - f"unbalanced #if* and #endif directives in plan {path}" - ) + error_message = \ + f"unbalanced #if* and #endif directives in plan {path}" + raise InvalidPlanError(error_message) cond_stack.pop() continue @@ -160,7 +166,7 @@ def _parse_plan( ) ) elif line.startswith("#"): - raise UnknownPlanDirective(line) + raise UnknownPlanDirectiveError(line) else: assert "=" not in line, "assumption broken: '=' in plan" packages.append(PlanEntry(line.strip(), plan_stack[:])) @@ -174,5 +180,5 @@ def _parse_plan( def parse_plan(path: str) -> list[PlanEntry]: - """parse a plan and return a plan entry for each package """ + """Parse a plan and return a plan entry for each package""" return _parse_plan(path, ["/turnkey/fab/common/plans"]) diff --git a/libtkldet/report.py b/libtkldet/report.py index 68406ae..2220c2a 100644 --- a/libtkldet/report.py +++ b/libtkldet/report.py @@ -15,12 +15,14 @@ # You should have received a copy of the GNU General Public License along with # tkldev-detective. If not, see . 
-"""Encapsulates "reports", these are issues, warnings or notes about "Item"s -produced by "Linter"s""" +"""Encapsulates "reports" + +these are issues, warnings or notes about "Item"s produced by "Linter"s +""" from dataclasses import dataclass from enum import Enum import enum -from typing import Union, Generator, Type, Iterable +from typing import Generator, Iterable import textwrap from .classifier import Item, FileItem @@ -30,15 +32,18 @@ @dataclass class Replacement: - ''' Holds replacement data, a list of replacements in form provided by - shellcheck ''' + """Holds replacement data + + a list of replacements in form provided by linters + """ + begin_line: int end_line: int replacement: list[str] class ReportLevel(Enum): - """represents a "level" of report, from information through hard issues""" + """Represents a "level" of report, from information through hard issues""" # report represents some information that is not particularly important INFO = enum.auto() @@ -46,38 +51,38 @@ class ReportLevel(Enum): # report represents something that doesn't conform to some convention CONVENTION = enum.auto() - # report represents a probable issue with readability, design anti-pattern, complexity, etc. + # report represents a probable issue with readability, design + # anti-pattern, complexity, etc. REFACTOR = enum.auto() - # report represents a probable issue with code correctness, uses inconsistent, error-prone, - # deprecated or otherwise non-ideal functionality that could be improved + # report represents a probable issue with code correctness, uses + # inconsistent, error-prone, deprecated or otherwise non-ideal + # functionality that could be improved WARN = enum.auto() - # report represents a serious issue with code. It is syntactically invalid or similarly incorrect + # report represents a serious issue with code. 
It is syntactically invalid + # or similarly incorrect ERROR = enum.auto() - # report represents a possible or confirmed security issue and fix should be seriously considered + # report represents a possible or confirmed security issue and fix should + # be seriously considered SECURITY = enum.auto() def ansi_color_code(self) -> str: - """returns an ansi escape code for color, for each level""" - if self == self.INFO: - return co.CYAN - if self == self.CONVENTION: - return co.CYAN - if self == self.REFACTOR: - return co.YELLOW - if self == self.WARN: - return co.YELLOW - if self == self.ERROR: - return co.RED - if self == self.SECURITY: - return co.RED - return "" + """Return an ansi escape code for color, for each level""" + colors = { + self.INFO: co.CYAN, + self.CONVENTION: co.CYAN, + self.REFACTOR: co.YELLOW, + self.WARN: co.YELLOW, + self.ERROR: co.RED, + self.SECURITY: co.RED, + } + return colors.get(self, "") def parse_report_level(raw: str) -> ReportLevel: - """parse a string into a ReportLevel""" + """Parse a string into a ReportLevel""" raw = raw.lower() if raw in ("i", "info", "note", "message"): return ReportLevel.INFO @@ -91,12 +96,14 @@ def parse_report_level(raw: str) -> ReportLevel: return ReportLevel.ERROR if raw in ("s", "security"): return ReportLevel.SECURITY - raise ValueError(f'couldn\'t parse unknown report level by name "{raw}"') + error_message = f'couldn\'t parse unknown report level by name "{raw}"' + raise ValueError(error_message) @dataclass(frozen=True) class Report: - """ + """Information to be presented to user + Holds all information about a particular issue in a particular location possibly including metadata, possible fixes, severity, which linter created the report, etc. 
@@ -114,7 +121,7 @@ class Report: item: Item "metadata on location of issue (path, tags, etc.)" - location_metadata: Union[str, None] + location_metadata: str | None """ location metadata (optional, can be anything such as function name, header section, etc, etc, etc.) @@ -123,7 +130,7 @@ class Report: message: str "message describing issue" - fix: Union[str, None] + fix: str | None "message describing fix (might be empty)" source: str @@ -132,11 +139,11 @@ class Report: level: ReportLevel "error, warning, etc." - raw: Union[dict, None] = None + raw: dict | None = None "raw data, format depends on `source`, not guaranteed to be set" def to_dict(self) -> dict: - """ return this object as dictionary """ + """Return this object as dictionary""" return { "item": self.item, "location_metadata": self.location_metadata, @@ -147,15 +154,18 @@ def to_dict(self) -> dict: "raw": self.raw, } - def modified(self, **kwargs) -> "Report": - """return a copy of this report with fields specified in - `kwargs` replacing fields from this report""" + def modified(self, **kwargs: Item | str | dict | None) -> "Report": + """Return new modified version of this report + + Return a copy of this report with fields specified in `kwargs` + replacing fields from this report + """ data = self.to_dict() data.update(kwargs) return self.__class__(**data) def format(self, suggested_fix: bool = True) -> str: - """formats report for terminal output and returns as a string""" + """Format report for terminal output and return as a string""" out = "| " out += f"{self.level.ansi_color_code()}{self.level.name} " @@ -166,7 +176,6 @@ def format(self, suggested_fix: bool = True) -> str: for line in wrapped_lines: out += self.level.ansi_color_code() + line + co.RESET + "\n" - # out += f"{self.message}{co.RESET}\n" if self.fix and suggested_fix: out += f"{co.CYAN}suggested fix: {self.fix}{co.RESET}\n" return out @@ -174,21 +183,22 @@ def format(self, suggested_fix: bool = True) -> str: @dataclass(frozen=True) 
class FileReport(Report): - """ + """Holds information about a report for a file + Holds all information about a particular issue in a particular location in a particular file possibly including metadata, possible fixes, severity, which linter created the report, etc. """ - line: Union[int, tuple[int, int], None] = None + line: int | tuple[int, int] | None = None "line or (begin_line, end_line) issue occurs" - column: Union[int, tuple[int, int], None] = None + column: int | tuple[int, int] | None = None "column or (begin_column, end_column) issue occurs" def format(self, suggested_fix: bool = True) -> str: - """formats report for terminal output and returns as a string""" - out = super().format(False) + """Format report for terminal output and return as a string""" + out = super().format(suggested_fix=False) if isinstance(self.item, FileItem): if self.line: out += f"@{self.item.relpath} +{self.line}\n" @@ -216,6 +226,7 @@ def format(self, suggested_fix: bool = True) -> str: return out.rstrip() def to_dict(self) -> dict: + """Return object as dictionary""" return { "item": self.item, "location_metadata": self.location_metadata, @@ -230,42 +241,48 @@ def to_dict(self) -> dict: class ReportFilter: - """Last stop before presenting to the user, report filters can modify, - split, generate or even remove reports""" + """A filter to change reports before presenting + + Last stop before presenting to the user, report filters can modify, + split, generate or even remove reports + """ WEIGHT: int = 100 def filter(self, report: Report) -> Generator[Report, None, None]: - """given a report filter or modify it + """Given a report filter or modify it - there doesn't need to be a 1-1 relationship between inputs and outputs + There doesn't need to be a 1-1 relationship between inputs and outputs - reports will be given to this function, and the reports it yields will + Reports will be given to this function, and the reports it yields will be fed to all remaining filters, after all 
processing they will be presented to the user """ - raise NotImplementedError() + raise NotImplementedError + +_FILTERS: list[type[ReportFilter]] = [] -_FILTERS: list[Type[ReportFilter]] = [] +def register_filter(filt: type[ReportFilter]) -> type[ReportFilter]: + """Register a report filter -def register_filter(filt: Type[ReportFilter]): - """registers a report filter for use in tkldev-detective, must be called on - all filters added""" + Must be called on all filters added + """ _FILTERS.append(filt) return filt def get_weighted_filters() -> list[ReportFilter]: - """returns instances of registered filterss in order of weight""" + """Return instances of registered filters in order of weight""" return sorted( - map(lambda x: x(), _FILTERS), key=lambda x: (x.WEIGHT, x.__class__.__name__) + (x() for x in _FILTERS), + key=lambda x: (x.WEIGHT, x.__class__.__name__) ) def filter_all_reports(reports: Iterable[Report]) -> Generator[Report, None, None]: - """filters all reports through all filters in order of weight""" + """Filter all reports through all filters in order of weight""" filters = get_weighted_filters() for report in reports: diff --git a/setup.py b/setup.py old mode 100644 new mode 100755 diff --git a/tkldet_modules/appliance_confd.py b/tkldet_modules/appliance_confd.py index 0d2a072..0161b36 100644 --- a/tkldet_modules/appliance_confd.py +++ b/tkldet_modules/appliance_confd.py @@ -14,7 +14,10 @@ # # You should have received a copy of the GNU General Public License along with # tkldev-detective. If not, see . 
-from typing import Generator + +"""Lints for appliance conf.d/* scripts""" + +from typing import Generator, ClassVar import stat import os @@ -24,8 +27,8 @@ @register_linter class ApplianceConfDLinter(FileLinter): - ENABLE_TAGS: set[str] = {"appliance-conf.d"} - DISABLE_TAGS: set[str] = set() + ENABLE_TAGS: ClassVar[set[str]] = {"appliance-conf.d"} + DISABLE_TAGS: ClassVar[set[str]] = set() def check(self, item: FileItem) -> Generator[Report, None, None]: mode = os.lstat(item.abspath).st_mode diff --git a/tkldet_modules/appliance_files.py b/tkldet_modules/appliance_files.py index 84fd1cf..577a250 100644 --- a/tkldet_modules/appliance_files.py +++ b/tkldet_modules/appliance_files.py @@ -14,61 +14,79 @@ # # You should have received a copy of the GNU General Public License along with # tkldev-detective. If not, see . +"""Classifiers for appliance specific files""" + from libtkldet.classifier import ( ExactPathClassifier, SubdirClassifier, register_classifier, ) - +from typing import ClassVar @register_classifier class ApplianceMakefileClassifier(ExactPathClassifier): - path: str = "Makefile" - tags: list[str] = ["appliance-makefile"] + """Classifies appliance Makefile""" + + path: ClassVar[str] = "Makefile" + tags: ClassVar[list[str]] = ["appliance-makefile"] @register_classifier class ApplianceConfDClassifier(SubdirClassifier): - path: str = "conf.d" - recursive: bool = False - tags: list[str] = ["appliance-conf.d"] + """Classifies appliance conf.d scripts""" + + path: ClassVar[str] = "conf.d" + recursive: ClassVar[bool] = False + tags: ClassVar[list[str]] = ["appliance-conf.d"] @register_classifier class ApplianceOverlayClassifier(SubdirClassifier): - path: str = "overlay" - recursive: bool = True - tags: list[str] = ["appliance-overlay"] + """Classifies appliance overlay files""" + + path: ClassVar[str] = "overlay" + recursive: ClassVar[bool] = True + tags: ClassVar[list[str]] = ["appliance-overlay"] @register_classifier class 
AppliancePlanClassifier(SubdirClassifier): - path: str = "plan" - recursive: bool = False - tags: list[str] = ["appliance-plan"] + """Classifies appliance plans""" + + path: ClassVar[str] = "plan" + recursive: ClassVar[bool] = False + tags: ClassVar[list[str]] = ["appliance-plan"] @register_classifier class ApplianceInithookFirstbootClassifier(SubdirClassifier): - path: str = "overlay/usr/lib/inithooks/firstboot.d" - recursive: bool = False - tags: list[str] = ["appliance-inithook-firstboot"] + """Classifies appliance firstboot inithooks""" + + path: ClassVar[str] = "overlay/usr/lib/inithooks/firstboot.d" + recursive: ClassVar[bool] = False + tags: ClassVar[list[str]] = ["appliance-inithook-firstboot"] @register_classifier class ApplianceInithookBinClassifier(SubdirClassifier): - path: str = "overlay/usr/lib/inithooks/bin" - recursive: bool = False - tags: list[str] = ["appliance-inithook-bin"] + """Classifies appliance inithooks bin scripts""" + + path: ClassVar[str] = "overlay/usr/lib/inithooks/bin" + recursive: ClassVar[bool] = False + tags: ClassVar[list[str]] = ["appliance-inithook-bin"] @register_classifier class ApplianceReadmeClassifier(ExactPathClassifier): - path: str = "README.rst" - tags: list[str] = ["appliance-readme"] + """Classifies appliance readme""" + + path: ClassVar[str] = "README.rst" + tags: ClassVar[list[str]] = ["appliance-readme"] @register_classifier class ApplianceChangelogClassifier(ExactPathClassifier): - path: str = "changelog" - tags: list[str] = ["appliance-readme"] + """Classifies appliance changelog""" + + path: ClassVar[str] = "changelog" + tags: ClassVar[list[str]] = ["appliance-readme"] diff --git a/tkldet_modules/appliance_makefile.py b/tkldet_modules/appliance_makefile.py index 713c040..f2ad828 100644 --- a/tkldet_modules/appliance_makefile.py +++ b/tkldet_modules/appliance_makefile.py @@ -14,7 +14,10 @@ # # You should have received a copy of the GNU General Public License along with # tkldev-detective. If not, see . 
-from typing import Generator + +"""Linters for appliance makefile""" + +from typing import Generator, ClassVar from libtkldet.linter import FileLinter, register_linter, FileItem from libtkldet.report import Report, FileReport, ReportLevel @@ -23,15 +26,17 @@ @register_linter class ApplianceMakefileLinter(FileLinter): - ENABLE_TAGS: set[str] = {"appliance-makefile"} - DISABLE_TAGS: set[str] = set() + """Linter for appliance makefile""" + + ENABLE_TAGS: ClassVar[set[str]] = {"appliance-makefile"} + DISABLE_TAGS: ClassVar[set[str]] = set() def check(self, item: FileItem) -> Generator[Report, None, None]: - MK_CONFVARS = ["COMMON_CONF", "COMMON_OVERLAYS"] + mk_confvars = ["COMMON_CONF", "COMMON_OVERLAYS"] with open("/turnkey/fab/common/mk/turnkey.mk", "r") as fob: for line in fob: if line.startswith("CONF_VARS += "): - MK_CONFVARS.extend(line.strip().split()[2:]) + mk_confvars.extend(line.strip().split()[2:]) in_define = False first_include = None @@ -57,15 +62,15 @@ def check(self, item: FileItem) -> Generator[Report, None, None]: var = line.split("+=", 1)[0].strip() else: var = line.split("=", 1)[0].strip() - if var not in MK_CONFVARS: - suggested_var = fuzzy_suggest(var, MK_CONFVARS) + if var not in mk_confvars: + suggested_var = fuzzy_suggest(var, mk_confvars) if suggested_var: fix = f"did you mean {suggested_var!r} instead of {var!r} ?" else: fix = ( - f"either replace with one of {MK_CONFVARS} or add it to" + f"either replace with one of {mk_confvars} or add it to" + " turnkey.mk's list of valid CONF_VARS", ) - " turnkey.mk's list of valid CONF_VARS", yield FileReport( item=item, line=i + 1, diff --git a/tkldet_modules/filetype.py b/tkldet_modules/filetype.py index f95d150..5e16189 100644 --- a/tkldet_modules/filetype.py +++ b/tkldet_modules/filetype.py @@ -14,14 +14,49 @@ # # You should have received a copy of the GNU General Public License along with # tkldev-detective. If not, see . 
+"""General file classification""" + from libtkldet.classifier import FileClassifier, FileItem, register_classifier from os.path import splitext, isfile - +from typing import ClassVar @register_classifier class FiletypeClassifier(FileClassifier): - WEIGHT = 10 + """Classify files by extension""" + + WEIGHT: ClassVar[int] = 10 - def classify(self, item: FileItem): + def classify(self, item: FileItem) -> None: if isfile(item.abspath) and "." in item.value: item.add_tags(self, [f"ext:{splitext(item.value)[1][1:]}"]) + + +@register_classifier +class ShebangClassifier(FileClassifier): + """Classify files by shebang""" + + WEIGHT: ClassVar[int] = 10 + + def classify(self, item: FileItem) -> None: + if isfile(item.abspath): + other_parts = [] + with open(item.abspath, "rb") as fob: + shebang = b"" + head = fob.read(512) + + if b"\n" in head: + shebang = head.split(b"\n")[0].strip() + if shebang: + other_parts = shebang.split() + shebang = other_parts.pop(0) + + other_parts = [part.decode() for part in other_parts] + shebang = shebang.decode().strip() + + if shebang.startswith("#!"): + if shebang == '#!/usr/bin/env': + item.add_tags(self, [ + f"shebang:{shebang[2:]} {other_parts[0]}" + ]) + else: + item.add_tags(self, [f"shebang:{shebang[2:]}"]) diff --git a/tkldet_modules/json_check.py b/tkldet_modules/json_check.py index cf449ca..40992fe 100644 --- a/tkldet_modules/json_check.py +++ b/tkldet_modules/json_check.py @@ -14,8 +14,11 @@ # # You should have received a copy of the GNU General Public License along with # tkldev-detective. If not, see . 
+ +"""Json related linters""" + import json -from typing import Generator +from typing import Generator, ClassVar from libtkldet.linter import FileLinter, FileItem, register_linter from libtkldet.report import Report, FileReport, ReportLevel @@ -23,10 +26,12 @@ @register_linter class JsonLinter(FileLinter): - ENABLE_TAGS: set[str] = { + """Tries to load json, lints if errors are produced""" + + ENABLE_TAGS: ClassVar[set[str]] = { "ext:json", } - DISABLE_TAGS: set[str] = set() + DISABLE_TAGS: ClassVar[set[str]] = set() def check(self, item: FileItem) -> Generator[Report, None, None]: with open(item.abspath, "r") as fob: diff --git a/tkldet_modules/missing_module_filter.py b/tkldet_modules/missing_module_filter.py index 8dfb458..f6b62e2 100644 --- a/tkldet_modules/missing_module_filter.py +++ b/tkldet_modules/missing_module_filter.py @@ -15,7 +15,7 @@ # You should have received a copy of the GNU General Public License along with # tkldev-detective. If not, see . -from typing import Generator, Optional +from typing import Generator from libtkldet.report import Report, ReportLevel, register_filter, ReportFilter from libtkldet.linter import FileItem @@ -27,7 +27,7 @@ MISSING_MODULE_RE = re.compile(r"^Unable to import '(.*)'$") -def filter_packaged(report: Report, module_name: str) -> Optional[Report]: +def filter_packaged(report: Report, module_name: str) -> Report | None: packages = find_python_package_from_import(module_name) modified_fix = report.fix or "" @@ -48,7 +48,7 @@ def filter_packaged(report: Report, module_name: str) -> Optional[Report]: modified_level = ReportLevel.INFO modified_message += ( f' ("{package}" likely provides this ' - + "module and will be installed at build time)" + "module and will be installed at build time)" ) package_installed = True break diff --git a/tkldet_modules/pylint.py b/tkldet_modules/pylint.py index b8f2b37..bdedd89 100644 --- a/tkldet_modules/pylint.py +++ b/tkldet_modules/pylint.py @@ -15,7 +15,7 @@ # You should have received 
a copy of the GNU General Public License along with # tkldev-detective. If not, see . import json -from typing import Generator +from typing import Generator, ClassVar import subprocess from os.path import join, dirname, abspath @@ -28,18 +28,21 @@ @register_linter class PyLinter(FileLinter): - ENABLE_TAGS: set[str] = { + ENABLE_TAGS: ClassVar[set[str]] = { "ext:py", "shebang:/usr/bin/python", "shebang:/usr/bin/python3", "shebang:/usr/bin/python3.9", + "shebang:/usr/bin/env python", + "shebang:/usr/bin/env python3", + "shebang:/usr/bin/env python3.9", } - DISABLE_TAGS: set[str] = set() + DISABLE_TAGS: ClassVar[set[str]] = set() def check(self, item: FileItem) -> Generator[Report, None, None]: for report in json.loads( subprocess.run( - ["pylint", item.abspath, "-f", "json", "--rcfile", rcfile], + ["/usr/bin/pylint", item.abspath, "-f", "json", "--rcfile", rcfile], capture_output=True, text=True, ).stdout diff --git a/tkldet_modules/ruff.py b/tkldet_modules/ruff.py index 45fd9a7..26c1937 100644 --- a/tkldet_modules/ruff.py +++ b/tkldet_modules/ruff.py @@ -214,14 +214,15 @@ D200 = 'CONVENTION', # one line docstring not on one line D201 = None, # (conflicts D211) blank line before func docstring - D202 = 'CONVENTION', # blank line after func docstring + D202 = None, # blank line after func docstring D203 = 'CONVENTION', # missing blank line before class docstring D204 = 'CONVENTION', # missing blank line after class docstring - D206 = 'CONVENTION', # missing blank line between summary and description in docstring + D205 = 'CONVENTION', # missing blank line between summary and description in docstring + D206 = 'CONVENTION', # docstring should be indented with spaces not tabs D207 = 'CONVENTION', # under-indented docstring D208 = 'CONVENTION', # over-indented docstring D209 = 'CONVENTION', # multi-line docstring should close on seperate line - D210 = 'CONVENTION', # whitespace surrounding docstring text + D210 = None, # whitespace surrounding docstring text D211 = 
'CONVENTION', # blank line before class docstring D212 = None, # (conflicts D213) multi-line summary not on first line D213 = 'CONVENTION', # docstring multi-line summary not on first line @@ -1037,7 +1038,7 @@ PLW0406 = 'WARN', # module imports itself PLW0602 = 'WARN', # global variable not assigned - PLW0603 = 'REFACTOR', # global variable updated in function + PLW0603 = None, # global variable updated in function PLW0604 = 'REFACTOR', # redundant global variable at module level PLW0642 = 'REFACTOR', # reassignment of `self` or `class` @@ -1219,6 +1220,9 @@ class RuffLinter(FileLinter): "shebang:/usr/bin/python", "shebang:/usr/bin/python3", "shebang:/usr/bin/python3.9", + "shebang:/usr/bin/env python", + "shebang:/usr/bin/env python3", + "shebang:/usr/bin/env python3.9", } DISABLE_TAGS: set[str] = set() diff --git a/tkldet_modules/shebang.py b/tkldet_modules/shebang.py deleted file mode 100644 index 365c2a6..0000000 --- a/tkldet_modules/shebang.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (c) Turnkey GNU/Linux -# -# this file is part of tkldev-detective. -# -# tkldev-detective is free software: you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# tkldev-detective is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along with -# tkldev-detective. If not, see . 
-from libtkldet.classifier import FileClassifier, FileItem, register_classifier -from os.path import isfile - - -@register_classifier -class ShebangClassifier(FileClassifier): - WEIGHT = 10 - - def classify(self, item: FileItem): - if isfile(item.abspath): - with open(item.abspath, "rb") as fob: - shebang = b"" - head = fob.read(512) - - if b"\n" in head: - shebang = head.split(b"\n")[0].strip() - if shebang: - shebang = shebang.split()[0].strip() - shebang = shebang.decode() - - if shebang.startswith("#!"): - item.add_tags(self, [f"shebang:{shebang[2:]}"]) diff --git a/tkldet_modules/shellcheck.py b/tkldet_modules/shellcheck.py index c1b0d29..8414ccb 100644 --- a/tkldet_modules/shellcheck.py +++ b/tkldet_modules/shellcheck.py @@ -23,14 +23,13 @@ from libtkldet.apt_file import is_installed if is_installed("shellcheck"): - def insert_str(v: str, i: int, instr: str): + def insert_str(v: str, i: int, instr: str) -> str: return v[:i] + instr + v[i:] def expand_lines(lines: list[str]) -> Generator[str, None, None]: for line in lines: - for subline in line.splitlines(): - yield subline + yield from line.splitlines() def format_replacement( diff --git a/tkldev-detective b/tkldev-detective index c8f67c0..31bf3ec 100755 --- a/tkldev-detective +++ b/tkldev-detective @@ -24,7 +24,7 @@ from libtkldet import locator, modman, colors from libtkldet.report import Report, filter_all_reports import libtkldet import libtkldet.error -from libtkldet.error import ApplianceNotFound +from libtkldet.error import ApplianceNotFoundError import libtkldet.classifier import libtkldet.linter @@ -35,7 +35,7 @@ def perform_lint( libtkldet.initialize(root_path, ignore_non_appliance) try: root = locator.get_appliance_root(root_path) - except ApplianceNotFound: + except ApplianceNotFoundError: if not ignore_non_appliance: raise else: From 9d78c5508d7213175dc7208d97cbbe5db14c2ce6 Mon Sep 17 00:00:00 2001 From: Stefan Davis Date: Wed, 4 Sep 2024 06:02:31 +0000 Subject: [PATCH 07/14] Add ignore & 
non-appliance lint fix - adds special case tag "ignore:*" which causes "items" to be ignored - fixes non-recursive location of non-appliance files --- libtkldet/classifier.py | 9 ++++++++ libtkldet/locator.py | 2 +- tkldet_modules/to_ignore.py | 42 +++++++++++++++++++++++++++++++++++++ tkldev-detective | 7 +++++++ 4 files changed, 59 insertions(+), 1 deletion(-) create mode 100644 tkldet_modules/to_ignore.py diff --git a/libtkldet/classifier.py b/libtkldet/classifier.py index c471379..572a523 100644 --- a/libtkldet/classifier.py +++ b/libtkldet/classifier.py @@ -57,6 +57,15 @@ def add_tags(self, classifier: "Classifier", tags: Iterable[str]) -> None: self._tags[name] = set() self._tags[name].update(tags) + def has_tag(self, tag: str) -> bool: + """Check if item contains a given tag""" + return tag in self.tags + + def has_tag_type(self, tag_type: str) -> bool: + """Checks if item contains a variant tag of a given type""" + check = tag_type + ':' + return any(tag.startswith(check) for tag in self.tags) + def pretty_print(self) -> None: """Show item value as well as tags""" print(f"{self.value}") diff --git a/libtkldet/locator.py b/libtkldet/locator.py index e85f2e8..2328c94 100644 --- a/libtkldet/locator.py +++ b/libtkldet/locator.py @@ -100,7 +100,7 @@ def everything_locator(root: str) -> Generator[str, None, None]: if isfile(root): yield root else: - yield from iglob(join(root, '**')) + yield from iglob(join(root, '**'), recursive=True) def full_appliance_locator(root: str) -> Generator[str, None, None]: """Yield (pretty much) every file in an appliance of potential concern""" diff --git a/tkldet_modules/to_ignore.py b/tkldet_modules/to_ignore.py new file mode 100644 index 0000000..cfe24a7 --- /dev/null +++ b/tkldet_modules/to_ignore.py @@ -0,0 +1,42 @@ +# Copyright (c) Turnkey GNU/Linux +# +# this file is part of tkldev-detective. 
+# +# tkldev-detective is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# tkldev-detective is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# tkldev-detective. If not, see . +"""For marking files which should be ignored""" + +from libtkldet.classifier import FileClassifier, FileItem, register_classifier +import os.path +from typing import ClassVar + +def is_or_has_ancestor_dir(path, directory): + """Checks if path has an ancestor directory with a given name""" + while path not in ('/', ''): + path, path_segment = os.path.split(path) + if path_segment == directory: + return True + return False + +@register_classifier +class FiletypeClassifier(FileClassifier): + """Classify files by a parent directory""" + + WEIGHT: ClassVar[int] = 5 + + def classify(self, item: FileItem) -> None: + + if is_or_has_ancestor_dir(item.abspath, '__pycache__'): + item.add_tags(self, ['ignore:__pycache__']) + if is_or_has_ancestor_dir(item.abspath, '.git'): + item.add_tags(self, ['ignore:.git']) diff --git a/tkldev-detective b/tkldev-detective index 31bf3ec..044b37f 100755 --- a/tkldev-detective +++ b/tkldev-detective @@ -48,8 +48,15 @@ def perform_lint( relpath=relpath(path, start=root), abspath=abspath(path), ) + ignore = False for classifier in all_classifiers: classifier.classify(item) + if item.has_tag_type('ignore'): + ignore = True + break + if ignore: + continue + if dump_tags: item.pretty_print() if not skip_lint: From 3180e00b525eadf133e1adad5a6a4584e2dd4750 Mon Sep 17 00:00:00 2001 From: Stefan Davis Date: Thu, 5 Sep 2024 03:53:00 
+0000 Subject: [PATCH 08/14] Configurable logging + bugfix - Makes logging configurable. - Fixes bug when linting non-appliance, where hidden files and directories are omitted. - Improve logging of ignored items --- libtkldet/classifier.py | 12 ++++++++++-- libtkldet/locator.py | 16 +++++++++++++--- libtkldet/mkparser.py | 2 +- tkldet_modules/filetype.py | 26 +++++++++++++++++--------- tkldev-detective | 21 ++++++++++++++++++++- 5 files changed, 61 insertions(+), 16 deletions(-) diff --git a/libtkldet/classifier.py b/libtkldet/classifier.py index 572a523..728f687 100644 --- a/libtkldet/classifier.py +++ b/libtkldet/classifier.py @@ -22,7 +22,7 @@ """ from dataclasses import dataclass -from typing import Generator, Iterable, cast +from typing import Generator, Iterable, Iterator, cast from os.path import dirname @@ -62,10 +62,18 @@ def has_tag(self, tag: str) -> bool: return tag in self.tags def has_tag_type(self, tag_type: str) -> bool: - """Checks if item contains a variant tag of a given type""" + """Check if item contains a variant tag of a given type""" check = tag_type + ':' return any(tag.startswith(check) for tag in self.tags) + def tags_with_type(self, tag_type: str) -> Iterator[str]: + """Return all tags with a variant tag of a given type""" + check = tag_type + ':' + return filter( + lambda tag: tag.startswith(check), + self.tags + ) + def pretty_print(self) -> None: """Show item value as well as tags""" print(f"{self.value}") diff --git a/libtkldet/locator.py b/libtkldet/locator.py index 2328c94..f2ba3f5 100644 --- a/libtkldet/locator.py +++ b/libtkldet/locator.py @@ -24,8 +24,11 @@ from .error import ApplianceNotFoundError +from logging import getLogger + PRODUCTS_DIR = "/turnkey/fab/products" +logger = getLogger(__name__) def is_appliance_path(path: str) -> bool: """ is path, a path to an appliance? 
""" @@ -37,7 +40,7 @@ def is_appliance_path(path: str) -> bool: def is_appliance_name(name: str) -> bool: """ is name, the name of an existing appliance on tkldev? """ - return "/" not in name and isdir(join(PRODUCTS_DIR, name)) + return name != '.' and "/" not in name and isdir(join(PRODUCTS_DIR, name)) def is_inside_appliance(path: str) -> bool: @@ -68,6 +71,7 @@ def get_appliance_root(path: str) -> str: root = join(PRODUCTS_DIR, appliance_name) if root is None or not isfile(join(root, "Makefile")): + logger.info('lint root is not an appliance') error_message = ( "input does not appear to be an appliance name, path to an appliance" " or path to a file inside of an appliance" @@ -83,10 +87,16 @@ def locator(root: str, ignore_non_appliance: bool) -> Generator[str, None, None] or a specific file only if given a path to a file inside an appliance """ if is_appliance_name(root): + logger.debug('locator(_) # is appliance name') yield from full_appliance_locator(join(PRODUCTS_DIR, root)) elif is_appliance_path(root): + logger.debug('locator(_) # is appliance path') yield from full_appliance_locator(root) - elif is_inside_appliance(root) or ignore_non_appliance: + elif is_inside_appliance(root): + logger.debug('locator(_) # is inside appliance') + yield from full_appliance_locator(get_appliance_root(root)) + elif ignore_non_appliance: + logger.debug('locator(_) # is not an appliance (but ignore_non_appliance set)') yield from everything_locator(root) else: error_message = ( @@ -100,7 +110,7 @@ def everything_locator(root: str) -> Generator[str, None, None]: if isfile(root): yield root else: - yield from iglob(join(root, '**'), recursive=True) + yield from iglob(join(root, '**'), recursive=True, include_hidden=True) def full_appliance_locator(root: str) -> Generator[str, None, None]: """Yield (pretty much) every file in an appliance of potential concern""" diff --git a/libtkldet/mkparser.py b/libtkldet/mkparser.py index cd9f472..38593fc 100644 --- a/libtkldet/mkparser.py 
+++ b/libtkldet/mkparser.py @@ -161,7 +161,7 @@ class with simpler typing for key in keys: done = True values = self.variables[key] - new_values = [] + new_values: list[str | LazyVar] = [] for value in values: if isinstance(value, str): new_values.append(value) diff --git a/tkldet_modules/filetype.py b/tkldet_modules/filetype.py index 5e16189..869e329 100644 --- a/tkldet_modules/filetype.py +++ b/tkldet_modules/filetype.py @@ -19,6 +19,9 @@ from libtkldet.classifier import FileClassifier, FileItem, register_classifier from os.path import splitext, isfile from typing import ClassVar +from logging import getLogger + +logger = getLogger(__name__) @register_classifier class FiletypeClassifier(FileClassifier): @@ -50,13 +53,18 @@ def classify(self, item: FileItem) -> None: other_parts = shebang.split() shebang = other_parts.pop(0) - other_parts = [part.decode() for part in other_parts] - shebang = shebang.decode().strip() + try: + other_parts = [part.decode() for part in other_parts] + shebang = shebang.decode().strip() + except UnicodeDecodeError: + logger.debug("failed to decode shebang", exc_info=True) + item.add_tags(self, ['not-utf8']) + else: - if shebang.startswith("#!"): - if shebang == '#!/usr/bin/env': - item.add_tags(self, [ - f"shebang:{shebang[2:]} {other_parts[0]}" - ]) - else: - item.add_tags(self, [f"shebang:{shebang[2:]}"]) + if shebang.startswith("#!"): + if shebang == '#!/usr/bin/env': + item.add_tags(self, [ + f"shebang:{shebang[2:]} {other_parts[0]}" + ]) + else: + item.add_tags(self, [f"shebang:{shebang[2:]}"]) diff --git a/tkldev-detective b/tkldev-detective index 044b37f..d8345e7 100755 --- a/tkldev-detective +++ b/tkldev-detective @@ -18,6 +18,7 @@ from argparse import ArgumentParser from os.path import relpath, abspath from typing import Generator +import logging import sys from libtkldet import locator, modman, colors @@ -28,6 +29,7 @@ from libtkldet.error import ApplianceNotFoundError import libtkldet.classifier import libtkldet.linter 
+logger = logging.getLogger('tkldev-detective') def perform_lint( root_path: str, dump_tags: bool, skip_lint: bool, ignore_non_appliance: bool @@ -52,6 +54,9 @@ def perform_lint( for classifier in all_classifiers: classifier.classify(item) if item.has_tag_type('ignore'): + logger.info(f'item "%s" skipped (tagged with %s)', + item.abspath, + ', '.join(map(repr, item.tags_with_type('ignore')))) ignore = True break if ignore: @@ -69,6 +74,8 @@ def perform_lint( if __name__ == "__main__": parser = ArgumentParser() parser.add_argument("--color", choices=["always", "never", "auto"], default="auto") + parser.add_argument("--log-level", choices=["debug", "info", "warn", + "error"], default="warn") subparsers = parser.add_subparsers(dest="action") list_parser = subparsers.add_parser("list") @@ -99,6 +106,18 @@ if __name__ == "__main__": args = parser.parse_args() + log_level = logging.WARNING + if args.log_level == "debug": + log_level = logging.DEBUG + elif args.log_level == "info": + log_level = logging.INFO + elif args.log_level == "warn": + log_level = logging.WARNING + elif args.log_level == "error": + log_level = logging.ERROR + + logging.basicConfig(level=log_level) + if args.color == "auto": colors.set_colors_enabled(sys.stdout.isatty()) else: @@ -132,7 +151,7 @@ if __name__ == "__main__": ): print("\n| ".join(report.format().split("\n"))) print() - except libtkldet.error.PlanNotFound as e: + except libtkldet.error.PlanNotFoundError as e: print( colors.RED + "error: " From 845cd7bd0a39098644c3661fd5e7a7d3295a94aa Mon Sep 17 00:00:00 2001 From: Stefan Davis Date: Tue, 10 Sep 2024 04:14:52 +0000 Subject: [PATCH 09/14] cleaned up typing a little --- libtkldet/__init__.py | 4 ++-- libtkldet/classifier.py | 22 +++++++++++----------- libtkldet/common_data.py | 4 ++-- libtkldet/file_util.py | 2 +- libtkldet/linter.py | 16 ++++++++-------- libtkldet/locator.py | 14 +++++++------- libtkldet/report.py | 8 ++++---- tkldet_modules/filetype.py | 1 - tkldet_modules/to_ignore.py | 
2 +- tkldev-detective | 12 +++++++++--- 10 files changed, 45 insertions(+), 40 deletions(-) diff --git a/libtkldet/__init__.py b/libtkldet/__init__.py index 032c7e2..857e136 100644 --- a/libtkldet/__init__.py +++ b/libtkldet/__init__.py @@ -15,7 +15,7 @@ # You should have received a copy of the GNU General Public License along with # tkldev-detective. If not, see . -from typing import Generator +from typing import Iterator from os.path import relpath, abspath from . import locator, common_data, classifier from .common_data import APPLIANCE_ROOT @@ -37,7 +37,7 @@ def initialize(path: str, ignore_non_appliance: bool) -> None: common_data.initialize_common_data(root) -def yield_appliance_items() -> Generator[classifier.Item, None, None]: +def yield_appliance_items() -> Iterator[classifier.Item]: """Yield everything 'lintable'""" yield from common_data.iter_packages() diff --git a/libtkldet/classifier.py b/libtkldet/classifier.py index 728f687..198f113 100644 --- a/libtkldet/classifier.py +++ b/libtkldet/classifier.py @@ -22,7 +22,7 @@ """ from dataclasses import dataclass -from typing import Generator, Iterable, Iterator, cast +from typing import Iterator, Iterable, Iterator, cast, ClassVar from os.path import dirname @@ -45,7 +45,7 @@ class Item: exactly how an item got classsified in a certain way """ @property - def tags(self) -> Generator[str, None, None]: + def tags(self) -> Iterator[str]: """Yields all tags, may contain duplicates""" for tags in self._tags.values(): yield from tags @@ -123,12 +123,12 @@ class Classifier: which files. 
""" - WEIGHT: int = 100 + WEIGHT: ClassVar[int] = 100 """weight is used to order classifiers, as tthey can read tags as well, classifier can leverage information provided (or omitted) by previous classifiers""" - ItemType: type[Item] = Item + ItemType: ClassVar[type[Item]] = Item def do_classify(self, item: Item) -> None: """Perform classification @@ -147,22 +147,22 @@ def classify(self, item: Item) -> None: class FileClassifier(Classifier): """Specialized classifer which operates on "FileItem"s""" - ItemType: type[Item] = FileItem + ItemType: ClassVar[type[Item]] = FileItem class PackageClassifier(Classifier): """Specialized classifier which operates on "PackageItem"s""" - ItemType: type[Item] = PackageItem + ItemType: ClassVar[type[Item]] = PackageItem class ExactPathClassifier(FileClassifier): """Classifies an item which matches some exact path""" - path: str + path: ClassVar[str] "exact path to match" - tags: list[str] + tags: ClassVar[list[str]] "exact tags to add to matched item" def classify(self, item: Item) -> None: @@ -177,13 +177,13 @@ def classify(self, item: Item) -> None: class SubdirClassifier(FileClassifier): """Classifies an item which is inside a given subdirectory""" - path: str + path: ClassVar[str] "the parent directory" - recursive: bool + recursive: ClassVar[bool] "whether to match a child of any depth or only files directly inside the given dir" - tags: list[str] + tags: ClassVar[list[str]] "exact tags to add to matched item" def classify(self, item: Item) -> None: diff --git a/libtkldet/common_data.py b/libtkldet/common_data.py index c938945..597e922 100644 --- a/libtkldet/common_data.py +++ b/libtkldet/common_data.py @@ -18,7 +18,7 @@ import os from os.path import join, isfile -from typing import Generator +from typing import Iterator from .plan_resolve import parse_plan, PlanEntry from .locator import iter_plan from .classifier import PackageItem @@ -57,7 +57,7 @@ def is_common_plan_included(plan_name: str) -> bool: return 
join("/turnkey/fab/common/plans", plan_name) in _INCLUDED_PLAN_CACHE -def iter_packages() -> Generator[PackageItem, None, None]: +def iter_packages() -> Iterator[PackageItem]: """Iterate over all packages which will be installed""" for entry in _PLAN_RESOLVE_CACHE: yield PackageItem( diff --git a/libtkldet/file_util.py b/libtkldet/file_util.py index 468280e..cdda8d7 100644 --- a/libtkldet/file_util.py +++ b/libtkldet/file_util.py @@ -53,7 +53,7 @@ def position_from_byte_offset(path: str, offset: int) -> tuple[int, int] | None: if i == offset: return line, col - if char == b"\n": + if char == ord(b"\n"): line += 1 col = 0 else: diff --git a/libtkldet/linter.py b/libtkldet/linter.py index 79d4e60..e6c38b4 100644 --- a/libtkldet/linter.py +++ b/libtkldet/linter.py @@ -20,7 +20,7 @@ code here provides interface for modules to provide linting """ -from typing import Generator +from typing import ClassVar, Iterator from .classifier import Item, FileItem from .report import Report @@ -33,14 +33,14 @@ class Linter: `DISABLE_TAGS` """ - ENABLE_TAGS: set[str] + ENABLE_TAGS: ClassVar[set[str]] "tags which this linter should work on (or all if omitted)" - DISABLE_TAGS: set[str] + DISABLE_TAGS: ClassVar[set[str]] "tags which this linter should never work on" - WEIGHT: int = 100 + WEIGHT: ClassVar[int] = 100 - ItemType: type[Item] = Item + ItemType: ClassVar[type[Item]] = Item def should_check(self, item: Item) -> bool: """Actually performs check to see if the linter should run on this item @@ -71,13 +71,13 @@ def should_check(self, item: Item) -> bool: return False return True - def do_check(self, item: Item) -> Generator[Report, None, None] | None: + def do_check(self, item: Item) -> Iterator[Report] | None: """Run lint, if `should_check` returns True, used internally""" if isinstance(item, self.ItemType) and self.should_check(item): return self.check(item) return None - def check(self, item: Item) -> Generator[Report, None, None]: + def check(self, item: Item) -> 
Iterator[Report]: """Actually run lint""" raise NotImplementedError @@ -87,7 +87,7 @@ class FileLinter(Linter): ItemType: type[Item] = FileItem - def check(self, item: Item) -> Generator[Report, None, None]: + def check(self, item: Item) -> Iterator[Report]: raise NotImplementedError diff --git a/libtkldet/locator.py b/libtkldet/locator.py index f2ba3f5..93f85f3 100644 --- a/libtkldet/locator.py +++ b/libtkldet/locator.py @@ -20,7 +20,7 @@ from os.path import join, normpath, basename, isdir, isfile from glob import iglob -from typing import Generator +from typing import Iterator from .error import ApplianceNotFoundError @@ -80,7 +80,7 @@ def get_appliance_root(path: str) -> str: return root -def locator(root: str, ignore_non_appliance: bool) -> Generator[str, None, None]: +def locator(root: str, ignore_non_appliance: bool) -> Iterator[str]: """Yield most files inside appliance Yields almost every file in an appliance of potential concern @@ -105,14 +105,14 @@ def locator(root: str, ignore_non_appliance: bool) -> Generator[str, None, None] ) raise ApplianceNotFoundError(error_message) -def everything_locator(root: str) -> Generator[str, None, None]: +def everything_locator(root: str) -> Iterator[str]: """Yield everything, appliance or not""" if isfile(root): yield root else: yield from iglob(join(root, '**'), recursive=True, include_hidden=True) -def full_appliance_locator(root: str) -> Generator[str, None, None]: +def full_appliance_locator(root: str) -> Iterator[str]: """Yield (pretty much) every file in an appliance of potential concern""" yield from ( join(root, x) for x in ["Makefile", "changelog", "README.rst", "removelist"] @@ -122,16 +122,16 @@ def full_appliance_locator(root: str) -> Generator[str, None, None]: yield from iter_overlay(root) -def iter_conf(root: str) -> Generator[str, None, None]: +def iter_conf(root: str) -> Iterator[str]: """ yield each conf file in the appliance """ yield from iglob(join(root, "conf.d/*")) -def iter_plan(root: str) -> 
Generator[str, None, None]: +def iter_plan(root: str) -> Iterator[str]: """ yield each plan file in the appliance """ yield from iglob(join(root, "plan/*")) -def iter_overlay(root: str) -> Generator[str, None, None]: +def iter_overlay(root: str) -> Iterator[str]: """ yield each file in the appliance overlay""" yield from iglob(join(root, "overlay/**"), recursive=True) diff --git a/libtkldet/report.py b/libtkldet/report.py index 2220c2a..de92e08 100644 --- a/libtkldet/report.py +++ b/libtkldet/report.py @@ -22,7 +22,7 @@ from dataclasses import dataclass from enum import Enum import enum -from typing import Generator, Iterable +from typing import Iterator, Iterable, ClassVar import textwrap from .classifier import Item, FileItem @@ -247,9 +247,9 @@ class ReportFilter: split, generate or even remove reports """ - WEIGHT: int = 100 + WEIGHT: ClassVar[int] = 100 - def filter(self, report: Report) -> Generator[Report, None, None]: + def filter(self, report: Report) -> Iterator[Report]: """Given a report filter or modify it There doesn't need to be a 1-1 relationship between inputs and outputs @@ -281,7 +281,7 @@ def get_weighted_filters() -> list[ReportFilter]: ) -def filter_all_reports(reports: Iterable[Report]) -> Generator[Report, None, None]: +def filter_all_reports(reports: Iterable[Report]) -> Iterator[Report]: """Filter all reports through all filters in order of weight""" filters = get_weighted_filters() diff --git a/tkldet_modules/filetype.py b/tkldet_modules/filetype.py index 869e329..c525c36 100644 --- a/tkldet_modules/filetype.py +++ b/tkldet_modules/filetype.py @@ -60,7 +60,6 @@ def classify(self, item: FileItem) -> None: logger.debug("failed to decode shebang", exc_info=True) item.add_tags(self, ['not-utf8']) else: - if shebang.startswith("#!"): if shebang == '#!/usr/bin/env': item.add_tags(self, [ diff --git a/tkldet_modules/to_ignore.py b/tkldet_modules/to_ignore.py index cfe24a7..ed4f12f 100644 --- a/tkldet_modules/to_ignore.py +++ 
b/tkldet_modules/to_ignore.py @@ -20,7 +20,7 @@ import os.path from typing import ClassVar -def is_or_has_ancestor_dir(path, directory): +def is_or_has_ancestor_dir(path: str, directory: str) -> bool: """Checks if path has an ancestor directory with a given name""" while path not in ('/', ''): path, path_segment = os.path.split(path) diff --git a/tkldev-detective b/tkldev-detective index d8345e7..4eb7d7e 100755 --- a/tkldev-detective +++ b/tkldev-detective @@ -54,7 +54,7 @@ def perform_lint( for classifier in all_classifiers: classifier.classify(item) if item.has_tag_type('ignore'): - logger.info(f'item "%s" skipped (tagged with %s)', + logger.info('item "%s" skipped (tagged with %s)', item.abspath, ', '.join(map(repr, item.tags_with_type('ignore')))) ignore = True @@ -128,8 +128,14 @@ if __name__ == "__main__": all_classifiers = libtkldet.classifier.get_weighted_classifiers() all_linters = libtkldet.linter.get_weighted_linters() - linters_by_name = {l.__class__.__name__: l for l in all_linters} - classifiers_by_name = {c.__class__.__name__: c for c in all_classifiers} + linters_by_name = { + linter.__class__.__name__: linter + for linter in all_linters + } + classifiers_by_name = { + classifier.__class__.__name__: classifier + for classifier in all_classifiers + } if args.action == "list": if args.list_item in ("linters", "all"): From 9aa460cb935c04d1574ea9b111f4ef91bd7b2940 Mon Sep 17 00:00:00 2001 From: Stefan Davis Date: Tue, 10 Sep 2024 05:00:37 +0000 Subject: [PATCH 10/14] reformat using the same ruff settings we lint with --- libtkldet/apt_file.py | 16 +- libtkldet/classifier.py | 12 +- libtkldet/common_data.py | 14 +- libtkldet/file_util.py | 9 +- libtkldet/fuzzy.py | 5 +- libtkldet/hint_extract.py | 16 +- libtkldet/linter.py | 1 + libtkldet/locator.py | 38 +- libtkldet/mkparser.py | 54 +- libtkldet/modman.py | 22 +- libtkldet/plan_resolve.py | 22 +- libtkldet/report.py | 10 +- setup.py | 2 +- tkldet_modules/appliance_confd.py | 4 +- 
tkldet_modules/appliance_files.py | 1 + tkldet_modules/filetype.py | 11 +- tkldet_modules/missing_module_filter.py | 12 +- tkldet_modules/pylint.py | 14 +- tkldet_modules/ruff.py | 2064 ++++++++++------------- tkldet_modules/shellcheck.py | 19 +- tkldet_modules/to_ignore.py | 13 +- tkldet_modules/yaml_check.py | 2 + 22 files changed, 1111 insertions(+), 1250 deletions(-) diff --git a/libtkldet/apt_file.py b/libtkldet/apt_file.py index 4a203d1..dc1b0a0 100644 --- a/libtkldet/apt_file.py +++ b/libtkldet/apt_file.py @@ -23,19 +23,23 @@ import subprocess + def is_in_path(name: str) -> bool: """Check if a given name is in the path""" - in_path = subprocess.run( - ["/usr/bin/which", name], - capture_output=True - ) + in_path = subprocess.run(["/usr/bin/which", name], capture_output=True) return in_path.returncode == 0 + def is_installed(package_name: str) -> bool: """Check if a given package is installed on the HOST system (tkldev)""" pkg_installed = subprocess.run( - ["/usr/bin/dpkg-query", "-W", "--showformat='${Status}'", package_name], - capture_output=True + [ + "/usr/bin/dpkg-query", + "-W", + "--showformat='${Status}'", + package_name, + ], + capture_output=True, ) return pkg_installed.returncode != 0 diff --git a/libtkldet/classifier.py b/libtkldet/classifier.py index 198f113..6d8970c 100644 --- a/libtkldet/classifier.py +++ b/libtkldet/classifier.py @@ -63,16 +63,13 @@ def has_tag(self, tag: str) -> bool: def has_tag_type(self, tag_type: str) -> bool: """Check if item contains a variant tag of a given type""" - check = tag_type + ':' + check = tag_type + ":" return any(tag.startswith(check) for tag in self.tags) def tags_with_type(self, tag_type: str) -> Iterator[str]: """Return all tags with a variant tag of a given type""" - check = tag_type + ':' - return filter( - lambda tag: tag.startswith(check), - self.tags - ) + check = tag_type + ":" + return filter(lambda tag: tag.startswith(check), self.tags) def pretty_print(self) -> None: """Show item value as well 
as tags""" @@ -214,5 +211,6 @@ def register_classifier(classifier: type[Classifier]) -> type[Classifier]: def get_weighted_classifiers() -> list[Classifier]: """Return instances of registered classifiers in order of weight""" return sorted( - (c() for c in _CLASSIFIERS), key=lambda x: (x.WEIGHT, x.__class__.__name__) + (c() for c in _CLASSIFIERS), + key=lambda x: (x.WEIGHT, x.__class__.__name__), ) diff --git a/libtkldet/common_data.py b/libtkldet/common_data.py index 597e922..135d2ac 100644 --- a/libtkldet/common_data.py +++ b/libtkldet/common_data.py @@ -29,6 +29,7 @@ _INCLUDED_PLAN_CACHE: set[str] = set() _FAB_DATA: CommonFabBuildData + def initialize_common_data(appliance_root: str) -> None: """Parse plan & makefile and initialize data which utilizes it""" global APPLIANCE_ROOT, _FAB_DATA @@ -48,10 +49,10 @@ def initialize_common_data(appliance_root: str) -> None: def is_package_to_be_installed(package_name: str) -> bool: """Check if an apt package will be installed via plan""" return any( - entry.package_name == package_name - for entry in _PLAN_RESOLVE_CACHE + entry.package_name == package_name for entry in _PLAN_RESOLVE_CACHE ) + def is_common_plan_included(plan_name: str) -> bool: """Check if a common plan (by file name) is included in appliance build""" return join("/turnkey/fab/common/plans", plan_name) in _INCLUDED_PLAN_CACHE @@ -61,7 +62,9 @@ def iter_packages() -> Iterator[PackageItem]: """Iterate over all packages which will be installed""" for entry in _PLAN_RESOLVE_CACHE: yield PackageItem( - value=entry.package_name, _tags={}, plan_stack=entry.include_stack[:] + value=entry.package_name, + _tags={}, + plan_stack=entry.include_stack[:], ) @@ -95,7 +98,10 @@ def get_path_in_common_overlay(path: str) -> str | None: path = path.lstrip("/") for common in _FAB_DATA.overlays: common_path = join( - os.getenv("FAB_PATH", "/turnkey/fab"), "common/overlays", common, path + os.getenv("FAB_PATH", "/turnkey/fab"), + "common/overlays", + common, + path, ) if 
isfile(common_path): return common_path diff --git a/libtkldet/file_util.py b/libtkldet/file_util.py index cdda8d7..6bf0177 100644 --- a/libtkldet/file_util.py +++ b/libtkldet/file_util.py @@ -17,7 +17,10 @@ """Utilities relating to classification/linting files""" -def position_from_char_offset(path: str, offset: int) -> tuple[int, int] | None: + +def position_from_char_offset( + path: str, offset: int +) -> tuple[int, int] | None: """Get column/line from offset into file Given an offset into a file, returns the line and column numbers @@ -39,7 +42,9 @@ def position_from_char_offset(path: str, offset: int) -> tuple[int, int] | None: return None -def position_from_byte_offset(path: str, offset: int) -> tuple[int, int] | None: +def position_from_byte_offset( + path: str, offset: int +) -> tuple[int, int] | None: """Get column/line from offset into file in binary mode Given an offset into a file (in binary mode), returns the line and column diff --git a/libtkldet/fuzzy.py b/libtkldet/fuzzy.py index 47560d0..e83846d 100644 --- a/libtkldet/fuzzy.py +++ b/libtkldet/fuzzy.py @@ -36,9 +36,8 @@ def fuzzy_diff(x: str, y: str) -> int: def fuzzy_suggest( - check: str, - options: list[str], - max_diff: int=MAX_DIFF) -> str | None: + check: str, options: list[str], max_diff: int = MAX_DIFF +) -> str | None: """Suggest a string from given options Given a 'check' value, and a list of valid options, find the option diff --git a/libtkldet/hint_extract.py b/libtkldet/hint_extract.py index 5bb3dd3..d530930 100644 --- a/libtkldet/hint_extract.py +++ b/libtkldet/hint_extract.py @@ -16,6 +16,7 @@ # tkldev-detective. If not, see . """Utilities for annotating parts of files""" + from . 
import colors as co H_PAD = 6 # padding (for hint lines to account for line numbers) @@ -24,7 +25,7 @@ def extract_line(path: str, row: int) -> str: """Extract a single line from a file""" with open(path, "r") as fob: - for (i, line) in enumerate(fob): + for i, line in enumerate(fob): if i == row: return ( str(i + 1).rjust(4) @@ -38,10 +39,15 @@ def extract_line(path: str, row: int) -> str: def extract_line_col(path: str, row: int, col: int) -> list[str]: """Annotate line with specific column""" - return [extract_line(path, row), co.RED + "^".rjust(col + H_PAD + 1) + co.RESET] + return [ + extract_line(path, row), + co.RED + "^".rjust(col + H_PAD + 1) + co.RESET, + ] -def extract_line_cols(path: str, row: int, col_span: tuple[int, int]) -> list[str]: +def extract_line_cols( + path: str, row: int, col_span: tuple[int, int] +) -> list[str]: """Annotate line with span of columns""" min_col, max_col = col_span return [ @@ -59,7 +65,7 @@ def extract_lines(path: str, row_span: tuple[int, int]) -> list[str]: min_row, max_row = row_span out = [] with open(path, "r") as fob: - for (i, line) in enumerate(fob): + for i, line in enumerate(fob): if i in (min_row, max_row): out.append( co.RED @@ -91,7 +97,7 @@ def extract_lines_cols( min_col, max_col = col_span out = [] with open(path, "r") as fob: - for (i, line) in enumerate(fob): + for i, line in enumerate(fob): if min_row <= i <= max_row: out.append( str(i + 1).rjust(4) diff --git a/libtkldet/linter.py b/libtkldet/linter.py index e6c38b4..05d1922 100644 --- a/libtkldet/linter.py +++ b/libtkldet/linter.py @@ -20,6 +20,7 @@ code here provides interface for modules to provide linting """ + from typing import ClassVar, Iterator from .classifier import Item, FileItem diff --git a/libtkldet/locator.py b/libtkldet/locator.py index 93f85f3..bbd391c 100644 --- a/libtkldet/locator.py +++ b/libtkldet/locator.py @@ -30,8 +30,9 @@ logger = getLogger(__name__) + def is_appliance_path(path: str) -> bool: - """ is path, a path to an 
appliance? """ + """is path, a path to an appliance?""" path = normpath(path) if path == join(PRODUCTS_DIR, basename(path)): return isfile(join(path, "Makefile")) @@ -39,17 +40,19 @@ def is_appliance_path(path: str) -> bool: def is_appliance_name(name: str) -> bool: - """ is name, the name of an existing appliance on tkldev? """ - return name != '.' and "/" not in name and isdir(join(PRODUCTS_DIR, name)) + """is name, the name of an existing appliance on tkldev?""" + return name != "." and "/" not in name and isdir(join(PRODUCTS_DIR, name)) def is_inside_appliance(path: str) -> bool: - """ is path, a path to a file inside an appliance """ + """is path, a path to a file inside an appliance""" path = normpath(path) if not path.startswith(PRODUCTS_DIR + "/"): return False path = path[len(PRODUCTS_DIR) + 1 :] - return bool(path) # if path is non-zero length, it must be a path into an appliance + return bool( + path + ) # if path is non-zero length, it must be a path into an appliance def get_appliance_root(path: str) -> str: @@ -71,7 +74,7 @@ def get_appliance_root(path: str) -> str: root = join(PRODUCTS_DIR, appliance_name) if root is None or not isfile(join(root, "Makefile")): - logger.info('lint root is not an appliance') + logger.info("lint root is not an appliance") error_message = ( "input does not appear to be an appliance name, path to an appliance" " or path to a file inside of an appliance" @@ -87,16 +90,18 @@ def locator(root: str, ignore_non_appliance: bool) -> Iterator[str]: or a specific file only if given a path to a file inside an appliance """ if is_appliance_name(root): - logger.debug('locator(_) # is appliance name') + logger.debug("locator(_) # is appliance name") yield from full_appliance_locator(join(PRODUCTS_DIR, root)) elif is_appliance_path(root): - logger.debug('locator(_) # is appliance path') + logger.debug("locator(_) # is appliance path") yield from full_appliance_locator(root) elif is_inside_appliance(root): - logger.debug('locator(_) # 
is inside appliance') + logger.debug("locator(_) # is inside appliance") yield from full_appliance_locator(get_appliance_root(root)) elif ignore_non_appliance: - logger.debug('locator(_) # is not an appliance (but ignore_non_appliance set)') + logger.debug( + "locator(_) # is not an appliance (but ignore_non_appliance set)" + ) yield from everything_locator(root) else: error_message = ( @@ -105,17 +110,20 @@ def locator(root: str, ignore_non_appliance: bool) -> Iterator[str]: ) raise ApplianceNotFoundError(error_message) + def everything_locator(root: str) -> Iterator[str]: """Yield everything, appliance or not""" if isfile(root): yield root else: - yield from iglob(join(root, '**'), recursive=True, include_hidden=True) + yield from iglob(join(root, "**"), recursive=True, include_hidden=True) + def full_appliance_locator(root: str) -> Iterator[str]: """Yield (pretty much) every file in an appliance of potential concern""" yield from ( - join(root, x) for x in ["Makefile", "changelog", "README.rst", "removelist"] + join(root, x) + for x in ["Makefile", "changelog", "README.rst", "removelist"] ) yield from iter_conf(root) yield from iter_plan(root) @@ -123,15 +131,15 @@ def full_appliance_locator(root: str) -> Iterator[str]: def iter_conf(root: str) -> Iterator[str]: - """ yield each conf file in the appliance """ + """yield each conf file in the appliance""" yield from iglob(join(root, "conf.d/*")) def iter_plan(root: str) -> Iterator[str]: - """ yield each plan file in the appliance """ + """yield each plan file in the appliance""" yield from iglob(join(root, "plan/*")) def iter_overlay(root: str) -> Iterator[str]: - """ yield each file in the appliance overlay""" + """yield each file in the appliance overlay""" yield from iglob(join(root, "overlay/**"), recursive=True) diff --git a/libtkldet/mkparser.py b/libtkldet/mkparser.py index 38593fc..033e998 100644 --- a/libtkldet/mkparser.py +++ b/libtkldet/mkparser.py @@ -23,6 +23,7 @@ probably produces a lot of other 
erroneous output (if used for general makefile parsing) """ + import typing from dataclasses import dataclass import os @@ -30,30 +31,32 @@ ASSIGNMENT_OPERATORS = ["?=", ":=", "+=", "="] CHECKS = ["ifeq", "ifneq", "ifdef", "ifndef"] MAKEFILE_ENV = { - "FAB_PATH": os.environ.get("FAB_PATH", '/turnkey/fab'), - "FAB_SHARE_PATH": "/usr/share/fab" + "FAB_PATH": os.environ.get("FAB_PATH", "/turnkey/fab"), + "FAB_SHARE_PATH": "/usr/share/fab", } + def split_value(raw: str) -> list[str]: """Split value by space""" - chunks = [''] + chunks = [""] bracket_depth = 0 for c in raw: - if c == ')': + if c == ")": bracket_depth -= 1 - chunks[-1] += ')' - elif c == '(': + chunks[-1] += ")" + elif c == "(": bracket_depth += 1 - chunks[-1] += '(' + chunks[-1] += "(" elif bracket_depth > 0: chunks[-1] += c elif c.isspace() and chunks[-1]: - chunks.append('') + chunks.append("") else: chunks[-1] += c return chunks + def parse_assignment(line: str) -> tuple[str, str, str] | None: """Parse assignment line @@ -78,14 +81,17 @@ class CommonFabBuildData: removelists: list[str] removelists_final: list[str] + @dataclass class LazyVar: """A value referencing a variable we haven't resolved yet""" name: str + ValueList = list[str | LazyVar] + @dataclass class MutMakefileData: """Hold variables set by makefiles""" @@ -105,8 +111,9 @@ def resolve_var(self, value: str) -> ValueList: if var_name in MAKEFILE_ENV: out_var.append(MAKEFILE_ENV[var_name]) else: - out_var.extend(self.variables.get(var_name, - [LazyVar(var_name)])) + out_var.extend( + self.variables.get(var_name, [LazyVar(var_name)]) + ) else: out_var.extend(split_value(value)) return out_var @@ -135,7 +142,7 @@ def assign_var(self, name: str, operator: str, values: str) -> None: error_message = f"unknown operator {operator!r}" raise ValueError(error_message) - def finish(self) -> 'MakefileData': + def finish(self) -> "MakefileData": """Return concrete class Resolve unresolved variables and return a concrete version of this @@ -166,21 
+173,23 @@ class with simpler typing if isinstance(value, str): new_values.append(value) elif isinstance(value, LazyVar): - new_v = self.variables.get(value.name, - [f'$({value.name})']) - if isinstance(new_v , LazyVar): + new_v = self.variables.get( + value.name, [f"$({value.name})"] + ) + if isinstance(new_v, LazyVar): done = False new_values.extend(new_v) self.variables[key] = new_values - new_variables = {key: list(values) for key, values in - self.variables.items()} + new_variables = { + key: list(values) for key, values in self.variables.items() + } new_included = list(self.included) return MakefileData( - typing.cast(dict[str, list[str]], new_variables), - new_included + typing.cast(dict[str, list[str]], new_variables), new_included ) + @dataclass class MakefileData(MutMakefileData): """Holds variables set by makefiles""" @@ -206,17 +215,14 @@ def to_fab_data(self) -> CommonFabBuildData: def to_dict(self) -> dict: """Return contents as a dictionary""" - return { - 'variables': self.variables, - 'included': self.included - } + return {"variables": self.variables, "included": self.included} # ignore warnings about complexity, this is just a complex job and breaking it # down further would only obfuscate what it's doing. -def parse_makefile( # noqa: C901, PLR0912 +def parse_makefile( # noqa: C901, PLR0912 path: str, makefile_data: MakefileData | None = None - ) -> MakefileData: +) -> MakefileData: """Get all variables in makefile including included makefiles Attempts to naively get all variables defined in makefile tree. This diff --git a/libtkldet/modman.py b/libtkldet/modman.py index 947995f..4c080df 100644 --- a/libtkldet/modman.py +++ b/libtkldet/modman.py @@ -15,7 +15,7 @@ # You should have received a copy of the GNU General Public License along with # tkldev-detective. If not, see . 
-""" handles loading / managing tkldev-detective-modules """ +"""handles loading / managing tkldev-detective-modules""" import importlib.machinery import importlib.util @@ -27,17 +27,14 @@ from .error import TKLDevDetectiveError # priortise local tkldet_modules path, fallback to OS path -MOD_PATH = [ dirname(dirname(abspath(__file__))), '/usr/share/tkldev-detective'] +MOD_PATH = [dirname(dirname(abspath(__file__))), "/usr/share/tkldev-detective"] def _load_all_modules_from_dir(root: str) -> None: print( - co.BRIGHT_BLACK - + co.BOLD - + "load all modules from", - root + - co.RESET, - file=sys.stderr + co.BRIGHT_BLACK + co.BOLD + "load all modules from", + root + co.RESET, + file=sys.stderr, ) root = abspath(root) for filename in listdir(root): @@ -51,12 +48,9 @@ def _load_all_modules_from_dir(root: str) -> None: spec.loader.exec_module(module) print( - co.BRIGHT_BLACK - + co.BOLD - + "loaded", - spec.name - + co.RESET, - file=sys.stderr + co.BRIGHT_BLACK + co.BOLD + "loaded", + spec.name + co.RESET, + file=sys.stderr, ) diff --git a/libtkldet/plan_resolve.py b/libtkldet/plan_resolve.py index e533e05..202c8c1 100644 --- a/libtkldet/plan_resolve.py +++ b/libtkldet/plan_resolve.py @@ -20,7 +20,9 @@ from os.path import join, isfile from dataclasses import dataclass from .error import ( - PlanNotFoundError, UnknownPlanDirectiveError, InvalidPlanError + PlanNotFoundError, + UnknownPlanDirectiveError, + InvalidPlanError, ) static_vars = {"KERNEL": "", "DEBIAN": "", "AMD64": ""} @@ -96,7 +98,8 @@ def _remove_multiline_comments(raw: str) -> str: # - PLW2901 (iteration variable overwritten), variable's meaning does not # change with overwrite. 
-def _parse_plan( # noqa: C901, PLR0912 + +def _parse_plan( # noqa: C901, PLR0912 path: str, include_paths: list[str], plan_stack: list[str] | None = None ) -> list[PlanEntry]: """Parse a plan @@ -128,21 +131,22 @@ def _parse_plan( # noqa: C901, PLR0912 for line in data.splitlines(): # remove single line comment if "//" in line: - line = line.split("//", 1)[0] # noqa: PLW2901 + line = line.split("//", 1)[0] # noqa: PLW2901 # honestly would've thought hashes in cpp code wouldn't work like this, # but apparently it does if not line.startswith("#") and "#" in line: - line = line.split("#", 1)[0] # noqa: PLW2901 + line = line.split("#", 1)[0] # noqa: PLW2901 - line = line.strip() # noqa: PLW2901 + line = line.strip() # noqa: PLW2901 if not line: continue if line.startswith("#endif"): if not cond_stack: - error_message = \ - f"unbalanced #if* and #endif directives in plan {path}" + error_message = ( + f"unbalanced #if* and #endif directives in plan {path}" + ) raise InvalidPlanError(error_message) cond_stack.pop() continue @@ -162,7 +166,9 @@ def _parse_plan( # noqa: C901, PLR0912 elif line.startswith("#include"): packages.extend( _include_plan( - line[8:].strip().strip("<>"), include_paths, plan_stack[:] + line[8:].strip().strip("<>"), + include_paths, + plan_stack[:], ) ) elif line.startswith("#"): diff --git a/libtkldet/report.py b/libtkldet/report.py index de92e08..2cb0705 100644 --- a/libtkldet/report.py +++ b/libtkldet/report.py @@ -19,6 +19,7 @@ these are issues, warnings or notes about "Item"s produced by "Linter"s """ + from dataclasses import dataclass from enum import Enum import enum @@ -203,7 +204,11 @@ def format(self, suggested_fix: bool = True) -> str: if self.line: out += f"@{self.item.relpath} +{self.line}\n" out += ( - "\n".join(format_extract(self.item.abspath, self.line, self.column)) + "\n".join( + format_extract( + self.item.abspath, self.line, self.column + ) + ) + "\n" ) else: @@ -276,8 +281,7 @@ def register_filter(filt: type[ReportFilter]) -> 
type[ReportFilter]: def get_weighted_filters() -> list[ReportFilter]: """Return instances of registered filters in order of weight""" return sorted( - (x() for x in _FILTERS), - key=lambda x: (x.WEIGHT, x.__class__.__name__) + (x() for x in _FILTERS), key=lambda x: (x.WEIGHT, x.__class__.__name__) ) diff --git a/setup.py b/setup.py index 98f0be6..1b40c12 100755 --- a/setup.py +++ b/setup.py @@ -9,5 +9,5 @@ author_email="stefan@turnkeylinux.org", url="https://github.com/turnkeylinux/tkldev-detective", packages=["libtkldet"], - scripts=["tkldev-detective"] + scripts=["tkldev-detective"], ) diff --git a/tkldet_modules/appliance_confd.py b/tkldet_modules/appliance_confd.py index 0161b36..cccd47b 100644 --- a/tkldet_modules/appliance_confd.py +++ b/tkldet_modules/appliance_confd.py @@ -33,7 +33,9 @@ class ApplianceConfDLinter(FileLinter): def check(self, item: FileItem) -> Generator[Report, None, None]: mode = os.lstat(item.abspath).st_mode if not ( - (mode & stat.S_IXUSR) or (mode & stat.S_IXGRP) or (mode & stat.S_IXOTH) + (mode & stat.S_IXUSR) + or (mode & stat.S_IXGRP) + or (mode & stat.S_IXOTH) ): yield FileReport( item=item, diff --git a/tkldet_modules/appliance_files.py b/tkldet_modules/appliance_files.py index 577a250..72f418a 100644 --- a/tkldet_modules/appliance_files.py +++ b/tkldet_modules/appliance_files.py @@ -23,6 +23,7 @@ ) from typing import ClassVar + @register_classifier class ApplianceMakefileClassifier(ExactPathClassifier): """Classifies appliance Makefile""" diff --git a/tkldet_modules/filetype.py b/tkldet_modules/filetype.py index c525c36..6c9ef8d 100644 --- a/tkldet_modules/filetype.py +++ b/tkldet_modules/filetype.py @@ -23,6 +23,7 @@ logger = getLogger(__name__) + @register_classifier class FiletypeClassifier(FileClassifier): """Classify files by extension""" @@ -58,12 +59,12 @@ def classify(self, item: FileItem) -> None: shebang = shebang.decode().strip() except UnicodeDecodeError: logger.debug("failed to decode shebang", exc_info=True) - 
item.add_tags(self, ['not-utf8']) + item.add_tags(self, ["not-utf8"]) else: if shebang.startswith("#!"): - if shebang == '#!/usr/bin/env': - item.add_tags(self, [ - f"shebang:{shebang[2:]} {other_parts[0]}" - ]) + if shebang == "#!/usr/bin/env": + item.add_tags( + self, [f"shebang:{shebang[2:]} {other_parts[0]}"] + ) else: item.add_tags(self, [f"shebang:{shebang[2:]}"]) diff --git a/tkldet_modules/missing_module_filter.py b/tkldet_modules/missing_module_filter.py index f6b62e2..e6cf3f1 100644 --- a/tkldet_modules/missing_module_filter.py +++ b/tkldet_modules/missing_module_filter.py @@ -20,7 +20,10 @@ from libtkldet.report import Report, ReportLevel, register_filter, ReportFilter from libtkldet.linter import FileItem from libtkldet.apt_file import find_python_package_from_import -from libtkldet.common_data import is_package_to_be_installed, get_path_in_common_overlay +from libtkldet.common_data import ( + is_package_to_be_installed, + get_path_in_common_overlay, +) from os.path import dirname import re @@ -55,9 +58,7 @@ def filter_packaged(report: Report, module_name: str) -> Report | None: if not package_installed: if len(packages) > 1: packages_str = ", ".join('"' + pkg + '"' for pkg in packages) - modified_message += ( - f" (perhaps you meant to add one of {packages_str} to the plan?)" - ) + modified_message += f" (perhaps you meant to add one of {packages_str} to the plan?)" else: modified_message += ( f' (perhaps you meant to add "{packages[0]}" to the plan?)' @@ -82,7 +83,8 @@ def filter(self, report: Report) -> Generator[Report, None, None]: if ( isinstance(report.item, FileItem) - and dirname(report.item.relpath) == "overlay/usr/lib/inithooks/bin" + and dirname(report.item.relpath) + == "overlay/usr/lib/inithooks/bin" ): temp_module_name = module_name if "." 
in temp_module_name: diff --git a/tkldet_modules/pylint.py b/tkldet_modules/pylint.py index bdedd89..9420de5 100644 --- a/tkldet_modules/pylint.py +++ b/tkldet_modules/pylint.py @@ -42,14 +42,22 @@ class PyLinter(FileLinter): def check(self, item: FileItem) -> Generator[Report, None, None]: for report in json.loads( subprocess.run( - ["/usr/bin/pylint", item.abspath, "-f", "json", "--rcfile", rcfile], + [ + "/usr/bin/pylint", + item.abspath, + "-f", + "json", + "--rcfile", + rcfile, + ], capture_output=True, text=True, ).stdout ): - if report["obj"]: - location_metadata = f'{report["obj"]} in module {report["module"]}' + location_metadata = ( + f'{report["obj"]} in module {report["module"]}' + ) else: location_metadata = f'in base of module {report["module"]}' diff --git a/tkldet_modules/ruff.py b/tkldet_modules/ruff.py index 26c1937..50f377a 100644 --- a/tkldet_modules/ruff.py +++ b/tkldet_modules/ruff.py @@ -23,1196 +23,989 @@ from libtkldet.apt_file import is_in_path RUFF_LINTS = dict( - pyflakes = dict( - F401 = 'WARN', # unused import - F402 = 'WARN', # import shadowed by loop variable - F403 = 'WARN', # import * used - F404 = 'ERROR', # late __future__ import - F405 = 'WARN', # possibly undefined (unsure due to import *) - F406 = 'ERROR', # import * used outside module level - F407 = 'ERROR', # unknown __future__ import - - F501 = 'ERROR', # invalid % formatting - F502 = 'ERROR', # % expected mapping, got sequence - F503 = 'ERROR', # % expected sequence, got mapping - F504 = 'ERROR', # % has unused name arguments - F505 = 'ERROR', # % has missing arguments - F506 = 'ERROR', # % mixed positional and named - F507 = 'ERROR', # % positional count mismatch - F508 = 'ERROR', # % * specifier requires sequence - F509 = 'ERROR', # % unsupported format char - - F521 = 'ERROR', # .format invalid format string - F522 = 'ERROR', # .format unused name arguments - F523 = 'ERROR', # .format unused positional arguments - F524 = 'ERROR', # .format missing arguments - F525 = 
'ERROR', # .format mix automatic and manual numbering - - F541 = 'REFACTOR', # fstring no placeholders - - F601 = 'ERROR', # dict key literal repeated - F602 = 'WARN', # dict key variable repeated - - F621 = 'ERROR', # too many expressions in star unpacking - F622 = 'ERROR', # two starred expressions in assignment - - F631 = 'WARN', # assert is non-empty tuple - F632 = 'ERROR', # == checking 2 constant literals - F633 = 'ERROR', # >> used with print - F634 = 'WARN', # if tuple check (always true) - - F701 = 'ERROR', # break outside loop - F702 = 'ERROR', # continue outside loop - - F704 = 'ERROR', # yield outside function - - F706 = 'ERROR', # return outside function - F707 = 'ERROR', # default except not last - - F722 = 'ERROR', # invalid type annotation - - F811 = 'WARN', # redefined of unused variable - - F821 = 'ERROR', # undefined variable - F822 = 'ERROR', # undefined name in __all__ - F823 = 'ERROR', # local variable undefined - - F841 = 'WARN', # unused variable - F842 = 'WARN', # annotated but never used - - F901 = 'WARN', # incorrect raise NotImplementedError + pyflakes=dict( + F401="WARN", # unused import + F402="WARN", # import shadowed by loop variable + F403="WARN", # import * used + F404="ERROR", # late __future__ import + F405="WARN", # possibly undefined (unsure due to import *) + F406="ERROR", # import * used outside module level + F407="ERROR", # unknown __future__ import + F501="ERROR", # invalid % formatting + F502="ERROR", # % expected mapping, got sequence + F503="ERROR", # % expected sequence, got mapping + F504="ERROR", # % has unused name arguments + F505="ERROR", # % has missing arguments + F506="ERROR", # % mixed positional and named + F507="ERROR", # % positional count mismatch + F508="ERROR", # % * specifier requires sequence + F509="ERROR", # % unsupported format char + F521="ERROR", # .format invalid format string + F522="ERROR", # .format unused name arguments + F523="ERROR", # .format unused positional arguments + F524="ERROR", # 
.format missing arguments + F525="ERROR", # .format mix automatic and manual numbering + F541="REFACTOR", # fstring no placeholders + F601="ERROR", # dict key literal repeated + F602="WARN", # dict key variable repeated + F621="ERROR", # too many expressions in star unpacking + F622="ERROR", # two starred expressions in assignment + F631="WARN", # assert is non-empty tuple + F632="ERROR", # == checking 2 constant literals + F633="ERROR", # >> used with print + F634="WARN", # if tuple check (always true) + F701="ERROR", # break outside loop + F702="ERROR", # continue outside loop + F704="ERROR", # yield outside function + F706="ERROR", # return outside function + F707="ERROR", # default except not last + F722="ERROR", # invalid type annotation + F811="WARN", # redefined of unused variable + F821="ERROR", # undefined variable + F822="ERROR", # undefined name in __all__ + F823="ERROR", # local variable undefined + F841="WARN", # unused variable + F842="WARN", # annotated but never used + F901="WARN", # incorrect raise NotImplementedError ), - pycodestyle = dict( - E101 = 'ERROR', # mixed spaces and tabs - - E111 = 'ERROR', # bad indent - E112 = 'ERROR', # expected indented block - E113 = 'ERROR', # unexpected indentation - E114 = 'ERROR', # bad indent (with comment) - E115 = 'ERROR', # expected indent block (with comment) - E116 = 'ERROR', # unexpected indentation (with comment) - E117 = 'ERROR', # over indented (with comment) - - E201 = 'CONVENTION', # whitespace after open bracket - E202 = 'CONVENTION', # whitespace before close bracket - E203 = 'CONVENTION', # whitespace before punctuation - E204 = 'CONVENTION', # whitespace after decorator - - E211 = 'CONVENTION', # whitespace before parameters - - E221 = 'CONVENTION', # multiple whitespace before operator - E222 = 'CONVENTION', # multiple whitespace after operator - E223 = 'CONVENTION', # tab before operator - E224 = 'CONVENTION', # tab after operator - E225 = 'CONVENTION', # missing whitespace around operator - 
E226 = 'CONVENTION', # missing whitespace around arithmetic operator - E227 = 'CONVENTION', # missing whitespace around bitwise operator - E228 = 'CONVENTION', # missing whitespace around modulo operator - - E231 = 'CONVENTION', # missing whitespace - - E241 = 'CONVENTION', # multiple spaces after comma - E242 = 'CONVENTION', # tab after comma - - E251 = 'CONVENTION', # unexpected spaces around keyword / param equals - E252 = 'CONVENTION', # missing whitespace around param equals - - E261 = None, # too few spaces before comment - E262 = 'CONVENTION', # inline comment missing space - - E265 = 'CONVENTION', # block comment missing space - E266 = 'CONVENTION', # too many # before block comment - - E271 = 'CONVENTION', # too many spaces after keyword - E272 = 'CONVENTION', # too many spaces before keyword - E273 = 'CONVENTION', # tab after keyword - E274 = 'CONVENTION', # tab before keyword - E275 = 'CONVENTION', # missing whitespace after keyword - - E301 = 'CONVENTION', # wrong number blank line between methods - E302 = 'CONVENTION', # wrong number blank lines top of module - E303 = 'CONVENTION', # too many blank lines - E304 = 'CONVENTION', # blank lines after decorator - E305 = 'CONVENTION', # wrong number blank lines after class/func - E306 = 'CONVENTION', # missing blank line before nested definition - - E401 = 'CONVENTION', # multiple imports on one line - E402 = 'CONVENTION', # module level import not at top of cell - - E501 = 'CONVENTION', # line too long - E502 = 'CONVENTION', # redundant backslash - - E701 = 'CONVENTION', # multiple statements on one line (colon) - E702 = 'CONVENTION', # multiple statements on one line (semicoloon) - E703 = 'CONVENTION', # unnecessary semicolon - - E711 = 'CONVENTION', # `== None` used instead of `is None` - E712 = 'CONVENTION', # `==` used with `True` / `False` - E713 = 'CONVENTION', # `not (? in ?)` used instead of `? not in ?` - E714 = 'CONVENTION', # `not (? is ?)` used instead of `? 
is not ?` - - E721 = 'CONVENTION', # `type(?) == ?` used instead of `isinstance(?)` - E722 = 'CONVENTION', # bare except used - - E731 = 'CONVENTION', # assigned lambda instead of def - - E741 = 'CONVENTION', # ambigious variable name - E742 = 'CONVENTION', # ambigious class name - E743 = 'CONVENTION', # ambigious function name - - E902 = 'ERROR', # io error - E999 = 'ERROR', # syntax error - - W191 = 'CONVENTION', # uses tabs for indentation - - W291 = 'CONVENTION', # trailing whitespace - W292 = 'CONVENTION', # missing newline at EOF - W293 = 'CONVENTION', # blank line with whitespace - - W391 = 'CONVENTION', # too many newlines at EOF - - W505 = 'CONVENTION', # doc line too long - - W605 = 'ERROR', # invalid escape sequence + pycodestyle=dict( + E101="ERROR", # mixed spaces and tabs + E111="ERROR", # bad indent + E112="ERROR", # expected indented block + E113="ERROR", # unexpected indentation + E114="ERROR", # bad indent (with comment) + E115="ERROR", # expected indent block (with comment) + E116="ERROR", # unexpected indentation (with comment) + E117="ERROR", # over indented (with comment) + E201="CONVENTION", # whitespace after open bracket + E202="CONVENTION", # whitespace before close bracket + E203="CONVENTION", # whitespace before punctuation + E204="CONVENTION", # whitespace after decorator + E211="CONVENTION", # whitespace before parameters + E221="CONVENTION", # multiple whitespace before operator + E222="CONVENTION", # multiple whitespace after operator + E223="CONVENTION", # tab before operator + E224="CONVENTION", # tab after operator + E225="CONVENTION", # missing whitespace around operator + E226="CONVENTION", # missing whitespace around arithmetic operator + E227="CONVENTION", # missing whitespace around bitwise operator + E228="CONVENTION", # missing whitespace around modulo operator + E231="CONVENTION", # missing whitespace + E241="CONVENTION", # multiple spaces after comma + E242="CONVENTION", # tab after comma + E251="CONVENTION", # unexpected 
spaces around keyword / param equals + E252="CONVENTION", # missing whitespace around param equals + E261=None, # too few spaces before comment + E262="CONVENTION", # inline comment missing space + E265="CONVENTION", # block comment missing space + E266="CONVENTION", # too many # before block comment + E271="CONVENTION", # too many spaces after keyword + E272="CONVENTION", # too many spaces before keyword + E273="CONVENTION", # tab after keyword + E274="CONVENTION", # tab before keyword + E275="CONVENTION", # missing whitespace after keyword + E301="CONVENTION", # wrong number blank line between methods + E302="CONVENTION", # wrong number blank lines top of module + E303="CONVENTION", # too many blank lines + E304="CONVENTION", # blank lines after decorator + E305="CONVENTION", # wrong number blank lines after class/func + E306="CONVENTION", # missing blank line before nested definition + E401="CONVENTION", # multiple imports on one line + E402="CONVENTION", # module level import not at top of cell + E501="CONVENTION", # line too long + E502="CONVENTION", # redundant backslash + E701="CONVENTION", # multiple statements on one line (colon) + E702="CONVENTION", # multiple statements on one line (semicoloon) + E703="CONVENTION", # unnecessary semicolon + E711="CONVENTION", # `== None` used instead of `is None` + E712="CONVENTION", # `==` used with `True` / `False` + E713="CONVENTION", # `not (? in ?)` used instead of `? not in ?` + E714="CONVENTION", # `not (? is ?)` used instead of `? is not ?` + E721="CONVENTION", # `type(?) 
== ?` used instead of `isinstance(?)` + E722="CONVENTION", # bare except used + E731="CONVENTION", # assigned lambda instead of def + E741="CONVENTION", # ambigious variable name + E742="CONVENTION", # ambigious class name + E743="CONVENTION", # ambigious function name + E902="ERROR", # io error + E999="ERROR", # syntax error + W191="CONVENTION", # uses tabs for indentation + W291="CONVENTION", # trailing whitespace + W292="CONVENTION", # missing newline at EOF + W293="CONVENTION", # blank line with whitespace + W391="CONVENTION", # too many newlines at EOF + W505="CONVENTION", # doc line too long + W605="ERROR", # invalid escape sequence ), - mccabe = dict( - C901 = 'REFACTOR', # structure too complex + mccabe=dict( + C901="REFACTOR", # structure too complex ), - isort = dict( - I001 = 'CONVENTION', # unsorted imports - I002 = 'ERROR', # missing required import + isort=dict( + I001="CONVENTION", # unsorted imports + I002="ERROR", # missing required import ), - pep8_naming = dict( - N801 = 'CONVENTION', # bad class name - N802 = 'CONVENTION', # bad func name - N803 = 'CONVENTION', # bad arg name - N804 = 'ERROR', # bad cls argument - N805 = 'ERROR', # bad self argument - N806 = 'CONVENTION', # bad local var name - N807 = 'CONVENTION', # non-standard dunder method - - N811 = 'CONVENTION', # const imported as non const - N812 = 'CONVENTION', # lowercase imported as non lowercase - N813 = 'CONVENTION', # camelcase imported as non lowercase - N814 = 'CONVENTION', # camelcase imported as constant - N815 = 'CONVENTION', # class variable using mixedcase - N816 = 'CONVENTION', # global variable using mixedcase - N817 = 'CONVENTION', # camelcase imported as acronym - N818 = 'CONVENTION', # exception name not suffixed "Error" - - N999 = 'CONVENTION', # invalid module name + pep8_naming=dict( + N801="CONVENTION", # bad class name + N802="CONVENTION", # bad func name + N803="CONVENTION", # bad arg name + N804="ERROR", # bad cls argument + N805="ERROR", # bad self argument + 
N806="CONVENTION", # bad local var name + N807="CONVENTION", # non-standard dunder method + N811="CONVENTION", # const imported as non const + N812="CONVENTION", # lowercase imported as non lowercase + N813="CONVENTION", # camelcase imported as non lowercase + N814="CONVENTION", # camelcase imported as constant + N815="CONVENTION", # class variable using mixedcase + N816="CONVENTION", # global variable using mixedcase + N817="CONVENTION", # camelcase imported as acronym + N818="CONVENTION", # exception name not suffixed "Error" + N999="CONVENTION", # invalid module name ), - pydocstyle = dict( - D100 = 'CONVENTION', # missing docstring in module - D101 = 'CONVENTION', # missing docstring in class - D102 = 'CONVENTION', # missing docstring in method - D103 = 'CONVENTION', # missing docstring in function - D104 = 'CONVENTION', # missing docstring in package - D105 = 'CONVENTION', # missing docstring in magic method - D106 = 'CONVENTION', # missing docstring in nested class - D107 = 'CONVENTION', # missing docstring in __init__ - - D200 = 'CONVENTION', # one line docstring not on one line - D201 = None, # (conflicts D211) blank line before func docstring - D202 = None, # blank line after func docstring - D203 = 'CONVENTION', # missing blank line before class docstring - D204 = 'CONVENTION', # missing blank line after class docstring - D205 = 'CONVENTION', # missing blank line between summary and description in docstring - D206 = 'CONVENTION', # docstring should be indented with spaces not tabs - D207 = 'CONVENTION', # under-indented docstring - D208 = 'CONVENTION', # over-indented docstring - D209 = 'CONVENTION', # multi-line docstring should close on seperate line - D210 = None, # whitespace surrounding docstring text - D211 = 'CONVENTION', # blank line before class docstring - D212 = None, # (conflicts D213) multi-line summary not on first line - D213 = 'CONVENTION', # docstring multi-line summary not on first line - D214 = 'CONVENTION', # docstring section 
over-indented - D215 = 'CONVENTION', # docstring section underline is over-indented - - D300 = 'CONVENTION', # docstring should use triple double-quotes (""") - D301 = 'CONVENTION', # docstring should use raw triple double-quotes (r""") if backslash in docstring - - D400 = None, # docstring first line should end with '.' - D401 = 'CONVENTION', # docstring first line should be in imperative mood - D402 = 'CONVENTION', # docstring first line shoudl not be in function's signature - D403 = 'CONVENTION', # first word of line should be capitalized in docstring - D404 = 'CONVENTION', # docstring should not start with "This" - D405 = 'CONVENTION', # docstring section name should be capitalized - D406 = 'CONVENTION', # docstring section name should end with newline - D407 = 'CONVENTION', # docstring missing dashed underline after section name - D408 = 'CONVENTION', # docstring section underline should be directly after section name - D409 = 'CONVENTION', # docstring section underline should match length of section name - D410 = 'CONVENTION', # missing newline after docstring section - D411 = 'CONVENTION', # missing newline before docstring section - D412 = 'CONVENTION', # missing blank line between docstring section header and contents - D413 = 'CONVENTION', # missing blank line after last docstring section - D414 = 'CONVENTION', # docstring section has no contents - D415 = None, # docstring first line should end with punctuation - D416 = None, # docstring section name should end with a colon - D417 = 'CONVENTION', # docstring missing argument description - D418 = 'CONVENTION', # func decorated with `@overload` shouldn't contain docstring - D419 = 'CONVENTION', # docstring is empty + pydocstyle=dict( + D100="CONVENTION", # missing docstring in module + D101="CONVENTION", # missing docstring in class + D102="CONVENTION", # missing docstring in method + D103="CONVENTION", # missing docstring in function + D104="CONVENTION", # missing docstring in package + D105="CONVENTION", 
# missing docstring in magic method + D106="CONVENTION", # missing docstring in nested class + D107="CONVENTION", # missing docstring in __init__ + D200="CONVENTION", # one line docstring not on one line + D201=None, # (conflicts D211) blank line before func docstring + D202=None, # blank line after func docstring + D203="CONVENTION", # missing blank line before class docstring + D204="CONVENTION", # missing blank line after class docstring + D205="CONVENTION", # missing blank line between summary and description in docstring + D206="CONVENTION", # docstring should be indented with spaces not tabs + D207="CONVENTION", # under-indented docstring + D208="CONVENTION", # over-indented docstring + D209="CONVENTION", # multi-line docstring should close on seperate line + D210=None, # whitespace surrounding docstring text + D211="CONVENTION", # blank line before class docstring + D212=None, # (conflicts D213) multi-line summary not on first line + D213="CONVENTION", # docstring multi-line summary not on first line + D214="CONVENTION", # docstring section over-indented + D215="CONVENTION", # docstring section underline is over-indented + D300="CONVENTION", # docstring should use triple double-quotes (""") + D301="CONVENTION", # docstring should use raw triple double-quotes (r""") if backslash in docstring + D400=None, # docstring first line should end with '.' 
+ D401="CONVENTION", # docstring first line should be in imperative mood + D402="CONVENTION", # docstring first line shoudl not be in function's signature + D403="CONVENTION", # first word of line should be capitalized in docstring + D404="CONVENTION", # docstring should not start with "This" + D405="CONVENTION", # docstring section name should be capitalized + D406="CONVENTION", # docstring section name should end with newline + D407="CONVENTION", # docstring missing dashed underline after section name + D408="CONVENTION", # docstring section underline should be directly after section name + D409="CONVENTION", # docstring section underline should match length of section name + D410="CONVENTION", # missing newline after docstring section + D411="CONVENTION", # missing newline before docstring section + D412="CONVENTION", # missing blank line between docstring section header and contents + D413="CONVENTION", # missing blank line after last docstring section + D414="CONVENTION", # docstring section has no contents + D415=None, # docstring first line should end with punctuation + D416=None, # docstring section name should end with a colon + D417="CONVENTION", # docstring missing argument description + D418="CONVENTION", # func decorated with `@overload` shouldn't contain docstring + D419="CONVENTION", # docstring is empty ), - pyupgrade = dict( - UP001 = 'REFACTOR', # `__metaclass__ = type` (implied) - - UP003 = 'REFACTOR', # type of primitive used instead of type name `type(1)` instead of `int` - UP004 = 'REFACTOR', # class inheriting from object (implied) - UP005 = 'REFACTOR', # deprecated unittest aliases used - UP006 = 'REFACTOR', # pre pip858 annotation - UP007 = 'REFACTOR', # pre pip604 union annotations - UP008 = 'REFACTOR', # using `super(__class__, self)` instead of `super()` - UP009 = 'REFACTOR', # UTF-8 declaration (implied) - UP010 = 'REFACTOR', # unnecessary `__future__` import - UP011 = 'REFACTOR', # unnecessary parentheses to `functools.lru_cache` - 
UP012 = 'REFACTOR', # unnecessary call to encode as UTF-8 - UP013 = 'REFACTOR', # named dict should use class syntax - UP014 = 'REFACTOR', # named tuples should use class syntax - UP015 = None, # redundant `open(..., 'r')` when read mode is default - - UP017 = 'REFACTOR', # should use datetime.UTC alias for datetime.timezone.utc - UP018 = 'REFACTOR', # unnecessary literal-type call to literal `str("foo")` - UP019 = 'REFACTOR', # use of `typing.Text` instead of `str` - UP020 = 'REFACTOR', # use of `io.open` instead of `open` builtin alias - UP021 = 'REFACTOR', # use of `universal_newlines` kwarg instead of `text` to subprocess - UP022 = 'REFACTOR', # use of `stdout=PIPE` and `stderr=PIPE` instead of `capture_output` in subprocess - UP023 = 'REFACTOR', # use of `cElementTree` instead of `ElementTree` in `xml.etree` - UP024 = 'REFACTOR', # use of exception that aliases `OSError` - UP025 = 'REFACTOR', # use of unicode literal string - UP026 = 'REFACTOR', # use of `mock` instead of `unittest.mock` - UP027 = 'REFACTOR', # use of unpacked list comprehension instead of generator expression - UP028 = 'REFACTOR', # use of yield only for loop instead of `yield from` - UP029 = 'REFACTOR', # importing builtin - UP030 = 'REFACTOR', # explicit positional format where implicit is correct - UP031 = 'REFACTOR', # use of percent format instead of `.format` - UP032 = 'REFACTOR', # use of `.format` instead of f-string - UP033 = 'REFACTOR', # use of `functools.lru_cache` with `maxsize=None` - UP034 = 'REFACTOR', # extra parentheses - UP035 = 'REFACTOR', # use of deprecated import - UP036 = None, # use of version-block test - UP037 = 'REFACTOR', # use of unnecessary quoted annotation - UP038 = 'REFACTOR', # use of `isinstance(..., (a, b))` instead of `isinstance(..., a | b)` - UP039 = 'REFACTOR', # use of unnecessary parenthesis after class definition - UP040 = 'REFACTOR', # use of `TypeAlias` instead of `type` keyword - UP041 = 'REFACTOR', # use of aliases to `TimeoutError` - UP042 = 
'REFACTOR', # multiple inheritence instead of `StrEnum` - UP043 = 'REFACTOR', # unnecessary default args for typing + pyupgrade=dict( + UP001="REFACTOR", # `__metaclass__ = type` (implied) + UP003="REFACTOR", # type of primitive used instead of type name `type(1)` instead of `int` + UP004="REFACTOR", # class inheriting from object (implied) + UP005="REFACTOR", # deprecated unittest aliases used + UP006="REFACTOR", # pre pip858 annotation + UP007="REFACTOR", # pre pip604 union annotations + UP008="REFACTOR", # using `super(__class__, self)` instead of `super()` + UP009="REFACTOR", # UTF-8 declaration (implied) + UP010="REFACTOR", # unnecessary `__future__` import + UP011="REFACTOR", # unnecessary parentheses to `functools.lru_cache` + UP012="REFACTOR", # unnecessary call to encode as UTF-8 + UP013="REFACTOR", # named dict should use class syntax + UP014="REFACTOR", # named tuples should use class syntax + UP015=None, # redundant `open(..., 'r')` when read mode is default + UP017="REFACTOR", # should use datetime.UTC alias for datetime.timezone.utc + UP018="REFACTOR", # unnecessary literal-type call to literal `str("foo")` + UP019="REFACTOR", # use of `typing.Text` instead of `str` + UP020="REFACTOR", # use of `io.open` instead of `open` builtin alias + UP021="REFACTOR", # use of `universal_newlines` kwarg instead of `text` to subprocess + UP022="REFACTOR", # use of `stdout=PIPE` and `stderr=PIPE` instead of `capture_output` in subprocess + UP023="REFACTOR", # use of `cElementTree` instead of `ElementTree` in `xml.etree` + UP024="REFACTOR", # use of exception that aliases `OSError` + UP025="REFACTOR", # use of unicode literal string + UP026="REFACTOR", # use of `mock` instead of `unittest.mock` + UP027="REFACTOR", # use of unpacked list comprehension instead of generator expression + UP028="REFACTOR", # use of yield only for loop instead of `yield from` + UP029="REFACTOR", # importing builtin + UP030="REFACTOR", # explicit positional format where implicit is correct 
+ UP031="REFACTOR", # use of percent format instead of `.format` + UP032="REFACTOR", # use of `.format` instead of f-string + UP033="REFACTOR", # use of `functools.lru_cache` with `maxsize=None` + UP034="REFACTOR", # extra parentheses + UP035="REFACTOR", # use of deprecated import + UP036=None, # use of version-block test + UP037="REFACTOR", # use of unnecessary quoted annotation + UP038="REFACTOR", # use of `isinstance(..., (a, b))` instead of `isinstance(..., a | b)` + UP039="REFACTOR", # use of unnecessary parenthesis after class definition + UP040="REFACTOR", # use of `TypeAlias` instead of `type` keyword + UP041="REFACTOR", # use of aliases to `TimeoutError` + UP042="REFACTOR", # multiple inheritence instead of `StrEnum` + UP043="REFACTOR", # unnecessary default args for typing ), - flake8_2020 = dict( - YTT101 = 'REFACTOR', # `sys.version[:3]` instead of `sys.version_info` - YTT102 = 'REFACTOR', # `sys.version[2]` instead of `sys.version_info` - YTT103 = 'REFACTOR', # `sys.version` instead of `sys.version_info` - - YTT201 = None, # `sys.version_info[0] ==` instead of `sys.version_info[0] >=` - YTT202 = 'REFACTOR', # `six.PY3` checked over `not six.PY2` - YTT203 = 'REFACTOR', # `sys.version_info[1]` compared to integer instead of tuple - YTT204 = 'REFACTOR', # `sys.version_info.minor` compared to integer instead of tuple - - YTT301 = 'REFACTOR', # `sys.version[0]` referenced instead of `sys.version_info` - YTT302 = 'REFACTOR', # `sys.version` compared to string, instead of `sys.version_info` - YTT303 = 'REFACTOR', # `sys.version[:1]` referenced instead of `sys.version_info` - + flake8_2020=dict( + YTT101="REFACTOR", # `sys.version[:3]` instead of `sys.version_info` + YTT102="REFACTOR", # `sys.version[2]` instead of `sys.version_info` + YTT103="REFACTOR", # `sys.version` instead of `sys.version_info` + YTT201=None, # `sys.version_info[0] ==` instead of `sys.version_info[0] >=` + YTT202="REFACTOR", # `six.PY3` checked over `not six.PY2` + YTT203="REFACTOR", # 
`sys.version_info[1]` compared to integer instead of tuple + YTT204="REFACTOR", # `sys.version_info.minor` compared to integer instead of tuple + YTT301="REFACTOR", # `sys.version[0]` referenced instead of `sys.version_info` + YTT302="REFACTOR", # `sys.version` compared to string, instead of `sys.version_info` + YTT303="REFACTOR", # `sys.version[:1]` referenced instead of `sys.version_info` ), - flake8_annotations = dict( + flake8_annotations=dict( # NOTE do we want to ensure typing in inithooks? - ANN001 = 'REFACTOR', # missing type annotation for func arg - ANN002 = 'REFACTOR', # missing type annotation for `*args` - ANN003 = 'REFACTOR', # missing type annotation for `**kwargs` - - ANN101 = None, # missing type annotation for `self` - ANN102 = None, # missing type annotation for `cls` - - ANN201 = 'REFACTOR', # missing return type annotation for public func - ANN202 = 'REFACTOR', # missing return type annotation for private func - - ANN204 = 'REFACTOR', # missing return type annotation for special method - ANN205 = 'REFACTOR', # missing return type annotation for static method - ANN206 = 'REFACTOR', # missing return type annotation for class method - - ANN401 = 'REFACTOR', # use of dynamically typed expression + ANN001="REFACTOR", # missing type annotation for func arg + ANN002="REFACTOR", # missing type annotation for `*args` + ANN003="REFACTOR", # missing type annotation for `**kwargs` + ANN101=None, # missing type annotation for `self` + ANN102=None, # missing type annotation for `cls` + ANN201="REFACTOR", # missing return type annotation for public func + ANN202="REFACTOR", # missing return type annotation for private func + ANN204="REFACTOR", # missing return type annotation for special method + ANN205="REFACTOR", # missing return type annotation for static method + ANN206="REFACTOR", # missing return type annotation for class method + ANN401="REFACTOR", # use of dynamically typed expression ), - flake8_async = dict( - ASYNC100 = 'REFACTOR', # `async with` 
used with no internal `await` - - ASYNC105 = 'REFACTOR', # trio called without await - - ASYNC109 = 'REFACTOR', # async function with timeout param - ASYNC110 = 'REFACTOR', # async busy wait - - ASYNC115 = 'REFACTOR', # use of sleep(0) in async - ASYNC116 = 'REFACTOR', # async use of `sleep` with very large time, instead of `sleep_forever()` - - ASYNC210 = 'REFACTOR', # async function using blocking http methods - - ASYNC220 = 'REFACTOR', # async function using blocking create subprocess methods - ASYNC221 = 'REFACTOR', # async function using blocking run subprocess methods - ASYNC222 = 'REFACTOR', # async function using blocking wait on subprocess - - ASYNC230 = 'REFACTOR', # async function opening file with blocking methods - - ASYNC251 = 'REFACTOR', # async function calling `time.sleep` + flake8_async=dict( + ASYNC100="REFACTOR", # `async with` used with no internal `await` + ASYNC105="REFACTOR", # trio called without await + ASYNC109="REFACTOR", # async function with timeout param + ASYNC110="REFACTOR", # async busy wait + ASYNC115="REFACTOR", # use of sleep(0) in async + ASYNC116="REFACTOR", # async use of `sleep` with very large time, instead of `sleep_forever()` + ASYNC210="REFACTOR", # async function using blocking http methods + ASYNC220="REFACTOR", # async function using blocking create subprocess methods + ASYNC221="REFACTOR", # async function using blocking run subprocess methods + ASYNC222="REFACTOR", # async function using blocking wait on subprocess + ASYNC230="REFACTOR", # async function opening file with blocking methods + ASYNC251="REFACTOR", # async function calling `time.sleep` ), - flake8_bandit = dict( - S101 = None, # use of assert - S102 = 'SECURITY', # use of exec - S103 = 'SECURITY', # chmod setting overly permissive mask - S104 = 'SECURITY', # address binding to 0.0.0.0 - S105 = 'SECURITY', # hardcoded password - S106 = 'SECURITY', # hardcoded password in argument - S107 = 'SECURITY', # hardcoded password in function default - S108 = 
'SECURITY', # hardcoded temp file - - S110 = 'SECURITY', # hardcoded try-except-pass (exception not logged) - - S112 = 'SECURITY', # hardcoded try-except-continue (exception not logged) - S113 = 'SECURITY', # request without timeout (might wait forever) - - S201 = 'SECURITY', # use of `debug=True` with flask - S202 = 'SECURITY', # use of `tarfile.extractall()` - - S301 = 'SECURITY', # possibly insecure use of pickle - S302 = 'SECURITY', # possibly insecure use of marshal - S303 = 'SECURITY', # use of MD2, MD4, MD5, SHA1 - S304 = 'SECURITY', # use of insecure cipher - S305 = 'SECURITY', # use of insecure block cipher mode - S306 = 'SECURITY', # use of `mktemp` - S307 = 'SECURITY', # use of `eval` - S308 = 'SECURITY', # use of `mark_safe` - - S310 = 'SECURITY', # use of unchecked URL - S311 = 'SECURITY', # use of standard prng for crypto - S312 = 'SECURITY', # use of telnet - S313 = 'SECURITY', # use of xmlc etree - S314 = 'SECURITY', # use of xml etree - S315 = 'SECURITY', # use of xml expat reader - S316 = 'SECURITY', # use of xml expat builder - S317 = 'SECURITY', # use of xml sax - S318 = 'SECURITY', # use of xml mini dom - S319 = 'SECURITY', # use of xml pull dom - S320 = 'SECURITY', # use of xmle etree - S321 = 'SECURITY', # use of ftp - - S323 = 'SECURITY', # use of ssl `_create_unverified_context` - S324 = 'SECURITY', # use of insecure hash functions - - S401 = 'SECURITY', # import of telnetlib - S402 = 'SECURITY', # import of ftplib - S403 = 'SECURITY', # use of pickle, cPickle, dill or shelve - S404 = None, # import of subprocess - S405 = 'SECURITY', # import of xml.etree - S406 = 'SECURITY', # import of xml.sax - S407 = 'SECURITY', # import of xml.dom.expatbuilder - S408 = 'SECURITY', # import of xml.dom.minidom - S409 = 'SECURITY', # import of xml.dom.pulldom - S410 = None, # import of lxml, note this should be disabled anyway as lxml has addressed insecurities - S411 = 'SECURITY', # import of xmlrpc - S412 = 'SECURITY', # import of httpoxy - S413 = 
'SECURITY', # import of pycrypto, publicly disclosed buffer overflow - S415 = 'SECURITY', # use of ipmi - - S501 = 'SECURITY', # ssl with disabled cert checks - S502 = 'SECURITY', # insecure ssl protocol - S503 = 'SECURITY', # ssl with bad defaults - S504 = 'SECURITY', # ssl without version specified - S505 = 'SECURITY', # weak crypto key size - S506 = 'SECURITY', # unsafe yaml loader - S507 = 'SECURITY', # paramiko ssh without host verification - S508 = 'SECURITY', # insecure snmp version - S509 = 'SECURITY', # snmp weak crypto - - S601 = 'SECURITY', # paramiko call - S602 = 'SECURITY', # subprocess popen with shell=True - S603 = None, # subprocess popen use at all - S604 = 'SECURITY', # func call with shell=True - S605 = 'SECURITY', # starting process with shell=True - S606 = None, # starting process without shell - S607 = 'SECURITY', # starting process with partial executable path - S608 = 'SECURITY', # hardcoded sql expression - S609 = 'SECURITY', # possible wildcard injection - S610 = 'SECURITY', # django `extra` use (can lead to SQL injection) - S611 = 'SECURITY', # django `RawSQL` use (can lead to SQL injection) - S612 = 'SECURITY', # use of insecure `logging.config.listen` - - S701 = 'SECURITY', # use of jinja2 templates with `autoescape=False` - S702 = 'SECURITY', # use of mako templates + flake8_bandit=dict( + S101=None, # use of assert + S102="SECURITY", # use of exec + S103="SECURITY", # chmod setting overly permissive mask + S104="SECURITY", # address binding to 0.0.0.0 + S105="SECURITY", # hardcoded password + S106="SECURITY", # hardcoded password in argument + S107="SECURITY", # hardcoded password in function default + S108="SECURITY", # hardcoded temp file + S110="SECURITY", # hardcoded try-except-pass (exception not logged) + S112="SECURITY", # hardcoded try-except-continue (exception not logged) + S113="SECURITY", # request without timeout (might wait forever) + S201="SECURITY", # use of `debug=True` with flask + S202="SECURITY", # use of 
`tarfile.extractall()` + S301="SECURITY", # possibly insecure use of pickle + S302="SECURITY", # possibly insecure use of marshal + S303="SECURITY", # use of MD2, MD4, MD5, SHA1 + S304="SECURITY", # use of insecure cipher + S305="SECURITY", # use of insecure block cipher mode + S306="SECURITY", # use of `mktemp` + S307="SECURITY", # use of `eval` + S308="SECURITY", # use of `mark_safe` + S310="SECURITY", # use of unchecked URL + S311="SECURITY", # use of standard prng for crypto + S312="SECURITY", # use of telnet + S313="SECURITY", # use of xmlc etree + S314="SECURITY", # use of xml etree + S315="SECURITY", # use of xml expat reader + S316="SECURITY", # use of xml expat builder + S317="SECURITY", # use of xml sax + S318="SECURITY", # use of xml mini dom + S319="SECURITY", # use of xml pull dom + S320="SECURITY", # use of xmle etree + S321="SECURITY", # use of ftp + S323="SECURITY", # use of ssl `_create_unverified_context` + S324="SECURITY", # use of insecure hash functions + S401="SECURITY", # import of telnetlib + S402="SECURITY", # import of ftplib + S403="SECURITY", # use of pickle, cPickle, dill or shelve + S404=None, # import of subprocess + S405="SECURITY", # import of xml.etree + S406="SECURITY", # import of xml.sax + S407="SECURITY", # import of xml.dom.expatbuilder + S408="SECURITY", # import of xml.dom.minidom + S409="SECURITY", # import of xml.dom.pulldom + S410=None, # import of lxml, note this should be disabled anyway as lxml has addressed insecurities + S411="SECURITY", # import of xmlrpc + S412="SECURITY", # import of httpoxy + S413="SECURITY", # import of pycrypto, publicly disclosed buffer overflow + S415="SECURITY", # use of ipmi + S501="SECURITY", # ssl with disabled cert checks + S502="SECURITY", # insecure ssl protocol + S503="SECURITY", # ssl with bad defaults + S504="SECURITY", # ssl without version specified + S505="SECURITY", # weak crypto key size + S506="SECURITY", # unsafe yaml loader + S507="SECURITY", # paramiko ssh without host 
verification + S508="SECURITY", # insecure snmp version + S509="SECURITY", # snmp weak crypto + S601="SECURITY", # paramiko call + S602="SECURITY", # subprocess popen with shell=True + S603=None, # subprocess popen use at all + S604="SECURITY", # func call with shell=True + S605="SECURITY", # starting process with shell=True + S606=None, # starting process without shell + S607="SECURITY", # starting process with partial executable path + S608="SECURITY", # hardcoded sql expression + S609="SECURITY", # possible wildcard injection + S610="SECURITY", # django `extra` use (can lead to SQL injection) + S611="SECURITY", # django `RawSQL` use (can lead to SQL injection) + S612="SECURITY", # use of insecure `logging.config.listen` + S701="SECURITY", # use of jinja2 templates with `autoescape=False` + S702="SECURITY", # use of mako templates ), - flake8_blint_except = dict( - BLE001 = 'WARN', # blind `except` + flake8_blint_except=dict( + BLE001="WARN", # blind `except` ), - flake8_boolean_trap = dict( - FBT001 = None, # boolean typed positional arg in function def - FBT002 = None, # boolean default positional argument in func def - FBT003 = 'REFACTOR', # boolean positional value in func call + flake8_boolean_trap=dict( + FBT001=None, # boolean typed positional arg in function def + FBT002=None, # boolean default positional argument in func def + FBT003="REFACTOR", # boolean positional value in func call ), - flake8_bugbear = dict( - B002 = 'ERROR', # unary prefix increment/decrement - B003 = 'ERROR', # assignment to `os.environ` - B004 = 'WARN', # using `hasattr(x, '__call__')` instead of `callable(x)` - B005 = 'WARN', # `.strip()` with multicharacter string is misleading - B006 = 'ERROR', # mutable default argument to function - B007 = 'REFACTOR', # unused loop control variable - B008 = 'REFACTOR', # function call in default argument - B009 = 'REFACTOR', # getattr with constant attribute value - B010 = 'REFACTOR', # setattr with constant attribute value - B011 = 
'REFACTOR', # use of `assert(False)` - B012 = 'REFACTOR', # jump in except - B013 = 'REFACTOR', # redundant tuple in except - B014 = 'WARN', # duplicate exception handler in `except` - B015 = 'WARN', # useless comparison - B016 = 'ERROR', # raise literal - B017 = 'ERROR', # `assertRaises(Exception, ...)` - B018 = 'ERROR', # useless expression - B019 = 'ERROR', # use of `functools.lru_cache` or `functools.cache` on method - B020 = 'WARN', # loop control variable overrides iterable - B021 = 'ERROR', # f-docstring (doesn't do what you think) - B022 = 'REFACTOR', # useless contextlib suppress - B023 = 'ERROR', # function defined in loop does not bind loop variable - B024 = 'REFACTOR', # abstract base class without abstract methods - B025 = 'ERROR', # duplicate try-except block - B026 = 'ERROR', # star unpack after keyword arg - B027 = 'ERROR', # empty method in abstract class - B028 = 'WARN', # `warnings.warn` without `stacklevel` - B029 = 'ERROR', # except with empty tuple - B030 = 'ERROR', # except with non-exception class - B031 = 'ERROR', # reuse of groupby generator - B032 = 'WARN', # possible accidental type annotation - B033 = 'ERROR', # duplicate item in set - B034 = 'REFACTOR', # potentially confusing use of positional arg with some `re.*` functions - B035 = 'ERROR', # dictionary comprehension with static key - - B039 = 'ERROR', # mutable default to contextvar - - B901 = 'ERROR', # return in generator - - B904 = 'ERROR', # raise without `from` inside exception handler (stops traceback propogating properly) - B905 = 'WARN', # zip without `strict=True` will truncate when iterables are of different lengths - - B909 = 'ERROR', # mutation of loop iterable + flake8_bugbear=dict( + B002="ERROR", # unary prefix increment/decrement + B003="ERROR", # assignment to `os.environ` + B004="WARN", # using `hasattr(x, '__call__')` instead of `callable(x)` + B005="WARN", # `.strip()` with multicharacter string is misleading + B006="ERROR", # mutable default argument to function 
+ B007="REFACTOR", # unused loop control variable + B008="REFACTOR", # function call in default argument + B009="REFACTOR", # getattr with constant attribute value + B010="REFACTOR", # setattr with constant attribute value + B011="REFACTOR", # use of `assert(False)` + B012="REFACTOR", # jump in except + B013="REFACTOR", # redundant tuple in except + B014="WARN", # duplicate exception handler in `except` + B015="WARN", # useless comparison + B016="ERROR", # raise literal + B017="ERROR", # `assertRaises(Exception, ...)` + B018="ERROR", # useless expression + B019="ERROR", # use of `functools.lru_cache` or `functools.cache` on method + B020="WARN", # loop control variable overrides iterable + B021="ERROR", # f-docstring (doesn't do what you think) + B022="REFACTOR", # useless contextlib suppress + B023="ERROR", # function defined in loop does not bind loop variable + B024="REFACTOR", # abstract base class without abstract methods + B025="ERROR", # duplicate try-except block + B026="ERROR", # star unpack after keyword arg + B027="ERROR", # empty method in abstract class + B028="WARN", # `warnings.warn` without `stacklevel` + B029="ERROR", # except with empty tuple + B030="ERROR", # except with non-exception class + B031="ERROR", # reuse of groupby generator + B032="WARN", # possible accidental type annotation + B033="ERROR", # duplicate item in set + B034="REFACTOR", # potentially confusing use of positional arg with some `re.*` functions + B035="ERROR", # dictionary comprehension with static key + B039="ERROR", # mutable default to contextvar + B901="ERROR", # return in generator + B904="ERROR", # raise without `from` inside exception handler (stops traceback propogating properly) + B905="WARN", # zip without `strict=True` will truncate when iterables are of different lengths + B909="ERROR", # mutation of loop iterable ), - flake8_builtins = dict( - A001 = 'WARN', # variable shadows builtin - A002 = 'WARN', # argument shadows builtin - A003 = 'WARN', # class attribute 
shadows builtin - A004 = 'WARN', # import shadows builtin - A005 = 'WARN', # module shadows builtin - A006 = 'WARN', # lambda argument shadows builtin + flake8_builtins=dict( + A001="WARN", # variable shadows builtin + A002="WARN", # argument shadows builtin + A003="WARN", # class attribute shadows builtin + A004="WARN", # import shadows builtin + A005="WARN", # module shadows builtin + A006="WARN", # lambda argument shadows builtin ), - flake8_commas = dict( - COM812 = None, # missing trailing comma in sequence - COM818 = 'ERROR', # trailing comma on bare tuple - COM819 = None, # missing trailing comma in tuple + flake8_commas=dict( + COM812=None, # missing trailing comma in sequence + COM818="ERROR", # trailing comma on bare tuple + COM819=None, # missing trailing comma in tuple ), - flake8_copyright = dict( - CPY001 = None, # missing copyright notice at top of file + flake8_copyright=dict( + CPY001=None, # missing copyright notice at top of file ), - flake8_comprehensions = dict( - C400 = 'REFACTOR', # unnecessary generator (could be `list()`) - C401 = 'REFACTOR', # unnecessary generator (could be `set()`) - C402 = 'REFACTOR', # unnecessary generator (could be dict comprehension) - C403 = 'REFACTOR', # unnecessary list comprehension (could be set comprehension) - C404 = 'REFACTOR', # unnecessary list comprehension (could be dict comprehension) - C405 = 'REFACTOR', # unnecessary literal, use literal set instead - C406 = 'REFACTOR', # unnecessary literal, use literal dict instead - - C408 = 'REFACTOR', # unnecessary literal call, use literal instead - C409 = 'REFACTOR', # unnecessary list literal passed to tuple, use literal tuple instead - C410 = 'REFACTOR', # unnecessary list literal passed to `list()` (remove outer list call) - C411 = 'REFACTOR', # same as C410, but catches list comprehension - - C413 = 'REFACTOR', # unnecessary call around sorted (`list(...)` or `reversed(...)`) - C414 = 'REFACTOR', # unnecessary double cast - C415 = 'REFACTOR', # unnecessary 
subscript reversal - C416 = 'REFACTOR', # unnecessary comprehension - C417 = 'REFACTOR', # unnecessary map - C418 = 'REFACTOR', # unnecessary dict literal passed to `dict()` (remove outer dict call) - C419 = 'REFACTOR', # unnecessary list comprehension - C420 = 'REFACTOR', # unnecessary dict comprehension for iterable (use dict.fromkeys instead) + flake8_comprehensions=dict( + C400="REFACTOR", # unnecessary generator (could be `list()`) + C401="REFACTOR", # unnecessary generator (could be `set()`) + C402="REFACTOR", # unnecessary generator (could be dict comprehension) + C403="REFACTOR", # unnecessary list comprehension (could be set comprehension) + C404="REFACTOR", # unnecessary list comprehension (could be dict comprehension) + C405="REFACTOR", # unnecessary literal, use literal set instead + C406="REFACTOR", # unnecessary literal, use literal dict instead + C408="REFACTOR", # unnecessary literal call, use literal instead + C409="REFACTOR", # unnecessary list literal passed to tuple, use literal tuple instead + C410="REFACTOR", # unnecessary list literal passed to `list()` (remove outer list call) + C411="REFACTOR", # same as C410, but catches list comprehension + C413="REFACTOR", # unnecessary call around sorted (`list(...)` or `reversed(...)`) + C414="REFACTOR", # unnecessary double cast + C415="REFACTOR", # unnecessary subscript reversal + C416="REFACTOR", # unnecessary comprehension + C417="REFACTOR", # unnecessary map + C418="REFACTOR", # unnecessary dict literal passed to `dict()` (remove outer dict call) + C419="REFACTOR", # unnecessary list comprehension + C420="REFACTOR", # unnecessary dict comprehension for iterable (use dict.fromkeys instead) ), - flake8_datetimez = dict( - DTZ001 = 'WARN', # `datetime.datetime(...)` without `tzinfo` argument - DTZ002 = 'WARN', # naive `datetime.datetime.today()` used - DTZ003 = 'WARN', # naive `datetime.datetime.utcnow()` used - DTZ004 = 'WARN', # naive `datetime.datetime.utcfromtimestamp()` used - DTZ005 = 'WARN', # 
`datetime.datetime.now(...)` without `tz` argument - DTZ006 = 'WARN', # `datetime.datetime.fromtimestamp(...)` without `tz` argument - DTZ007 = 'WARN', # naive datetime constructed using `datetime.datetime.strptime()` - - DTZ011 = 'WARN', # naive `datetime.date.today()` used - DTZ012 = 'WARN', # naive `datetime.date.fromtimestamp()` used + flake8_datetimez=dict( + DTZ001="WARN", # `datetime.datetime(...)` without `tzinfo` argument + DTZ002="WARN", # naive `datetime.datetime.today()` used + DTZ003="WARN", # naive `datetime.datetime.utcnow()` used + DTZ004="WARN", # naive `datetime.datetime.utcfromtimestamp()` used + DTZ005="WARN", # `datetime.datetime.now(...)` without `tz` argument + DTZ006="WARN", # `datetime.datetime.fromtimestamp(...)` without `tz` argument + DTZ007="WARN", # naive datetime constructed using `datetime.datetime.strptime()` + DTZ011="WARN", # naive `datetime.date.today()` used + DTZ012="WARN", # naive `datetime.date.fromtimestamp()` used ), - flake8_debugger = dict( - T100 = 'WARN', # debug trace or breakpoint found + flake8_debugger=dict( + T100="WARN", # debug trace or breakpoint found ), - flake8_django = dict( - DJ001 = 'REFACTOR', # `null=True` on string-based field - - DJ003 = 'REFACTOR', # use of `locals()` as context in reader function - - DJ006 = 'REFACTOR', # use of `exclude` instead of `fields` in `ModelForm` - DJ007 = 'REFACTOR', # use of `__all__` instead of `fields` in `ModelForm` - DJ008 = 'REFACTOR', # model doesn't define `__str__` - - DJ012 = 'CONVENTION', # order of model's inner classes, methods and fields does not follow django style guide - DJ013 = 'ERROR', # `@receiver` must be on top of all other decorators + flake8_django=dict( + DJ001="REFACTOR", # `null=True` on string-based field + DJ003="REFACTOR", # use of `locals()` as context in reader function + DJ006="REFACTOR", # use of `exclude` instead of `fields` in `ModelForm` + DJ007="REFACTOR", # use of `__all__` instead of `fields` in `ModelForm` + DJ008="REFACTOR", # 
model doesn't define `__str__` + DJ012="CONVENTION", # order of model's inner classes, methods and fields does not follow django style guide + DJ013="ERROR", # `@receiver` must be on top of all other decorators ), - flake8_errmsg = dict( - EM101 = 'REFACTOR', # raw string in exception (shows twice in traceback) - EM102 = 'REFACTOR', # f-string in exception (shows twice in traceback) - EM103 = 'REFACTOR', # .format in exception (shows twice in traceback) + flake8_errmsg=dict( + EM101="REFACTOR", # raw string in exception (shows twice in traceback) + EM102="REFACTOR", # f-string in exception (shows twice in traceback) + EM103="REFACTOR", # .format in exception (shows twice in traceback) ), - flake8_executable = dict( - EXE001 = 'WARN', # shebang is present but file is not executable - EXE002 = 'WARN', # file is executable but shebang is not present - EXE003 = 'WARN', # shebang doesn't contain "python" - EXE004 = 'WARN', # shebang has leading whitespace - EXE005 = 'WARN', # shebang not on first line + flake8_executable=dict( + EXE001="WARN", # shebang is present but file is not executable + EXE002="WARN", # file is executable but shebang is not present + EXE003="WARN", # shebang doesn't contain "python" + EXE004="WARN", # shebang has leading whitespace + EXE005="WARN", # shebang not on first line ), - flake8_future_annotations = dict( - FA100 = 'WARN', # uses old annotations rather than importing `__future__.annotations` - FA102 = None, # uses new annotations and doesn't import `__future__.annotations` + flake8_future_annotations=dict( + FA100="WARN", # uses old annotations rather than importing `__future__.annotations` + FA102=None, # uses new annotations and doesn't import `__future__.annotations` ), - flake8_implicit_str_concat = dict( - ISC001 = 'REFACTOR', # implicit str concat on one line - ISC002 = 'REFACTOR', # implicit str concat over multiple lines - ISC003 = 'REFACTOR', # explicit str concat could be implicit str concat + flake8_implicit_str_concat=dict( + 
ISC001="REFACTOR", # implicit str concat on one line + ISC002="REFACTOR", # implicit str concat over multiple lines + ISC003="REFACTOR", # explicit str concat could be implicit str concat ), - flake8_import_conventions = dict( - ICN001 = 'CONVENTION', # not using conventional import alias - ICN002 = 'CONVENTION', # using explicitly non-conventional import alias - ICN003 = 'CONVENTION', # using explicitly non-conventional import from instead of alias + flake8_import_conventions=dict( + ICN001="CONVENTION", # not using conventional import alias + ICN002="CONVENTION", # using explicitly non-conventional import alias + ICN003="CONVENTION", # using explicitly non-conventional import from instead of alias ), - flake8_logging = dict( - LOG001 = 'WARN', # using `logger.Logger` directly - LOG002 = 'WARN', # using `__cached__` or `__file__` for logger name - LOG003 = 'WARN', # using `logger.exception` without `exc_info` - LOG004 = 'REFACTOR', # using `logging.WARN` + flake8_logging=dict( + LOG001="WARN", # using `logger.Logger` directly + LOG002="WARN", # using `__cached__` or `__file__` for logger name + LOG003="WARN", # using `logger.exception` without `exc_info` + LOG004="REFACTOR", # using `logging.WARN` ), - flake8_logging_format = dict( - G001 = 'REFACTOR', # logging statement uses `.format` - G002 = 'REFACTOR', # logging statement uses `%` formatting - G003 = 'REFACTOR', # logging statement uses `+` string concatenation - G004 = 'REFACTOR', # logging statement uses f-string formatting - G010 = 'REFACTOR', # logging uses `.warn` instead of `.warning` - - G101 = 'WARN', # logging uses an `extra` field that conflicts with a `LogRecord` field - - G201 = 'REFACTOR', # logging `.error(..., exc_info=True)` instead of `.exception(...)` - G202 = 'REFACTOR', # logging statement has redundant `exc_info` + flake8_logging_format=dict( + G001="REFACTOR", # logging statement uses `.format` + G002="REFACTOR", # logging statement uses `%` formatting + G003="REFACTOR", # logging 
statement uses `+` string concatenation + G004="REFACTOR", # logging statement uses f-string formatting + G010="REFACTOR", # logging uses `.warn` instead of `.warning` + G101="WARN", # logging uses an `extra` field that conflicts with a `LogRecord` field + G201="REFACTOR", # logging `.error(..., exc_info=True)` instead of `.exception(...)` + G202="REFACTOR", # logging statement has redundant `exc_info` ), - flake8_no_pep420 = dict( - INP001 = 'REFACTOR', # package missing an `__init__.py` + flake8_no_pep420=dict( + INP001="REFACTOR", # package missing an `__init__.py` ), - flake8_pie = dict( - PIE790 = 'REFACTOR', # unnecessary `pass` statement - - PIE794 = 'WARN', # class field defined multiple times - - PIE796 = 'WARN', # enum contains duplicate value - - PIE800 = 'REFACTOR', # unnecessary dict spread - - PIE804 = 'WARN', # unnecessary `**` kwargs - - PIE807 = 'REFACTOR', # unnecessary `lambda` in dataclass field - PIE808 = 'REFACTOR', # unnecessary `range` start argument - - PIE810 = 'REFACTOR', # multiple `.startswith` or `.endswith` calls + flake8_pie=dict( + PIE790="REFACTOR", # unnecessary `pass` statement + PIE794="WARN", # class field defined multiple times + PIE796="WARN", # enum contains duplicate value + PIE800="REFACTOR", # unnecessary dict spread + PIE804="WARN", # unnecessary `**` kwargs + PIE807="REFACTOR", # unnecessary `lambda` in dataclass field + PIE808="REFACTOR", # unnecessary `range` start argument + PIE810="REFACTOR", # multiple `.startswith` or `.endswith` calls ), - flake8_print = dict( - T201 = None, # `print` call found - T202 = None, # `pprint` call found + flake8_print=dict( + T201=None, # `print` call found + T202=None, # `pprint` call found ), - flake8_pyi = dict( - PYI001 = 'REFACTOR', # private type param should start with `_` - PYI002 = 'REFACTOR', # overly complex `sys.version_info` comparison - PYI003 = 'WARN', # bad `sys.version_info` check - PYI004 = 'WARN', # `sys.version_info` patch version check - PYI005 = 'WARN', # 
incorrect tuple length in `sys.version_info` comparison - PYI006 = 'WARN', # bad `sys.version_info` check - PYI007 = 'WARN', # bad `sys.platform` check - PYI008 = 'WARN', # unrecognized `sys.platform` name - PYI009 = 'REFACTOR', # using `pass` instead of `...` in stub block - PYI010 = 'WARN', # non-empty stub block - PYI011 = 'WARN', # non-trivial default value - PYI012 = 'WARN', # pass in class body - PYI013 = 'WARN', # non-empty class body contains `...` in stub - PYI014 = 'WARN', # complex default value for argument in stub - PYI015 = 'WARN', # complex default value for assignment in stub - PYI016 = 'WARN', # duplicate union member - PYI017 = 'WARN', # complex assignment in stub - PYI018 = 'WARN', # unused private type var - PYI019 = 'REFACTOR', # custom type var return instead of `typing.Self` - PYI020 = 'WARN', # quoted annotation in stub - PYI021 = 'WARN', # docstring in stub - - PYI024 = 'WARN', # `collections.namedtuple` instead of `typing.NamedTuple` in stub - PYI025 = 'CONVENTION', # use of unaliased `collections.abc.Set` - PYI026 = 'WARN', # type alias without `typing.TypeAlias` type - - PYI029 = 'REFACTOR', # explicitly defined method which is always implicitly defined in stub - PYI030 = 'REFACTOR', # unnecessary literal union - - PYI032 = 'WARN', # `typing.Any` used in `__eq__` or `__ne__` instead of `object` - PYI033 = 'REFACTOR', # comment in stub file - PYI034 = 'WARN', # non-self return type in `__new__` - PYI035 = 'WARN', # unassigned special variables - PYI036 = 'WARN', # bad typing in `__exit__` or `__aexit__` in stub - - PYI041 = 'REFACTOR', # redundant numeric union - PYI042 = 'CONVENTION', # type alias should be CamelCase - PYI043 = 'CONVENTION', # private type alias should not be suffixed with `T` - PYI044 = 'REFACTOR', # `__future__.annotations` don't effect stubs - PYI045 = 'WARN', # `__aiter__` returns incorrect type - PYI046 = 'WARN', # private protocol never used - PYI047 = 'WARN', # private type alias never used - PYI048 = 'WARN', # 
stub body contains multiple statements - PYI049 = 'WARN', # private `TypedDict` never used - PYI050 = 'CONVENTION', # prefer `Never` over `NoReturn` for function arguments - PYI051 = 'REFACTOR', # redundant literal union - PYI052 = 'WARN', # un-typed assignment in stub - PYI053 = 'WARN', # string too long in stub - PYI054 = 'WARN', # int too long in stub - PYI055 = 'REFACTOR', # unnecessary type union - PYI056 = 'WARN', # unsupported method call on `__all__` - PYI057 = 'WARN', # use of `ByteString` - PYI058 = 'REFACTOR', # use of `Generator` instead of `Iterator` - PYI059 = 'WARN', # `Generic[]` should always be last base class - - PYI062 = 'WARN', # duplicate literal member - PYI063 = 'WARN', # pre pip570 syntax for positional arg - PYI064 = 'REFACTOR', # redundant final literal - - PYI066 = 'WARN', # prefer `>=` when using if-else with sys.version_info + flake8_pyi=dict( + PYI001="REFACTOR", # private type param should start with `_` + PYI002="REFACTOR", # overly complex `sys.version_info` comparison + PYI003="WARN", # bad `sys.version_info` check + PYI004="WARN", # `sys.version_info` patch version check + PYI005="WARN", # incorrect tuple length in `sys.version_info` comparison + PYI006="WARN", # bad `sys.version_info` check + PYI007="WARN", # bad `sys.platform` check + PYI008="WARN", # unrecognized `sys.platform` name + PYI009="REFACTOR", # using `pass` instead of `...` in stub block + PYI010="WARN", # non-empty stub block + PYI011="WARN", # non-trivial default value + PYI012="WARN", # pass in class body + PYI013="WARN", # non-empty class body contains `...` in stub + PYI014="WARN", # complex default value for argument in stub + PYI015="WARN", # complex default value for assignment in stub + PYI016="WARN", # duplicate union member + PYI017="WARN", # complex assignment in stub + PYI018="WARN", # unused private type var + PYI019="REFACTOR", # custom type var return instead of `typing.Self` + PYI020="WARN", # quoted annotation in stub + PYI021="WARN", # docstring 
in stub + PYI024="WARN", # `collections.namedtuple` instead of `typing.NamedTuple` in stub + PYI025="CONVENTION", # use of unaliased `collections.abc.Set` + PYI026="WARN", # type alias without `typing.TypeAlias` type + PYI029="REFACTOR", # explicitly defined method which is always implicitly defined in stub + PYI030="REFACTOR", # unnecessary literal union + PYI032="WARN", # `typing.Any` used in `__eq__` or `__ne__` instead of `object` + PYI033="REFACTOR", # comment in stub file + PYI034="WARN", # non-self return type in `__new__` + PYI035="WARN", # unassigned special variables + PYI036="WARN", # bad typing in `__exit__` or `__aexit__` in stub + PYI041="REFACTOR", # redundant numeric union + PYI042="CONVENTION", # type alias should be CamelCase + PYI043="CONVENTION", # private type alias should not be suffixed with `T` + PYI044="REFACTOR", # `__future__.annotations` don't effect stubs + PYI045="WARN", # `__aiter__` returns incorrect type + PYI046="WARN", # private protocol never used + PYI047="WARN", # private type alias never used + PYI048="WARN", # stub body contains multiple statements + PYI049="WARN", # private `TypedDict` never used + PYI050="CONVENTION", # prefer `Never` over `NoReturn` for function arguments + PYI051="REFACTOR", # redundant literal union + PYI052="WARN", # un-typed assignment in stub + PYI053="WARN", # string too long in stub + PYI054="WARN", # int too long in stub + PYI055="REFACTOR", # unnecessary type union + PYI056="WARN", # unsupported method call on `__all__` + PYI057="WARN", # use of `ByteString` + PYI058="REFACTOR", # use of `Generator` instead of `Iterator` + PYI059="WARN", # `Generic[]` should always be last base class + PYI062="WARN", # duplicate literal member + PYI063="WARN", # pre pip570 syntax for positional arg + PYI064="REFACTOR", # redundant final literal + PYI066="WARN", # prefer `>=` when using if-else with sys.version_info ), - flake8_pytest_style = dict( - PT001 = 'WARN', # `pytest.fixture()` used instead of 
`pytest.fixture` - PT002 = 'WARN', # incorrect config for `pytest.fixture` - PT003 = 'REFACTOR', # redundant `scope=function` in `pytest.fixture(...)` - PT004 = None, # fixture should have leading underscore (lint deprecated) - PT005 = None, # fixture should not have leading underscore (lint deprecated) - PT006 = 'ERROR', # wrong type passed to first argument of `pytest.mark.parametrize` - PT007 = 'ERROR', # wrong values passed to `pytest.mark.parametrize` - PT008 = 'REFACTOR', # use `return_value=` instead of patching with lambda - PT009 = 'REFACTOR', # use regular `assert` instead of unittest-style - PT010 = 'WARN', # set expected exception in `pytest.raises` - PT011 = 'WARN', # `pytest.raises` too broad (set match param) - PT012 = 'WARN', # `pytest.raises` should contain a single statement - PT013 = 'WARN', # incorrect import of pytest - PT014 = 'WARN', # duplicate test case - PT015 = 'WARN', # assertion always fails (replace with `pytest.fail`) - PT016 = 'WARN', # no message passed to `pytest.fail` - PT017 = 'WARN', # assert in `except` block (use `pytest.fail` instead) - PT018 = 'WARN', # assertion should be broken down into multiple parts - PT019 = 'WARN', # fixture without value is injected as param, use `pytest.mark.usefixtures` - PT020 = 'WARN', # `pytest.yield_fixture` is deprecated - PT021 = 'WARN', # use `yield` instead of `request.addfinalizer` - PT022 = 'WARN', # no teardown, use `return` instead of `yield` - PT023 = 'WARN', # incorrecet `pytest.mark` parenthesis style - PT024 = 'WARN', # `pytest.mark.asyncio` is unnecessary for fixtures - PT025 = 'REFACTOR', # `pytest.mark.usefixtures` has no effect on fixtures - PT026 = 'REFACTOR', # useless `pytest.mark.usefixtures` without parameters - PT027 = 'REFACTOR', # use `pytest.raises` instead of unittest-style + flake8_pytest_style=dict( + PT001="WARN", # `pytest.fixture()` used instead of `pytest.fixture` + PT002="WARN", # incorrect config for `pytest.fixture` + PT003="REFACTOR", # redundant 
`scope=function` in `pytest.fixture(...)` + PT004=None, # fixture should have leading underscore (lint deprecated) + PT005=None, # fixture should not have leading underscore (lint deprecated) + PT006="ERROR", # wrong type passed to first argument of `pytest.mark.parametrize` + PT007="ERROR", # wrong values passed to `pytest.mark.parametrize` + PT008="REFACTOR", # use `return_value=` instead of patching with lambda + PT009="REFACTOR", # use regular `assert` instead of unittest-style + PT010="WARN", # set expected exception in `pytest.raises` + PT011="WARN", # `pytest.raises` too broad (set match param) + PT012="WARN", # `pytest.raises` should contain a single statement + PT013="WARN", # incorrect import of pytest + PT014="WARN", # duplicate test case + PT015="WARN", # assertion always fails (replace with `pytest.fail`) + PT016="WARN", # no message passed to `pytest.fail` + PT017="WARN", # assert in `except` block (use `pytest.fail` instead) + PT018="WARN", # assertion should be broken down into multiple parts + PT019="WARN", # fixture without value is injected as param, use `pytest.mark.usefixtures` + PT020="WARN", # `pytest.yield_fixture` is deprecated + PT021="WARN", # use `yield` instead of `request.addfinalizer` + PT022="WARN", # no teardown, use `return` instead of `yield` + PT023="WARN", # incorrecet `pytest.mark` parenthesis style + PT024="WARN", # `pytest.mark.asyncio` is unnecessary for fixtures + PT025="REFACTOR", # `pytest.mark.usefixtures` has no effect on fixtures + PT026="REFACTOR", # useless `pytest.mark.usefixtures` without parameters + PT027="REFACTOR", # use `pytest.raises` instead of unittest-style ), - flake8_quotes = dict( - Q000 = None, # single quotes found but double preferred - Q001 = None, # single quote multiline found but double preferred - Q002 = None, # single quote docstring found but double preferred - Q003 = 'REFACTOR', # change outer quotes to avoid escaping inner quotes - Q004 = 'REFACTOR', # unnecessary escape on inner quote 
character + flake8_quotes=dict( + Q000=None, # single quotes found but double preferred + Q001=None, # single quote multiline found but double preferred + Q002=None, # single quote docstring found but double preferred + Q003="REFACTOR", # change outer quotes to avoid escaping inner quotes + Q004="REFACTOR", # unnecessary escape on inner quote character ), - flake8_raise = dict( - RSE102 = 'REFACTOR', # unnecessary parentheses on raised exceptions + flake8_raise=dict( + RSE102="REFACTOR", # unnecessary parentheses on raised exceptions ), - flake8_return = dict( - RET501 = 'CONVENTION', # do not explicitly return `None` if it's the only possible return - RET502 = 'CONVENTION', # do not implicitly return `None` if other return values are possible - RET503 = 'CONVENTION', # missing explicit return at end of function able to return non-`None` value - RET504 = 'REFACTOR', # unnecessary assignment before return - RET505 = 'REFACTOR', # unnecessary branch after return - RET506 = 'REFACTOR', # unnecessary branch after raise - RET507 = 'REFACTOR', # unnecessary branch after continue - RET508 = 'REFACTOR', # unnecessary branch after break + flake8_return=dict( + RET501="CONVENTION", # do not explicitly return `None` if it's the only possible return + RET502="CONVENTION", # do not implicitly return `None` if other return values are possible + RET503="CONVENTION", # missing explicit return at end of function able to return non-`None` value + RET504="REFACTOR", # unnecessary assignment before return + RET505="REFACTOR", # unnecessary branch after return + RET506="REFACTOR", # unnecessary branch after raise + RET507="REFACTOR", # unnecessary branch after continue + RET508="REFACTOR", # unnecessary branch after break ), - flake8_self = dict( - SLF001 = 'WARN', # private member of a class accessed + flake8_self=dict( + SLF001="WARN", # private member of a class accessed ), - flake8_slots = dict( - SLOT000 = 'WARN', # subclass of str should define __slots__ - SLOT001 = 'WARN', # 
subclass of tuple should define __slots__ - SLOT002 = 'WARN', # subclass of namedtuple should define __slots__ + flake8_slots=dict( + SLOT000="WARN", # subclass of str should define __slots__ + SLOT001="WARN", # subclass of tuple should define __slots__ + SLOT002="WARN", # subclass of namedtuple should define __slots__ ), - flake8_simplify = dict( - SIM101 = 'REFACTOR', # multiple `isinstance` calls can be merged - SIM102 = 'REFACTOR', # nested `if` statements can be collapsed - SIM103 = 'REFACTOR', # bool check can be returned directly - - SIM105 = 'REFACTOR', # use `contextlib.suppress` to supress exception - - SIM107 = 'WARN', # return in `try-except-finally` - SIM108 = 'REFACTOR', # `if-else` can be turnery - SIM109 = 'REFACTOR', # use `if in` instead of multiple `==` checks - SIM110 = 'REFACTOR', # reimplementation of builtin - - SIM112 = None, # capitalize ENVVARS - SIM113 = 'REFACTOR', # use `enumerate` for index in for loop - SIM114 = 'REFACTOR', # multiple if branches can be combined - SIM115 = 'REFACTOR', # use context manager for opening files - SIM116 = 'REFACTOR', # use dictionary instead of consecutive if statements - SIM117 = 'REFACTOR', # merge multiple `with` statements - SIM118 = 'REFACTOR', # use `in dict` rather than `in dict.keys()`` - - SIM201 = 'REFACTOR', # use `!=` instead of `not ... ==` - SIM202 = 'REFACTOR', # use `==` instead of `not ... 
!=` - - SIM208 = 'REFACTOR', # expr negated twice - - SIM210 = 'REFACTOR', # unnecessary turnery - SIM211 = 'REFACTOR', # unnecessary turnery (with not) - SIM212 = None, # if-else checks negated expression - - SIM220 = 'REFACTOR', # unnecessary `and False` - SIM221 = 'REFACTOR', # unnecessary `or False` - SIM222 = 'REFACTOR', # truthy value in `or` - SIM223 = 'REFACTOR', # falsey value in `and` - - SIM300 = 'REFACTOR', # yoda expression (constant on left side of check) - - SIM401 = 'REFACTOR', # if can be replaced with `dict.get(key, default=...)` - - SIM910 = 'REFACTOR', # if can be replaced with `dict.get(key, default=None)` - SIM911 = 'REFACTOR', # zip can be replaced with `dict.items()` + flake8_simplify=dict( + SIM101="REFACTOR", # multiple `isinstance` calls can be merged + SIM102="REFACTOR", # nested `if` statements can be collapsed + SIM103="REFACTOR", # bool check can be returned directly + SIM105="REFACTOR", # use `contextlib.suppress` to supress exception + SIM107="WARN", # return in `try-except-finally` + SIM108="REFACTOR", # `if-else` can be turnery + SIM109="REFACTOR", # use `if in` instead of multiple `==` checks + SIM110="REFACTOR", # reimplementation of builtin + SIM112=None, # capitalize ENVVARS + SIM113="REFACTOR", # use `enumerate` for index in for loop + SIM114="REFACTOR", # multiple if branches can be combined + SIM115="REFACTOR", # use context manager for opening files + SIM116="REFACTOR", # use dictionary instead of consecutive if statements + SIM117="REFACTOR", # merge multiple `with` statements + SIM118="REFACTOR", # use `in dict` rather than `in dict.keys()`` + SIM201="REFACTOR", # use `!=` instead of `not ... ==` + SIM202="REFACTOR", # use `==` instead of `not ... 
!=` + SIM208="REFACTOR", # expr negated twice + SIM210="REFACTOR", # unnecessary turnery + SIM211="REFACTOR", # unnecessary turnery (with not) + SIM212=None, # if-else checks negated expression + SIM220="REFACTOR", # unnecessary `and False` + SIM221="REFACTOR", # unnecessary `or False` + SIM222="REFACTOR", # truthy value in `or` + SIM223="REFACTOR", # falsey value in `and` + SIM300="REFACTOR", # yoda expression (constant on left side of check) + SIM401="REFACTOR", # if can be replaced with `dict.get(key, default=...)` + SIM910="REFACTOR", # if can be replaced with `dict.get(key, default=None)` + SIM911="REFACTOR", # zip can be replaced with `dict.items()` ), - flake8_tidy_imports = dict( - TID251 = 'WARN', # some import, imports a part of the api that is considered insecure and/or poor form - TID252 = None, # prefer absolute import over relative - TID253 = 'WARN', # import should not be done at module level due to performance issues + flake8_tidy_imports=dict( + TID251="WARN", # some import, imports a part of the api that is considered insecure and/or poor form + TID252=None, # prefer absolute import over relative + TID253="WARN", # import should not be done at module level due to performance issues ), - flake8_type_checking = dict( - TCH001 = 'REFACTOR', # import of local type only module outside of `TYPE_CHECKING` block - TCH002 = 'REFACTOR', # import of third party type only module outside of `TYPE_CHECKING` block - TCH003 = 'REFACTOR', # import of std library type only module outside of `TYPE_CHECKING` block - TCH004 = 'ERROR', # import of non-type only module inside of `TYPE_CHECKING` - TCH005 = 'REFACTOR', # found empty type-checking block - - TCH010 = 'REFACTOR', # incorrect usage of typing `|` operator with quoted type annotations + flake8_type_checking=dict( + TCH001="REFACTOR", # import of local type only module outside of `TYPE_CHECKING` block + TCH002="REFACTOR", # import of third party type only module outside of `TYPE_CHECKING` block + 
TCH003="REFACTOR", # import of std library type only module outside of `TYPE_CHECKING` block + TCH004="ERROR", # import of non-type only module inside of `TYPE_CHECKING` + TCH005="REFACTOR", # found empty type-checking block + TCH010="REFACTOR", # incorrect usage of typing `|` operator with quoted type annotations ), - flake8_gettext = dict( - INT001 = 'WARN', # gettext with f-string (f-string resolved before gettext) - INT002 = 'WARN', # gettext with `.format` (`.format` resolved before gettext) - INT003 = 'WARN', # gettext with `%` (`%` resolved before gettext) + flake8_gettext=dict( + INT001="WARN", # gettext with f-string (f-string resolved before gettext) + INT002="WARN", # gettext with `.format` (`.format` resolved before gettext) + INT003="WARN", # gettext with `%` (`%` resolved before gettext) ), - flake8_unused_arguments = dict( - ARG001 = 'REFACTOR', # unused function argument - ARG002 = 'REFACTOR', # unused method argument - ARG003 = 'REFACTOR', # unused class method argument - ARG004 = 'REFACTOR', # unused static method argument - ARG005 = 'REFACTOR', # unused lambda argument + flake8_unused_arguments=dict( + ARG001="REFACTOR", # unused function argument + ARG002="REFACTOR", # unused method argument + ARG003="REFACTOR", # unused class method argument + ARG004="REFACTOR", # unused static method argument + ARG005="REFACTOR", # unused lambda argument ), - flake8_use_pathlib = dict( - PTH100 = None, # replace `os.path.abspath()` with `Path.resolve()` - PTH101 = None, # replace `os.chmod()` with `Path.chmod()` - PTH102 = None, # replace `os.mkdir()` with `Path.mkdir()` - PTH103 = None, # replace `os.makedirs()` with `Path.mkdir(parents=True)` - PTH104 = None, # replace `os.rename()` with `Path.rename()` - PTH105 = None, # replace `os.rmdir()` with `Path.rmdir()` - PTH106 = None, # replace `os.remove()` with `Path.remove()` - PTH107 = None, # replace `os.remove()` with `Path.remove()` - PTH108 = None, # replace `os.unlink()` with `Path.unlink()` - PTH109 = 
None, # replace `os.getcwd()` with `Path.cwd()` - PTH110 = None, # replace `os.path.exists()` with `Path.exists()` - PTH111 = None, # replace `os.path.expanduser()` with `Path.expanduser()` - PTH112 = None, # replace `os.path.isdir()` with `Path.is_dir()` - PTH113 = None, # replace `os.path.isfile()` with `Path.is_file()` - PTH114 = None, # replace `os.path.islink()` with `Path.is_symlink()` - PTH115 = None, # replace `os.readlink()` with `Path.readlink()` - PTH116 = None, # replace `os.stat()` with `Path.stat()`, `Path.owner()` or `Path.group()` - PTH117 = None, # replace `os.path.isabs()` with `Path.is_absolute()` - PTH118 = None, # replace `os.path.join()` with `Path / Path` operator - PTH119 = None, # replace `os.path.basename()` with `Path.name` - PTH120 = None, # replace `os.path.dirname()` with `Path.parent` - PTH121 = None, # replace `os.path.samefile()` with `Path.samefile()` - PTH122 = None, # replace `os.path.splitext()` with `Path.suffix`, `Path.stem` or `Path.parent` - PTH123 = None, # replace `open` with `Path.open` - PTH124 = 'REFACTOR', # replace `py.path` with `pathlib` - - PTH201 = 'REFACTOR', # replace `Path('.')` with `Path()` - PTH202 = None, # replace `os.path.getsize()` with `Path.stat().st_size` - PTH203 = None, # replace `os.path.getatime()` with `Path.stat().st_atime` - PTH204 = None, # replace `os.path.getmtime()` with `Path.stat().st_mtime` - PTH205 = None, # replace `os.path.getctime()` with `Path.stat().st_ctime` - PTH206 = None, # replace `.split(os.sep)` with `Path.parts` - PTH207 = None, # replace `glob` with `Path.glob` or `Path.rglob` + flake8_use_pathlib=dict( + PTH100=None, # replace `os.path.abspath()` with `Path.resolve()` + PTH101=None, # replace `os.chmod()` with `Path.chmod()` + PTH102=None, # replace `os.mkdir()` with `Path.mkdir()` + PTH103=None, # replace `os.makedirs()` with `Path.mkdir(parents=True)` + PTH104=None, # replace `os.rename()` with `Path.rename()` + PTH105=None, # replace `os.rmdir()` with `Path.rmdir()` + 
PTH106=None, # replace `os.remove()` with `Path.remove()` + PTH107=None, # replace `os.remove()` with `Path.remove()` + PTH108=None, # replace `os.unlink()` with `Path.unlink()` + PTH109=None, # replace `os.getcwd()` with `Path.cwd()` + PTH110=None, # replace `os.path.exists()` with `Path.exists()` + PTH111=None, # replace `os.path.expanduser()` with `Path.expanduser()` + PTH112=None, # replace `os.path.isdir()` with `Path.is_dir()` + PTH113=None, # replace `os.path.isfile()` with `Path.is_file()` + PTH114=None, # replace `os.path.islink()` with `Path.is_symlink()` + PTH115=None, # replace `os.readlink()` with `Path.readlink()` + PTH116=None, # replace `os.stat()` with `Path.stat()`, `Path.owner()` or `Path.group()` + PTH117=None, # replace `os.path.isabs()` with `Path.is_absolute()` + PTH118=None, # replace `os.path.join()` with `Path / Path` operator + PTH119=None, # replace `os.path.basename()` with `Path.name` + PTH120=None, # replace `os.path.dirname()` with `Path.parent` + PTH121=None, # replace `os.path.samefile()` with `Path.samefile()` + PTH122=None, # replace `os.path.splitext()` with `Path.suffix`, `Path.stem` or `Path.parent` + PTH123=None, # replace `open` with `Path.open` + PTH124="REFACTOR", # replace `py.path` with `pathlib` + PTH201="REFACTOR", # replace `Path('.')` with `Path()` + PTH202=None, # replace `os.path.getsize()` with `Path.stat().st_size` + PTH203=None, # replace `os.path.getatime()` with `Path.stat().st_atime` + PTH204=None, # replace `os.path.getmtime()` with `Path.stat().st_mtime` + PTH205=None, # replace `os.path.getctime()` with `Path.stat().st_ctime` + PTH206=None, # replace `.split(os.sep)` with `Path.parts` + PTH207=None, # replace `glob` with `Path.glob` or `Path.rglob` ), - flake8_todos = dict( - TD001 = None, # replace XXX and FIXME with TODO - TD002 = None, # missing author in TODO - TD003 = None, # missing issue link in TODO - TD004 = None, # missing colon in TODO - TD005 = 'REFACTOR', # missing description in TODO - TD006 
= 'CONVENTION', # missing TODO capitalization - TD007 = 'CONVENTION', # missing space after colon in TODO + flake8_todos=dict( + TD001=None, # replace XXX and FIXME with TODO + TD002=None, # missing author in TODO + TD003=None, # missing issue link in TODO + TD004=None, # missing colon in TODO + TD005="REFACTOR", # missing description in TODO + TD006="CONVENTION", # missing TODO capitalization + TD007="CONVENTION", # missing space after colon in TODO ), - flake8_fixme = dict( - FIX001 = 'WARN', # line contains FIXME - FIX002 = 'WARN', # line contains TODO - FIX003 = 'WARN', # line contains XXX - FIX004 = 'WARN', # line contains HACK + flake8_fixme=dict( + FIX001="WARN", # line contains FIXME + FIX002="WARN", # line contains TODO + FIX003="WARN", # line contains XXX + FIX004="WARN", # line contains HACK ), - eradicate = dict( - ERA001 = 'REFACTOR', # found commented code + eradicate=dict( + ERA001="REFACTOR", # found commented code ), - pandas_vet = dict( - PD002 = 'WARN', # `inplace=True` should be avoided (inconsistent) - PD003 = 'REFACTOR', # `.isna` prefered over `.isnull` - PD004 = 'REFACTOR', # `.notna` prefered over `.notnull` - - PD007 = 'REFACTOR', # `.ix` deprecated, use more explicit `.loc` or `.iloc` - PD008 = 'REFACTOR', # use `.loc` instead of `.at` - PD009 = 'REFACTOR', # use `.iloc` instead of `.iat` - PD010 = 'REFACTOR', # `.pivot_table` preferred over `.pivot` or `.unstack` - PD011 = 'REFACTOR', # use `.to_numpy()` instead of `.values` - PD012 = 'REFACTOR', # use `.read_csv` instead of `.read_tables` to read CSV files - PD013 = 'REFACTOR', # `.melt` prefered to `.stack` - - PD015 = 'REFACTOR', # use `.merge` method instead of `pd.merge` function - - PD101 = 'REFACTOR', # using `series.nunique()` for checking a series is constant, is ineffecient + pandas_vet=dict( + PD002="WARN", # `inplace=True` should be avoided (inconsistent) + PD003="REFACTOR", # `.isna` prefered over `.isnull` + PD004="REFACTOR", # `.notna` prefered over `.notnull` + 
PD007="REFACTOR", # `.ix` deprecated, use more explicit `.loc` or `.iloc` + PD008="REFACTOR", # use `.loc` instead of `.at` + PD009="REFACTOR", # use `.iloc` instead of `.iat` + PD010="REFACTOR", # `.pivot_table` preferred over `.pivot` or `.unstack` + PD011="REFACTOR", # use `.to_numpy()` instead of `.values` + PD012="REFACTOR", # use `.read_csv` instead of `.read_tables` to read CSV files + PD013="REFACTOR", # `.melt` prefered to `.stack` + PD015="REFACTOR", # use `.merge` method instead of `pd.merge` function + PD101="REFACTOR", # using `series.nunique()` for checking a series is constant, is ineffecient ), - pygrep_hooks = dict( - PGH001 = None, # use of `eval`, deprecated in favor of S307 - PGH002 = None, # use of `.warn` logging function, deprecated in favor if G010 - PGH003 = 'WARN', # catch-all typing ignore line - PGH004 = 'WARN', # catch-all `noqa` used - PGH005 = 'ERROR', # invalid `mock` usage + pygrep_hooks=dict( + PGH001=None, # use of `eval`, deprecated in favor of S307 + PGH002=None, # use of `.warn` logging function, deprecated in favor if G010 + PGH003="WARN", # catch-all typing ignore line + PGH004="WARN", # catch-all `noqa` used + PGH005="ERROR", # invalid `mock` usage ), - pylint = dict( - PLC0105 = 'WARN', # type name does not reeflect it's variance - - PLC0131 = 'WARN', # type cannot be both covariant and contravariant - PLC0132 = 'WARN', # type name does not matched assigned variable name - - PLC0205 = 'WARN', # `__slots__` should be a non-string iterable - PLC0206 = 'REFACTOR', # extracting value from dictionary without calling `.items()` - - PLC0208 = 'REFACTOR', # iterating over set literal (not effecient) - - PLC0414 = 'REFACTOR', # useless import alias - PLC0415 = 'REFACTOR', # import outside module scope, not at top of file - - PLC1901 = 'REFACTOR', # comparison with empty string, can be changed to check if string is falsy - - PLC2401 = 'REFACTOR', # name is not ascii - PLC2403 = 'REFACTOR', # import name is not ascii - - PLC2801 = 
'WARN', # import of private name - PLC3002 = 'REFACTOR', # unnecessary lambda call + pylint=dict( + PLC0105="WARN", # type name does not reeflect it's variance + PLC0131="WARN", # type cannot be both covariant and contravariant + PLC0132="WARN", # type name does not matched assigned variable name + PLC0205="WARN", # `__slots__` should be a non-string iterable + PLC0206="REFACTOR", # extracting value from dictionary without calling `.items()` + PLC0208="REFACTOR", # iterating over set literal (not effecient) + PLC0414="REFACTOR", # useless import alias + PLC0415="REFACTOR", # import outside module scope, not at top of file + PLC1901="REFACTOR", # comparison with empty string, can be changed to check if string is falsy + PLC2401="REFACTOR", # name is not ascii + PLC2403="REFACTOR", # import name is not ascii + PLC2801="WARN", # import of private name + PLC3002="REFACTOR", # unnecessary lambda call ), - pylint_error = dict( - PLE0100 = 'ERROR', # `yield` in `__init__` - PLE0101 = 'ERROR', # `return` in `__init__` - - PLE0115 = 'ERROR', # variable is both `nonlocal` and `global` - PLE0116 = 'ERROR', # `continue` in `finally` - PLE0117 = 'ERROR', # `nonlocal` without binding - PLE0118 = 'ERROR', # name used prior to global decleration - - PLE0237 = 'ERROR', # `__slots__` is defined, but an attribute is defined that is not in the slots - - PLE0241 = 'ERROR', # duplicate base for class - - PLE0302 = 'ERROR', # bad special method signature - PLE0304 = 'ERROR', # invalid `__bool__` return type - PLE0305 = 'ERROR', # invalid `__index__` return type - - PLE0307 = 'ERROR', # invalid `__str__` return type - PLE0308 = 'ERROR', # invalid `__bytes__` return type - PLE0309 = 'ERROR', # invalid `__hash__` return type - - PLE0604 = 'ERROR', # invalid `__all__` (must contain only strings) - PLE0605 = 'ERROR', # invalid type for `__all__` (must be tuple or list) - - PLE0643 = 'WARN', # likely invalid index - - PLE0704 = 'ERROR', # bare `raise` outside exception handler - - PLE1132 = 
'ERROR', # repeated keyword argument - - PLE1141 = 'REFACTOR', # unppacking a dictionary in iteration without `.items()` - PLE1142 = 'ERROR', # `await` outside of `async` function - - PLE1205 = 'ERROR', # too many arguments for logging format string - PLE1206 = 'ERROR', # too few arguments for logging format string - - PLE1300 = 'ERROR', # bad string format character - PLE1307 = 'ERROR', # bad string format type - - PLE1507 = 'ERROR', # invalid type for `os.getenv` argument - - PLE1519 = 'ERROR', # `@singledispatch` should not be used on methods - PLE1520 = 'ERROR', # `@singledispatchmethod` should only be used on methods - - PLE1700 = 'ERROR', # `yield from` statement in async function - - PLE2502 = 'ERROR', # uses bidirectional unicode, which can obfuscate code - - PLE2510 = 'ERROR', # invalid unescaped character backspace (use '\b' instead) - - PLE2512 = 'ERROR', # invalid unescaped character sub (use '\x1A' instead) - PLE2513 = 'ERROR', # invalid unescaped character esc (use '\x1B' instead) - PLE2514 = 'ERROR', # invalid unescaped character nul (use '\0' instead) - PLE2515 = 'ERROR', # invalid unescaped character zero-width space (use '\u200B' instead) - - PLE4703 = 'ERROR', # iterated set is modified during iteration + pylint_error=dict( + PLE0100="ERROR", # `yield` in `__init__` + PLE0101="ERROR", # `return` in `__init__` + PLE0115="ERROR", # variable is both `nonlocal` and `global` + PLE0116="ERROR", # `continue` in `finally` + PLE0117="ERROR", # `nonlocal` without binding + PLE0118="ERROR", # name used prior to global decleration + PLE0237="ERROR", # `__slots__` is defined, but an attribute is defined that is not in the slots + PLE0241="ERROR", # duplicate base for class + PLE0302="ERROR", # bad special method signature + PLE0304="ERROR", # invalid `__bool__` return type + PLE0305="ERROR", # invalid `__index__` return type + PLE0307="ERROR", # invalid `__str__` return type + PLE0308="ERROR", # invalid `__bytes__` return type + PLE0309="ERROR", # invalid 
`__hash__` return type + PLE0604="ERROR", # invalid `__all__` (must contain only strings) + PLE0605="ERROR", # invalid type for `__all__` (must be tuple or list) + PLE0643="WARN", # likely invalid index + PLE0704="ERROR", # bare `raise` outside exception handler + PLE1132="ERROR", # repeated keyword argument + PLE1141="REFACTOR", # unppacking a dictionary in iteration without `.items()` + PLE1142="ERROR", # `await` outside of `async` function + PLE1205="ERROR", # too many arguments for logging format string + PLE1206="ERROR", # too few arguments for logging format string + PLE1300="ERROR", # bad string format character + PLE1307="ERROR", # bad string format type + PLE1507="ERROR", # invalid type for `os.getenv` argument + PLE1519="ERROR", # `@singledispatch` should not be used on methods + PLE1520="ERROR", # `@singledispatchmethod` should only be used on methods + PLE1700="ERROR", # `yield from` statement in async function + PLE2502="ERROR", # uses bidirectional unicode, which can obfuscate code + PLE2510="ERROR", # invalid unescaped character backspace (use '\b' instead) + PLE2512="ERROR", # invalid unescaped character sub (use '\x1A' instead) + PLE2513="ERROR", # invalid unescaped character esc (use '\x1B' instead) + PLE2514="ERROR", # invalid unescaped character nul (use '\0' instead) + PLE2515="ERROR", # invalid unescaped character zero-width space (use '\u200B' instead) + PLE4703="ERROR", # iterated set is modified during iteration ), - pylint_refactor = dict( - PLR0124 = 'ERROR', # name compared with itself - - PLR0133 = 'ERROR', # constant compared with another constant - - PLR0202 = 'ERROR', # classmethod defined without decorator - PLR0203 = 'ERROR', # staticmethod defined without decorator - - PLR0206 = 'ERROR', # property has parameters - - PLR0402 = 'REFACTOR', # using `import x.y as y` instead of `from x import y` - - PLR0904 = 'REFACTOR', # too many public methods - PLR0911 = 'REFACTOR', # too many return statements - PLR0912 = 'REFACTOR', # too many 
branches - PLR0913 = 'REFACTOR', # too many arguments - PLR0914 = 'REFACTOR', # too many locals - PLR0915 = 'REFACTOR', # too many statements - PLR0916 = 'REFACTOR', # too many boolean expressions - PLR0917 = 'REFACTOR', # too many positional arguments - - PLR1701 = None, # merge isinstance calls (deprecated in favor of SIM101) - PLR1702 = 'REFACTOR', # too many nested blocks - - PLR1704 = 'REFACTOR', # redefined argument - - PLR1706 = None, # replace pre 2.5 ternary syntax with new, removed for false positives - - PLR1711 = 'REFACTOR', # useless return - - PLR1714 = 'REFACTOR', # repeated equality checks could be replaced with `in` - - PLR1722 = 'REFACTOR', # use of `quit` or `exit` instead of `os.exit` - - PLR1730 = 'REFACTOR', # manual reimplementation of `min` or `max` - - PLR1733 = 'REFACTOR', # unnecessary lookup of dict value by key - - PLR1736 = 'REFACTOR', # unnecessary lookup of index in enumerate loop - - PLR2004 = 'REFACTOR', # magic value used in comparison - - PLR2044 = 'REFACTOR', # empty comment - - PLR5501 = 'REFACTOR', # collapsible else if - - PLR6104 = 'REFACTOR', # non-augmented assign - - PLR6201 = None, # literal membership test doesn't use set. 
Disabled because this can introduce errors when members aren't hashable - - PLR6301 = 'REFACTOR', # method doesn't need to be a method + pylint_refactor=dict( + PLR0124="ERROR", # name compared with itself + PLR0133="ERROR", # constant compared with another constant + PLR0202="ERROR", # classmethod defined without decorator + PLR0203="ERROR", # staticmethod defined without decorator + PLR0206="ERROR", # property has parameters + PLR0402="REFACTOR", # using `import x.y as y` instead of `from x import y` + PLR0904="REFACTOR", # too many public methods + PLR0911="REFACTOR", # too many return statements + PLR0912="REFACTOR", # too many branches + PLR0913="REFACTOR", # too many arguments + PLR0914="REFACTOR", # too many locals + PLR0915="REFACTOR", # too many statements + PLR0916="REFACTOR", # too many boolean expressions + PLR0917="REFACTOR", # too many positional arguments + PLR1701=None, # merge isinstance calls (deprecated in favor of SIM101) + PLR1702="REFACTOR", # too many nested blocks + PLR1704="REFACTOR", # redefined argument + PLR1706=None, # replace pre 2.5 ternary syntax with new, removed for false positives + PLR1711="REFACTOR", # useless return + PLR1714="REFACTOR", # repeated equality checks could be replaced with `in` + PLR1722="REFACTOR", # use of `quit` or `exit` instead of `os.exit` + PLR1730="REFACTOR", # manual reimplementation of `min` or `max` + PLR1733="REFACTOR", # unnecessary lookup of dict value by key + PLR1736="REFACTOR", # unnecessary lookup of index in enumerate loop + PLR2004="REFACTOR", # magic value used in comparison + PLR2044="REFACTOR", # empty comment + PLR5501="REFACTOR", # collapsible else if + PLR6104="REFACTOR", # non-augmented assign + PLR6201=None, # literal membership test doesn't use set. 
Disabled because this can introduce errors when members aren't hashable + PLR6301="REFACTOR", # method doesn't need to be a method ), - pylint_warning = dict( - PLW0108 = 'REFACTOR', # unnecessary lambda - - PLW0120 = 'REFACTOR', # else on loop - - PLW0127 = 'REFACTOR', # assigning var to itself - PLW0128 = 'REFACTOR', # redeclared variable in assignment - PLW0129 = 'WARN', # assert on string literal - - PLW0131 = 'REFACTOR', # named expression used without context - - PLW0133 = 'WARN', # exception created without raise - - PLW0177 = 'WARN', # comparing against NaN value - - PLW0211 = 'WARN', # bad staticmethod - - PLW0245 = 'WARN', # super call missing parenthesis - - PLW0406 = 'WARN', # module imports itself - - PLW0602 = 'WARN', # global variable not assigned - PLW0603 = None, # global variable updated in function - PLW0604 = 'REFACTOR', # redundant global variable at module level - - PLW0642 = 'REFACTOR', # reassignment of `self` or `class` - - PLW0711 = 'ERROR', # catching binary operation instead of exception - - PLW1501 = 'ERROR', # bad/unknown `open` mode - - PLW1508 = 'ERROR', # invalid type for environment variable default - PLW1509 = 'ERROR', # `preexec_fn` in `Popen` - PLW1510 = None, # `subprocess.run` without `check` set - - PLW1514 = None, # `open` without `encoding` set - - PLW1641 = 'WARN', # object implements `__eq__` but not `__hash__` - PLW2101 = 'ERROR', # useless `with` on `Lock` - - PLW2901 = 'REFACTOR', # variable shadowed by loop - - PLW3201 = 'REFACTOR', # bad or misspelled dunder method - - PLW3301 = 'REFACTOR', # nested `min` or `max` calls + pylint_warning=dict( + PLW0108="REFACTOR", # unnecessary lambda + PLW0120="REFACTOR", # else on loop + PLW0127="REFACTOR", # assigning var to itself + PLW0128="REFACTOR", # redeclared variable in assignment + PLW0129="WARN", # assert on string literal + PLW0131="REFACTOR", # named expression used without context + PLW0133="WARN", # exception created without raise + PLW0177="WARN", # comparing 
against NaN value + PLW0211="WARN", # bad staticmethod + PLW0245="WARN", # super call missing parenthesis + PLW0406="WARN", # module imports itself + PLW0602="WARN", # global variable not assigned + PLW0603=None, # global variable updated in function + PLW0604="REFACTOR", # redundant global variable at module level + PLW0642="REFACTOR", # reassignment of `self` or `class` + PLW0711="ERROR", # catching binary operation instead of exception + PLW1501="ERROR", # bad/unknown `open` mode + PLW1508="ERROR", # invalid type for environment variable default + PLW1509="ERROR", # `preexec_fn` in `Popen` + PLW1510=None, # `subprocess.run` without `check` set + PLW1514=None, # `open` without `encoding` set + PLW1641="WARN", # object implements `__eq__` but not `__hash__` + PLW2101="ERROR", # useless `with` on `Lock` + PLW2901="REFACTOR", # variable shadowed by loop + PLW3201="REFACTOR", # bad or misspelled dunder method + PLW3301="REFACTOR", # nested `min` or `max` calls ), - tryceratops = dict( - TRY002 = 'CONVENTION', # raising vanilla exception - TRY003 = None, # don't pass long strings to exception - TRY004 = 'CONVENTION', # raising ValueError instead of TypeError when type is the issue - - TRY200 = None, # re-raise without cause, removed in favor of B904 - TRY201 = 'REFACTOR', # unnecessary re-raising exception with explicit name - - TRY300 = 'REFACTOR', # extra code in `try` block, should put it in an `else block` - TRY301 = 'REFACTOR', # raise statement inside `try` block - TRY302 = 'REFACTOR', # unnecessary immediate re-raise - - TRY400 = 'REFACTOR', # use of `logging.error` instead of `logging.exception` - TRY401 = 'REFACTOR', # redundant exception formatted in `logger.exception` call + tryceratops=dict( + TRY002="CONVENTION", # raising vanilla exception + TRY003=None, # don't pass long strings to exception + TRY004="CONVENTION", # raising ValueError instead of TypeError when type is the issue + TRY200=None, # re-raise without cause, removed in favor of B904 + 
TRY201="REFACTOR", # unnecessary re-raising exception with explicit name + TRY300="REFACTOR", # extra code in `try` block, should put it in an `else block` + TRY301="REFACTOR", # raise statement inside `try` block + TRY302="REFACTOR", # unnecessary immediate re-raise + TRY400="REFACTOR", # use of `logging.error` instead of `logging.exception` + TRY401="REFACTOR", # redundant exception formatted in `logger.exception` call ), - flynt = dict( - FLY002 = 'REFACTOR', # `''.join` used where f-string might be more readable + flynt=dict( + FLY002="REFACTOR", # `''.join` used where f-string might be more readable ), - numpy = dict( - NPY001 = 'WARN', # deprecated type - NPY002 = 'WARN', # legacy random, use `np.random.Generator` - NPY003 = 'WARN', # deprecated function - NPY201 = 'WARN', # will be deprecated in future numpy + numpy=dict( + NPY001="WARN", # deprecated type + NPY002="WARN", # legacy random, use `np.random.Generator` + NPY003="WARN", # deprecated function + NPY201="WARN", # will be deprecated in future numpy ), - fastapi = dict( - FAST001 = 'WARN', # fastapi route has redundant `response_model` - FAST002 = 'WARN', # dependency without `Annotated` - FAST003 = 'WARN', # parameter argument appears in route path but not in function signature + fastapi=dict( + FAST001="WARN", # fastapi route has redundant `response_model` + FAST002="WARN", # dependency without `Annotated` + FAST003="WARN", # parameter argument appears in route path but not in function signature ), - airflow = dict( - AIR001 = 'WARN', # task variable name should match the `task_id` + airflow=dict( + AIR001="WARN", # task variable name should match the `task_id` ), - perflint = dict( - PERF101 = 'REFACTOR', # casting iterable to list, then iterating over it - PERF102 = 'REFACTOR', # useless `.items()` iterator over dict - - PERF203 = 'REFACTOR', # `try-except` within a loop (performance overhead) - - PERF401 = 'REFACTOR', # `for` loop could be refactored into list comprehension - PERF402 = 
'REFACTOR', # manual list copied - PERF403 = 'REFACTOR', # `for` loop could be refactored into dict comprehension + perflint=dict( + PERF101="REFACTOR", # casting iterable to list, then iterating over it + PERF102="REFACTOR", # useless `.items()` iterator over dict + PERF203="REFACTOR", # `try-except` within a loop (performance overhead) + PERF401="REFACTOR", # `for` loop could be refactored into list comprehension + PERF402="REFACTOR", # manual list copied + PERF403="REFACTOR", # `for` loop could be refactored into dict comprehension ), - refurb = dict( - FURB101 = None, # replace `open` and `read` with `pathlib` - - FURB103 = None, # replace `open` and `write` with `pathlib` - - FURB105 = 'REFACTOR', # empty string passed to print - - FURB110 = 'REFACTOR', # ternary can be replaced with `or` operator - - FURB113 = 'REFACTOR', # repeated `.append` - - FURB116 = 'REFACTOR', # `bin`, `hex` or `oct` can be refacted to `f-string` - - FURB118 = 'REFACTOR', # manually re-implements operator - - FURB129 = 'REFACTOR', # `.readlines` instead of iterating directly over file object - - FURB131 = 'REFACTOR', # prefer `clear` of deleting slice - FURB132 = 'REFACTOR', # use of member check + `set.remove` instead of `set.discard` - - FURB136 = 'REFACTOR', # if can be replaced with `min` or `max` calls - - FURB140 = 'REFACTOR', # use `itertools.starmap` instead of generator - - FURB142 = 'REFACTOR', # set mutation in loop that could be replaced - - FURB145 = 'REFACTOR', # prefer `.copy` over `[:]` - - FURB148 = 'REFACTOR', # `enumerate` used where iteration over length would suffice - - FURB152 = 'REFACTOR', # defined math constant used as literal - - FURB154 = 'REFACTOR', # multiple consecutive `global` or `nonlocal` keywords - - FURB157 = None, # unnecessary cast to argument of decimal constructor (disabled because this may lower precision of decimal) - - FURB161 = 'REFACTOR', # manual reimplementation of `.bit_count()` - - FURB163 = 'REFACTOR', # specifying `math.log` base 
instead of using specific variant - FURB164 = 'REFACTOR', # unnecessary use of `.from_float` instead of `Decimal` or `Fraction` constructor - - FURB166 = 'REFACTOR', # use of explicit base with `int` constructor after stripping prefix - FURB167 = 'REFACTOR', # use of regex alias - FURB168 = 'REFACTOR', # use of `isinstance` on `None` instead of `is` - FURB169 = 'REFACTOR', # type comparison with `None` - - FURB171 = 'REFACTOR', # membership test against single item container - - FURB177 = 'REFACTOR', # use of `Path().resolve()` instead of `Path.cwd()` for current directory - - FURB180 = 'REFACTOR', # use of `metaclass=abc.ABCMeta` - FURB181 = 'REFACTOR', # use of hash `.digest().hex() instead of `.hexdigest()` - - FURB187 = 'REFACTOR', # assigning `reversed` instead of using `.reverse` - - FURB192 = 'REFACTOR', # use of `sorted` instead of `min` or `max` + refurb=dict( + FURB101=None, # replace `open` and `read` with `pathlib` + FURB103=None, # replace `open` and `write` with `pathlib` + FURB105="REFACTOR", # empty string passed to print + FURB110="REFACTOR", # ternary can be replaced with `or` operator + FURB113="REFACTOR", # repeated `.append` + FURB116="REFACTOR", # `bin`, `hex` or `oct` can be refacted to `f-string` + FURB118="REFACTOR", # manually re-implements operator + FURB129="REFACTOR", # `.readlines` instead of iterating directly over file object + FURB131="REFACTOR", # prefer `clear` of deleting slice + FURB132="REFACTOR", # use of member check + `set.remove` instead of `set.discard` + FURB136="REFACTOR", # if can be replaced with `min` or `max` calls + FURB140="REFACTOR", # use `itertools.starmap` instead of generator + FURB142="REFACTOR", # set mutation in loop that could be replaced + FURB145="REFACTOR", # prefer `.copy` over `[:]` + FURB148="REFACTOR", # `enumerate` used where iteration over length would suffice + FURB152="REFACTOR", # defined math constant used as literal + FURB154="REFACTOR", # multiple consecutive `global` or `nonlocal` keywords 
+ FURB157=None, # unnecessary cast to argument of decimal constructor (disabled because this may lower precision of decimal) + FURB161="REFACTOR", # manual reimplementation of `.bit_count()` + FURB163="REFACTOR", # specifying `math.log` base instead of using specific variant + FURB164="REFACTOR", # unnecessary use of `.from_float` instead of `Decimal` or `Fraction` constructor + FURB166="REFACTOR", # use of explicit base with `int` constructor after stripping prefix + FURB167="REFACTOR", # use of regex alias + FURB168="REFACTOR", # use of `isinstance` on `None` instead of `is` + FURB169="REFACTOR", # type comparison with `None` + FURB171="REFACTOR", # membership test against single item container + FURB177="REFACTOR", # use of `Path().resolve()` instead of `Path.cwd()` for current directory + FURB180="REFACTOR", # use of `metaclass=abc.ABCMeta` + FURB181="REFACTOR", # use of hash `.digest().hex()` instead of `.hexdigest()` + FURB187="REFACTOR", # assigning `reversed` instead of using `.reverse` + FURB192="REFACTOR", # use of `sorted` instead of `min` or `max` ), - pydoclint = dict( - DOC201 = 'CONVENTION', # return not documented in docstring - DOC202 = 'CONVENTION', # function does not return, return should not be documented in docstring - - DOC402 = 'CONVENTION', # yield is not documented in docstring - DOC403 = 'CONVENTION', # function does not yield, yield should not be documented in docstring - - DOC501 = 'CONVENTION', # raised exception not documented in docstring - DOC502 = 'CONVENTION', # 
documented raised exception not explicitly raised + ), + ruff=dict( + RUF001="REFACTOR", # ambiguous unicode character in string + RUF002="REFACTOR", # ambiguous unicode character in docstring + RUF003="REFACTOR", # ambiguous unicode character in comment + RUF005="REFACTOR", # literal concatenation of collection instead of spread + RUF006="ERROR", # dangling async task + RUF007="REFACTOR", # prefer `itertools.pairwise` over `zip` when iterating over successive pairs + RUF008="WARN", # mutable dataclass default value + RUF009="WARN", # function call in dataclass default + RUF010="WARN", # manual conversions inside f-string + RUF011=None, # static key used in dict comprehension (prefer B035 instead) + RUF012="WARN", # improperly typed mutable class variable + RUF013="WARN", # implicit optional type + RUF015="WARN", # prefer `next()` over single element slice + RUF016="WARN", # invalid index type + RUF017="WARN", # quadratic list summation + RUF018="WARN", # named expression in assert + RUF019="WARN", # unnecessary key check before dictionary access + RUF020="WARN", # never union + RUF021="WARN", # chained binary operators should be parenthesized to make precedence clear + RUF022="CONVENTION", # unsorted __all__ + RUF023="CONVENTION", # unsorted __slots__ + RUF024="CONVENTION", # mutable values passed to `dict.fromkeys` + RUF026="ERROR", # `default_factory` passed as kw arg to `defaultdict` + RUF027="WARN", # possible f-string missing `f` prefix + RUF028="ERROR", # suppression comment is invalid + RUF029="WARN", # async func doesn't do any async + RUF030="WARN", # print in `assert` + RUF031="WARN", # incorrect tuple subscript parenthesization + RUF032="WARN", # `Decimal()` called with float literal + RUF100="WARN", # unused `noqa` directive + RUF101="WARN", # `noqa` suppresses rule that has been deprecated in favor of another + RUF200="ERROR", # failed to parse `pyproject.toml` ), - ruff = dict( - RUF001 = 'REFACTOR', # ambiguous unicode character in string - RUF002 = 
'REFACTOR', # ambiguous unicode character in docstring - RUF003 = 'REFACTOR', # ambiguous unicode character in comment - - RUF005 = 'REFACTOR', # literal concatenation of collection instead of spread - RUF006 = 'ERROR', # dangling async task - RUF007 = 'REFACTOR', # prefer `itertools.pairwise` over `zip` when iterating over successive pairs - RUF008 = 'WARN', # mutable dataclass default value - RUF009 = 'WARN', # function call in dataclass default - RUF010 = 'WARN', # manual conversions inside f-string - RUF011 = None, # static key used in dict comprehension (prefer B035 instead) - RUF012 = 'WARN', # improperly typed mutable class variable - RUF013 = 'WARN', # implicit optional type - - RUF015 = 'WARN', # prefer `next()` over single element slice - RUF016 = 'WARN', # invalid index type - RUF017 = 'WARN', # quadratic list summation - RUF018 = 'WARN', # named expression in assert - RUF019 = 'WARN', # unnecessary key check before dictionary access - RUF020 = 'WARN', # never union - RUF021 = 'WARN', # chained binary operators should be parenthesized to make precedence clear - RUF022 = 'CONVENTION', # unsorted __all__ - RUF023 = 'CONVENTION', # unsorted __slots__ - RUF024 = 'CONVENTION', # mutable values passed to `dict.fromkeys` - - RUF026 = 'ERROR', # `default_factory` passed as kw arg to `defaultdict` - RUF027 = 'WARN', # possible f-string missing `f` prefix - RUF028 = 'ERROR', # suppression comment is invalid - RUF029 = 'WARN', # async func doesn't do any async - RUF030 = 'WARN', # print in `assert` - RUF031 = 'WARN', # incorrect tuple subscript parenthesization - RUF032 = 'WARN', # `Decimal()` called with float literal - - RUF100 = 'WARN', # unused `noqa` directive - RUF101 = 'WARN', # `noqa` suppresses rule that has been deprecated in favor of another - - RUF200 = 'ERROR', # failed to parse `pyproject.toml` - ) ) if is_in_path("ruff"): + @register_linter class RuffLinter(FileLinter): ENABLE_TAGS: set[str] = { @@ -1229,18 +1022,25 @@ class RuffLinter(FileLinter): 
def check(self, item: FileItem) -> Generator[Report, None, None]: for report in json.loads( subprocess.run( - ["ruff", "check", "--select=ALL", "--output-format", "json", item.abspath], + [ + "ruff", + "check", + "--select=ALL", + "--output-format", + "json", + item.abspath, + ], capture_output=True, text=True, ).stdout ): - location_metadata = '' + location_metadata = "" - level = '' + level = "" lint_is_known = False for group in RUFF_LINTS.values(): - if report['code'] in group: - level = group[report['code']] + if report["code"] in group: + level = group[report["code"]] lint_is_known = True break @@ -1249,7 +1049,7 @@ def check(self, item: FileItem) -> Generator[Report, None, None]: continue if not lint_is_known: - level = 'error' + level = "error" yield FileReport( item=item, @@ -1277,5 +1077,5 @@ def check(self, item: FileItem) -> Generator[Report, None, None]: fix=None, source="ruff", raw=report, - level=parse_report_level('error') + level=parse_report_level("error"), ) diff --git a/tkldet_modules/shellcheck.py b/tkldet_modules/shellcheck.py index 8414ccb..2024f8a 100644 --- a/tkldet_modules/shellcheck.py +++ b/tkldet_modules/shellcheck.py @@ -19,19 +19,23 @@ import subprocess from libtkldet.linter import FileLinter, FileItem, register_linter -from libtkldet.report import Report, FileReport, parse_report_level, Replacement +from libtkldet.report import ( + Report, + FileReport, + parse_report_level, + Replacement, +) from libtkldet.apt_file import is_installed if is_installed("shellcheck"): + def insert_str(v: str, i: int, instr: str) -> str: return v[:i] + instr + v[i:] - def expand_lines(lines: list[str]) -> Generator[str, None, None]: for line in lines: yield from line.splitlines() - def format_replacement( path: str, line_span: tuple[int, int], @@ -57,17 +61,20 @@ def format_replacement( if replacement["insertionPoint"] == "beforeStart": line = replacement["line"] - start_line - 1 lines[line] = insert_str( - lines[line], replacement["column"], 
replacement["replacement"] + lines[line], + replacement["column"], + replacement["replacement"], ) elif replacement["insertionPoint"] == "afterEnd": line = replacement["endLine"] - start_line - 1 lines[line] = insert_str( - lines[line], replacement["endColumn"] - 1, replacement["replacement"] + lines[line], + replacement["endColumn"] - 1, + replacement["replacement"], ) return Replacement(start_line, end_line, expand_lines(lines)) - @register_linter class Shellcheck(FileLinter): ENABLE_TAGS: set[str] = { diff --git a/tkldet_modules/to_ignore.py b/tkldet_modules/to_ignore.py index ed4f12f..30fe6a5 100644 --- a/tkldet_modules/to_ignore.py +++ b/tkldet_modules/to_ignore.py @@ -20,14 +20,16 @@ import os.path from typing import ClassVar + def is_or_has_ancestor_dir(path: str, directory: str) -> bool: """Checks if path has an ancestor directory with a given name""" - while path not in ('/', ''): + while path not in ("/", ""): path, path_segment = os.path.split(path) if path_segment == directory: return True return False + @register_classifier class FiletypeClassifier(FileClassifier): """Classify files by a parent directory""" @@ -35,8 +37,7 @@ class FiletypeClassifier(FileClassifier): WEIGHT: ClassVar[int] = 5 def classify(self, item: FileItem) -> None: - - if is_or_has_ancestor_dir(item.abspath, '__pycache__'): - item.add_tags(self, ['ignore:__pycache__']) - if is_or_has_ancestor_dir(item.abspath, '.git'): - item.add_tags(self, ['ignore:.git']) + if is_or_has_ancestor_dir(item.abspath, "__pycache__"): + item.add_tags(self, ["ignore:__pycache__"]) + if is_or_has_ancestor_dir(item.abspath, ".git"): + item.add_tags(self, ["ignore:.git"]) diff --git a/tkldet_modules/yaml_check.py b/tkldet_modules/yaml_check.py index 930640d..f2297a4 100644 --- a/tkldet_modules/yaml_check.py +++ b/tkldet_modules/yaml_check.py @@ -25,6 +25,7 @@ from libtkldet.linter import FileLinter, FileItem, register_linter from libtkldet.report import Report, FileReport, ReportLevel + class 
YamlLinter(FileLinter): ENABLE_TAGS: set[str] = {"ext:yaml", "ext:yml"} DISABLE_TAGS: set[str] = set() @@ -59,5 +60,6 @@ def check(self, item: FileItem) -> Generator[Report, None, None]: level=ReportLevel.ERROR, ) + if YAML: register_linter(YamlLinter) From cccb99b013079f1058f91f37b54e5a122b5bf6bc Mon Sep 17 00:00:00 2001 From: Stefan Davis Date: Tue, 10 Sep 2024 05:01:07 +0000 Subject: [PATCH 11/14] auto ruff config generator --- contrib/create_ruff_config.py | 48 +++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 contrib/create_ruff_config.py diff --git a/contrib/create_ruff_config.py b/contrib/create_ruff_config.py new file mode 100644 index 0000000..21bb4a0 --- /dev/null +++ b/contrib/create_ruff_config.py @@ -0,0 +1,48 @@ +""" +This file creates a ruff.toml file using the configuration found inside +tkldet_modules/ruff.py + +Mostly to be used with ruff's formatter +""" + +import json +from os.path import abspath, dirname, join +import sys + +PROJECT_PATH = dirname(dirname(abspath(__file__))) +TKLDET_MODULE_PATH = join(PROJECT_PATH, "tkldet_modules") + +sys.path.insert(0, PROJECT_PATH) +sys.path.insert(1, TKLDET_MODULE_PATH) + +import libtkldet +from libtkldet.report import ReportLevel +from ruff import RUFF_LINTS + +# these rules cause issues with the formatter +incompatible = ["ISC001", "D203"] + +output = """ + +line-length = 79 +indent-width = 4 + +target-version = "py311" + +[lint] +""" + +select = [] +ignore = [*incompatible] + +for group, lints in RUFF_LINTS.items(): + for lint, level in lints.items(): + if level is not None and lint not in incompatible: + select.append(lint) + else: + ignore.append(lint) + +output += "select = " + json.dumps(select, indent=4) + "\n" +output += "ignore = " + json.dumps(ignore, indent=4) + "\n" + +print(output) From 1069a04018f6a7ac8ad317d23c2757ef1361859e Mon Sep 17 00:00:00 2001 From: Stefan Davis Date: Thu, 12 Sep 2024 04:23:56 +0000 Subject: [PATCH 12/14] disable line-length 
warnings on ruff module --- tkldet_modules/ruff.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tkldet_modules/ruff.py b/tkldet_modules/ruff.py index 50f377a..2fa4f21 100644 --- a/tkldet_modules/ruff.py +++ b/tkldet_modules/ruff.py @@ -14,6 +14,12 @@ # # You should have received a copy of the GNU General Public License along with # tkldev-detective. If not, see . + +# disable line-length warnings, fixing them makes this file +# too difficult to read + +# ruff: noqa: E501 + import json from typing import Generator import subprocess From 84f028101e72e1ed69684370aca1a887b75665be Mon Sep 17 00:00:00 2001 From: Stefan Davis Date: Thu, 12 Sep 2024 04:33:40 +0000 Subject: [PATCH 13/14] apply a bunch more lint fixes --- contrib/create_ruff_config.py | 11 ++++----- libtkldet/__init__.py | 10 +++++--- libtkldet/apt_file.py | 6 +++-- libtkldet/classifier.py | 27 +++++++++++++------- libtkldet/colors.py | 3 ++- libtkldet/common_data.py | 22 +++++++++-------- libtkldet/error.py | 9 ++++--- libtkldet/file_util.py | 6 +++-- libtkldet/fuzzy.py | 6 +++-- libtkldet/hint_extract.py | 3 ++- libtkldet/linter.py | 22 ++++++++++------- libtkldet/locator.py | 30 +++++++++++----------- libtkldet/mkparser.py | 20 +++++++++------ libtkldet/modman.py | 4 +-- libtkldet/plan_resolve.py | 8 +++--- libtkldet/report.py | 33 ++++++++++++++++--------- tkldet_modules/appliance_confd.py | 9 ++++--- tkldet_modules/appliance_files.py | 3 ++- tkldet_modules/appliance_makefile.py | 17 ++++++++----- tkldet_modules/filetype.py | 7 +++--- tkldet_modules/json_check.py | 7 +++--- tkldet_modules/missing_module_filter.py | 17 +++++++------ tkldet_modules/pylint.py | 11 +++++---- tkldet_modules/ruff.py | 8 +++--- tkldet_modules/shellcheck.py | 10 ++++---- tkldet_modules/to_ignore.py | 3 ++- tkldet_modules/yaml_check.py | 6 ++--- 27 files changed, 188 insertions(+), 130 deletions(-) mode change 100644 => 100755 contrib/create_ruff_config.py diff --git a/contrib/create_ruff_config.py 
b/contrib/create_ruff_config.py old mode 100644 new mode 100755 index 21bb4a0..57907ef --- a/contrib/create_ruff_config.py +++ b/contrib/create_ruff_config.py @@ -1,13 +1,14 @@ +#!/usr/bin/env python3 + """ -This file creates a ruff.toml file using the configuration found inside -tkldet_modules/ruff.py +Create a ruff config file from tkldet_modules/ruff.py Mostly to be used with ruff's formatter """ import json -from os.path import abspath, dirname, join import sys +from os.path import abspath, dirname, join PROJECT_PATH = dirname(dirname(abspath(__file__))) TKLDET_MODULE_PATH = join(PROJECT_PATH, "tkldet_modules") @@ -15,8 +16,6 @@ sys.path.insert(0, PROJECT_PATH) sys.path.insert(1, TKLDET_MODULE_PATH) -import libtkldet -from libtkldet.report import ReportLevel from ruff import RUFF_LINTS # these rules cause issues with the formatter @@ -35,7 +34,7 @@ select = [] ignore = [*incompatible] -for group, lints in RUFF_LINTS.items(): +for lints in RUFF_LINTS.values(): for lint, level in lints.items(): if level is not None and lint not in incompatible: select.append(lint) diff --git a/libtkldet/__init__.py b/libtkldet/__init__.py index 857e136..c330414 100644 --- a/libtkldet/__init__.py +++ b/libtkldet/__init__.py @@ -15,15 +15,17 @@ # You should have received a copy of the GNU General Public License along with # tkldev-detective. If not, see . -from typing import Iterator -from os.path import relpath, abspath -from . import locator, common_data, classifier +from collections.abc import Iterator +from os.path import abspath, relpath + +from . import classifier, common_data, locator from .common_data import APPLIANCE_ROOT from .error import ApplianceNotFoundError def initialize(path: str, ignore_non_appliance: bool) -> None: - """Initialize everything + """ + Initialize everything Involves scraping makefiles, parsing plans, etc. 
""" diff --git a/libtkldet/apt_file.py b/libtkldet/apt_file.py index dc1b0a0..3bfd762 100644 --- a/libtkldet/apt_file.py +++ b/libtkldet/apt_file.py @@ -15,7 +15,8 @@ # You should have received a copy of the GNU General Public License along with # tkldev-detective. If not, see . -"""Utilities for finding packages +""" +Utilities for finding packages Finds packages based on files they provide, including those not installed @@ -74,7 +75,8 @@ def find_python_package(package_name: str) -> list[str]: def find_python_package_from_import(module_str: str) -> list[str]: - """Find python package from import name + """ + Find python package from import name Return a list of packages that provide a given python import module, may be several modules deep (e.g. `foo.bar.baz`), attempts to find most diff --git a/libtkldet/classifier.py b/libtkldet/classifier.py index 6d8970c..9c9f038 100644 --- a/libtkldet/classifier.py +++ b/libtkldet/classifier.py @@ -21,14 +21,16 @@ code here provides ability to "classify" different files """ +from collections.abc import Iterable, Iterator from dataclasses import dataclass -from typing import Iterator, Iterable, Iterator, cast, ClassVar from os.path import dirname +from typing import ClassVar, cast @dataclass(frozen=True) class Item: - """Some "thing" which can be classified + """ + Some "thing" which can be classified tags are used to classify items, value is dependant on type of "thing" @@ -80,7 +82,8 @@ def pretty_print(self) -> None: @dataclass(frozen=True) class FileItem(Item): - """Specifically files which can be classified + """ + Specifically files which can be classified value is the raw path found by the locator """ @@ -98,7 +101,8 @@ class FileItem(Item): @dataclass(frozen=True) class PackageItem(Item): - """Specifically packages installed via plan which can be classied + """ + Specifically packages installed via plan which can be classied value is the package name """ @@ -112,9 +116,11 @@ class PackageItem(Item): class Classifier: 
- """Classifier base class + """ + Classifier base class - all registered classifiers are called with each item yielded by the locator. + All registered classifiers are called with each item yielded by the + locator. the tags determined by the classifiers determine which linters are run on which files. @@ -128,7 +134,8 @@ class Classifier: ItemType: ClassVar[type[Item]] = Item def do_classify(self, item: Item) -> None: - """Perform classification + """ + Perform classification Perform a classification so long as the concrete item type is compatible with this classifier @@ -178,7 +185,8 @@ class SubdirClassifier(FileClassifier): "the parent directory" recursive: ClassVar[bool] - "whether to match a child of any depth or only files directly inside the given dir" + """whether to match a child of any depth or only files directly inside + the given dir""" tags: ClassVar[list[str]] "exact tags to add to matched item" @@ -200,7 +208,8 @@ def classify(self, item: Item) -> None: def register_classifier(classifier: type[Classifier]) -> type[Classifier]: - """Register a classifier + """ + Register a classifier This must be called on classifiers added """ diff --git a/libtkldet/colors.py b/libtkldet/colors.py index 963c001..4d2e110 100644 --- a/libtkldet/colors.py +++ b/libtkldet/colors.py @@ -82,7 +82,8 @@ def set_colors_enabled(enabled: bool) -> None: - """Set color globals to ANSI color codes + """ + Set color globals to ANSI color codes If not enabled, sets them to empty strings """ diff --git a/libtkldet/common_data.py b/libtkldet/common_data.py index 135d2ac..143f2fe 100644 --- a/libtkldet/common_data.py +++ b/libtkldet/common_data.py @@ -17,12 +17,13 @@ """Utilities and data relating to ${FAB_PATH}/common""" import os -from os.path import join, isfile -from typing import Iterator -from .plan_resolve import parse_plan, PlanEntry -from .locator import iter_plan +from collections.abc import Iterator +from os.path import isfile, join + from .classifier import PackageItem 
-from .mkparser import parse_makefile, CommonFabBuildData +from .locator import iter_plan +from .mkparser import CommonFabBuildData, parse_makefile +from .plan_resolve import PlanEntry, parse_plan APPLIANCE_ROOT: str = "" _PLAN_RESOLVE_CACHE: list[PlanEntry] = [] @@ -69,27 +70,28 @@ def iter_packages() -> Iterator[PackageItem]: def get_common_overlays() -> list[str]: - """Return a list of all common overlays included in this appliance""" + """Return a list of all common overlays in this appliance""" return _FAB_DATA.overlays[:] def get_common_conf() -> list[str]: - """Return a list of all common conf scripts included in this appliance""" + """Return a list of all common conf scripts in this appliance""" return _FAB_DATA.conf[:] def get_common_removelists() -> list[str]: - """Return a list of all common removelists included in this appliance""" + """Return a list of all common removelists in this appliance""" return _FAB_DATA.removelists[:] def get_common_removelists_final() -> list[str]: - """Return a list of all common final removelists included in this appliance""" + """Return a list of all common final removelists in this appliance""" return _FAB_DATA.removelists_final[:] def get_path_in_common_overlay(path: str) -> str | None: - """Get overlay path from absolute path + """ + Get overlay path from absolute path Check if a given path (expressed as an absolute path, where it would be placed in a build) is included in build, if so the path to the file/dir IN diff --git a/libtkldet/error.py b/libtkldet/error.py index 976b9b4..6f84164 100644 --- a/libtkldet/error.py +++ b/libtkldet/error.py @@ -22,14 +22,16 @@ class TKLDevDetectiveError(Exception): class ApplianceNotFoundError(TKLDevDetectiveError): - """Appliance was not found + """ + Appliance was not found Likely path/app name was incorrect """ class PlanNotFoundError(TKLDevDetectiveError): - """A plan could not be included + """ + A plan could not be included Likely include name is incorrect """ @@ -40,7 +42,8 @@ 
class UnknownPlanDirectiveError(TKLDevDetectiveError): class InvalidPlanError(TKLDevDetectiveError): - """Plan appears to not be valid + """ + Plan appears to not be valid Mismatched #if* and #endif directives likely """ diff --git a/libtkldet/file_util.py b/libtkldet/file_util.py index 6bf0177..529890a 100644 --- a/libtkldet/file_util.py +++ b/libtkldet/file_util.py @@ -21,7 +21,8 @@ def position_from_char_offset( path: str, offset: int ) -> tuple[int, int] | None: - """Get column/line from offset into file + """ + Get column/line from offset into file Given an offset into a file, returns the line and column numbers respectively, expressed as a tuple. If offset is invalid (such as too @@ -45,7 +46,8 @@ def position_from_char_offset( def position_from_byte_offset( path: str, offset: int ) -> tuple[int, int] | None: - """Get column/line from offset into file in binary mode + """ + Get column/line from offset into file in binary mode Given an offset into a file (in binary mode), returns the line and column numbers respectively, expressed as a tuple. 
If offset is invalid (such as diff --git a/libtkldet/fuzzy.py b/libtkldet/fuzzy.py index e83846d..2b86e6d 100644 --- a/libtkldet/fuzzy.py +++ b/libtkldet/fuzzy.py @@ -22,7 +22,8 @@ def fuzzy_diff(x: str, y: str) -> int: - """Calculate difference between two strings + """ + Calculate difference between two strings Return value has no objective meaning, only for comparison """ @@ -38,7 +39,8 @@ def fuzzy_diff(x: str, y: str) -> int: def fuzzy_suggest( check: str, options: list[str], max_diff: int = MAX_DIFF ) -> str | None: - """Suggest a string from given options + """ + Suggest a string from given options Given a 'check' value, and a list of valid options, find the option closest to the 'check' value, given that it's 'difference' (calculated by diff --git a/libtkldet/hint_extract.py b/libtkldet/hint_extract.py index d530930..9b85706 100644 --- a/libtkldet/hint_extract.py +++ b/libtkldet/hint_extract.py @@ -127,7 +127,8 @@ def format_extract( row_span: tuple[int, int] | int, col_span: tuple[int, int] | int | None, ) -> list[str]: - """Annotate segment of file + """ + Annotate segment of file Given a row or span of rows and optionally a column or span of columns return an annotated segment of the specified file diff --git a/libtkldet/linter.py b/libtkldet/linter.py index 05d1922..bfc0cc7 100644 --- a/libtkldet/linter.py +++ b/libtkldet/linter.py @@ -21,14 +21,16 @@ code here provides interface for modules to provide linting """ -from typing import ClassVar, Iterator +from collections.abc import Iterator +from typing import ClassVar -from .classifier import Item, FileItem +from .classifier import FileItem, Item from .report import Report class Linter: - """Base class for linters + """ + Base class for linters by default linters automatically enable/disable based on `ENABLE_TAGS` and `DISABLE_TAGS` @@ -44,13 +46,14 @@ class Linter: ItemType: ClassVar[type[Item]] = Item def should_check(self, item: Item) -> bool: - """Actually performs check to see if the linter should 
run on this item + """ + Actually performs check to see if the linter should run on this item - if `ENABLE_TAGS` is empty, run lint on all items except those that have - tags in `DISABLE_TAGS` + if `ENABLE_TAGS` is empty, run lint on all items except those + that have tags in `DISABLE_TAGS` - if `ENABLE_TAGS` has tags, run lint only on items which have at least 1 tag - from `ENABLE_TAGS` and non from `DISABLE_TAGS` + if `ENABLE_TAGS` has tags, run lint only on items which have at least + 1 tag from `ENABLE_TAGS` and none from `DISABLE_TAGS` (safe to override) """ @@ -96,7 +99,8 @@ def check(self, item: Item) -> Iterator[Report]: def register_linter(linter: type[Linter]) -> type[Linter]: - """Register a linter + """ + Register a linter Must be called on all linters added """ diff --git a/libtkldet/locator.py b/libtkldet/locator.py index bbd391c..660546e 100644 --- a/libtkldet/locator.py +++ b/libtkldet/locator.py @@ -17,22 +17,20 @@ """locates files to be classified and eventually linted""" -from os.path import join, normpath, basename, isdir, isfile +from collections.abc import Iterator from glob import iglob - -from typing import Iterator +from logging import getLogger +from os.path import basename, isdir, isfile, join, normpath from .error import ApplianceNotFoundError -from logging import getLogger - PRODUCTS_DIR = "/turnkey/fab/products" logger = getLogger(__name__) def is_appliance_path(path: str) -> bool: - """is path, a path to an appliance?""" + """Is path, a path to an appliance?""" path = normpath(path) if path == join(PRODUCTS_DIR, basename(path)): return isfile(join(path, "Makefile")) @@ -40,12 +38,12 @@ def is_appliance_path(path: str) -> bool: def is_appliance_name(name: str) -> bool: - """is name, the name of an existing appliance on tkldev?""" + """Is name, the name of an existing appliance on tkldev?""" return name != "."
and "/" not in name and isdir(join(PRODUCTS_DIR, name)) def is_inside_appliance(path: str) -> bool: - """is path, a path to a file inside an appliance""" + """Is path, a path to a file inside an appliance""" path = normpath(path) if not path.startswith(PRODUCTS_DIR + "/"): return False @@ -56,7 +54,8 @@ def is_inside_appliance(path: str) -> bool: def get_appliance_root(path: str) -> str: - """Get appliance root from path + """ + Get appliance root from path Given a path to appliance, file inside appliance or appliance name, return absolute path to the appliance @@ -76,15 +75,16 @@ def get_appliance_root(path: str) -> str: if root is None or not isfile(join(root, "Makefile")): logger.info("lint root is not an appliance") error_message = ( - "input does not appear to be an appliance name, path to an appliance" - " or path to a file inside of an appliance" + "input does not appear to be an appliance name, path to an" + " appliance or path to a file inside of an appliance" ) raise ApplianceNotFoundError(error_message) return root def locator(root: str, ignore_non_appliance: bool) -> Iterator[str]: - """Yield most files inside appliance + """ + Yield most files inside appliance Yields almost every file in an appliance of potential concern or a specific file only if given a path to a file inside an appliance @@ -131,15 +131,15 @@ def full_appliance_locator(root: str) -> Iterator[str]: def iter_conf(root: str) -> Iterator[str]: - """yield each conf file in the appliance""" + """Yield each conf file in the appliance""" yield from iglob(join(root, "conf.d/*")) def iter_plan(root: str) -> Iterator[str]: - """yield each plan file in the appliance""" + """Yield each plan file in the appliance""" yield from iglob(join(root, "plan/*")) def iter_overlay(root: str) -> Iterator[str]: - """yield each file in the appliance overlay""" + """Yield each file in the appliance overlay""" yield from iglob(join(root, "overlay/**"), recursive=True) diff --git a/libtkldet/mkparser.py 
b/libtkldet/mkparser.py index 033e998..9a97365 100644 --- a/libtkldet/mkparser.py +++ b/libtkldet/mkparser.py @@ -15,7 +15,8 @@ # You should have received a copy of the GNU General Public License along with # tkldev-detective. If not, see . -"""Utilities for parsing and extracting information from makefiles +""" +Utilities for parsing and extracting information from makefiles Extracts variable definitions from makefiles, purpose built for fab tool-chain on tkldev, so ignores tests & definitions, butchers most @@ -24,9 +25,9 @@ makefile parsing) """ +import os import typing from dataclasses import dataclass -import os ASSIGNMENT_OPERATORS = ["?=", ":=", "+=", "="] CHECKS = ["ifeq", "ifneq", "ifdef", "ifndef"] @@ -58,7 +59,8 @@ def split_value(raw: str) -> list[str]: def parse_assignment(line: str) -> tuple[str, str, str] | None: - """Parse assignment line + """ + Parse assignment line Attempt to parse a makefile assignment operation, if successful return tuple of (variable_name, operator, variable_value) @@ -100,7 +102,8 @@ class MutMakefileData: included: list[str] def resolve_var(self, value: str) -> ValueList: - """Expand variables + """ + Expand variables Expands makefile and env variables, then split into multiple values """ @@ -143,7 +146,8 @@ def assign_var(self, name: str, operator: str, values: str) -> None: raise ValueError(error_message) def finish(self) -> "MakefileData": - """Return concrete class + """ + Return concrete class Resolve unresolved variables and return a concrete version of this class with simpler typing @@ -202,7 +206,8 @@ def __getitem__(self, key: str) -> list[str]: return self.variables[key] def to_fab_data(self) -> CommonFabBuildData: - """Return high level appliance data + """ + Return high level appliance data Returns included overlays, conf and removelists """ @@ -223,7 +228,8 @@ def to_dict(self) -> dict: def parse_makefile( # noqa: C901, PLR0912 path: str, makefile_data: MakefileData | None = None ) -> MakefileData: - """Get all 
variables in makefile including included makefiles + """ + Get all variables in makefile including included makefiles Attempts to naively get all variables defined in makefile tree. This function is recursive and makefile_data is used when including other diff --git a/libtkldet/modman.py b/libtkldet/modman.py index 4c080df..2206378 100644 --- a/libtkldet/modman.py +++ b/libtkldet/modman.py @@ -19,9 +19,9 @@ import importlib.machinery import importlib.util -from os.path import join, dirname, abspath, splitext, isfile, exists -from os import listdir import sys +from os import listdir +from os.path import abspath, dirname, exists, isfile, join, splitext from . import colors as co from .error import TKLDevDetectiveError diff --git a/libtkldet/plan_resolve.py b/libtkldet/plan_resolve.py index 202c8c1..d33f1a7 100644 --- a/libtkldet/plan_resolve.py +++ b/libtkldet/plan_resolve.py @@ -17,12 +17,13 @@ """very naive cpp parser for plan parsing""" -from os.path import join, isfile from dataclasses import dataclass +from os.path import isfile, join + from .error import ( + InvalidPlanError, PlanNotFoundError, UnknownPlanDirectiveError, - InvalidPlanError, ) static_vars = {"KERNEL": "", "DEBIAN": "", "AMD64": ""} @@ -102,7 +103,8 @@ def _remove_multiline_comments(raw: str) -> str: def _parse_plan( # noqa: C901, PLR0912 path: str, include_paths: list[str], plan_stack: list[str] | None = None ) -> list[PlanEntry]: - """Parse a plan + """ + Parse a plan (uses cpp, but notably does not use *most* cpp functionality). This code will not work on *most* cpp related projects diff --git a/libtkldet/report.py b/libtkldet/report.py index 2cb0705..13042b2 100644 --- a/libtkldet/report.py +++ b/libtkldet/report.py @@ -15,25 +15,28 @@ # You should have received a copy of the GNU General Public License along with # tkldev-detective. If not, see . 
-"""Encapsulates "reports" +""" +Encapsulates "reports" these are issues, warnings or notes about "Item"s produced by "Linter"s """ -from dataclasses import dataclass -from enum import Enum import enum -from typing import Iterator, Iterable, ClassVar import textwrap +from collections.abc import Iterable, Iterator +from dataclasses import dataclass +from enum import Enum +from typing import ClassVar -from .classifier import Item, FileItem from . import colors as co +from .classifier import FileItem, Item from .hint_extract import format_extract @dataclass class Replacement: - """Holds replacement data + """ + Holds replacement data a list of replacements in form provided by linters """ @@ -103,7 +106,8 @@ def parse_report_level(raw: str) -> ReportLevel: @dataclass(frozen=True) class Report: - """Information to be presented to user + """ + Information to be presented to user Holds all information about a particular issue in a particular location possibly including metadata, possible fixes, severity, which linter @@ -156,7 +160,8 @@ def to_dict(self) -> dict: } def modified(self, **kwargs: Item | str | dict | None) -> "Report": - """Return new modified version of this report + """ + Return new modified version of this report Return a copy of this report with fields specified in `kwargs` replacing fields from this report @@ -184,7 +189,8 @@ def format(self, suggested_fix: bool = True) -> str: @dataclass(frozen=True) class FileReport(Report): - """Holds information about a report for a file + """ + Holds information about a report for a file Holds all information about a particular issue in a particular location in a particular file possibly including metadata, possible fixes, @@ -246,7 +252,8 @@ def to_dict(self) -> dict: class ReportFilter: - """A filter to change reports before presenting + """ + A filter to change reports before presenting Last stop before presenting to the user, report filters can modify, split, generate or even remove reports @@ -255,7 +262,8 @@ 
class ReportFilter: WEIGHT: ClassVar[int] = 100 def filter(self, report: Report) -> Iterator[Report]: - """Given a report filter or modify it + """ + Given a report filter or modify it There doesn't need to be a 1-1 relationship between inputs and outputs @@ -270,7 +278,8 @@ def filter(self, report: Report) -> Iterator[Report]: def register_filter(filt: type[ReportFilter]) -> type[ReportFilter]: - """Register a report filter + """ + Register a report filter Must be called on all filters added """ diff --git a/tkldet_modules/appliance_confd.py b/tkldet_modules/appliance_confd.py index cccd47b..4ea1c16 100644 --- a/tkldet_modules/appliance_confd.py +++ b/tkldet_modules/appliance_confd.py @@ -17,12 +17,13 @@ """Lints for appliance conf.d/* scripts""" -from typing import Generator, ClassVar -import stat import os +import stat +from collections.abc import Generator +from typing import ClassVar -from libtkldet.linter import FileLinter, register_linter, FileItem -from libtkldet.report import Report, FileReport, ReportLevel +from libtkldet.linter import FileItem, FileLinter, register_linter +from libtkldet.report import FileReport, Report, ReportLevel @register_linter diff --git a/tkldet_modules/appliance_files.py b/tkldet_modules/appliance_files.py index 72f418a..4f3d6c0 100644 --- a/tkldet_modules/appliance_files.py +++ b/tkldet_modules/appliance_files.py @@ -16,12 +16,13 @@ # tkldev-detective. If not, see . 
"""Classifiers for appliance specific files""" +from typing import ClassVar + from libtkldet.classifier import ( ExactPathClassifier, SubdirClassifier, register_classifier, ) -from typing import ClassVar @register_classifier diff --git a/tkldet_modules/appliance_makefile.py b/tkldet_modules/appliance_makefile.py index f2ad828..95daea6 100644 --- a/tkldet_modules/appliance_makefile.py +++ b/tkldet_modules/appliance_makefile.py @@ -17,11 +17,12 @@ """Linters for appliance makefile""" -from typing import Generator, ClassVar +from collections.abc import Generator +from typing import ClassVar -from libtkldet.linter import FileLinter, register_linter, FileItem -from libtkldet.report import Report, FileReport, ReportLevel from libtkldet.fuzzy import fuzzy_suggest +from libtkldet.linter import FileItem, FileLinter, register_linter +from libtkldet.report import FileReport, Report, ReportLevel @register_linter @@ -65,11 +66,15 @@ def check(self, item: FileItem) -> Generator[Report, None, None]: if var not in mk_confvars: suggested_var = fuzzy_suggest(var, mk_confvars) if suggested_var: - fix = f"did you mean {suggested_var!r} instead of {var!r} ?" + fix = ( + f"did you mean {suggested_var!r}" + f" instead of {var!r} ?" + ) else: fix = ( - f"either replace with one of {mk_confvars} or add it to" - " turnkey.mk's list of valid CONF_VARS", + f"either replace with one of {mk_confvars}" + " or add it to turnkey.mk's list of valid" + " CONF_VARS" ) yield FileReport( item=item, diff --git a/tkldet_modules/filetype.py b/tkldet_modules/filetype.py index 6c9ef8d..695730d 100644 --- a/tkldet_modules/filetype.py +++ b/tkldet_modules/filetype.py @@ -16,10 +16,11 @@ # tkldev-detective. If not, see . 
"""General file classification""" -from libtkldet.classifier import FileClassifier, FileItem, register_classifier -from os.path import splitext, isfile -from typing import ClassVar from logging import getLogger +from os.path import isfile, splitext +from typing import ClassVar + +from libtkldet.classifier import FileClassifier, FileItem, register_classifier logger = getLogger(__name__) diff --git a/tkldet_modules/json_check.py b/tkldet_modules/json_check.py index 40992fe..eb40515 100644 --- a/tkldet_modules/json_check.py +++ b/tkldet_modules/json_check.py @@ -18,10 +18,11 @@ """Json related linters""" import json -from typing import Generator, ClassVar +from collections.abc import Generator +from typing import ClassVar -from libtkldet.linter import FileLinter, FileItem, register_linter -from libtkldet.report import Report, FileReport, ReportLevel +from libtkldet.linter import FileItem, FileLinter, register_linter +from libtkldet.report import FileReport, Report, ReportLevel @register_linter diff --git a/tkldet_modules/missing_module_filter.py b/tkldet_modules/missing_module_filter.py index e6cf3f1..1d418cb 100644 --- a/tkldet_modules/missing_module_filter.py +++ b/tkldet_modules/missing_module_filter.py @@ -15,17 +15,17 @@ # You should have received a copy of the GNU General Public License along with # tkldev-detective. If not, see . 
-from typing import Generator +import re +from collections.abc import Generator +from os.path import dirname -from libtkldet.report import Report, ReportLevel, register_filter, ReportFilter -from libtkldet.linter import FileItem from libtkldet.apt_file import find_python_package_from_import from libtkldet.common_data import ( - is_package_to_be_installed, get_path_in_common_overlay, + is_package_to_be_installed, ) -from os.path import dirname -import re +from libtkldet.linter import FileItem +from libtkldet.report import Report, ReportFilter, ReportLevel, register_filter MISSING_MODULE_RE = re.compile(r"^Unable to import '(.*)'$") @@ -58,7 +58,10 @@ def filter_packaged(report: Report, module_name: str) -> Report | None: if not package_installed: if len(packages) > 1: packages_str = ", ".join('"' + pkg + '"' for pkg in packages) - modified_message += f" (perhaps you meant to add one of {packages_str} to the plan?)" + modified_message += ( + " (perhaps you meant to add one of" + f" {packages_str} to the plan?)" + ) else: modified_message += ( f' (perhaps you meant to add "{packages[0]}" to the plan?)' diff --git a/tkldet_modules/pylint.py b/tkldet_modules/pylint.py index 9420de5..478a7d4 100644 --- a/tkldet_modules/pylint.py +++ b/tkldet_modules/pylint.py @@ -15,13 +15,14 @@ # You should have received a copy of the GNU General Public License along with # tkldev-detective. If not, see . 
import json -from typing import Generator, ClassVar import subprocess -from os.path import join, dirname, abspath +from collections.abc import Generator +from os.path import abspath, dirname, join +from typing import ClassVar -from libtkldet.linter import FileLinter, FileItem, register_linter -from libtkldet.report import Report, FileReport, parse_report_level -from libtkldet.apt_file import is_installed, is_in_path +from libtkldet.apt_file import is_in_path, is_installed +from libtkldet.linter import FileItem, FileLinter, register_linter +from libtkldet.report import FileReport, Report, parse_report_level if is_installed("pylint") and not is_in_path("ruff"): rcfile = join(dirname(dirname(abspath(__file__))), "pylint_rcfile") diff --git a/tkldet_modules/ruff.py b/tkldet_modules/ruff.py index 2fa4f21..5cd3bd8 100644 --- a/tkldet_modules/ruff.py +++ b/tkldet_modules/ruff.py @@ -18,15 +18,15 @@ # disable line-length warnings, fixing them makes this file # too difficult to read -# ruff: noqa: E501 +# ruff: noqa: E501, C408 import json -from typing import Generator import subprocess +from collections.abc import Generator -from libtkldet.linter import FileLinter, FileItem, register_linter -from libtkldet.report import Report, FileReport, parse_report_level from libtkldet.apt_file import is_in_path +from libtkldet.linter import FileItem, FileLinter, register_linter +from libtkldet.report import FileReport, Report, parse_report_level RUFF_LINTS = dict( pyflakes=dict( diff --git a/tkldet_modules/shellcheck.py b/tkldet_modules/shellcheck.py index 2024f8a..37c2e72 100644 --- a/tkldet_modules/shellcheck.py +++ b/tkldet_modules/shellcheck.py @@ -15,17 +15,17 @@ # You should have received a copy of the GNU General Public License along with # tkldev-detective. If not, see . 
import json -from typing import Generator import subprocess +from collections.abc import Generator -from libtkldet.linter import FileLinter, FileItem, register_linter +from libtkldet.apt_file import is_installed +from libtkldet.linter import FileItem, FileLinter, register_linter from libtkldet.report import ( - Report, FileReport, - parse_report_level, Replacement, + Report, + parse_report_level, ) -from libtkldet.apt_file import is_installed if is_installed("shellcheck"): diff --git a/tkldet_modules/to_ignore.py b/tkldet_modules/to_ignore.py index 30fe6a5..d27739f 100644 --- a/tkldet_modules/to_ignore.py +++ b/tkldet_modules/to_ignore.py @@ -16,10 +16,11 @@ # tkldev-detective. If not, see . """For marking files which should be ignored""" -from libtkldet.classifier import FileClassifier, FileItem, register_classifier import os.path from typing import ClassVar +from libtkldet.classifier import FileClassifier, FileItem, register_classifier + def is_or_has_ancestor_dir(path: str, directory: str) -> bool: """Checks if path has an ancestor directory with a given name""" diff --git a/tkldet_modules/yaml_check.py b/tkldet_modules/yaml_check.py index f2297a4..1b88510 100644 --- a/tkldet_modules/yaml_check.py +++ b/tkldet_modules/yaml_check.py @@ -20,10 +20,10 @@ YAML = False else: YAML = True -from typing import Generator +from collections.abc import Generator -from libtkldet.linter import FileLinter, FileItem, register_linter -from libtkldet.report import Report, FileReport, ReportLevel +from libtkldet.linter import FileItem, FileLinter, register_linter +from libtkldet.report import FileReport, Report, ReportLevel class YamlLinter(FileLinter): From 9fe58d1a35113ece4f5ac295418346e1a445bc31 Mon Sep 17 00:00:00 2001 From: Stefan Davis Date: Mon, 16 Sep 2024 02:12:53 +0000 Subject: [PATCH 14/14] fix some lints --- contrib/create_ruff_config.py | 14 ++++++++++++++ libtkldet/__init__.py | 2 +- tkldet_modules/ruff.py | 14 ++++++++++---- 3 files changed, 25 insertions(+), 5 
deletions(-) diff --git a/contrib/create_ruff_config.py b/contrib/create_ruff_config.py index 57907ef..f89994e 100755 --- a/contrib/create_ruff_config.py +++ b/contrib/create_ruff_config.py @@ -6,6 +6,7 @@ Mostly to be used with ruff's formatter """ +import subprocess import json import sys from os.path import abspath, dirname, join @@ -18,6 +19,10 @@ from ruff import RUFF_LINTS +known_ruff_lints = json.loads(subprocess.run(['ruff', 'rule', '--all', '--output-format', 'json'], + capture_output=True).stdout) +known_ruff_lints = { rule['code'] for rule in known_ruff_lints } + # these rules cause issues with the formatter incompatible = ["ISC001", "D203"] @@ -34,8 +39,13 @@ select = [] ignore = [*incompatible] +found = set() + for lints in RUFF_LINTS.values(): for lint, level in lints.items(): + if lint in known_ruff_lints: + found.add(lint) + if level is not None and lint not in incompatible: select.append(lint) else: @@ -45,3 +55,7 @@ output += "ignore = " + json.dumps(ignore, indent=4) + "\n" print(output) + +missing = known_ruff_lints-found +if missing: + sys.stderr.write(f'missing {missing}\n') diff --git a/libtkldet/__init__.py b/libtkldet/__init__.py index c330414..cac7000 100644 --- a/libtkldet/__init__.py +++ b/libtkldet/__init__.py @@ -43,7 +43,7 @@ def yield_appliance_items() -> Iterator[classifier.Item]: """Yield everything 'lintable'""" yield from common_data.iter_packages() - for path in locator.locator(APPLIANCE_ROOT): + for path in locator.locator(APPLIANCE_ROOT, False): yield classifier.FileItem( value=path, _tags={}, diff --git a/tkldet_modules/ruff.py b/tkldet_modules/ruff.py index 5cd3bd8..6485006 100644 --- a/tkldet_modules/ruff.py +++ b/tkldet_modules/ruff.py @@ -511,8 +511,8 @@ flake8_logging=dict( LOG001="WARN", # using `logger.Logger` directly LOG002="WARN", # using `__cached__` or `__file__` for logger name - LOG003="WARN", # using `logger.exception` without `exc_info` - LOG004="REFACTOR", # using `logging.WARN` + LOG007="WARN", # using 
`logger.exception` without `exc_info` + LOG009="REFACTOR", # using `logging.WARN` ), flake8_logging_format=dict( G001="REFACTOR", # logging statement uses `.format` @@ -539,7 +539,7 @@ ), flake8_print=dict( T201=None, # `print` call found - T202=None, # `pprint` call found + T203=None, # `pprint` call found ), flake8_pyi=dict( PYI001="REFACTOR", # private type param should start with `_` @@ -775,6 +775,7 @@ PD013="REFACTOR", # `.melt` prefered to `.stack` PD015="REFACTOR", # use `.merge` method instead of `pd.merge` function PD101="REFACTOR", # using `series.nunique()` for checking a series is constant, is ineffecient + PD901="REFACTOR", # using generic variable name df for DataFrames ), pygrep_hooks=dict( PGH001=None, # use of `eval`, deprecated in favor of S307 @@ -795,7 +796,8 @@ PLC1901="REFACTOR", # comparison with empty string, can be changed to check if string is falsy PLC2401="REFACTOR", # name is not ascii PLC2403="REFACTOR", # import name is not ascii - PLC2801="WARN", # import of private name + PLC2701="WARN", # import of private name + PLC2801="REFACTOR", # unnecessary dunder call PLC3002="REFACTOR", # unnecessary lambda call ), pylint_error=dict( @@ -808,6 +810,7 @@ PLE0237="ERROR", # `__slots__` is defined, but an attribute is defined that is not in the slots PLE0241="ERROR", # duplicate base for class PLE0302="ERROR", # bad special method signature + PLE0303="ERROR", # invalid `__len__` return PLE0304="ERROR", # invalid `__bool__` return type PLE0305="ERROR", # invalid `__index__` return type PLE0307="ERROR", # invalid `__str__` return type @@ -824,6 +827,7 @@ PLE1206="ERROR", # too few arguments for logging format string PLE1300="ERROR", # bad string format character PLE1307="ERROR", # bad string format type + PLE1310="ERROR", # bad string strip call PLE1507="ERROR", # invalid type for `os.getenv` argument PLE1519="ERROR", # `@singledispatch` should not be used on methods PLE1520="ERROR", # `@singledispatchmethod` should only be used on methods @@ 
-1004,6 +1008,8 @@ RUF030="WARN", # print in `assert` RUF031="WARN", # incorrect tuple subscript parenthesization RUF032="WARN", # `Decimal()` called with float literal + RUF033="WARN", # __post_init__ with default arguments + RUF034="WARN", # useless if else RUF100="WARN", # unused `noqa` directive RUF101="WARN", # `noqa` suppresses rule that has been deprecated in favor of another RUF200="ERROR", # failed to parse `pyproject.toml`