Replace black/isort with ruff
leifwar committed Aug 15, 2024
1 parent c8afb2d commit 057e6df
Showing 7 changed files with 554 additions and 395 deletions.
41 changes: 6 additions & 35 deletions .pre-commit-config.yaml
@@ -3,47 +3,18 @@ default_language_version:
python: python3
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.3.0
rev: v4.6.0
hooks:
- id: check-json
- id: check-merge-conflict
- id: debug-statements
- id: detect-private-key
- id: check-yaml
- id: check-toml
- id: end-of-file-fixer
- id: trailing-whitespace
- repo: https://github.com/hadialqattan/pycln
rev: v2.1.1
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.5.7
hooks:
- id: pycln
args: [--all]
- repo: https://github.com/psf/black
rev: 22.10.0
hooks:
- id: black
- repo: https://github.com/pycqa/isort
rev: 5.10.1
hooks:
- id: isort
additional_dependencies: [toml]
- repo: https://github.com/pycqa/flake8
rev: 5.0.4
hooks:
- id: flake8
additional_dependencies:
- pyflakes
- pycodestyle
- flake8-bugbear
- flake8-comprehensions
- flake8-eradicate
- flake8-mutable
- flake8-simplify
- flake8-builtins
- repo: https://github.com/Lucas-C/pre-commit-hooks-bandit
rev: v1.0.6
hooks:
- id: python-bandit-vulnerability-check
args: [-ll, -ii, --exclude, ./.venv/*, --recursive, .]
pass_filenames: false
always_run: true
- id: ruff
args: [--fix, --exit-non-zero-on-fix]
- id: ruff-format
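
The two ruff hooks replace the whole previous chain: `ruff` with `--fix` covers pycln (unused imports), isort (import sorting), and the flake8 plugin set, while `ruff-format` stands in for black. As a hedged illustration (not part of the commit), a file like the following would be cleaned up by the new hooks:

    import json
    import os  # F401: unused import -- formerly pycln's job, deleted by `ruff --fix`


    def dump(payload: dict) -> str:
        print("dumping")  # T20 (flake8-print): flagged under the rule set in pyproject.toml
        return json.dumps( payload )  # spacing normalized by `ruff-format`, black's old job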
708 changes: 419 additions & 289 deletions poetry.lock

Large diffs are not rendered by default.

121 changes: 95 additions & 26 deletions pyproject.toml
@@ -10,45 +10,114 @@ python = "^3.9"
pandas = "*"

[tool.poetry.dev-dependencies]
black = "*"
coverage = { version = "*", extras = ['toml'] }
flake8 = "*"
flake8-bugbear = "*"
isort = ">= 5"
mock = "*"
ruff = "*"
pre-commit = "*"
pytest = ">= 6" # needs pyproject.toml support
pytest-cov = "*"
pytest-integration = "*"

[tool.ruff]
# Assume Python 3.9
target-version = "py39"
line-length = 100

[tool.ruff.format]
docstring-code-format = true

[tool.ruff.lint]

select = [
"A", # flake8-builtins
"ANN", # flake8-annotations
"ARG", # flake8-unused-arguments
"B", # flake8-bugbear
"C4", # flake8-comprehensions
"C90", # mccabe
"COM", # flake8-commas
"D", # pydocstyle
"DTZ", # flake8-datetimez
"E", "W", # pycodestyle
"F", # Pyflakes
"FLY", # flynt
"FURB", # refurb
"G", # flake8-logging-format
"I", # isort
"LOG", # flake8-logging
"N", # pep8-nameing
"NPY", # numpy specific rules
"PERF", # Perflint
"PIE", # flake8-pie
"RUF", # Ruff specific rules
"S", # flake8-bandit
"SIM", # flake8-simplify
"T20", # flake8-print
"T100", # debug statements
"TCH", # flake8-type-checking
"TRY", # tryceratops
"UP", # pyupgrade
"YTT", # flake8-2020
]

ignore = ['S101', 'COM812', 'ANN101', 'ANN102', 'ANN401', 'TRY003', 'D100', 'D101', 'D102', 'D103', 'D104', 'D105', 'D107']

# Allow autofix for all enabled rules (when `--fix` is provided).
fixable = ["A", "B", "C", "D", "E", "F", "G", "I", "N", "Q", "S", "T", "W", "ANN", "ARG", "BLE", "COM", "DJ", "DTZ", "EM", "ERA", "EXE", "FBT", "ICN", "INP", "ISC", "NPY", "PD", "PGH", "PIE", "PL", "PT", "PTH", "PYI", "RET", "RSE", "RUF", "SIM", "SLF", "TCH", "TID", "TRY", "UP", "YTT"]
unfixable = []

# Exclude a variety of commonly ignored directories.
exclude = [
".bzr",
".direnv",
".eggs",
".git",
".git-rewrite",
".hg",
".mypy_cache",
".nox",
".pants.d",
".pytype",
".ruff_cache",
".svn",
".tox",
".venv",
"__pypackages__",
"_build",
"buck-out",
"build",
"dist",
"node_modules",
"venv",
]

# Allow unused variables when underscore-prefixed.
dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"

[tool.ruff.lint.pydocstyle]
convention = "google"

[tool.ruff.lint.per-file-ignores]
"docs/conf.py" = ["A001"]
"tests/test*.py" = ["ANN201"]

[tool.ruff.lint.flake8-import-conventions]
[tool.ruff.lint.flake8-import-conventions.aliases]
# Declare the default aliases.
numpy = "np"
pandas = "pd"
scipy = "sp"

[tool.ruff.lint.isort]
known-first-party = []
known-local-folder = ["abot_opf", "tests"]

[tool.poetry-dynamic-versioning]
enable = true
vcs = "git"
style = "semver"
pattern = "^(?P<base>\\d+\\.\\d+\\.\\d+)(-?((?P<stage>[a-zA-Z]+)\\.?(?P<revision>\\d+)?))?$"
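
The version pattern accepts plain and staged semver tags. A quick sketch of what it matches (illustrative only, using Python's re):

    import re

    PATTERN = r"^(?P<base>\d+\.\d+\.\d+)(-?((?P<stage>[a-zA-Z]+)\.?(?P<revision>\d+)?))?$"
    assert re.match(PATTERN, "1.2.3")
    assert re.match(PATTERN, "1.2.3-alpha.4")["stage"] == "alpha"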

[tool.isort]
profile = "black"
line_length = 100
known_first_party = []

[tool.black]
line-length = 100
include = '\.pyi?$'
exclude = '''
/(
\.git
| \.mypy_cache
| \.tox
| \.venv
| _build
| buck-out
| build
| dist
| profiling
)/
'''

[tool.coverage.report]
# Regexes for lines to exclude from consideration
exclude_lines = [
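The standalone bandit hook is gone too; its checks now run as ruff's `S` (flake8-bandit) rules, with `S101` excluded above so plain asserts stay legal. A minimal sketch of the effect (illustrative, not from the commit):

    import subprocess


    def run(cmd: str) -> None:
        assert cmd  # S101 fires on assert, but it sits in the `ignore` list above
        subprocess.run(cmd, shell=True, check=True)  # S602: shell=True is flagged by the S rules
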
9 changes: 3 additions & 6 deletions rawxio/data_model.py
@@ -1,5 +1,4 @@
from enum import Enum, auto
from typing import List

REQUIRED_PK_FIELDS = {
"bus": ["ibus"],
@@ -66,11 +65,11 @@
}


def get_pk_fields(name: str) -> List[str]:
def get_pk_fields(name: str) -> list[str]:
return REQUIRED_PK_FIELDS.get(name, []) + OPTIONAL_PK_FIELDS.get(name, [])


def get_required_fields(name: str) -> List[str]:
def get_required_fields(name: str) -> list[str]:
return REQUIRED_PK_FIELDS.get(name, []) + REQUIRED_NON_PK_FIELDS.get(name, [])


@@ -82,9 +81,7 @@ def has_primary_key(name: str) -> bool:


class DataSetType(Enum):
"""
Rawx has two record types. ParameterSet and DataSet
"""
"""Rawx has two record types. ParameterSet and DataSet."""

PARAMETER_SET = auto()
DATA_SET = auto()
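
For context (a sketch, not part of the diff), the two helpers above combine the field tables, so callers can assemble key and required columns per record type:

    from rawxio.data_model import get_pk_fields, get_required_fields

    get_pk_fields("bus")        # ["ibus"] plus any optional primary-key fields
    get_required_fields("bus")  # ["ibus"] plus the required non-key fields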
33 changes: 15 additions & 18 deletions rawxio/rawx.py
@@ -1,11 +1,9 @@
"""Convert between base case and PSS/E rawx format.
"""Convert between base case and PSS/E rawx format."""

"""
import hashlib
import json
import logging
from pathlib import Path
from typing import Any, Dict, List, Union
from typing import Any
from uuid import UUID

import pandas as pd
@@ -18,10 +16,8 @@
has_primary_key,
)

logger = logging.getLogger(__name__)


def get_rawx_record_type(data: Union[List[Any], List[List[Any]]]) -> DataSetType:
def get_rawx_record_type(data: list[Any] | list[list[Any]]) -> DataSetType:
return DataSetType.DATA_SET if isinstance(data[0], list) else DataSetType.PARAMETER_SET
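
For orientation (a usage sketch, assuming the import path matches this file): rawx fields hold either a single row of values (a ParameterSet) or a list of rows (a DataSet), and the first element tells them apart:

    from rawxio.rawx import get_rawx_record_type

    get_rawx_record_type(["Case", 33, 0])             # DataSetType.PARAMETER_SET
    get_rawx_record_type([[1, "Bus1"], [2, "Bus2"]])  # DataSetType.DATA_SET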


@@ -31,10 +27,10 @@ def uuid(value: Any) -> str:
return str(UUID(bytes=h.digest()))


def read_rawx(fname: Path) -> Dict[str, pd.DataFrame]:
"""
Read data from rawx format. The index of the returned dataframe is constructed in the
following priotized order
def read_rawx(fname: Path) -> dict[str, pd.DataFrame]:
"""Read data from rawx format.
The index of the returned dataframe is constructed in the following prioritized order:
1. mrid (Master Resource Identifier) is used if present in a column
2. uid (Unique Identifier) is used if present in a column
@@ -45,9 +41,10 @@ def read_rawx(fname: Path) -> Dict[str, pd.DataFrame]:
If a dataset has defined primary keys in the RAWX
documentation, an index constructed by hashing the tuple of primary keys is added.
The name of the index is uid (Unique Identifier). If a uid already exists,
it will be used as index
it will be used as index.
"""
with open(fname, "r") as infile:
with open(fname) as infile:
data = json.load(infile)

result = {}
@@ -73,10 +70,10 @@
return result
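
A hedged usage sketch of that priority order (the file name is hypothetical):

    from pathlib import Path

    from rawxio.rawx import read_rawx

    dfs = read_rawx(Path("case.rawx"))  # hypothetical input file
    # Each value is a DataFrame; its index is named "mrid" if that column exists,
    # otherwise "uid", otherwise a uid hashed from the record's primary-key tuple.
    buses = dfs["bus"]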


def write_rawx(fname: Path, data: Dict[str, pd.DataFrame]):
"""
Write dataframes to rawx format. If a dataframe as a named index, it will be included
as a regular column
def write_rawx(fname: Path, data: dict[str, pd.DataFrame]) -> None:
"""Write dataframes to rawx format.
If a dataframe has a named index, it will be included as a regular column.
"""
rawx_data = {}
for k, df in data.items():
@@ -94,7 +91,7 @@ def write_rawx(fname: Path, data: Dict[str, pd.DataFrame]):
json.dump({"network": rawx_data}, out, indent=4)


def raise_on_missing_required_field(name: str, df: pd.DataFrame):
def raise_on_missing_required_field(name: str, df: pd.DataFrame) -> None:
req_fields = set(get_required_fields(name))
if not req_fields.issubset(set(df.columns)):
raise ValueError(
27 changes: 11 additions & 16 deletions rawxio/utils.py
@@ -1,27 +1,24 @@
"""
This module contains a set of utility functions included for convenience
"""
from typing import Dict, Optional, Set
"""This module contains a set of utility functions included for convenience."""

import pandas as pd


def shift_array_indices(
df: pd.DataFrame, amount: int, cols: Optional[Set[str]] = None
df: pd.DataFrame, amount: int, cols: set[str] | None = None
) -> pd.DataFrame:
cols = cols or {"ibus", "jbus", "kbus"}
cols = list(cols.intersection(df.columns))
columns = list(cols.intersection(df.columns))
if not columns:
return df
df[cols] = df[cols].applymap(lambda x: x + amount)
df[columns] = df[columns].applymap(lambda x: x + amount)
return df


def one2zero_indexed(
data: Dict[str, pd.DataFrame], cols: Optional[Set[str]] = None
) -> Dict[str, pd.DataFrame]:
"""
Shift array indices down one such that they start at zero.
data: dict[str, pd.DataFrame], cols: set[str] | None = None
) -> dict[str, pd.DataFrame]:
"""Shift array indices down one such that they start at zero.
The format of the dict is the same as returned by read_rawx (or passed to write_rawx).
"""
for k, df in data.items():
@@ -30,11 +27,9 @@ def one2zero_indexed(


def zero2one_indexed(
data: Dict[str, pd.DataFrame], cols: Optional[Set[str]] = None
) -> Dict[str, pd.DataFrame]:
"""
Shift array indices up one such that they start at one
"""
data: dict[str, pd.DataFrame], cols: set[str] | None = None
) -> dict[str, pd.DataFrame]:
"""Shift array indices up one such that they start at one."""
for k, df in data.items():
data[k] = shift_array_indices(df, -1, cols)
return data
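
As a usage sketch (not in the commit), the two helpers are inverses over the bus-index columns, which the round-trip test below relies on:

    import pandas as pd

    from rawxio.utils import one2zero_indexed, zero2one_indexed

    data = {"acline": pd.DataFrame({"ibus": [1, 2], "jbus": [2, 3]})}
    data = one2zero_indexed(data)  # ibus/jbus now start at 0
    data = zero2one_indexed(data)  # and back to the 1-based rawx convention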
10 changes: 5 additions & 5 deletions tests/test_io.py
@@ -20,25 +20,25 @@ def test_read():
assert all(result[k].index.name == "uid" for k in with_uid)


def test_read_write_round_trip(tmpdir):
def test_read_write_round_trip(tmpdir: Path):
result = read_rawx(minimal_rawx())
outfname = tmpdir / "out.json"

write_rawx(outfname, result)
result2 = read_rawx(outfname)
assert result.keys() == result2.keys()
assert all(result[k].equals(result2[k]) for k in result.keys())
assert all(result[k].equals(result2[k]) for k in result)


def test_array_index_shifting():
result = read_rawx(minimal_rawx())
dfs = deepcopy(result)

# Make sure all dfs were deep-copied
assert all(dfs[k] is not result[k] for k in result.keys())
assert all(dfs[k] is not result[k] for k in result)
dfs = one2zero_indexed(dfs)
dfs = zero2one_indexed(dfs)
assert all(dfs[k].equals(result[k]) for k in result.keys())
assert all(dfs[k].equals(result[k]) for k in result)


def test_raise_on_missing():
@@ -53,7 +53,7 @@


@pytest.mark.parametrize("index_name", ["mrid", "uid"])
def test_read_pick_up_index(tmpdir, index_name):
def test_read_pick_up_index(tmpdir: Path, index_name: str):
df = pd.DataFrame({"ibus": 1}, index=pd.Index(["0xab"], name=index_name))
out = tmpdir / "rawx.json"
write_rawx(out, {"bus": df})
