STY: Use ruff to format docstrings #56863

Merged Feb 8, 2024 (24 commits; the diff below shows changes from 22 commits)

Commits:
d7fbb3e  Bump ruff (mroeschke, Jan 13, 2024)
dacdd0b  Format docstrings using ruff (mroeschke, Jan 13, 2024)
77dc845  Fix double {{ examples (mroeschke, Jan 13, 2024)
255b16c  Missed shift (mroeschke, Jan 14, 2024)
15a52fd  Add comma (mroeschke, Jan 14, 2024)
d5baf82  Remove import (mroeschke, Jan 14, 2024)
d004398  Remove old flake8 validation (mroeschke, Jan 14, 2024)
6386676  Remove unit test (mroeschke, Jan 14, 2024)
78aa82d  Merge remote-tracking branch 'upstream/main' into sty/ruff (mroeschke, Jan 14, 2024)
e97155c  Revert "Remove unit test" (mroeschke, Jan 14, 2024)
a902f8e  Revert "Remove old flake8 validation" (mroeschke, Jan 14, 2024)
359736e  Merge remote-tracking branch 'upstream/main' into sty/ruff (mroeschke, Jan 15, 2024)
2ea6780  Merge remote-tracking branch 'upstream/main' into sty/ruff (mroeschke, Jan 23, 2024)
b416125  Merge remote-tracking branch 'upstream/main' into sty/ruff (mroeschke, Jan 24, 2024)
5dcbce8  docstring formatting (mroeschke, Jan 24, 2024)
df03285  Ignore formatting in scripts (mroeschke, Jan 24, 2024)
f33d435  Ignore flake8 conflicts with ruff (mroeschke, Jan 25, 2024)
23e5d93  Merge remote-tracking branch 'upstream/main' into sty/ruff (mroeschke, Jan 25, 2024)
b40d961  Merge remote-tracking branch 'upstream/main' into sty/ruff (mroeschke, Jan 30, 2024)
65ac7cc  Merge remote-tracking branch 'upstream/main' into sty/ruff (mroeschke, Jan 31, 2024)
3434878  Merge remote-tracking branch 'upstream/main' into sty/ruff (mroeschke, Jan 31, 2024)
65c4c33  Merge remote-tracking branch 'upstream/main' into sty/ruff (mroeschke, Feb 6, 2024)
6fbe5ca  Merge remote-tracking branch 'upstream/main' into sty/ruff (mroeschke, Feb 6, 2024)
b09a550  Merge remote-tracking branch 'upstream/main' into sty/ruff (mroeschke, Feb 7, 2024)
.pre-commit-config.yaml (5 changes: 2 additions & 3 deletions)

@@ -19,7 +19,7 @@ ci:
     skip: [pylint, pyright, mypy]
 repos:
 -   repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.1.6
+    rev: v0.1.13
     hooks:
     -   id: ruff
         args: [--exit-non-zero-on-fix]
@@ -31,8 +31,7 @@ repos:
         exclude: ^pandas/tests
         args: [--select, "ANN001,ANN2", --fix-only, --exit-non-zero-on-fix]
     -   id: ruff-format
-        # TODO: "." not needed in ruff 0.1.8
-        args: ["."]
+        exclude: ^scripts
 -   repo: https://github.com/jendrikseipp/vulture
     rev: 'v2.10'
     hooks:
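For context on why the hunks below touch only docstrings: ruff's formatter rewrites the code inside docstring Examples sections when docstring code formatting is enabled (the `docstring-code-format` option under `[tool.ruff.format]` in pyproject.toml; that file's hunk is not shown in this view, so treating it as part of this PR is an assumption). This is also presumably what removed the bare `...` continuation prompts in the `pandas/_testing` hunks below. A minimal sketch of the effect on a hypothetical function, showing the quote and wrapping churn seen throughout this diff:

```python
# A sketch (not from this PR) of what enabling ruff's docstring code
# formatting does. `scale` is a hypothetical function; only the doctest
# *code* line is rewritten -- expected-output lines are left alone.
#
# Before `ruff format`:
#     >>> scale({'a': 1, 'b': 2}, factor=10)
#
# After `ruff format` (double quotes, normalized spacing):
def scale(values: dict[str, int], factor: int) -> dict[str, int]:
    """Multiply each value by ``factor``.

    Examples
    --------
    >>> scale({"a": 1, "b": 2}, factor=10)
    {'a': 10, 'b': 20}
    """
    return {key: value * factor for key, value in values.items()}
```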
doc/make.py (4 changes: 2 additions & 2 deletions)

@@ -113,7 +113,7 @@ def _run_os(*args) -> None:

 Examples
 --------
->>> DocBuilder()._run_os('python', '--version')
+>>> DocBuilder()._run_os("python", "--version")
 """
 subprocess.check_call(args, stdout=sys.stdout, stderr=sys.stderr)

@@ -129,7 +129,7 @@ def _sphinx_build(self, kind: str):

 Examples
 --------
->>> DocBuilder(num_jobs=4)._sphinx_build('html')
+>>> DocBuilder(num_jobs=4)._sphinx_build("html")
 """
 if kind not in ("html", "latex", "linkcheck"):
     raise ValueError(f"kind must be html, latex or linkcheck, not {kind}")
pandas/_config/config.py (2 changes: 1 addition & 1 deletion)

@@ -476,7 +476,7 @@ class option_context(ContextDecorator):
 Examples
 --------
 >>> from pandas import option_context
->>> with option_context('display.max_rows', 10, 'display.max_columns', 5):
+>>> with option_context("display.max_rows", 10, "display.max_columns", 5):
 ...     pass
 """
pandas/_testing/_warnings.py (2 changes: 0 additions & 2 deletions)

@@ -76,10 +76,8 @@ class for all warnings. To raise multiple types of exceptions,
 >>> import warnings
 >>> with assert_produces_warning():
 ...     warnings.warn(UserWarning())
-...
 >>> with assert_produces_warning(False):
 ...     warnings.warn(RuntimeWarning())
-...
 Traceback (most recent call last):
 ...
 AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
pandas/_testing/asserters.py (4 changes: 2 additions & 2 deletions)

@@ -1178,8 +1178,8 @@ def assert_frame_equal(
 but with columns of differing dtypes.

 >>> from pandas.testing import assert_frame_equal
->>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
->>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
+>>> df1 = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
+>>> df2 = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})

 df1 equals itself.
pandas/_testing/contexts.py (3 changes: 1 addition & 2 deletions)

@@ -70,9 +70,8 @@ def set_timezone(tz: str) -> Generator[None, None, None]:
 >>> tzlocal().tzname(datetime(2021, 1, 1))  # doctest: +SKIP
 'IST'

->>> with set_timezone('US/Eastern'):
+>>> with set_timezone("US/Eastern"):
 ...     tzlocal().tzname(datetime(2021, 1, 1))
-...
 'EST'
 """
 import time
pandas/core/accessor.py (4 changes: 1 addition & 3 deletions)

@@ -265,7 +265,7 @@ def __init__(self, pandas_object):  # noqa: E999
 For consistency with pandas methods, you should raise an ``AttributeError``
 if the data passed to your accessor has an incorrect dtype.

->>> pd.Series(['a', 'b']).dt
+>>> pd.Series(["a", "b"]).dt
 Traceback (most recent call last):
 ...
 AttributeError: Can only use .dt accessor with datetimelike values
@@ -274,8 +274,6 @@ def __init__(self, pandas_object):  # noqa: E999
 --------
 In your library code::

-    import pandas as pd
-
     @pd.api.extensions.register_dataframe_accessor("geo")
     class GeoAccessor:
         def __init__(self, pandas_obj):
pandas/core/algorithms.py (5 changes: 3 additions & 2 deletions)

@@ -1215,8 +1215,9 @@ def take(
 >>> pd.api.extensions.take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True)
 array([10., 10., nan])

->>> pd.api.extensions.take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True,
-...                        fill_value=-10)
+>>> pd.api.extensions.take(
+...     np.array([10, 20, 30]), [0, 0, -1], allow_fill=True, fill_value=-10
+... )
 array([ 10, 10, -10])
 """
 if not isinstance(arr, (np.ndarray, ABCExtensionArray, ABCIndex, ABCSeries)):
pandas/core/apply.py (10 changes: 5 additions & 5 deletions)

@@ -1794,14 +1794,14 @@ def normalize_keyword_aggregation(


 def _make_unique_kwarg_list(
-    seq: Sequence[tuple[Any, Any]]
+    seq: Sequence[tuple[Any, Any]],
[Review thread on the line above; a sketch of the formatter rule involved follows this hunk.]

Member: this isn't a docstring. did ruff change this?

Member (author): Yeah, I believe it's from ruff's upgrade to v0.1.13 in this PR.

Member: great.
 ) -> Sequence[tuple[Any, Any]]:
 """
 Uniquify aggfunc name of the pairs in the order list

 Examples:
 --------
->>> kwarg_list = [('a', '<lambda>'), ('a', '<lambda>'), ('b', '<lambda>')]
+>>> kwarg_list = [("a", "<lambda>"), ("a", "<lambda>"), ("b", "<lambda>")]
 >>> _make_unique_kwarg_list(kwarg_list)
 [('a', '<lambda>_0'), ('a', '<lambda>_1'), ('b', '<lambda>')]
 """
@@ -1833,7 +1833,7 @@ def relabel_result(
 >>> from pandas.core.apply import relabel_result
 >>> result = pd.DataFrame(
 ...     {"A": [np.nan, 2, np.nan], "C": [6, np.nan, np.nan], "B": [np.nan, 4, 2.5]},
-...     index=["max", "mean", "min"]
+...     index=["max", "mean", "min"],
 ... )
 >>> funcs = {"A": ["max"], "C": ["max"], "B": ["mean", "min"]}
 >>> columns = ("foo", "aab", "bar", "dat")

@@ -1972,7 +1972,7 @@ def maybe_mangle_lambdas(agg_spec: Any) -> Any:

 Examples
 --------
->>> maybe_mangle_lambdas('sum')
+>>> maybe_mangle_lambdas("sum")
 'sum'
 >>> maybe_mangle_lambdas([lambda: 1, lambda: 2])  # doctest: +SKIP
 [<function __main__.<lambda_0>,

@@ -2017,7 +2017,7 @@ def validate_func_kwargs(

 Examples
 --------
->>> validate_func_kwargs({'one': 'min', 'two': 'max'})
+>>> validate_func_kwargs({"one": "min", "two": "max"})
 (['one', 'two'], ['min', 'max'])
 """
 tuple_given_message = "func is expected but received {} in **kwargs."
pandas/core/arraylike.py (28 changes: 15 additions & 13 deletions)

@@ -119,40 +119,41 @@ def __add__(self, other):

 Examples
 --------
->>> df = pd.DataFrame({'height': [1.5, 2.6], 'weight': [500, 800]},
-...                   index=['elk', 'moose'])
+>>> df = pd.DataFrame(
+...     {"height": [1.5, 2.6], "weight": [500, 800]}, index=["elk", "moose"]
+... )
 >>> df
 height weight
 elk 1.5 500
 moose 2.6 800

 Adding a scalar affects all rows and columns.

->>> df[['height', 'weight']] + 1.5
+>>> df[["height", "weight"]] + 1.5
 height weight
 elk 3.0 501.5
 moose 4.1 801.5

 Each element of a list is added to a column of the DataFrame, in order.

->>> df[['height', 'weight']] + [0.5, 1.5]
+>>> df[["height", "weight"]] + [0.5, 1.5]
 height weight
 elk 2.0 501.5
 moose 3.1 801.5

 Keys of a dictionary are aligned to the DataFrame, based on column names;
 each value in the dictionary is added to the corresponding column.

->>> df[['height', 'weight']] + {'height': 0.5, 'weight': 1.5}
+>>> df[["height", "weight"]] + {"height": 0.5, "weight": 1.5}
 height weight
 elk 2.0 501.5
 moose 3.1 801.5

 When `other` is a :class:`Series`, the index of `other` is aligned with the
 columns of the DataFrame.

->>> s1 = pd.Series([0.5, 1.5], index=['weight', 'height'])
->>> df[['height', 'weight']] + s1
+>>> s1 = pd.Series([0.5, 1.5], index=["weight", "height"])
+>>> df[["height", "weight"]] + s1
 height weight
 elk 3.0 500.5
 moose 4.1 800.5

@@ -161,23 +162,24 @@ def __add__(self, other):
 the :class:`Series` will not be reoriented. If index-wise alignment is desired,
 :meth:`DataFrame.add` should be used with `axis='index'`.

->>> s2 = pd.Series([0.5, 1.5], index=['elk', 'moose'])
->>> df[['height', 'weight']] + s2
+>>> s2 = pd.Series([0.5, 1.5], index=["elk", "moose"])
+>>> df[["height", "weight"]] + s2
 elk height moose weight
 elk NaN NaN NaN NaN
 moose NaN NaN NaN NaN

->>> df[['height', 'weight']].add(s2, axis='index')
+>>> df[["height", "weight"]].add(s2, axis="index")
 height weight
 elk 2.0 500.5
 moose 4.1 801.5

 When `other` is a :class:`DataFrame`, both columns names and the
 index are aligned.

->>> other = pd.DataFrame({'height': [0.2, 0.4, 0.6]},
-...                      index=['elk', 'moose', 'deer'])
->>> df[['height', 'weight']] + other
+>>> other = pd.DataFrame(
+...     {"height": [0.2, 0.4, 0.6]}, index=["elk", "moose", "deer"]
+... )
+>>> df[["height", "weight"]] + other
 height weight
 deer NaN NaN
 elk 1.7 NaN
pandas/core/arrays/arrow/accessors.py (46 changes: 21 additions & 25 deletions)

@@ -100,9 +100,7 @@ def len(self) -> Series:
 ...         [1, 2, 3],
 ...         [3],
 ...     ],
-...     dtype=pd.ArrowDtype(pa.list_(
-...         pa.int64()
-...     ))
+...     dtype=pd.ArrowDtype(pa.list_(pa.int64())),
 ... )
 >>> s.list.len()
 0 3

@@ -136,9 +134,7 @@ def __getitem__(self, key: int | slice) -> Series:
 ...         [1, 2, 3],
 ...         [3],
 ...     ],
-...     dtype=pd.ArrowDtype(pa.list_(
-...         pa.int64()
-...     ))
+...     dtype=pd.ArrowDtype(pa.list_(pa.int64())),
 ... )
 >>> s.list[0]
 0 1

@@ -195,9 +191,7 @@ def flatten(self) -> Series:
 ...         [1, 2, 3],
 ...         [3],
 ...     ],
-...     dtype=pd.ArrowDtype(pa.list_(
-...         pa.int64()
-...     ))
+...     dtype=pd.ArrowDtype(pa.list_(pa.int64())),
 ... )
 >>> s.list.flatten()
 0 1

@@ -253,9 +247,9 @@ def dtypes(self) -> Series:
 ...         {"version": 2, "project": "pandas"},
 ...         {"version": 1, "project": "numpy"},
 ...     ],
-...     dtype=pd.ArrowDtype(pa.struct(
-...         [("version", pa.int64()), ("project", pa.string())]
-...     ))
+...     dtype=pd.ArrowDtype(
+...         pa.struct([("version", pa.int64()), ("project", pa.string())])
+...     ),
 ... )
 >>> s.struct.dtypes
 version int64[pyarrow]

@@ -324,9 +318,9 @@ def field(
 ...         {"version": 2, "project": "pandas"},
 ...         {"version": 1, "project": "numpy"},
 ...     ],
-...     dtype=pd.ArrowDtype(pa.struct(
-...         [("version", pa.int64()), ("project", pa.string())]
-...     ))
+...     dtype=pd.ArrowDtype(
+...         pa.struct([("version", pa.int64()), ("project", pa.string())])
+...     ),
 ... )

 Extract by field name.

@@ -357,19 +351,21 @@ def field(
 For nested struct types, you can pass a list of values to index
 multiple levels:

->>> version_type = pa.struct([
-...     ("major", pa.int64()),
-...     ("minor", pa.int64()),
-... ])
+>>> version_type = pa.struct(
+...     [
+...         ("major", pa.int64()),
+...         ("minor", pa.int64()),
+...     ]
+... )
 >>> s = pd.Series(
 ...     [
 ...         {"version": {"major": 1, "minor": 5}, "project": "pandas"},
 ...         {"version": {"major": 2, "minor": 1}, "project": "pandas"},
 ...         {"version": {"major": 1, "minor": 26}, "project": "numpy"},
 ...     ],
-...     dtype=pd.ArrowDtype(pa.struct(
-...         [("version", version_type), ("project", pa.string())]
-...     ))
+...     dtype=pd.ArrowDtype(
+...         pa.struct([("version", version_type), ("project", pa.string())])
+...     ),
 ... )
 >>> s.struct.field(["version", "minor"])
 0 5

@@ -454,9 +450,9 @@ def explode(self) -> DataFrame:
 ...         {"version": 2, "project": "pandas"},
 ...         {"version": 1, "project": "numpy"},
 ...     ],
-...     dtype=pd.ArrowDtype(pa.struct(
-...         [("version", pa.int64()), ("project", pa.string())]
-...     ))
+...     dtype=pd.ArrowDtype(
+...         pa.struct([("version", pa.int64()), ("project", pa.string())])
+...     ),
 ... )

 >>> s.struct.explode()