From e5b72ca3546c1e42ce9d5550a1e339ac87d3d52c Mon Sep 17 00:00:00 2001
From: Alex Parsons
Date: Wed, 21 Aug 2024 19:01:26 +0000
Subject: [PATCH] Switch back to pandas-stubs in development

---
 poetry.lock | 90 +-
 pyproject.toml | 2 +-
 typings/pandas/__init__.pyi | 116 -
 typings/pandas/_libs/__init__.pyi | 10 -
 typings/pandas/_libs/interval.pyi | 175 -
 typings/pandas/_libs/json.pyi | 4 -
 typings/pandas/_libs/missing.pyi | 48 -
 typings/pandas/_libs/properties.pyi | 13 -
 typings/pandas/_libs/tslibs/__init__.pyi | 24 -
 typings/pandas/_libs/tslibs/nattype.pyi | 125 -
 typings/pandas/_libs/tslibs/np_datetime.pyi | 1 -
 typings/pandas/_libs/tslibs/offsets.pyi | 255 --
 typings/pandas/_libs/tslibs/period.pyi | 89 -
 typings/pandas/_libs/tslibs/timedeltas.pyi | 148 --
 typings/pandas/_libs/tslibs/timestamps.pyi | 221 --
 typings/pandas/_testing.pyi | 205 --
 typings/pandas/_typing.pyi | 159 --
 typings/pandas/compat/pickle_compat.pyi | 8 -
 typings/pandas/core/__init__.pyi | 0
 typings/pandas/core/accessor.pyi | 18 -
 typings/pandas/core/algorithms.pyi | 75 -
 typings/pandas/core/api.pyi | 67 -
 typings/pandas/core/arraylike.pyi | 42 -
 typings/pandas/core/arrays/__init__.pyi | 15 -
 typings/pandas/core/arrays/_arrow_utils.pyi | 0
 typings/pandas/core/arrays/_ranges.pyi | 0
 typings/pandas/core/arrays/base.pyi | 47 -
 typings/pandas/core/arrays/boolean.pyi | 32 -
 typings/pandas/core/arrays/categorical.pyi | 201 --
 typings/pandas/core/arrays/datetimelike.pyi | 84 -
 typings/pandas/core/arrays/datetimes.pyi | 84 -
 typings/pandas/core/arrays/integer.pyi | 37 -
 typings/pandas/core/arrays/interval.pyi | 69 -
 typings/pandas/core/arrays/masked.pyi | 22 -
 typings/pandas/core/arrays/numpy_.pyi | 116 -
 typings/pandas/core/arrays/period.pyi | 47 -
 .../pandas/core/arrays/sparse/__init__.pyi | 10 -
 .../pandas/core/arrays/sparse/accessor.pyi | 27 -
 typings/pandas/core/arrays/sparse/array.pyi | 106 -
 typings/pandas/core/arrays/sparse/dtype.pyi | 44 -
 .../core/arrays/sparse/scipy_sparse.pyi | 2 -
 typings/pandas/core/arrays/string_.pyi | 20 -
 typings/pandas/core/arrays/timedeltas.pyi | 62 -
 typings/pandas/core/base.pyi | 96 -
 typings/pandas/core/computation/align.pyi | 2 -
 typings/pandas/core/computation/common.pyi | 3 -
 typings/pandas/core/computation/engines.pyi | 19 -
 .../pandas/core/computation/expressions.pyi | 6 -
 typings/pandas/core/computation/parsing.pyi | 12 -
 typings/pandas/core/config_init.pyi | 62 -
 typings/pandas/core/dtypes/__init__.pyi | 0
 typings/pandas/core/dtypes/api.pyi | 43 -
 typings/pandas/core/dtypes/base.pyi | 23 -
 typings/pandas/core/dtypes/cast.pyi | 42 -
 typings/pandas/core/dtypes/common.pyi | 75 -
 typings/pandas/core/dtypes/concat.pyi | 7 -
 typings/pandas/core/dtypes/dtypes.pyi | 130 -
 typings/pandas/core/dtypes/generic.pyi | 31 -
 typings/pandas/core/dtypes/inference.pyi | 19 -
 typings/pandas/core/dtypes/missing.pyi | 39 -
 typings/pandas/core/frame.pyi | 2253 -----------------
 typings/pandas/core/generic.pyi | 513 ----
 typings/pandas/core/groupby/__init__.pyi | 2 -
 typings/pandas/core/groupby/categorical.pyi | 6 -
 typings/pandas/core/groupby/generic.pyi | 295 ---
 typings/pandas/core/groupby/groupby.pyi | 138 -
 typings/pandas/core/groupby/ops.pyi | 101 -
 typings/pandas/core/indexes/__init__.pyi | 0
 typings/pandas/core/indexes/accessors.pyi | 30 -
 typings/pandas/core/indexes/base.pyi | 221 --
 typings/pandas/core/indexes/datetimelike.pyi | 36 -
 typings/pandas/core/indexes/datetimes.pyi | 76 -
 typings/pandas/core/indexes/extension.pyi | 18 -
 typings/pandas/core/indexes/frozen.pyi | 25 -
 typings/pandas/core/indexes/multi.pyi | 127 -
 typings/pandas/core/indexes/numeric.pyi | 36 -
 typings/pandas/core/indexes/timedeltas.pyi | 44 -
 typings/pandas/core/indexing.pyi | 50 -
 typings/pandas/core/internals/__init__.pyi | 17 -
 typings/pandas/core/internals/blocks.pyi | 261 --
 typings/pandas/core/internals/concat.pyi | 15 -
 .../pandas/core/internals/construction.pyi | 12 -
 typings/pandas/core/internals/managers.pyi | 133 -
 typings/pandas/core/ops/__init__.pyi | 15 -
 typings/pandas/core/ops/array_ops.pyi | 19 -
 typings/pandas/core/ops/dispatch.pyi | 10 -
 typings/pandas/core/ops/docstrings.pyi | 1 -
 typings/pandas/core/ops/invalid.pyi | 2 -
 typings/pandas/core/ops/mask_ops.pyi | 23 -
 typings/pandas/core/ops/methods.pyi | 2 -
 typings/pandas/core/ops/missing.pyi | 3 -
 typings/pandas/core/ops/roperator.pyi | 12 -
 typings/pandas/core/resample.pyi | 74 -
 typings/pandas/core/reshape/__init__.pyi | 0
 typings/pandas/core/reshape/api.pyi | 18 -
 typings/pandas/core/reshape/concat.pyi | 56 -
 typings/pandas/core/reshape/melt.pyi | 17 -
 typings/pandas/core/reshape/merge.pyi | 130 -
 typings/pandas/core/reshape/pivot.pyi | 36 -
 typings/pandas/core/reshape/util.pyi | 1 -
 typings/pandas/core/series.pyi | 1763 -------------
 typings/pandas/core/sorting.pyi | 30 -
 typings/pandas/core/strings.pyi | 101 -
 typings/pandas/core/tools/__init__.pyi | 2 -
 typings/pandas/core/tools/datetimes.pyi | 91 -
 typings/pandas/core/tools/numeric.pyi | 1 -
 typings/pandas/core/tools/timedeltas.pyi | 29 -
 typings/pandas/core/window/__init__.pyi | 7 -
 typings/pandas/core/window/ewm.pyi | 35 -
 typings/pandas/core/window/expanding.pyi | 21 -
 typings/pandas/core/window/numba_.pyi | 16 -
 typings/pandas/core/window/rolling.pyi | 115 -
 typings/pandas/io/__init__.pyi | 0
 typings/pandas/io/api.pyi | 27 -
 typings/pandas/io/excel/_base.pyi | 240 --
 typings/pandas/io/gcs.pyi | 7 -
 typings/pandas/io/json/__init__.pyi | 8 -
 typings/pandas/io/json/_json.pyi | 206 --
 typings/pandas/io/json/_normalize.pyi | 22 -
 typings/pandas/io/json/_table_schema.pyi | 12 -
 typings/pandas/io/parsers.pyi | 616 -----
 typings/pandas/io/s3.pyi | 14 -
 typings/pandas/io/sas/sas_constants.pyi | 108 -
 typings/pandas/io/stata.pyi | 159 --
 typings/pandas/plotting/__init__.pyi | 21 -
 typings/pandas/plotting/_core.pyi | 95 -
 .../pandas/plotting/_matplotlib/__init__.pyi | 19 -
 .../pandas/plotting/_matplotlib/boxplot.pyi | 58 -
 .../pandas/plotting/_matplotlib/compat.pyi | 0
 .../pandas/plotting/_matplotlib/converter.pyi | 103 -
 typings/pandas/plotting/_matplotlib/core.pyi | 117 -
 typings/pandas/plotting/_matplotlib/hist.pyi | 45 -
 typings/pandas/plotting/_matplotlib/misc.pyi | 34 -
 typings/pandas/plotting/_matplotlib/style.pyi | 0
 .../plotting/_matplotlib/timeseries.pyi | 1 -
 typings/pandas/plotting/_matplotlib/tools.pyi | 2 -
 typings/pandas/plotting/_misc.pyi | 81 -
 typings/pandas/py.typed | 1 -
 typings/pandas/tseries/__init__.pyi | 0
 typings/pandas/tseries/api.pyi | 1 -
 typings/pandas/tseries/frequencies.pyi | 7 -
 typings/pandas/tseries/offsets.pyi | 44 -
 142 files changed, 59 insertions(+), 12557 deletions(-)
 delete mode 100644 typings/pandas/__init__.pyi
 delete mode 100644 typings/pandas/_libs/__init__.pyi
 delete mode 100644 typings/pandas/_libs/interval.pyi
 delete mode 100644 typings/pandas/_libs/json.pyi
 delete mode 100644 typings/pandas/_libs/missing.pyi
 delete mode 100644 typings/pandas/_libs/properties.pyi
 delete mode 100644 typings/pandas/_libs/tslibs/__init__.pyi
 delete mode 100644 typings/pandas/_libs/tslibs/nattype.pyi
 delete mode 100644
typings/pandas/_libs/tslibs/np_datetime.pyi delete mode 100644 typings/pandas/_libs/tslibs/offsets.pyi delete mode 100644 typings/pandas/_libs/tslibs/period.pyi delete mode 100644 typings/pandas/_libs/tslibs/timedeltas.pyi delete mode 100644 typings/pandas/_libs/tslibs/timestamps.pyi delete mode 100644 typings/pandas/_testing.pyi delete mode 100644 typings/pandas/_typing.pyi delete mode 100644 typings/pandas/compat/pickle_compat.pyi delete mode 100644 typings/pandas/core/__init__.pyi delete mode 100644 typings/pandas/core/accessor.pyi delete mode 100644 typings/pandas/core/algorithms.pyi delete mode 100644 typings/pandas/core/api.pyi delete mode 100644 typings/pandas/core/arraylike.pyi delete mode 100644 typings/pandas/core/arrays/__init__.pyi delete mode 100644 typings/pandas/core/arrays/_arrow_utils.pyi delete mode 100644 typings/pandas/core/arrays/_ranges.pyi delete mode 100644 typings/pandas/core/arrays/base.pyi delete mode 100644 typings/pandas/core/arrays/boolean.pyi delete mode 100644 typings/pandas/core/arrays/categorical.pyi delete mode 100644 typings/pandas/core/arrays/datetimelike.pyi delete mode 100644 typings/pandas/core/arrays/datetimes.pyi delete mode 100644 typings/pandas/core/arrays/integer.pyi delete mode 100644 typings/pandas/core/arrays/interval.pyi delete mode 100644 typings/pandas/core/arrays/masked.pyi delete mode 100644 typings/pandas/core/arrays/numpy_.pyi delete mode 100644 typings/pandas/core/arrays/period.pyi delete mode 100644 typings/pandas/core/arrays/sparse/__init__.pyi delete mode 100644 typings/pandas/core/arrays/sparse/accessor.pyi delete mode 100644 typings/pandas/core/arrays/sparse/array.pyi delete mode 100644 typings/pandas/core/arrays/sparse/dtype.pyi delete mode 100644 typings/pandas/core/arrays/sparse/scipy_sparse.pyi delete mode 100644 typings/pandas/core/arrays/string_.pyi delete mode 100644 typings/pandas/core/arrays/timedeltas.pyi delete mode 100644 typings/pandas/core/base.pyi delete mode 100644 typings/pandas/core/computation/align.pyi delete mode 100644 typings/pandas/core/computation/common.pyi delete mode 100644 typings/pandas/core/computation/engines.pyi delete mode 100644 typings/pandas/core/computation/expressions.pyi delete mode 100644 typings/pandas/core/computation/parsing.pyi delete mode 100644 typings/pandas/core/config_init.pyi delete mode 100644 typings/pandas/core/dtypes/__init__.pyi delete mode 100644 typings/pandas/core/dtypes/api.pyi delete mode 100644 typings/pandas/core/dtypes/base.pyi delete mode 100644 typings/pandas/core/dtypes/cast.pyi delete mode 100644 typings/pandas/core/dtypes/common.pyi delete mode 100644 typings/pandas/core/dtypes/concat.pyi delete mode 100644 typings/pandas/core/dtypes/dtypes.pyi delete mode 100644 typings/pandas/core/dtypes/generic.pyi delete mode 100644 typings/pandas/core/dtypes/inference.pyi delete mode 100644 typings/pandas/core/dtypes/missing.pyi delete mode 100644 typings/pandas/core/frame.pyi delete mode 100644 typings/pandas/core/generic.pyi delete mode 100644 typings/pandas/core/groupby/__init__.pyi delete mode 100644 typings/pandas/core/groupby/categorical.pyi delete mode 100644 typings/pandas/core/groupby/generic.pyi delete mode 100644 typings/pandas/core/groupby/groupby.pyi delete mode 100644 typings/pandas/core/groupby/ops.pyi delete mode 100644 typings/pandas/core/indexes/__init__.pyi delete mode 100644 typings/pandas/core/indexes/accessors.pyi delete mode 100644 typings/pandas/core/indexes/base.pyi delete mode 100644 typings/pandas/core/indexes/datetimelike.pyi delete mode 100644 
typings/pandas/core/indexes/datetimes.pyi delete mode 100644 typings/pandas/core/indexes/extension.pyi delete mode 100644 typings/pandas/core/indexes/frozen.pyi delete mode 100644 typings/pandas/core/indexes/multi.pyi delete mode 100644 typings/pandas/core/indexes/numeric.pyi delete mode 100644 typings/pandas/core/indexes/timedeltas.pyi delete mode 100644 typings/pandas/core/indexing.pyi delete mode 100644 typings/pandas/core/internals/__init__.pyi delete mode 100644 typings/pandas/core/internals/blocks.pyi delete mode 100644 typings/pandas/core/internals/concat.pyi delete mode 100644 typings/pandas/core/internals/construction.pyi delete mode 100644 typings/pandas/core/internals/managers.pyi delete mode 100644 typings/pandas/core/ops/__init__.pyi delete mode 100644 typings/pandas/core/ops/array_ops.pyi delete mode 100644 typings/pandas/core/ops/dispatch.pyi delete mode 100644 typings/pandas/core/ops/docstrings.pyi delete mode 100644 typings/pandas/core/ops/invalid.pyi delete mode 100644 typings/pandas/core/ops/mask_ops.pyi delete mode 100644 typings/pandas/core/ops/methods.pyi delete mode 100644 typings/pandas/core/ops/missing.pyi delete mode 100644 typings/pandas/core/ops/roperator.pyi delete mode 100644 typings/pandas/core/resample.pyi delete mode 100644 typings/pandas/core/reshape/__init__.pyi delete mode 100644 typings/pandas/core/reshape/api.pyi delete mode 100644 typings/pandas/core/reshape/concat.pyi delete mode 100644 typings/pandas/core/reshape/melt.pyi delete mode 100644 typings/pandas/core/reshape/merge.pyi delete mode 100644 typings/pandas/core/reshape/pivot.pyi delete mode 100644 typings/pandas/core/reshape/util.pyi delete mode 100644 typings/pandas/core/series.pyi delete mode 100644 typings/pandas/core/sorting.pyi delete mode 100644 typings/pandas/core/strings.pyi delete mode 100644 typings/pandas/core/tools/__init__.pyi delete mode 100644 typings/pandas/core/tools/datetimes.pyi delete mode 100644 typings/pandas/core/tools/numeric.pyi delete mode 100644 typings/pandas/core/tools/timedeltas.pyi delete mode 100644 typings/pandas/core/window/__init__.pyi delete mode 100644 typings/pandas/core/window/ewm.pyi delete mode 100644 typings/pandas/core/window/expanding.pyi delete mode 100644 typings/pandas/core/window/numba_.pyi delete mode 100644 typings/pandas/core/window/rolling.pyi delete mode 100644 typings/pandas/io/__init__.pyi delete mode 100644 typings/pandas/io/api.pyi delete mode 100644 typings/pandas/io/excel/_base.pyi delete mode 100644 typings/pandas/io/gcs.pyi delete mode 100644 typings/pandas/io/json/__init__.pyi delete mode 100644 typings/pandas/io/json/_json.pyi delete mode 100644 typings/pandas/io/json/_normalize.pyi delete mode 100644 typings/pandas/io/json/_table_schema.pyi delete mode 100644 typings/pandas/io/parsers.pyi delete mode 100644 typings/pandas/io/s3.pyi delete mode 100644 typings/pandas/io/sas/sas_constants.pyi delete mode 100644 typings/pandas/io/stata.pyi delete mode 100644 typings/pandas/plotting/__init__.pyi delete mode 100644 typings/pandas/plotting/_core.pyi delete mode 100644 typings/pandas/plotting/_matplotlib/__init__.pyi delete mode 100644 typings/pandas/plotting/_matplotlib/boxplot.pyi delete mode 100644 typings/pandas/plotting/_matplotlib/compat.pyi delete mode 100644 typings/pandas/plotting/_matplotlib/converter.pyi delete mode 100644 typings/pandas/plotting/_matplotlib/core.pyi delete mode 100644 typings/pandas/plotting/_matplotlib/hist.pyi delete mode 100644 typings/pandas/plotting/_matplotlib/misc.pyi delete mode 100644 
typings/pandas/plotting/_matplotlib/style.pyi delete mode 100644 typings/pandas/plotting/_matplotlib/timeseries.pyi delete mode 100644 typings/pandas/plotting/_matplotlib/tools.pyi delete mode 100644 typings/pandas/plotting/_misc.pyi delete mode 100644 typings/pandas/py.typed delete mode 100644 typings/pandas/tseries/__init__.pyi delete mode 100644 typings/pandas/tseries/api.pyi delete mode 100644 typings/pandas/tseries/frequencies.pyi delete mode 100644 typings/pandas/tseries/offsets.pyi diff --git a/poetry.lock b/poetry.lock index 63ce785..a8252a2 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2475,39 +2475,39 @@ test = ["pytest", "pytest-console-scripts", "pytest-jupyter", "pytest-tornasync" [[package]] name = "numpy" -version = "1.21.0" -description = "NumPy is the fundamental package for array computing with Python." +version = "1.24.4" +description = "Fundamental package for array computing in Python" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "numpy-1.21.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d5caa946a9f55511e76446e170bdad1d12d6b54e17a2afe7b189112ed4412bb8"}, - {file = "numpy-1.21.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ac4fd578322842dbda8d968e3962e9f22e862b6ec6e3378e7415625915e2da4d"}, - {file = "numpy-1.21.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:598fe100b2948465cf3ed64b1a326424b5e4be2670552066e17dfaa67246011d"}, - {file = "numpy-1.21.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c55407f739f0bfcec67d0df49103f9333edc870061358ac8a8c9e37ea02fcd2"}, - {file = "numpy-1.21.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:75579acbadbf74e3afd1153da6177f846212ea2a0cc77de53523ae02c9256513"}, - {file = "numpy-1.21.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:cc367c86eb87e5b7c9592935620f22d13b090c609f1b27e49600cd033b529f54"}, - {file = "numpy-1.21.0-cp37-cp37m-win32.whl", hash = "sha256:d89b0dc7f005090e32bb4f9bf796e1dcca6b52243caf1803fdd2b748d8561f63"}, - {file = "numpy-1.21.0-cp37-cp37m-win_amd64.whl", hash = "sha256:eda2829af498946c59d8585a9fd74da3f810866e05f8df03a86f70079c7531dd"}, - {file = "numpy-1.21.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:1a784e8ff7ea2a32e393cc53eb0003eca1597c7ca628227e34ce34eb11645a0e"}, - {file = "numpy-1.21.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bba474a87496d96e61461f7306fba2ebba127bed7836212c360f144d1e72ac54"}, - {file = "numpy-1.21.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fd0a359c1c17f00cb37de2969984a74320970e0ceef4808c32e00773b06649d9"}, - {file = "numpy-1.21.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e4d5a86a5257843a18fb1220c5f1c199532bc5d24e849ed4b0289fb59fbd4d8f"}, - {file = "numpy-1.21.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:620732f42259eb2c4642761bd324462a01cdd13dd111740ce3d344992dd8492f"}, - {file = "numpy-1.21.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9205711e5440954f861ceeea8f1b415d7dd15214add2e878b4d1cf2bcb1a914"}, - {file = "numpy-1.21.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ad09f55cc95ed8d80d8ab2052f78cc21cb231764de73e229140d81ff49d8145e"}, - {file = "numpy-1.21.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a1f2fb2da242568af0271455b89aee0f71e4e032086ee2b4c5098945d0e11cf6"}, - {file = "numpy-1.21.0-cp38-cp38-win32.whl", hash = 
"sha256:e58ddb53a7b4959932f5582ac455ff90dcb05fac3f8dcc8079498d43afbbde6c"}, - {file = "numpy-1.21.0-cp38-cp38-win_amd64.whl", hash = "sha256:d2910d0a075caed95de1a605df00ee03b599de5419d0b95d55342e9a33ad1fb3"}, - {file = "numpy-1.21.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a290989cd671cd0605e9c91a70e6df660f73ae87484218e8285c6522d29f6e38"}, - {file = "numpy-1.21.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3537b967b350ad17633b35c2f4b1a1bbd258c018910b518c30b48c8e41272717"}, - {file = "numpy-1.21.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ccc6c650f8700ce1e3a77668bb7c43e45c20ac06ae00d22bdf6760b38958c883"}, - {file = "numpy-1.21.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:709884863def34d72b183d074d8ba5cfe042bc3ff8898f1ffad0209161caaa99"}, - {file = "numpy-1.21.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:bebab3eaf0641bba26039fb0b2c5bf9b99407924b53b1ea86e03c32c64ef5aef"}, - {file = "numpy-1.21.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf680682ad0a3bef56dae200dbcbac2d57294a73e5b0f9864955e7dd7c2c2491"}, - {file = "numpy-1.21.0-cp39-cp39-win32.whl", hash = "sha256:d95d16204cd51ff1a1c8d5f9958ce90ae190be81d348b514f9be39f878b8044a"}, - {file = "numpy-1.21.0-cp39-cp39-win_amd64.whl", hash = "sha256:2ba579dde0563f47021dcd652253103d6fd66165b18011dce1a0609215b2791e"}, - {file = "numpy-1.21.0-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3c40e6b860220ed862e8097b8f81c9af6d7405b723f4a7af24a267b46f90e461"}, - {file = "numpy-1.21.0.zip", hash = "sha256:e80fe25cba41c124d04c662f33f6364909b985f2eb5998aaa5ae4b9587242cce"}, + {file = "numpy-1.24.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0bfb52d2169d58c1cdb8cc1f16989101639b34c7d3ce60ed70b19c63eba0b64"}, + {file = "numpy-1.24.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ed094d4f0c177b1b8e7aa9cba7d6ceed51c0e569a5318ac0ca9a090680a6a1b1"}, + {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79fc682a374c4a8ed08b331bef9c5f582585d1048fa6d80bc6c35bc384eee9b4"}, + {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ffe43c74893dbf38c2b0a1f5428760a1a9c98285553c89e12d70a96a7f3a4d6"}, + {file = "numpy-1.24.4-cp310-cp310-win32.whl", hash = "sha256:4c21decb6ea94057331e111a5bed9a79d335658c27ce2adb580fb4d54f2ad9bc"}, + {file = "numpy-1.24.4-cp310-cp310-win_amd64.whl", hash = "sha256:b4bea75e47d9586d31e892a7401f76e909712a0fd510f58f5337bea9572c571e"}, + {file = "numpy-1.24.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f136bab9c2cfd8da131132c2cf6cc27331dd6fae65f95f69dcd4ae3c3639c810"}, + {file = "numpy-1.24.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2926dac25b313635e4d6cf4dc4e51c8c0ebfed60b801c799ffc4c32bf3d1254"}, + {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:222e40d0e2548690405b0b3c7b21d1169117391c2e82c378467ef9ab4c8f0da7"}, + {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7215847ce88a85ce39baf9e89070cb860c98fdddacbaa6c0da3ffb31b3350bd5"}, + {file = "numpy-1.24.4-cp311-cp311-win32.whl", hash = "sha256:4979217d7de511a8d57f4b4b5b2b965f707768440c17cb70fbf254c4b225238d"}, + {file = "numpy-1.24.4-cp311-cp311-win_amd64.whl", hash = "sha256:b7b1fc9864d7d39e28f41d089bfd6353cb5f27ecd9905348c24187a768c79694"}, + {file = "numpy-1.24.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:1452241c290f3e2a312c137a9999cdbf63f78864d63c79039bda65ee86943f61"}, + {file = "numpy-1.24.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:04640dab83f7c6c85abf9cd729c5b65f1ebd0ccf9de90b270cd61935eef0197f"}, + {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5425b114831d1e77e4b5d812b69d11d962e104095a5b9c3b641a218abcc050e"}, + {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd80e219fd4c71fc3699fc1dadac5dcf4fd882bfc6f7ec53d30fa197b8ee22dc"}, + {file = "numpy-1.24.4-cp38-cp38-win32.whl", hash = "sha256:4602244f345453db537be5314d3983dbf5834a9701b7723ec28923e2889e0bb2"}, + {file = "numpy-1.24.4-cp38-cp38-win_amd64.whl", hash = "sha256:692f2e0f55794943c5bfff12b3f56f99af76f902fc47487bdfe97856de51a706"}, + {file = "numpy-1.24.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2541312fbf09977f3b3ad449c4e5f4bb55d0dbf79226d7724211acc905049400"}, + {file = "numpy-1.24.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9667575fb6d13c95f1b36aca12c5ee3356bf001b714fc354eb5465ce1609e62f"}, + {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3a86ed21e4f87050382c7bc96571755193c4c1392490744ac73d660e8f564a9"}, + {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d11efb4dbecbdf22508d55e48d9c8384db795e1b7b51ea735289ff96613ff74d"}, + {file = "numpy-1.24.4-cp39-cp39-win32.whl", hash = "sha256:6620c0acd41dbcb368610bb2f4d83145674040025e5536954782467100aa8835"}, + {file = "numpy-1.24.4-cp39-cp39-win_amd64.whl", hash = "sha256:befe2bf740fd8373cf56149a5c23a0f601e82869598d41f8e188a0e9869926f8"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:31f13e25b4e304632a4619d0e0777662c2ffea99fcae2029556b17d8ff958aef"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95f7ac6540e95bc440ad77f56e520da5bf877f87dca58bd095288dce8940532a"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e98f220aa76ca2a977fe435f5b04d7b3470c0a2e6312907b37ba6068f26787f2"}, + {file = "numpy-1.24.4.tar.gz", hash = "sha256:80f5e3a4e498641401868df4208b74581206afbee7cf7b8329daae82676d9463"}, ] [[package]] @@ -2589,6 +2589,21 @@ pytz = ">=2020.1" [package.extras] test = ["hypothesis (>=5.5.3)", "pytest (>=6.0)", "pytest-xdist (>=1.31)"] +[[package]] +name = "pandas-stubs" +version = "2.2.2.240807" +description = "Type annotations for pandas" +optional = false +python-versions = ">=3.9" +files = [ + {file = "pandas_stubs-2.2.2.240807-py3-none-any.whl", hash = "sha256:893919ad82be4275f0d07bb47a95d08bae580d3fdea308a7acfcb3f02e76186e"}, + {file = "pandas_stubs-2.2.2.240807.tar.gz", hash = "sha256:64a559725a57a449f46225fbafc422520b7410bff9252b661a225b5559192a93"}, +] + +[package.dependencies] +numpy = ">=1.23.5" +types-pytz = ">=2022.1.1" + [[package]] name = "pandocfilters" version = "1.5.0" @@ -4531,6 +4546,17 @@ files = [ {file = "types_Pillow-10.2.0.20240520-py3-none-any.whl", hash = "sha256:33c36494b380e2a269bb742181bea5d9b00820367822dbd3760f07210a1da23d"}, ] +[[package]] +name = "types-pytz" +version = "2024.1.0.20240417" +description = "Typing stubs for pytz" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-pytz-2024.1.0.20240417.tar.gz", hash = "sha256:6810c8a1f68f21fdf0f4f374a432487c77645a0ac0b31de4bf4690cf21ad3981"}, + {file = "types_pytz-2024.1.0.20240417-py3-none-any.whl", hash = 
"sha256:8335d443310e2db7b74e007414e74c4f53b67452c0cb0d228ca359ccfba59659"}, +] + [[package]] name = "typing-extensions" version = "4.12.2" @@ -4780,4 +4806,4 @@ files = [ [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.11" -content-hash = "9b0cbd959d3723ae9fec64ac4694da64ae2e040288542547ae659e58d1d3befa" +content-hash = "f4938c34ae186b8dd617d5a9115e04e89aebf3f95fbab4e1edabbcd591f05c01" diff --git a/pyproject.toml b/pyproject.toml index ec1a6af..e9abcf5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,6 @@ authors = [] [tool.poetry.dependencies] python = ">=3.10,<3.11" -numpy = "1.21.0" openpyxl = "3.0.7" pandas = "1.4.2" scikit-learn = "^1.0.2" @@ -49,6 +48,7 @@ altair-viewer = "^0.4.0" vl-convert-python = "^1.6.0" ruff = "^0.6.1" types-pillow = "^10.2.0.20240520" +pandas-stubs = "^2.2.2.240807" [build-system] requires = ["poetry-core>=1.0.0"] diff --git a/typings/pandas/__init__.pyi b/typings/pandas/__init__.pyi deleted file mode 100644 index 536b55c..0000000 --- a/typings/pandas/__init__.pyi +++ /dev/null @@ -1,116 +0,0 @@ -from ._config import ( - describe_option as describe_option, - get_option as get_option, - option_context as option_context, - options as options, - reset_option as reset_option, - set_option as set_option, -) -from .core.api import ( - BooleanDtype as BooleanDtype, - Categorical as Categorical, - CategoricalDtype as CategoricalDtype, - CategoricalIndex as CategoricalIndex, - DataFrame as DataFrame, - DateOffset as DateOffset, - DatetimeIndex as DatetimeIndex, - DatetimeTZDtype as DatetimeTZDtype, - Float64Index as Float64Index, - Grouper as Grouper, - Index as Index, - IndexSlice as IndexSlice, - Int16Dtype as Int16Dtype, - Int32Dtype as Int32Dtype, - Int64Dtype as Int64Dtype, - Int64Index as Int64Index, - Int8Dtype as Int8Dtype, - Interval as Interval, - IntervalDtype as IntervalDtype, - IntervalIndex as IntervalIndex, - MultiIndex as MultiIndex, - NA as NA, - NaT as NaT, - NamedAgg as NamedAgg, - Period as Period, - PeriodDtype as PeriodDtype, - PeriodIndex as PeriodIndex, - RangeIndex as RangeIndex, - Series as Series, - StringDtype as StringDtype, - Timedelta as Timedelta, - TimedeltaIndex as TimedeltaIndex, - Timestamp as Timestamp, - UInt16Dtype as UInt16Dtype, - UInt32Dtype as UInt32Dtype, - UInt64Dtype as UInt64Dtype, - UInt64Index as UInt64Index, - UInt8Dtype as UInt8Dtype, - array as array, - bdate_range as bdate_range, - date_range as date_range, - factorize as factorize, - interval_range as interval_range, - isna as isna, - isnull as isnull, - notna as notna, - notnull as notnull, - period_range as period_range, - set_eng_float_format as set_eng_float_format, - timedelta_range as timedelta_range, - to_numeric as to_numeric, - unique as unique, - value_counts as value_counts, -) -from .core.tools import to_datetime as to_datetime, to_timedelta as to_timedelta -from .core.arrays.sparse import SparseDtype as SparseDtype -from .tseries import offsets as offsets -from .tseries.api import infer_freq as infer_freq -from .core.computation.api import eval as eval -from .core.reshape.api import ( - concat as concat, - crosstab as crosstab, - cut as cut, - get_dummies as get_dummies, - lreshape as lreshape, - melt as melt, - merge as merge, - merge_asof as merge_asof, - merge_ordered as merge_ordered, - pivot as pivot, - pivot_table as pivot_table, - qcut as qcut, - wide_to_long as wide_to_long, -) -from .util._print_versions import show_versions as show_versions -from .io.json import json_normalize as json_normalize -from .io.api import ( - 
ExcelFile as ExcelFile, - ExcelWriter as ExcelWriter, - HDFStore as HDFStore, - read_clipboard as read_clipboard, - read_csv as read_csv, - read_excel as read_excel, - read_feather as read_feather, - read_fwf as read_fwf, - read_gbq as read_gbq, - read_hdf as read_hdf, - read_html as read_html, - read_json as read_json, - read_orc as read_orc, - read_parquet as read_parquet, - read_pickle as read_pickle, - read_sas as read_sas, - read_spss as read_spss, - read_sql as read_sql, - read_sql_query as read_sql_query, - read_sql_table as read_sql_table, - read_stata as read_stata, - read_table as read_table, - to_pickle as to_pickle, -) - -from .util._tester import test as test - -import pandas.testing as testing - -__version__: str diff --git a/typings/pandas/_libs/__init__.pyi b/typings/pandas/_libs/__init__.pyi deleted file mode 100644 index 021fe88..0000000 --- a/typings/pandas/_libs/__init__.pyi +++ /dev/null @@ -1,10 +0,0 @@ -from .tslibs import ( - NaT as NaT, - NaTType as NaTType, - OutOfBoundsDatetime as OutOfBoundsDatetime, - Period as Period, - Timedelta as Timedelta, - Timestamp as Timestamp, - iNaT as iNaT, -) -from .interval import Interval as Interval diff --git a/typings/pandas/_libs/interval.pyi b/typings/pandas/_libs/interval.pyi deleted file mode 100644 index 60825c2..0000000 --- a/typings/pandas/_libs/interval.pyi +++ /dev/null @@ -1,175 +0,0 @@ -from __future__ import annotations - -from typing import ( - Any, - Generic, - TypeVar, - Union, - overload, -) - -import numpy as np - -from pandas._typing import npt - -from pandas._typing import ( - IntervalClosedType, - Timedelta, - Timestamp, -) - -VALID_CLOSED: frozenset[str] - -_OrderableScalarT = TypeVar("_OrderableScalarT", int, float) -_OrderableTimesT = TypeVar("_OrderableTimesT", Timestamp, Timedelta) -_OrderableT = TypeVar("_OrderableT", int, float, Timestamp, Timedelta) - -class _LengthDescriptor: - @overload - def __get__( - self, instance: Interval[_OrderableScalarT], owner: Any - ) -> _OrderableScalarT: ... - @overload - def __get__( - self, instance: Interval[_OrderableTimesT], owner: Any - ) -> Timedelta: ... - @overload - def __get__(self, instance: IntervalTree, owner: Any) -> np.ndarray: ... - -class _MidDescriptor: - @overload - def __get__(self, instance: Interval[_OrderableScalarT], owner: Any) -> float: ... - @overload - def __get__( - self, instance: Interval[_OrderableTimesT], owner: Any - ) -> _OrderableTimesT: ... - @overload - def __get__(self, instance: IntervalTree, owner: Any) -> np.ndarray: ... - -class IntervalMixin: - @property - def closed_left(self) -> bool: ... - @property - def closed_right(self) -> bool: ... - @property - def open_left(self) -> bool: ... - @property - def open_right(self) -> bool: ... - @property - def is_empty(self) -> bool: ... - def _check_closed_matches(self, other: IntervalMixin, name: str = ...) -> None: ... - -class Interval(IntervalMixin, Generic[_OrderableT]): - @property - def left(self: Interval[_OrderableT]) -> _OrderableT: ... - @property - def right(self: Interval[_OrderableT]) -> _OrderableT: ... - @property - def closed(self) -> IntervalClosedType: ... - mid: _MidDescriptor - length: _LengthDescriptor - def __init__( - self, - left: _OrderableT, - right: _OrderableT, - closed: IntervalClosedType = ..., - ): ... - def __hash__(self) -> int: ... - @overload - def __contains__(self: Interval[_OrderableTimesT], _OrderableTimesT) -> bool: ... - @overload - def __contains__( - self: Interval[_OrderableScalarT], key: Union[int, float] - ) -> bool: ... 
- def __repr__(self) -> str: ... - def __str__(self) -> str: ... - @overload - def __add__( - self: Interval[_OrderableTimesT], y: Timedelta - ) -> Interval[_OrderableTimesT]: ... - @overload - def __add__(self: Interval[int], y: int) -> Interval[int]: ... - @overload - def __add__(self: Interval[int], y: float) -> Interval[float]: ... - @overload - def __add__(self: Interval[float], y: Union[int, float]) -> Interval[float]: ... - @overload - def __radd__( - self: Interval[_OrderableTimesT], y: Timedelta - ) -> Interval[_OrderableTimesT]: ... - @overload - def __radd__(self: Interval[int], y: int) -> Interval[int]: ... - @overload - def __radd__(self: Interval[int], y: float) -> Interval[float]: ... - @overload - def __radd__(self: Interval[float], y: Union[int, float]) -> Interval[float]: ... - @overload - def __sub__( - self: Interval[_OrderableTimesT], y: Timedelta - ) -> Interval[_OrderableTimesT]: ... - @overload - def __sub__(self: Interval[int], y: int) -> Interval[int]: ... - @overload - def __sub__(self: Interval[int], y: float) -> Interval[float]: ... - @overload - def __sub__(self: Interval[float], y: Union[int, float]) -> Interval[float]: ... - @overload - def __rsub__( - self: Interval[_OrderableTimesT], y: Timedelta - ) -> Interval[_OrderableTimesT]: ... - @overload - def __rsub__(self: Interval[int], y: int) -> Interval[int]: ... - @overload - def __rsub__(self: Interval[int], y: float) -> Interval[float]: ... - @overload - def __rsub__(self: Interval[float], y: Union[int, float]) -> Interval[float]: ... - @overload - def __mul__(self: Interval[int], y: int) -> Interval[int]: ... - @overload - def __mul__(self: Interval[int], y: float) -> Interval[float]: ... - @overload - def __mul__(self: Interval[float], y: Union[int, float]) -> Interval[float]: ... - @overload - def __rmul__(self: Interval[int], y: int) -> Interval[int]: ... - @overload - def __rmul__(self: Interval[int], y: float) -> Interval[float]: ... - @overload - def __rmul__(self: Interval[float], y: Union[int, float]) -> Interval[float]: ... - @overload - def __truediv__(self: Interval[int], y: int) -> Interval[int]: ... - @overload - def __truediv__(self: Interval[int], y: float) -> Interval[float]: ... - @overload - def __truediv__(self: Interval[float], y: Union[int, float]) -> Interval[float]: ... - @overload - def __floordiv__(self: Interval[int], y: int) -> Interval[int]: ... - @overload - def __floordiv__(self: Interval[int], y: float) -> Interval[float]: ... - @overload - def __floordiv__( - self: Interval[float], y: Union[int, float] - ) -> Interval[float]: ... - def overlaps(self: Interval[_OrderableT], other: Interval[_OrderableT]) -> bool: ... - -def intervals_to_interval_bounds( - intervals: np.ndarray, validate_closed: bool = ... -) -> tuple[np.ndarray, np.ndarray, str]: ... - -class IntervalTree(IntervalMixin): - def __init__( - self, - left: np.ndarray, - right: np.ndarray, - closed: IntervalClosedType = ..., - leaf_size: int = ..., - ): ... - def get_indexer(self, target) -> npt.NDArray[np.intp]: ... - def get_indexer_non_unique( - self, target - ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... - _na_count: int - @property - def is_overlapping(self) -> bool: ... - @property - def is_monotonic_increasing(self) -> bool: ... - def clear_mapping(self) -> None: ... diff --git a/typings/pandas/_libs/json.pyi b/typings/pandas/_libs/json.pyi deleted file mode 100644 index 59086a4..0000000 --- a/typings/pandas/_libs/json.pyi +++ /dev/null @@ -1,4 +0,0 @@ -def decode(*args, **kwargs): ... 
-def dumps(*args, **kwargs): ... -def encode(*args, **kwargs): ... -def loads(*args, **kwargs): ... diff --git a/typings/pandas/_libs/missing.pyi b/typings/pandas/_libs/missing.pyi deleted file mode 100644 index 2f5cae4..0000000 --- a/typings/pandas/_libs/missing.pyi +++ /dev/null @@ -1,48 +0,0 @@ -from __future__ import annotations -from typing import Union - -class NAType: - def __new__(cls, *args, **kwargs) -> NAType: ... - def __repr__(self) -> str: ... - def __format__(self, format_spec: str) -> str: ... - def __bool__(self) -> None: ... - def __hash__(self) -> int: ... - def __reduce__(self) -> str: ... - def __add__(self, other) -> NAType: ... - def __radd__(self, other) -> NAType: ... - def __sub__(self, other) -> NAType: ... - def __rsub__(self, other) -> NAType: ... - def __mul__(self, other) -> NAType: ... - def __rmul__(self, other) -> NAType: ... - def __matmul__(self, other) -> NAType: ... - def __rmatmul__(self, other) -> NAType: ... - def __truediv__(self, other) -> NAType: ... - def __rtruediv__(self, other) -> NAType: ... - def __floordiv__(self, other) -> NAType: ... - def __rfloordiv__(self, other) -> NAType: ... - def __mod__(self, other) -> NAType: ... - def __rmod__(self, other) -> NAType: ... - def __divmod__(self, other) -> NAType: ... - def __rdivmod__(self, other) -> NAType: ... - def __eq__(self, other) -> bool: ... - def __ne__(self, other) -> bool: ... - def __le__(self, other) -> bool: ... - def __lt__(self, other) -> bool: ... - def __gt__(self, other) -> bool: ... - def __ge__(self, other) -> bool: ... - def __neg__(self, other) -> NAType: ... - def __pos__(self, other) -> NAType: ... - def __abs__(self, other) -> NAType: ... - def __invert__(self, other) -> NAType: ... - def __pow__(self, other) -> NAType: ... - def __rpow__(self, other) -> NAType: ... - def __and__(self, other) -> Union[None, NAType]: ... - __rand__ = __and__ - def __or__(self, other) -> Union[bool, NAType]: ... - __ror__ = __or__ - def __xor__(self, other) -> NAType: ... - __rxor__ = __xor__ - __array_priority__: int - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ... - -NA: NAType = ... diff --git a/typings/pandas/_libs/properties.pyi b/typings/pandas/_libs/properties.pyi deleted file mode 100644 index 78eabdc..0000000 --- a/typings/pandas/_libs/properties.pyi +++ /dev/null @@ -1,13 +0,0 @@ -from typing import Callable - -class CachedProperty: - def __init__(self, func: Callable) -> None: ... - def __get__(self, obj, typ): ... - def __set__(self, obj, value) -> None: ... - -cache_readonly: CachedProperty = ... - -class AxisProperty: - def __init__(self, axis: int = ..., doc: str = ...) -> None: ... - def __get__(self, obj, typ): ... - def __set__(self, obj, value) -> None: ... 
diff --git a/typings/pandas/_libs/tslibs/__init__.pyi b/typings/pandas/_libs/tslibs/__init__.pyi deleted file mode 100644 index 06694f5..0000000 --- a/typings/pandas/_libs/tslibs/__init__.pyi +++ /dev/null @@ -1,24 +0,0 @@ -__all__ = [ - "Period", - "Timestamp", - "Timedelta", - "NaT", - "NaTType", - "iNaT", - "nat_strings", - "BaseOffset", - "Tick", - "OutofBoundsDatetime", -] - -from .period import Period -from .timestamps import Timestamp -from .timedeltas import Timedelta -from .nattype import ( - NaT, - NaTType, - iNaT, - nat_strings, -) -from .offsets import BaseOffset, Tick -from np_datetime import OutOfBoundsDatetime as OutOfBoundsDatetime diff --git a/typings/pandas/_libs/tslibs/nattype.pyi b/typings/pandas/_libs/tslibs/nattype.pyi deleted file mode 100644 index efadd8f..0000000 --- a/typings/pandas/_libs/tslibs/nattype.pyi +++ /dev/null @@ -1,125 +0,0 @@ -from datetime import ( - datetime, - timedelta, - tzinfo as _tzinfo, -) -from typing import ( - Any, - Union, -) - -import numpy as np - -from pandas._libs.tslibs.period import Period - -NaT: NaTType -iNaT: int -nat_strings: set[str] - -def is_null_datetimelike(val: object, inat_is_null: bool = ...) -> bool: ... - -_NaTComparisonTypes = Union[datetime, timedelta, Period, np.datetime64, np.timedelta64] - -class _NatComparison: - def __call__(self, other: _NaTComparisonTypes) -> bool: ... - -class NaTType: - value: np.int64 - def asm8(self) -> np.datetime64: ... - def to_datetime64(self) -> np.datetime64: ... - def to_numpy( - self, dtype: np.dtype | str | None = ..., copy: bool = ... - ) -> np.datetime64 | np.timedelta64: ... - @property - def is_leap_year(self) -> bool: ... - @property - def is_month_start(self) -> bool: ... - @property - def is_quarter_start(self) -> bool: ... - @property - def is_year_start(self) -> bool: ... - @property - def is_month_end(self) -> bool: ... - @property - def is_quarter_end(self) -> bool: ... - @property - def is_year_end(self) -> bool: ... - @property - def day_of_year(self) -> float: ... - @property - def dayofyear(self) -> float: ... - @property - def days_in_month(self) -> float: ... - @property - def daysinmonth(self) -> float: ... - @property - def day_of_week(self) -> float: ... - @property - def dayofweek(self) -> float: ... - @property - def week(self) -> float: ... - @property - def weekofyear(self) -> float: ... - def day_name(self) -> float: ... - def month_name(self) -> float: ... - def weekday(self) -> float: ... - def isoweekday(self) -> float: ... - def total_seconds(self) -> float: ... - def today(self, *args, **kwargs) -> NaTType: ... - def now(self, *args, **kwargs) -> NaTType: ... - def to_pydatetime(self) -> NaTType: ... - def date(self) -> NaTType: ... - def round(self) -> NaTType: ... - def floor(self) -> NaTType: ... - def ceil(self) -> NaTType: ... - def tz_convert(self) -> NaTType: ... - def tz_localize(self) -> NaTType: ... - def replace( - self, - year: int | None = ..., - month: int | None = ..., - day: int | None = ..., - hour: int | None = ..., - minute: int | None = ..., - second: int | None = ..., - microsecond: int | None = ..., - nanosecond: int | None = ..., - tzinfo: _tzinfo | None = ..., - fold: int | None = ..., - ) -> NaTType: ... - @property - def year(self) -> float: ... - @property - def quarter(self) -> float: ... - @property - def month(self) -> float: ... - @property - def day(self) -> float: ... - @property - def hour(self) -> float: ... - @property - def minute(self) -> float: ... - @property - def second(self) -> float: ... 
- @property - def millisecond(self) -> float: ... - @property - def microsecond(self) -> float: ... - @property - def nanosecond(self) -> float: ... - # inject Timedelta properties - @property - def days(self) -> float: ... - @property - def microseconds(self) -> float: ... - @property - def nanoseconds(self) -> float: ... - # inject Period properties - @property - def qyear(self) -> float: ... - def __eq__(self, other: Any) -> bool: ... - def __ne__(self, other: Any) -> bool: ... - __lt__: _NatComparison - __le__: _NatComparison - __gt__: _NatComparison - __ge__: _NatComparison diff --git a/typings/pandas/_libs/tslibs/np_datetime.pyi b/typings/pandas/_libs/tslibs/np_datetime.pyi deleted file mode 100644 index db0c277..0000000 --- a/typings/pandas/_libs/tslibs/np_datetime.pyi +++ /dev/null @@ -1 +0,0 @@ -class OutOfBoundsDatetime(ValueError): ... diff --git a/typings/pandas/_libs/tslibs/offsets.pyi b/typings/pandas/_libs/tslibs/offsets.pyi deleted file mode 100644 index 3df14f0..0000000 --- a/typings/pandas/_libs/tslibs/offsets.pyi +++ /dev/null @@ -1,255 +0,0 @@ -from __future__ import annotations - -from datetime import ( - datetime, - timedelta, -) -from typing import ( - TYPE_CHECKING, - Any, - Collection, - Literal, - Tuple, - TypeVar, - Union, - overload, -) - -import numpy as np -from pandas._typing import npt - -from .timedeltas import Timedelta - -if TYPE_CHECKING: - from pandas.core.indexes.datetimes import DatetimeIndex -_BaseOffsetT = TypeVar("_BaseOffsetT", bound="BaseOffset") -_DatetimeT = TypeVar("_DatetimeT", bound=datetime) -_TimedeltaT = TypeVar("_TimedeltaT", bound=timedelta) - -_relativedelta_kwds: set[str] -prefix_mapping: dict[str, type] - -class ApplyTypeError(TypeError): ... - -class BaseOffset: - n: int - def __init__(self, n: int = ..., normalize: bool = ...) -> None: ... - def __eq__(self, other) -> bool: ... - def __ne__(self, other) -> bool: ... - def __hash__(self) -> int: ... - @property - def kwds(self) -> dict: ... - @property - def base(self) -> BaseOffset: ... - @overload - def __add__(self, other: npt.NDArray[np.object_]) -> npt.NDArray[np.object_]: ... - @overload - def __add__(self: _BaseOffsetT, other: BaseOffset) -> _BaseOffsetT: ... - @overload - def __add__(self, other: _DatetimeT) -> _DatetimeT: ... - @overload - def __add__(self, other: _TimedeltaT) -> _TimedeltaT: ... - @overload - def __radd__(self, other: npt.NDArray[np.object_]) -> npt.NDArray[np.object_]: ... - @overload - def __radd__(self: _BaseOffsetT, other: BaseOffset) -> _BaseOffsetT: ... - @overload - def __radd__(self, other: _DatetimeT) -> _DatetimeT: ... - @overload - def __radd__(self, other: _TimedeltaT) -> _TimedeltaT: ... - def __sub__(self: _BaseOffsetT, other: BaseOffset) -> _BaseOffsetT: ... - @overload - def __rsub__(self, other: npt.NDArray[np.object_]) -> npt.NDArray[np.object_]: ... - @overload - def __rsub__(self: _BaseOffsetT, other: BaseOffset) -> _BaseOffsetT: ... - @overload - def __rsub__(self, other: _DatetimeT) -> _DatetimeT: ... - @overload - def __rsub__(self, other: _TimedeltaT) -> _TimedeltaT: ... - def __call__(self, other): ... - @overload - def __mul__(self, other: np.ndarray) -> np.ndarray: ... - @overload - def __mul__(self: _BaseOffsetT, other: int) -> _BaseOffsetT: ... - @overload - def __rmul__(self, other: np.ndarray) -> np.ndarray: ... - @overload - def __rmul__(self: _BaseOffsetT, other: int) -> _BaseOffsetT: ... - def __neg__(self: _BaseOffsetT) -> _BaseOffsetT: ... - def copy(self: _BaseOffsetT) -> _BaseOffsetT: ... 
- def __repr__(self) -> str: ... - @property - def name(self) -> str: ... - @property - def rule_code(self) -> str: ... - def freqstr(self) -> str: ... - def apply_index(self, dtindex: "DatetimeIndex") -> "DatetimeIndex": ... - def _apply_array(self, dtarr) -> None: ... - def rollback(self, dt: datetime) -> datetime: ... - def rollforward(self, dt: datetime) -> datetime: ... - def is_on_offset(self, dt: datetime) -> bool: ... - def __setstate__(self, state) -> None: ... - def __getstate__(self): ... - @property - def nanos(self) -> int: ... - def onOffset(self, dt: datetime) -> bool: ... - def isAnchored(self) -> bool: ... - def is_anchored(self) -> bool: ... - -def _get_offset(name: str) -> BaseOffset: ... - -class SingleConstructorOffset(BaseOffset): - @classmethod - def _from_name(cls, suffix=...): ... - def __reduce__(self): ... - -@overload -def to_offset(freq: None) -> None: ... -@overload -def to_offset(freq: timedelta | BaseOffset | str) -> BaseOffset: ... - -class Tick(SingleConstructorOffset): - def __init__(self, n: int = ..., normalize: bool = ...) -> None: ... - @property - def delta(self) -> Timedelta: ... - @property - def nanos(self) -> int: ... - -def delta_to_tick(delta: timedelta) -> Tick: ... - -class Day(Tick): ... -class Hour(Tick): ... -class Minute(Tick): ... -class Second(Tick): ... -class Milli(Tick): ... -class Micro(Tick): ... -class Nano(Tick): ... - -class RelativeDeltaOffset(BaseOffset): - def __init__(self, n: int = ..., normalize: bool = ..., **kwds: Any) -> None: ... - -class BusinessMixin(SingleConstructorOffset): - def __init__( - self, n: int = ..., normalize: bool = ..., offset: timedelta = ... - ): ... - -class BusinessDay(BusinessMixin): ... - -class BusinessHour(BusinessMixin): - def __init__( - self, - n: int = ..., - normalize: bool = ..., - start: str | Collection[str] = ..., - end: str | Collection[str] = ..., - offset: timedelta = ..., - ): ... - -class WeekOfMonthMixin(SingleConstructorOffset): ... - -class YearOffset(SingleConstructorOffset): - def __init__( - self, n: int = ..., normalize: bool = ..., month: int | None = ... - ): ... - -class BYearEnd(YearOffset): ... -class BYearBegin(YearOffset): ... -class YearEnd(YearOffset): ... -class YearBegin(YearOffset): ... - -class QuarterOffset(SingleConstructorOffset): - def __init__( - self, n: int = ..., normalize: bool = ..., startingMonth: int | None = ... - ) -> None: ... - -class BQuarterEnd(QuarterOffset): ... -class BQuarterBegin(QuarterOffset): ... -class QuarterEnd(QuarterOffset): ... -class QuarterBegin(QuarterOffset): ... -class MonthOffset(SingleConstructorOffset): ... -class MonthEnd(MonthOffset): ... -class MonthBegin(MonthOffset): ... -class BusinessMonthEnd(MonthOffset): ... -class BusinessMonthBegin(MonthOffset): ... - -class SemiMonthOffset(SingleConstructorOffset): - def __init__( - self, n: int = ..., normalize: bool = ..., day_of_month: int | None = ... - ) -> None: ... - -class SemiMonthEnd(SemiMonthOffset): ... -class SemiMonthBegin(SemiMonthOffset): ... - -class Week(SingleConstructorOffset): - def __init__( - self, n: int = ..., normalize: bool = ..., weekday: int | None = ... - ) -> None: ... - -class WeekOfMonth(WeekOfMonthMixin): ... -class LastWeekOfMonth(WeekOfMonthMixin): ... - -class FY5253Mixin(SingleConstructorOffset): - def __init__( - self, - n: int = ..., - normalize: bool = ..., - weekday: int = ..., - startingMonth: int = ..., - variation: str = ..., - ) -> None: ... - -class FY5253(FY5253Mixin): ... -class FY5253Quarter(FY5253Mixin): ... 
-class Easter(SingleConstructorOffset): ... - -class _CustomBusinessMonth(BusinessMixin): - def __init__( - self, - n: int = ..., - normalize: bool = ..., - offset: timedelta = ..., - holidays: None | list = ..., - ): ... - -class CustomBusinessDay(BusinessDay): - def __init__( - self, - n: int = ..., - normalize: bool = ..., - offset: timedelta = ..., - weekmask: str = ..., - ): ... - -class CustomBusinessHour(BusinessHour): - def __init__( - self, - n: int = ..., - normalize: bool = ..., - start: str = ..., - end: str = ..., - offset: timedelta = ..., - holidays: None | list = ..., - ): ... - -class CustomBusinessMonthEnd(_CustomBusinessMonth): ... -class CustomBusinessMonthBegin(_CustomBusinessMonth): ... -class DateOffset(RelativeDeltaOffset): ... - -BDay = BusinessDay -BMonthEnd = BusinessMonthEnd -BMonthBegin = BusinessMonthBegin -CBMonthEnd = CustomBusinessMonthEnd -CBMonthBegin = CustomBusinessMonthBegin -CDay = CustomBusinessDay - -def roll_qtrday( - other: datetime, n: int, month: int, day_opt: str, modby: int -) -> int: ... - -INVALID_FREQ_ERR_MSG: Literal["Invalid frequency: {0}"] - -def shift_months( - dtindex: npt.NDArray[np.int64], months: int, day_opt: str | None = ... -) -> npt.NDArray[np.int64]: ... - -_offset_map: dict[str, BaseOffset] diff --git a/typings/pandas/_libs/tslibs/period.pyi b/typings/pandas/_libs/tslibs/period.pyi deleted file mode 100644 index 8a30309..0000000 --- a/typings/pandas/_libs/tslibs/period.pyi +++ /dev/null @@ -1,89 +0,0 @@ -from __future__ import annotations -from datetime import datetime -from typing import Any - -def is_period_object(obj: object) -> bool: ... -def get_period_ordinal(dts: datetime, freq: int) -> int: ... - -class IncompatibleFrequency(ValueError): ... - -class Period: - def __init__( - self, - value: Any = ..., - freqstr: Any = ..., - ordinal: Any = ..., - year: Any = ..., - month: int = ..., - quarter: Any = ..., - day: int = ..., - hour: int = ..., - minute: int = ..., - second: int = ..., - ) -> None: ... - def __add__(self, other) -> Period: ... - def __eq__(self, other) -> bool: ... - def __ge__(self, other) -> bool: ... - def __gt__(self, other) -> bool: ... - def __hash__(self) -> int: ... - def __le__(self, other) -> bool: ... - def __lt__(self, other) -> bool: ... - def __new__(cls, *args, **kwargs) -> Period: ... - def __ne__(self, other) -> bool: ... - def __radd__(self, other) -> Period: ... - def __reduce__(self, *args, **kwargs) -> Any: ... # what should this be? - def __repr__(self) -> str: ... - def __rsub__(self, other) -> Period: ... - def __setstate__(self, *args, **kwargs) -> Any: ... # what should this be? - def __str__(self) -> str: ... - @property - def day(self) -> int: ... - @property - def dayofweek(self) -> int: ... - @property - def dayofyear(self) -> int: ... - @property - def daysinmonth(self) -> int: ... - @property - def days_in_month(self) -> int: ... - @property - def end_time(self) -> Timestamp: ... - @property - def freq(self) -> Any: ... - @property - def freqstr(self) -> str: ... - @property - def hour(self) -> int: ... - @property - def minute(self) -> int: ... - @property - def month(self) -> int: ... - @property - def quarter(self) -> int: ... - @property - def qyear(self) -> int: ... - @property - def second(self) -> int: ... - @property - def ordinal(self) -> int: ... - @property - def is_leap_year(self) -> bool: ... - @property - def start_time(self) -> Timestamp: ... - @property - def week(self) -> int: ... - @property - def weekday(self) -> int: ... 
- @property - def weekofyear(self) -> int: ... - @property - def year(self) -> int: ... - # Static methods - @classmethod - def now(cls) -> Period: ... - # Methods - def asfreq(self, freq: str, how: str = ...) -> Period: ... - def strftime(self, fmt: str) -> str: ... - def to_timestamp(self, freq: str, how: str = ...) -> Timestamp: ... - -from .timestamps import Timestamp diff --git a/typings/pandas/_libs/tslibs/timedeltas.pyi b/typings/pandas/_libs/tslibs/timedeltas.pyi deleted file mode 100644 index 28c2f7d..0000000 --- a/typings/pandas/_libs/tslibs/timedeltas.pyi +++ /dev/null @@ -1,148 +0,0 @@ -from datetime import timedelta -from typing import ( - ClassVar, - Literal, - Type, - TypeVar, - overload, -) - -import numpy as np - -from pandas._libs.tslibs import ( - NaTType, - Tick, -) -from pandas._typing import npt - -# This should be kept consistent with the keys in the dict timedelta_abbrevs -# in pandas/_libs/tslibs/timedeltas.pyx -UnitChoices = Literal[ - "Y", - "y", - "M", - "W", - "w", - "D", - "d", - "days", - "day", - "hours", - "hour", - "hr", - "h", - "m", - "minute", - "min", - "minutes", - "t", - "s", - "seconds", - "sec", - "second", - "ms", - "milliseconds", - "millisecond", - "milli", - "millis", - "l", - "us", - "microseconds", - "microsecond", - "µs", - "micro", - "micros", - "u", - "ns", - "nanoseconds", - "nano", - "nanos", - "nanosecond", - "n", -] -_S = TypeVar("_S", bound=timedelta) - -def ints_to_pytimedelta( - arr: npt.NDArray[np.int64], # const int64_t[:] - box: bool = ..., -) -> npt.NDArray[np.object_]: ... -def array_to_timedelta64( - values: npt.NDArray[np.object_], - unit: str | None = ..., - errors: str = ..., -) -> np.ndarray: ... # np.ndarray[m8ns] -def parse_timedelta_unit(unit: str | None) -> UnitChoices: ... -def delta_to_nanoseconds(delta: np.timedelta64 | timedelta | Tick) -> int: ... - -class Timedelta(timedelta): - min: ClassVar[Timedelta] - max: ClassVar[Timedelta] - resolution: ClassVar[Timedelta] - value: int # np.int64 - def __new__( - cls: Type[_S], - value=..., - unit: str = ..., - **kwargs: int | float | np.integer | np.floating, - ) -> _S: ... - # GH 46171 - # While Timedelta can return pd.NaT, having the constructor return - # a Union with NaTType makes things awkward for users of pandas - @property - def days(self) -> int: ... - @property - def seconds(self) -> int: ... - @property - def microseconds(self) -> int: ... - def total_seconds(self) -> float: ... - def to_pytimedelta(self) -> timedelta: ... - def to_timedelta64(self) -> np.timedelta64: ... - @property - def asm8(self) -> np.timedelta64: ... - # TODO: round/floor/ceil could return NaT? - def round(self: _S, freq: str) -> _S: ... - def floor(self: _S, freq: str) -> _S: ... - def ceil(self: _S, freq: str) -> _S: ... - @property - def resolution_string(self) -> str: ... - def __add__(self, other: timedelta) -> Timedelta: ... - def __radd__(self, other: timedelta) -> Timedelta: ... - def __sub__(self, other: timedelta) -> Timedelta: ... - def __rsub__(self, other: timedelta) -> Timedelta: ... - def __neg__(self) -> Timedelta: ... - def __pos__(self) -> Timedelta: ... - def __abs__(self) -> Timedelta: ... - def __mul__(self, other: float) -> Timedelta: ... - def __rmul__(self, other: float) -> Timedelta: ... - # error: Signature of "__floordiv__" incompatible with supertype "timedelta" - @overload # type: ignore[override] - def __floordiv__(self, other: timedelta) -> int: ... - @overload - def __floordiv__(self, other: int | float) -> Timedelta: ... 
- @overload - def __floordiv__( - self, other: npt.NDArray[np.timedelta64] - ) -> npt.NDArray[np.intp]: ... - @overload - def __floordiv__( - self, other: npt.NDArray[np.number] - ) -> npt.NDArray[np.timedelta64] | Timedelta: ... - @overload - def __rfloordiv__(self, other: timedelta | str) -> int: ... - @overload - def __rfloordiv__(self, other: None | NaTType) -> NaTType: ... - @overload - def __rfloordiv__(self, other: np.ndarray) -> npt.NDArray[np.timedelta64]: ... - @overload - def __truediv__(self, other: timedelta) -> float: ... - @overload - def __truediv__(self, other: float) -> Timedelta: ... - def __mod__(self, other: timedelta) -> Timedelta: ... - def __divmod__(self, other: timedelta) -> tuple[int, Timedelta]: ... - def __le__(self, other: timedelta) -> bool: ... - def __lt__(self, other: timedelta) -> bool: ... - def __ge__(self, other: timedelta) -> bool: ... - def __gt__(self, other: timedelta) -> bool: ... - def __hash__(self) -> int: ... - def isoformat(self) -> str: ... - def to_numpy(self) -> np.timedelta64: ... diff --git a/typings/pandas/_libs/tslibs/timestamps.pyi b/typings/pandas/_libs/tslibs/timestamps.pyi deleted file mode 100644 index c5ffaba..0000000 --- a/typings/pandas/_libs/tslibs/timestamps.pyi +++ /dev/null @@ -1,221 +0,0 @@ -from datetime import ( - date as _date, - datetime, - time as _time, - timedelta, - tzinfo as _tzinfo, -) -from time import struct_time -from typing import ( - ClassVar, - TypeVar, - overload, -) - -import numpy as np - -from pandas._libs.tslibs import ( - BaseOffset, - NaTType, - Period, - Tick, - Timedelta, -) - -_DatetimeT = TypeVar("_DatetimeT", bound=datetime) - -def integer_op_not_supported(obj: object) -> TypeError: ... - -class Timestamp(datetime): - min: ClassVar[Timestamp] - max: ClassVar[Timestamp] - - resolution: ClassVar[Timedelta] - value: int # np.int64 - def __new__( - cls: type[_DatetimeT], - ts_input: int - | np.integer - | float - | str - | _date - | datetime - | np.datetime64 = ..., - freq: int | None | str | BaseOffset = ..., - tz: str | _tzinfo | None | int = ..., - unit: str | int | None = ..., - year: int | None = ..., - month: int | None = ..., - day: int | None = ..., - hour: int | None = ..., - minute: int | None = ..., - second: int | None = ..., - microsecond: int | None = ..., - nanosecond: int | None = ..., - tzinfo: _tzinfo | None = ..., - *, - fold: int | None = ..., - ) -> _DatetimeT: ... - # GH 46171 - # While Timestamp can return pd.NaT, having the constructor return - # a Union with NaTType makes things awkward for users of pandas - def _set_freq(self, freq: BaseOffset | None) -> None: ... - @property - def year(self) -> int: ... - @property - def month(self) -> int: ... - @property - def day(self) -> int: ... - @property - def hour(self) -> int: ... - @property - def minute(self) -> int: ... - @property - def second(self) -> int: ... - @property - def microsecond(self) -> int: ... - @property - def tzinfo(self) -> _tzinfo | None: ... - @property - def tz(self) -> _tzinfo | None: ... - @property - def fold(self) -> int: ... - @classmethod - def fromtimestamp( - cls: type[_DatetimeT], t: float, tz: _tzinfo | None = ... - ) -> _DatetimeT: ... - @classmethod - def utcfromtimestamp(cls: type[_DatetimeT], t: float) -> _DatetimeT: ... - @classmethod - def today(cls: type[_DatetimeT], tz: _tzinfo | str | None = ...) -> _DatetimeT: ... - @classmethod - def fromordinal( - cls: type[_DatetimeT], - ordinal: int, - freq: str | BaseOffset | None = ..., - tz: _tzinfo | str | None = ..., - ) -> _DatetimeT: ... 
- @classmethod - def now(cls: type[_DatetimeT], tz: _tzinfo | str | None = ...) -> _DatetimeT: ... - @classmethod - def utcnow(cls: type[_DatetimeT]) -> _DatetimeT: ... - # error: Signature of "combine" incompatible with supertype "datetime" - @classmethod - def combine(cls, date: _date, time: _time) -> datetime: ... # type: ignore[override] - @classmethod - def fromisoformat(cls: type[_DatetimeT], date_string: str) -> _DatetimeT: ... - def strftime(self, format: str) -> str: ... - def __format__(self, fmt: str) -> str: ... - def toordinal(self) -> int: ... - def timetuple(self) -> struct_time: ... - def timestamp(self) -> float: ... - def utctimetuple(self) -> struct_time: ... - def date(self) -> _date: ... - def time(self) -> _time: ... - def timetz(self) -> _time: ... - def replace( - self, - year: int = ..., - month: int = ..., - day: int = ..., - hour: int = ..., - minute: int = ..., - second: int = ..., - microsecond: int = ..., - tzinfo: _tzinfo | None = ..., - fold: int = ..., - ) -> datetime: ... - def astimezone(self: _DatetimeT, tz: _tzinfo | None = ...) -> _DatetimeT: ... - def ctime(self) -> str: ... - def isoformat(self, sep: str = ..., timespec: str = ...) -> str: ... - @classmethod - def strptime(cls, date_string: str, format: str) -> datetime: ... - def utcoffset(self) -> timedelta | None: ... - def tzname(self) -> str | None: ... - def dst(self) -> timedelta | None: ... - def __le__(self, other: datetime) -> bool: ... # type: ignore[override] - def __lt__(self, other: datetime) -> bool: ... # type: ignore[override] - def __ge__(self, other: datetime) -> bool: ... # type: ignore[override] - def __gt__(self, other: datetime) -> bool: ... # type: ignore[override] - # error: Signature of "__add__" incompatible with supertype "date"/"datetime" - @overload # type: ignore[override] - def __add__(self, other: np.ndarray) -> np.ndarray: ... - @overload - def __add__( - self: _DatetimeT, other: timedelta | np.timedelta64 | Tick - ) -> _DatetimeT: ... - def __radd__(self: _DatetimeT, other: timedelta) -> _DatetimeT: ... - @overload # type: ignore[override] - def __sub__(self, other: datetime) -> Timedelta: ... - @overload - def __sub__( - self: _DatetimeT, other: timedelta | np.timedelta64 | Tick - ) -> _DatetimeT: ... - def __hash__(self) -> int: ... - def weekday(self) -> int: ... - def isoweekday(self) -> int: ... - def isocalendar(self) -> tuple[int, int, int]: ... - @property - def is_leap_year(self) -> bool: ... - @property - def is_month_start(self) -> bool: ... - @property - def is_quarter_start(self) -> bool: ... - @property - def is_year_start(self) -> bool: ... - @property - def is_month_end(self) -> bool: ... - @property - def is_quarter_end(self) -> bool: ... - @property - def is_year_end(self) -> bool: ... - def to_pydatetime(self, warn: bool = ...) -> datetime: ... - def to_datetime64(self) -> np.datetime64: ... - def to_period(self, freq: BaseOffset | str | None = ...) -> Period: ... - def to_julian_date(self) -> np.float64: ... - @property - def asm8(self) -> np.datetime64: ... - def tz_convert(self: _DatetimeT, tz: _tzinfo | str | None) -> _DatetimeT: ... - # TODO: could return NaT? - def tz_localize( - self: _DatetimeT, - tz: _tzinfo | str | None, - ambiguous: str = ..., - nonexistent: str = ..., - ) -> _DatetimeT: ... - def normalize(self: _DatetimeT) -> _DatetimeT: ... - # TODO: round/floor/ceil could return NaT? - def round( - self: _DatetimeT, freq: str, ambiguous: bool | str = ..., nonexistent: str = ... - ) -> _DatetimeT: ... 
- def floor( - self: _DatetimeT, freq: str, ambiguous: bool | str = ..., nonexistent: str = ... - ) -> _DatetimeT: ... - def ceil( - self: _DatetimeT, freq: str, ambiguous: bool | str = ..., nonexistent: str = ... - ) -> _DatetimeT: ... - def day_name(self, locale: str | None = ...) -> str: ... - def month_name(self, locale: str | None = ...) -> str: ... - @property - def day_of_week(self) -> int: ... - @property - def dayofweek(self) -> int: ... - @property - def day_of_month(self) -> int: ... - @property - def day_of_year(self) -> int: ... - @property - def dayofyear(self) -> int: ... - @property - def quarter(self) -> int: ... - @property - def week(self) -> int: ... - def to_numpy( - self, dtype: np.dtype | None = ..., copy: bool = ... - ) -> np.datetime64: ... - @property - def _date_repr(self) -> str: ... - @property - def days_in_month(self) -> int: ... - @property - def daysinmonth(self) -> int: ... diff --git a/typings/pandas/_testing.pyi b/typings/pandas/_testing.pyi deleted file mode 100644 index 70427a6..0000000 --- a/typings/pandas/_testing.pyi +++ /dev/null @@ -1,205 +0,0 @@ -from pandas.core.frame import DataFrame -from pandas.core.indexes.base import Index -from pandas.core.series import Series -from pandas._typing import ( - FilePathOrBuffer as FilePathOrBuffer, - FrameOrSeries as FrameOrSeries, -) -from typing import Any, List, Optional, Union - -lzma = ... -N: int = ... -K: int = ... - -def set_testing_mode() -> None: ... -def reset_testing_mode() -> None: ... -def reset_display_options() -> None: ... -def round_trip_pickle(obj, path: Optional[FilePathOrBuffer] = ...) -> FrameOrSeries: ... -def round_trip_pathlib(writer, reader, path: Optional[str] = ...): ... -def round_trip_localpath(writer, reader, path: Optional[str] = ...): ... -def decompress_file(path, compression) -> None: ... -def write_to_compressed(compression, path, data, dest: str = ...) -> None: ... -def assert_almost_equal( - left, - right, - check_dtype: Union[bool, str] = ..., - check_less_precise: Union[bool, int] = ..., - **kwargs -): ... -def assert_dict_equal(left, right, compare_keys: bool = ...): ... -def randbool(size=..., p: float = ...): ... - -RANDS_CHARS = ... -RANDU_CHARS = ... - -def rands_array(nchars, size, dtype: str = ...): ... -def randu_array(nchars, size, dtype: str = ...): ... -def rands(nchars): ... -def randu(nchars): ... -def close(fignum=...) -> None: ... -def ensure_clean(filename=..., return_filelike: bool = ...) -> None: ... -def ensure_clean_dir() -> None: ... -def ensure_safe_environment_variables() -> None: ... -def equalContents(arr1, arr2) -> bool: ... -def assert_index_equal(left: Index[Any], right: Index[Any]) -> None: ... -def assert_class_equal(left, right, exact: Union[bool, str] = ..., obj=...): ... -def assert_attr_equal(attr, left, right, obj: str = ...): ... -def assert_is_valid_plot_return_object(objs) -> None: ... -def isiterable(obj) -> bool: ... -def assert_is_sorted(seq) -> None: ... -def assert_categorical_equal( - left, - right, - check_dtype: bool = ..., - check_category_order: bool = ..., - obj: str = ..., -) -> None: ... -def assert_interval_array_equal( - left, right, exact: str = ..., obj: str = ... -) -> None: ... -def assert_period_array_equal(left, right, obj: str = ...) -> None: ... -def assert_datetime_array_equal(left, right, obj: str = ...) -> None: ... -def assert_timedelta_array_equal(left, right, obj: str = ...) -> None: ... -def raise_assert_detail(obj, message, left, right, diff=...) -> None: ... 
-def assert_numpy_array_equal( - left, - right, - strict_nan: bool = ..., - check_dtype: bool = ..., - err_msg=..., - check_same=..., - obj: str = ..., -): ... -def assert_extension_array_equal( - left, - right, - check_dtype: bool = ..., - check_less_precise: bool = ..., - check_exact: bool = ..., -) -> None: ... -def assert_series_equal( - left: Series, - right: Series, - check_dtype: bool = ..., - check_index_type: bool | str = ..., - check_series_type: bool = ..., - check_less_precise: bool | int = ..., - check_names: bool = ..., - check_exact: bool = ..., - check_datetimelike_compat: bool = ..., - check_categorical: bool = ..., - check_category_order: bool = ..., - check_freq: bool = ..., - check_flags: bool = ..., - rtol: float = ..., - atol: float = ..., - obj: str = ..., - *, - check_index: bool = ... -) -> None: ... -def assert_frame_equal( - left: DataFrame, right: DataFrame, check_like: Optional[bool] = ... -) -> None: ... -def assert_equal(left, right, **kwargs) -> None: ... -def box_expected(expected, box_cls, transpose: bool = ...): ... -def to_array(obj): ... -def assert_sp_array_equal( - left, - right, - check_dtype: bool = ..., - check_kind: bool = ..., - check_fill_value: bool = ..., - consolidate_block_indices: bool = ..., -) -> None: ... -def assert_contains_all(iterable, dic) -> None: ... -def assert_copy(iter1, iter2, **eql_kwargs) -> None: ... -def getCols(k): ... -def makeStringIndex(k: int = ..., name=...): ... -def makeUnicodeIndex(k: int = ..., name=...): ... -def makeCategoricalIndex(k: int = ..., n: int = ..., name=..., **kwargs): ... -def makeIntervalIndex(k: int = ..., name=..., **kwargs): ... -def makeBoolIndex(k: int = ..., name=...): ... -def makeIntIndex(k: int = ..., name=...): ... -def makeUIntIndex(k: int = ..., name=...): ... -def makeRangeIndex(k: int = ..., name=..., **kwargs): ... -def makeFloatIndex(k: int = ..., name=...): ... -def makeDateIndex(k: int = ..., freq: str = ..., name=..., **kwargs): ... -def makeTimedeltaIndex(k: int = ..., freq: str = ..., name=..., **kwargs): ... -def makePeriodIndex(k: int = ..., name=..., **kwargs): ... -def makeMultiIndex(k: int = ..., names=..., **kwargs): ... -def all_index_generator(k: int = ...) -> None: ... -def index_subclass_makers_generator() -> None: ... -def all_timeseries_index_generator(k: int = ...) -> None: ... -def makeFloatSeries(name=...): ... -def makeStringSeries(name=...): ... -def makeObjectSeries(name=...): ... -def getSeriesData(): ... -def makeTimeSeries(nper=..., freq: str = ..., name=...): ... -def makePeriodSeries(nper=..., name=...): ... -def getTimeSeriesData(nper=..., freq: str = ...): ... -def getPeriodData(nper=...): ... -def makeTimeDataFrame(nper=..., freq: str = ...): ... -def makeDataFrame(): ... -def getMixedTypeDict(): ... -def makeMixedDataFrame(): ... -def makePeriodFrame(nper=...): ... -def makeCustomIndex( - nentries, nlevels, prefix: str = ..., names: bool = ..., ndupe_l=..., idx_type=... -): ... -def makeCustomDataframe( - nrows, - ncols, - c_idx_names: bool = ..., - r_idx_names: bool = ..., - c_idx_nlevels: int = ..., - r_idx_nlevels: int = ..., - data_gen_f=..., - c_ndupe_l=..., - r_ndupe_l=..., - dtype=..., - c_idx_type=..., - r_idx_type=..., -): ... -def makeMissingCustomDataframe( - nrows, - ncols, - density: float = ..., - random_state=..., - c_idx_names: bool = ..., - r_idx_names: bool = ..., - c_idx_nlevels: int = ..., - r_idx_nlevels: int = ..., - data_gen_f=..., - c_ndupe_l=..., - r_ndupe_l=..., - dtype=..., - c_idx_type=..., - r_idx_type=..., -): ... 
-def makeMissingDataframe(density: float = ..., random_state=...): ... -def optional_args(decorator): ... -def can_connect(url, error_classes=...): ... -def network( - t, - url: str = ..., - raise_on_error=..., - check_before_test: bool = ..., - error_classes=..., - skip_errnos=..., - _skip_on_messages=..., -): ... - -with_connectivity_check = network - -def assert_produces_warning( - expected_warning=..., - filter_level: str = ..., - clear=..., - check_stacklevel: bool = ..., - raise_on_extra_warnings: bool = ..., -) -> None: ... -def with_csv_dialect(name, **kwargs) -> None: ... -def use_numexpr(use, min_elements=...) -> None: ... -def test_parallel(num_threads: int = ..., kwargs_list=...): ... -def set_timezone(tz: str): ... -def convert_rows_list_to_csv_str(rows_list: List[str]): ... diff --git a/typings/pandas/_typing.pyi b/typings/pandas/_typing.pyi deleted file mode 100644 index e19341b..0000000 --- a/typings/pandas/_typing.pyi +++ /dev/null @@ -1,159 +0,0 @@ -import datetime -from io import BufferedIOBase, RawIOBase, TextIOBase, TextIOWrapper -from mmap import mmap -import numpy as np -from numpy import typing as npt -import sys -from os import PathLike -from pathlib import Path -from typing import ( - Any, - AnyStr, - Callable, - Collection, - Dict, - Hashable, - IO, - List, - Mapping, - NewType, - Optional, - Protocol, - Sequence, - Tuple, - Type, - TypeVar, - Union, -) - -from pandas.core.generic import NDFrame -from pandas._libs.tslibs import Period, Timedelta as Timedelta, Timestamp as Timestamp -from pandas.core.arrays import ExtensionArray as ExtensionArray -from pandas.core.series import Series as Series -from pandas.core.frame import DataFrame as DataFrame -from pandas.core.indexes.base import Index as Index -from pandas.core.dtypes.dtypes import ExtensionDtype - -if sys.version_info >= (3, 8): - from typing import Literal -else: - from typing_extensions import Literal - -ArrayLike = Union[ExtensionArray, np.ndarray] -AnyArrayLike = Union[Index, Series] -PythonScalar = Union[str, int, float, bool, complex] -DatetimeLikeScalar = TypeVar("DatetimeLikeScalar", Period, Timestamp, Timedelta) -PandasScalar = Union[bytes, datetime.date, datetime.datetime, datetime.timedelta] -# Scalar = Union[PythonScalar, PandasScalar] - -# dtypes -NpDtype = Union[ - str, np.dtype[np.generic], Type[Union[str, float, int, complex, bool, object]] -] -Dtype = Union[ExtensionDtype, NpDtype] -AstypeArg = Union[ExtensionDtype, npt.DTypeLike] -# DtypeArg specifies all allowable dtypes in a functions its dtype argument -DtypeArg = Union[Dtype, Dict[Any, Dtype]] -DtypeObj = Union[np.dtype[np.generic], "ExtensionDtype"] - -# filenames and file-like-objects -AnyStr_cov = TypeVar("AnyStr_cov", str, bytes, covariant=True) -AnyStr_con = TypeVar("AnyStr_con", str, bytes, contravariant=True) - -class BaseBuffer(Protocol): ... -class ReadBuffer(BaseBuffer, Protocol[AnyStr_cov]): ... -class WriteBuffer(BaseBuffer, Protocol[AnyStr_cov]): ... 
- -FilePath = Union[str, PathLike[str]] - -Buffer = Union[IO[AnyStr], RawIOBase, BufferedIOBase, TextIOBase, TextIOWrapper, mmap] -FileOrBuffer = Union[str, Buffer[AnyStr]] -FilePathOrBuffer = Union["PathLike[str]", FileOrBuffer[AnyStr]] -FilePathOrBytesBuffer = Union[PathLike[str], WriteBuffer[bytes]] - -FrameOrSeries = TypeVar("FrameOrSeries", bound=NDFrame) -FrameOrSeriesUnion = Union[DataFrame, Series] -Axis = Union[str, int] -IndexLevel = Union[Hashable, Sequence[Hashable]] -Label = Optional[Hashable] -Level = Union[Hashable, int] -Ordered = Optional[bool] -JSONSerializable = Union[PythonScalar, List, Dict] -Axes = Collection -Renamer = Union[Mapping[Any, Label], Callable[[Any], Label]] -T = TypeVar("T") -FuncType = Callable[..., Any] -F = TypeVar("F", bound=FuncType) - -AggFuncTypeBase = Union[Callable, str] -AggFuncTypeDict = Dict[Hashable, Union[AggFuncTypeBase, List[AggFuncTypeBase]]] -AggFuncType = Union[ - AggFuncTypeBase, - List[AggFuncTypeBase], - AggFuncTypeDict, -] - -num = Union[int, float] -SeriesAxisType = Literal["index", 0] # Restricted subset of _AxisType for series -AxisType = Literal["columns", "index", 0, 1] -DtypeNp = TypeVar("DtypeNp", bound=np.dtype[np.generic]) -KeysArgType = Any -ListLike = TypeVar("ListLike", Sequence, np.ndarray, "Series") -StrLike = Union[str, np.str_] -Scalar = Union[ - str, - bytes, - datetime.date, - datetime.datetime, - datetime.timedelta, - bool, - int, - float, - complex, - Timestamp, - Timedelta, -] -# Refine the next 3 in 3.9 to use the specialized type. -np_ndarray_int64 = npt.NDArray[np.int64] -np_ndarray_bool = npt.NDArray[np.bool_] -np_ndarray_str = npt.NDArray[np.str_] -IndexType = Union[slice, np_ndarray_int64, Index, List[int], Series[int]] -MaskType = Union[Series[bool], np_ndarray_bool, List[bool]] -# Scratch types for generics -S1 = TypeVar( - "S1", - str, - bytes, - datetime.date, - datetime.datetime, - datetime.timedelta, - bool, - int, - float, - complex, - Timestamp, - Timedelta, - np.datetime64, -) -T1 = TypeVar( - "T1", str, int, np.int64, np.uint64, np.float64, float, np.dtype[np.generic] -) -T2 = TypeVar("T2", str, int) - -# Interval closed type - -IntervalClosedType = Literal["left", "right", "both", "neither"] - -DateTimeErrorChoices = Literal["ignore", "raise", "coerce"] - -# Shared by functions such as drop and astype -IgnoreRaise = Literal["ignore", "raise"] - -# for arbitrary kwargs passed during reading/writing files -StorageOptions = Optional[Dict[str, Any]] - -# compression keywords and compression -CompressionDict = Dict[str, Any] -CompressionOptions = Optional[ - Union[Literal["infer", "gzip", "bz2", "zip", "xz", "zstd"], CompressionDict] -] diff --git a/typings/pandas/compat/pickle_compat.pyi b/typings/pandas/compat/pickle_compat.pyi deleted file mode 100644 index c96ec9b..0000000 --- a/typings/pandas/compat/pickle_compat.pyi +++ /dev/null @@ -1,8 +0,0 @@ -import pickle as pkl -from pandas import DataFrame as DataFrame, Index as Index, Series as Series -from typing import Optional - -def load_reduce(self) -> None: ... -def load_newobj(self) -> None: ... -def load_newobj_ex(self) -> None: ... -def load(fh, encoding: Optional[str] = ..., is_verbose: bool = ...): ... 
diff --git a/typings/pandas/core/__init__.pyi b/typings/pandas/core/__init__.pyi deleted file mode 100644 index e69de29..0000000 diff --git a/typings/pandas/core/accessor.pyi b/typings/pandas/core/accessor.pyi deleted file mode 100644 index d2b7d78..0000000 --- a/typings/pandas/core/accessor.pyi +++ /dev/null @@ -1,18 +0,0 @@ -from typing import Any - -class DirNamesMixin: - def __dir__(self): ... - -class PandasDelegate: ... - -def delegate_names( - delegate: Any, accessors: Any, typ: str, overwrite: bool = ... -) -> Any: ... - -class CachedAccessor: - def __init__(self, name: str, accessor: Any) -> None: ... - def __get__(self, obj: Any, cls: Any): ... - -def register_dataframe_accessor(name: Any): ... -def register_series_accessor(name: Any): ... -def register_index_accessor(name: Any): ... diff --git a/typings/pandas/core/algorithms.pyi b/typings/pandas/core/algorithms.pyi deleted file mode 100644 index 59d6b9f..0000000 --- a/typings/pandas/core/algorithms.pyi +++ /dev/null @@ -1,75 +0,0 @@ -from __future__ import annotations -import numpy as np -from pandas.core.dtypes.generic import ABCIndex as ABCIndex -from pandas.core.indexes.base import Index -from typing import Any, Tuple, Union - -def unique(values): ... - -unique1d = unique - -def isin(comps, values) -> np.ndarray: ... -def factorize( - values: Any, - sort: bool = ..., - na_sentinel: int = ..., - size_hint: Union[int, None] = None, -) -> Tuple[np.ndarray, Union[np.ndarray, Index]]: ... -def value_counts( - values, - sort: bool = ..., - ascending: bool = ..., - normalize: bool = ..., - bins=..., - dropna: bool = ..., -) -> Series: ... -def duplicated(values, keep=...) -> np.ndarray: ... -def mode(values, dropna: bool = ...) -> Series: ... -def rank( - values, - axis: int = ..., - method: str = ..., - na_option: str = ..., - ascending: bool = ..., - pct: bool = ..., -): ... -def checked_add_with_arr(arr, b, arr_mask=..., b_mask=...): ... -def quantile(x, q, interpolation_method: str = ...): ... - -class SelectN: - obj = ... - n = ... - keep = ... - def __init__(self, obj, n: int, keep: str) -> None: ... - def nlargest(self): ... - def nsmallest(self): ... - @staticmethod - def is_valid_dtype_n_method(dtype) -> bool: ... - -class SelectNSeries(SelectN): - def compute(self, method): ... - -class SelectNFrame(SelectN): - columns = ... - def __init__(self, obj, n: int, keep: str, columns) -> None: ... - def compute(self, method): ... - -def take(arr, indices, axis: int = ..., allow_fill: bool = ..., fill_value=...): ... -def take_nd( - arr, indexer, axis: int = ..., out=..., fill_value=..., allow_fill: bool = ... -): ... - -take_1d = take_nd - -def take_2d_multi(arr, indexer, fill_value=...): ... -def searchsorted(arr, value, side: str = ..., sorter=...): ... -def diff(arr, n: int, axis: int = ..., stacklevel=...): ... -def safe_sort( - values, - codes=..., - na_sentinel: int = ..., - assume_unique: bool = ..., - verify: bool = ..., -) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]: ... 
- -from pandas import Series as Series diff --git a/typings/pandas/core/api.pyi b/typings/pandas/core/api.pyi deleted file mode 100644 index 6fc87ee..0000000 --- a/typings/pandas/core/api.pyi +++ /dev/null @@ -1,67 +0,0 @@ -from pandas._libs import NaT as NaT, Period as Period, Timedelta as Timedelta -from pandas._libs.tslibs import Timestamp as Timestamp -from pandas._libs.missing import NA as NA -from pandas.core.arrays.boolean import BooleanDtype as BooleanDtype -from pandas.core.arrays.integer import ( - Int16Dtype as Int16Dtype, - Int32Dtype as Int32Dtype, - Int64Dtype as Int64Dtype, - Int8Dtype as Int8Dtype, - UInt16Dtype as UInt16Dtype, - UInt32Dtype as UInt32Dtype, - UInt64Dtype as UInt64Dtype, - UInt8Dtype as UInt8Dtype, -) -from pandas.core.arrays.string_ import StringDtype as StringDtype -from pandas.core.construction import array as array -from pandas.core.dtypes.dtypes import ( - CategoricalDtype as CategoricalDtype, - DatetimeTZDtype as DatetimeTZDtype, - IntervalDtype as IntervalDtype, - PeriodDtype as PeriodDtype, -) -from pandas.core.dtypes.missing import ( - isna as isna, - isnull as isnull, - notna as notna, - notnull as notnull, -) -from pandas.core.indexes.datetimes import ( - bdate_range as bdate_range, - date_range as date_range, -) -from pandas.core.tools import to_datetime as to_datetime -from pandas.core.tools.numeric import to_numeric as to_numeric -from pandas.core.tools.timedeltas import to_timedelta as to_timedelta -from pandas.io.formats.format import set_eng_float_format as set_eng_float_format -from pandas.core.indexes.interval import ( - Interval as Interval, - interval_range as interval_range, -) -from pandas.core.indexes.period import period_range as period_range -from pandas.core.indexes.timedeltas import timedelta_range as timedelta_range -from pandas.core.arrays import Categorical as Categorical - -from pandas.core.groupby import Grouper as Grouper, NamedAgg as NamedAgg -from pandas.core.indexes.api import ( - CategoricalIndex as CategoricalIndex, - DatetimeIndex as DatetimeIndex, - Float64Index as Float64Index, - Index as Index, - Int64Index as Int64Index, - IntervalIndex as IntervalIndex, - MultiIndex as MultiIndex, - PeriodIndex as PeriodIndex, - RangeIndex as RangeIndex, - TimedeltaIndex as TimedeltaIndex, - UInt64Index as UInt64Index, -) -from pandas.core.indexing import IndexSlice as IndexSlice -from pandas.core.series import Series as Series -from pandas.core.frame import DataFrame as DataFrame -from pandas.tseries.offsets import DateOffset as DateOffset -from pandas.core.algorithms import ( - factorize as factorize, - unique as unique, - value_counts as value_counts, -) diff --git a/typings/pandas/core/arraylike.pyi b/typings/pandas/core/arraylike.pyi deleted file mode 100644 index 1cc740f..0000000 --- a/typings/pandas/core/arraylike.pyi +++ /dev/null @@ -1,42 +0,0 @@ -from typing import Any, Protocol, Tuple -from pandas._typing import FrameOrSeries, DataFrame - -class OpsMixinProtocol(Protocol): ... - -class OpsMixin: - def __eq__(self: OpsMixinProtocol, other: Any) -> DataFrame: ... # type: ignore - def __ne__(self: OpsMixinProtocol, other: Any) -> DataFrame: ... # type: ignore - def __lt__(self: OpsMixinProtocol, other: Any) -> DataFrame: ... - def __le__(self: OpsMixinProtocol, other: Any) -> DataFrame: ... - def __gt__(self: OpsMixinProtocol, other: Any) -> DataFrame: ... - def __ge__(self: OpsMixinProtocol, other: Any) -> DataFrame: ... 
- # ------------------------------------------------------------- - # Logical Methods - def __and__(self: OpsMixinProtocol, other: Any) -> DataFrame: ... - def __rand__(self: OpsMixinProtocol, other: Any) -> DataFrame: ... - def __or__(self: OpsMixinProtocol, other: Any) -> DataFrame: ... - def __ror__(self: OpsMixinProtocol, other: Any) -> DataFrame: ... - def __xor__(self: OpsMixinProtocol, other: Any) -> DataFrame: ... - def __rxor__(self: OpsMixinProtocol, other: Any) -> DataFrame: ... - # ------------------------------------------------------------- - # Arithmetic Methods - def __add__(self: OpsMixinProtocol, other: Any) -> DataFrame: ... - def __radd__(self: OpsMixinProtocol, other: Any) -> DataFrame: ... - def __sub__(self: OpsMixinProtocol, other: Any) -> DataFrame: ... - def __rsub__(self: OpsMixinProtocol, other: Any) -> DataFrame: ... - def __mul__(self: OpsMixinProtocol, other: Any) -> DataFrame: ... - def __rmul__(self: OpsMixinProtocol, other: Any) -> DataFrame: ... - def __truediv__(self: OpsMixinProtocol, other: Any) -> DataFrame: ... - def __rtruediv__(self: OpsMixinProtocol, other: Any) -> DataFrame: ... - def __floordiv__(self: OpsMixinProtocol, other: Any) -> DataFrame: ... - def __rfloordiv__(self: OpsMixinProtocol, other: Any) -> DataFrame: ... - def __mod__(self: OpsMixinProtocol, other: Any) -> DataFrame: ... - def __rmod__(self: OpsMixinProtocol, other: Any) -> DataFrame: ... - def __divmod__( - self: OpsMixinProtocol, other: DataFrame - ) -> Tuple[DataFrame, DataFrame]: ... - def __rdivmod__( - self: OpsMixinProtocol, other: DataFrame - ) -> Tuple[DataFrame, DataFrame]: ... - def __pow__(self: OpsMixinProtocol, other: Any) -> DataFrame: ... - def __rpow__(self: OpsMixinProtocol, other: Any) -> DataFrame: ... diff --git a/typings/pandas/core/arrays/__init__.pyi b/typings/pandas/core/arrays/__init__.pyi deleted file mode 100644 index fe6f01f..0000000 --- a/typings/pandas/core/arrays/__init__.pyi +++ /dev/null @@ -1,15 +0,0 @@ -from .base import ( - ExtensionArray as ExtensionArray, - ExtensionOpsMixin as ExtensionOpsMixin, - ExtensionScalarOpsMixin as ExtensionScalarOpsMixin, -) -from .boolean import BooleanArray as BooleanArray -from .categorical import Categorical as Categorical -from .datetimes import DatetimeArray as DatetimeArray -from .integer import IntegerArray as IntegerArray, integer_array as integer_array -from .interval import IntervalArray as IntervalArray -from .numpy_ import PandasArray as PandasArray, PandasDtype as PandasDtype -from .period import PeriodArray as PeriodArray, period_array as period_array -from .sparse import SparseArray as SparseArray -from .string_ import StringArray as StringArray -from .timedeltas import TimedeltaArray as TimedeltaArray diff --git a/typings/pandas/core/arrays/_arrow_utils.pyi b/typings/pandas/core/arrays/_arrow_utils.pyi deleted file mode 100644 index e69de29..0000000 diff --git a/typings/pandas/core/arrays/_ranges.pyi b/typings/pandas/core/arrays/_ranges.pyi deleted file mode 100644 index e69de29..0000000 diff --git a/typings/pandas/core/arrays/base.pyi b/typings/pandas/core/arrays/base.pyi deleted file mode 100644 index 2ed2247..0000000 --- a/typings/pandas/core/arrays/base.pyi +++ /dev/null @@ -1,47 +0,0 @@ -import numpy as np -from pandas._typing import ArrayLike as ArrayLike -from pandas.core.dtypes.dtypes import ExtensionDtype as ExtensionDtype -from pandas.core.dtypes.generic import ABCExtensionArray as ABCExtensionArray -from typing import Sequence, Tuple, Union - -def try_cast_to_ea(cls_or_instance, obj, 
dtype=...): ... - -class ExtensionArray: - def __getitem__(self, item) -> None: ... - def __setitem__(self, key: Union[int, slice, np.ndarray], value) -> None: ... - def __len__(self) -> int: ... - def __iter__(self): ... - def to_numpy(self, dtype=..., copy: bool = ..., na_value=...): ... - @property - def dtype(self) -> ExtensionDtype: ... - @property - def shape(self) -> Tuple[int, ...]: ... - @property - def ndim(self) -> int: ... - @property - def nbytes(self) -> int: ... - def astype(self, dtype, copy: bool = ...): ... - def isna(self) -> ArrayLike: ... - def argsort( - self, ascending: bool = ..., kind: str = ..., *args, **kwargs - ) -> np.ndarray: ... - def fillna(self, value=..., method=..., limit=...): ... - def dropna(self): ... - def shift( - self, periods: int = ..., fill_value: object = ... - ) -> ABCExtensionArray: ... - def unique(self): ... - def searchsorted(self, value, side: str = ..., sorter=...): ... - def factorize( - self, na_sentinel: int = ... - ) -> Tuple[np.ndarray, ABCExtensionArray]: ... - def repeat(self, repeats, axis=...): ... - def take( - self, indices: Sequence[int], allow_fill: bool = ..., fill_value=... - ) -> ABCExtensionArray: ... - def copy(self) -> ABCExtensionArray: ... - def view(self, dtype=...) -> Union[ABCExtensionArray, np.ndarray]: ... - def ravel(self, order=...) -> ABCExtensionArray: ... - -class ExtensionOpsMixin: ... -class ExtensionScalarOpsMixin(ExtensionOpsMixin): ... diff --git a/typings/pandas/core/arrays/boolean.pyi b/typings/pandas/core/arrays/boolean.pyi deleted file mode 100644 index f95dc27..0000000 --- a/typings/pandas/core/arrays/boolean.pyi +++ /dev/null @@ -1,32 +0,0 @@ -from __future__ import annotations -import numpy as np -from .masked import BaseMaskedArray as BaseMaskedArray -from pandas._typing import Scalar as Scalar -from pandas.core.dtypes.base import ExtensionDtype as ExtensionDtype -from typing import Type - -class BooleanDtype(ExtensionDtype): - name: str = ... - @property - def na_value(self) -> Scalar: ... - @property - def type(self) -> Type: ... - @property - def kind(self) -> str: ... - @classmethod - def construct_array_type(cls) -> Type[BooleanArray]: ... - def __from_arrow__(self, array): ... - -def coerce_to_array(values, mask=..., copy: bool = ...): ... - -class BooleanArray(BaseMaskedArray): - def __init__( - self, values: np.ndarray, mask: np.ndarray, copy: bool = ... - ) -> None: ... - @property - def dtype(self): ... - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ... - def __setitem__(self, key, value) -> None: ... - def astype(self, dtype, copy: bool = ...): ... - def any(self, skipna: bool = ..., **kwargs): ... - def all(self, skipna: bool = ..., **kwargs): ... 
diff --git a/typings/pandas/core/arrays/categorical.pyi b/typings/pandas/core/arrays/categorical.pyi deleted file mode 100644 index c11c95c..0000000 --- a/typings/pandas/core/arrays/categorical.pyi +++ /dev/null @@ -1,201 +0,0 @@ -import numpy as np -from pandas._typing import ( - ArrayLike as ArrayLike, - Dtype as Dtype, - Ordered as Ordered, - Scalar as Scalar, - np_ndarray_bool, -) -from pandas.core.accessor import PandasDelegate as PandasDelegate -from pandas.core.arrays.base import ExtensionArray as ExtensionArray -from pandas.core.base import NoNewAttributesMixin as NoNewAttributesMixin -from pandas.core.base import PandasObject as PandasObject -from pandas.core.dtypes.dtypes import CategoricalDtype as CategoricalDtype -from pandas.core.indexes.base import Index -from typing import List, Literal, Optional, Sequence, Union, overload - -def contains(cat, key, container): ... - -class Categorical(ExtensionArray, PandasObject): - __array_priority__: int = ... - def __init__( - self, - values: Sequence, - categories=..., - ordered: Optional[bool] = ..., - dtype: Optional[CategoricalDtype] = ..., - fastpath: bool = ..., - ) -> None: ... - @property - def categories(self): ... - @categories.setter - def categories(self, categories) -> None: ... - @property - def ordered(self) -> Ordered: ... - @property - def dtype(self) -> CategoricalDtype: ... - def astype(self, dtype: Dtype, copy: bool = ...) -> ArrayLike: ... - def size(self) -> int: ... - def itemsize(self) -> int: ... - def tolist(self) -> List[Scalar]: ... - to_list = ... - @classmethod - def from_codes( - cls, - codes: Sequence[int], - categories: Optional[Index] = ..., - ordered: Optional[bool] = ..., - dtype: Optional[CategoricalDtype] = ..., - fastpath: bool = ..., - ) -> Categorical: ... - @property - def codes(self) -> List[int]: ... - @overload - def set_ordered(self, value, inplace: Literal[True]) -> None: ... - @overload - def set_ordered(self, value, inplace: Literal[False]) -> Categorical: ... - @overload - def set_ordered(self, value, inplace: bool) -> Union[None, Categorical]: ... - @overload - def as_ordered(self, inplace: Literal[True]) -> None: ... - @overload - def as_ordered(self, inplace: Literal[False]) -> Categorical: ... - @overload - def as_ordered(self, inplace: bool) -> Union[None, Categorical]: ... - @overload - def as_unordered(self, inplace: Literal[True]) -> None: ... - @overload - def as_unordered(self, inplace: Literal[False] = ...) -> Categorical: ... - @overload - def set_categories( - self, new_categories, ordered=..., rename: bool = ..., *, inplace: Literal[True] - ) -> None: ... - @overload - def set_categories( - self, - new_categories, - ordered=..., - rename: bool = ..., - inplace: Literal[False] = ..., - ) -> Categorical: ... - @overload - def set_categories( - self, new_categories, ordered=..., rename: bool = ..., inplace: bool = ... - ) -> Union[None, Categorical]: ... - @overload - def rename_categories(self, new_categories, inplace: Literal[True]) -> None: ... - @overload - def rename_categories( - self, new_categories, inplace: Literal[False] = ... - ) -> Categorical: ... - @overload - def rename_categories( - self, new_categories, inplace: bool = ... - ) -> Union[None, Categorical]: ... - @overload - def reorder_categories( - self, new_categories, ordered=..., *, inplace: Literal[True] - ) -> None: ... - @overload - def reorder_categories( - self, new_categories, ordered=..., inplace: Literal[False] = ... - ) -> Categorical: ... 
- @overload - def reorder_categories( - self, new_categories, ordered=..., inplace: bool = ... - ) -> Union[None, Categorical]: ... - @overload - def add_categories(self, new_categories, inplace: Literal[True]) -> None: ... - @overload - def add_categories( - self, new_categories, inplace: Literal[False] = ... - ) -> Categorical: ... - @overload - def add_categories( - self, new_categories, inplace: bool = ... - ) -> Union[None, Categorical]: ... - @overload - def remove_categories(self, removals, inplace: Literal[True]) -> None: ... - @overload - def remove_categories( - self, removals, inplace: Literal[False] = ... - ) -> Categorical: ... - @overload - def remove_categories( - self, removals, inplace: bool = ... - ) -> Union[None, Categorical]: ... - @overload - def remove_unused_categories(self, inplace: Literal[True]) -> None: ... - @overload - def remove_unused_categories( - self, inplace: Literal[False] = ... - ) -> Categorical: ... - @overload - def remove_unused_categories( - self, inplace: bool = ... - ) -> Union[None, Categorical]: ... - def map(self, mapper): ... - def __eq__(self, other) -> bool: ... - def __ne__(self, other) -> bool: ... - def __lt__(self, other) -> bool: ... - def __gt__(self, other) -> bool: ... - def __le__(self, other) -> bool: ... - def __ge__(self, other) -> bool: ... - @property - def shape(self): ... - def shift(self, periods, fill_value=...): ... - def __array__(self, dtype=...) -> np.ndarray: ... - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ... - @property - def T(self): ... - @property - def nbytes(self) -> int: ... - def memory_usage(self, deep: bool = ...): ... - def searchsorted(self, value, side: str = ..., sorter=...): ... - def isna(self) -> np_ndarray_bool: ... - def isnull(self) -> np_ndarray_bool: ... - def notna(self) -> np_ndarray_bool: ... - def notnull(self) -> np_ndarray_bool: ... - def put(self, *args, **kwargs) -> None: ... - def dropna(self): ... - def value_counts(self, dropna: bool = ...): ... - def check_for_ordered(self, op) -> None: ... - def argsort(self, ascending: bool = ..., kind: str = ..., *args, **kwargs): ... - def sort_values( - self, inplace: bool = ..., ascending: bool = ..., na_position: str = ... - ): ... - def view(self, dtype=...): ... - def to_dense(self): ... - def fillna(self, value=..., method=..., limit=...): ... - def take(self, indexer, allow_fill: bool = ..., fill_value=...): ... - def take_nd(self, indexer, allow_fill: bool = ..., fill_value=...): ... - def __len__(self) -> int: ... - def __iter__(self): ... - def __contains__(self, key) -> bool: ... - def __getitem__(self, key): ... - def __setitem__(self, key, value) -> None: ... - def min(self, skipna: bool = ...): ... - def max(self, skipna: bool = ...): ... - def mode(self, dropna: bool = ...): ... - def unique(self): ... - def equals(self, other): ... - def is_dtype_equal(self, other): ... - def describe(self): ... - def repeat(self, repeats, axis=...): ... - def isin(self, values): ... - @overload - def replace(self, to_replace, value, inplace: Literal[True]) -> None: ... - @overload - def replace(self, to_replace, value, inplace: Literal[False]) -> Categorical: ... - @overload - def replace( - self, to_replace, value, inplace: bool = ... - ) -> Union[None, Categorical]: ... - -class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin): - def __init__(self, data) -> None: ... - @property - def codes(self) -> Sequence[int]: ... - -def factorize_from_iterable(values): ... 
-def factorize_from_iterables(iterables): ... diff --git a/typings/pandas/core/arrays/datetimelike.pyi b/typings/pandas/core/arrays/datetimelike.pyi deleted file mode 100644 index 739d8a9..0000000 --- a/typings/pandas/core/arrays/datetimelike.pyi +++ /dev/null @@ -1,84 +0,0 @@ -import numpy as np -from pandas._libs import NaT as NaT, NaTType as NaTType, Timestamp as Timestamp -from pandas.core.arrays.base import ( - ExtensionArray as ExtensionArray, - ExtensionOpsMixin as ExtensionOpsMixin, -) -from typing import Sequence, Union - -class AttributesMixin: ... - -class DatelikeOps: - def strftime(self, date_format): ... - -class TimelikeOps: - def round(self, freq, ambiguous: str = ..., nonexistent: str = ...): ... - def floor(self, freq, ambiguous: str = ..., nonexistent: str = ...): ... - def ceil(self, freq, ambiguous: str = ..., nonexistent: str = ...): ... - -class DatetimeLikeArrayMixin(ExtensionOpsMixin, AttributesMixin, ExtensionArray): - @property - def ndim(self) -> int: ... - @property - def shape(self): ... - def reshape(self, *args, **kwargs): ... - def ravel(self, *args, **kwargs): ... - def __iter__(self): ... - @property - def asi8(self) -> np.ndarray: ... - @property - def nbytes(self): ... - def __array__(self, dtype=...) -> np.ndarray: ... - @property - def size(self) -> int: ... - def __len__(self) -> int: ... - def __getitem__(self, key): ... - def __setitem__(self, key: Union[int, Sequence[int], Sequence[bool], slice], value) -> None: ... # type: ignore[override] - def astype(self, dtype, copy: bool = ...): ... - def view(self, dtype=...): ... - def unique(self): ... - def take(self, indices, allow_fill: bool = ..., fill_value=...): ... - def copy(self): ... - def shift(self, periods: int = ..., fill_value=..., axis: int = ...): ... - def searchsorted(self, value, side: str = ..., sorter=...): ... - def repeat(self, repeats, *args, **kwargs): ... - def value_counts(self, dropna: bool = ...): ... - def map(self, mapper): ... - def isna(self): ... - def fillna(self, value=..., method=..., limit=...): ... - @property - def freq(self): ... - @freq.setter - def freq(self, value) -> None: ... - @property - def freqstr(self): ... - @property - def inferred_freq(self): ... - @property - def resolution(self): ... - __pow__ = ... - __rpow__ = ... - __mul__ = ... - __rmul__ = ... - __truediv__ = ... - __rtruediv__ = ... - __floordiv__ = ... - __rfloordiv__ = ... - __mod__ = ... - __rmod__ = ... - __divmod__ = ... - __rdivmod__ = ... - def __add__(self, other): ... - def __radd__(self, other): ... - def __sub__(self, other): ... - def __rsub__(self, other): ... - def __iadd__(self, other): ... - def __isub__(self, other): ... - def min(self, axis=..., skipna: bool = ..., *args, **kwargs): ... - def max(self, axis=..., skipna: bool = ..., *args, **kwargs): ... - def mean(self, skipna: bool = ...): ... - -def validate_periods(periods): ... -def validate_endpoints(closed): ... -def validate_inferred_freq(freq, inferred_freq, freq_infer): ... -def maybe_infer_freq(freq): ... diff --git a/typings/pandas/core/arrays/datetimes.pyi b/typings/pandas/core/arrays/datetimes.pyi deleted file mode 100644 index 9671b74..0000000 --- a/typings/pandas/core/arrays/datetimes.pyi +++ /dev/null @@ -1,84 +0,0 @@ -import numpy as np -from pandas.core.arrays import datetimelike as dtl -from pandas.core.dtypes.dtypes import DatetimeTZDtype as DatetimeTZDtype -from typing import Union - -def tz_to_dtype(tz): ... 
- -class DatetimeArray(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps, dtl.DatelikeOps): - __array_priority__: int = ... - def __init__(self, values, dtype=..., freq=..., copy: bool = ...) -> None: ... - # ignore in dtype() is from the pandas source - @property - def dtype(self) -> Union[np.dtype, DatetimeTZDtype]: ... # type: ignore[override] - @property - def tz(self): ... - @tz.setter - def tz(self, value) -> None: ... - @property - def tzinfo(self): ... - @property - def is_normalized(self): ... - def __array__(self, dtype=...) -> np.ndarray: ... - def __iter__(self): ... - def astype(self, dtype, copy: bool = ...): ... - def tz_convert(self, tz): ... - def tz_localize(self, tz, ambiguous: str = ..., nonexistent: str = ...): ... - def to_pydatetime(self): ... - def normalize(self): ... - def to_period(self, freq=...): ... - def to_perioddelta(self, freq): ... - def month_name(self, locale=...): ... - def day_name(self, locale=...): ... - @property - def time(self): ... - @property - def timetz(self): ... - @property - def date(self): ... - year = ... - month = ... - day = ... - hour = ... - minute = ... - second = ... - microsecond = ... - nanosecond = ... - weekofyear = ... - week = ... - dayofweek = ... - weekday = ... - dayofyear = ... - quarter = ... - days_in_month = ... - daysinmonth = ... - is_month_start = ... - is_month_end = ... - is_quarter_start = ... - is_quarter_end = ... - is_year_start = ... - is_year_end = ... - is_leap_year = ... - def to_julian_date(self): ... - -def sequence_to_dt64ns( - data, - dtype=..., - copy: bool = ..., - tz=..., - dayfirst: bool = ..., - yearfirst: bool = ..., - ambiguous: str = ..., -): ... -def objects_to_datetime64ns( - data, - dayfirst, - yearfirst, - utc: bool = ..., - errors: str = ..., - require_iso8601: bool = ..., - allow_object: bool = ..., -): ... -def maybe_convert_dtype(data, copy): ... -def maybe_infer_tz(tz, inferred_tz): ... -def validate_tz_from_dtype(dtype, tz): ... diff --git a/typings/pandas/core/arrays/integer.pyi b/typings/pandas/core/arrays/integer.pyi deleted file mode 100644 index afc1281..0000000 --- a/typings/pandas/core/arrays/integer.pyi +++ /dev/null @@ -1,37 +0,0 @@ -from .masked import BaseMaskedArray as BaseMaskedArray -from pandas.core.dtypes.base import ExtensionDtype as ExtensionDtype -from typing import Type - -class _IntegerDtype(ExtensionDtype): - name: str - base = ... - type: Type - na_value = ... - def is_signed_integer(self): ... - def is_unsigned_integer(self): ... - def numpy_dtype(self): ... - def kind(self): ... - def itemsize(self): ... - @classmethod - def construct_array_type(cls): ... - def __from_arrow__(self, array): ... - -def integer_array(values, dtype=..., copy: bool = ...): ... -def safe_cast(values, dtype, copy): ... -def coerce_to_array(values, dtype, mask=..., copy: bool = ...): ... - -class IntegerArray(BaseMaskedArray): - def dtype(self): ... - def __init__(self, values, mask, copy: bool = ...) -> None: ... - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ... - def __setitem__(self, key, value) -> None: ... - def astype(self, dtype, copy: bool = ...): ... - -class Int8Dtype: ... -class Int16Dtype: ... -class Int32Dtype: ... -class Int64Dtype: ... -class UInt8Dtype: ... -class UInt16Dtype: ... -class UInt32Dtype: ... -class UInt64Dtype: ... 
diff --git a/typings/pandas/core/arrays/interval.pyi b/typings/pandas/core/arrays/interval.pyi deleted file mode 100644 index 85c8028..0000000 --- a/typings/pandas/core/arrays/interval.pyi +++ /dev/null @@ -1,69 +0,0 @@ -from __future__ import annotations -import numpy as np -from pandas._libs.interval import ( - Interval as Interval, - IntervalMixin as IntervalMixin, -) -from pandas.core.arrays.base import ExtensionArray as ExtensionArray -from pandas.core.dtypes.generic import ABCExtensionArray as ABCExtensionArray -from pandas._typing import Axis, Scalar, Index as Index -from typing import Optional - -class IntervalArray(IntervalMixin, ExtensionArray): - ndim: int = ... - can_hold_na: bool = ... - def __new__( - cls, data, closed=..., dtype=..., copy: bool = ..., verify_integrity: bool = ... - ): ... - @classmethod - def from_breaks(cls, breaks, closed: str = ..., copy: bool = ..., dtype=...): ... - @classmethod - def from_arrays( - cls, left, right, closed: str = ..., copy: bool = ..., dtype=... - ): ... - @classmethod - def from_tuples(cls, data, closed: str = ..., copy: bool = ..., dtype=...): ... - def __iter__(self): ... - def __len__(self) -> int: ... - def __getitem__(self, value): ... - def __setitem__(self, key, value) -> None: ... - def __eq__(self, other): ... - def __ne__(self, other): ... - def fillna(self, value=..., method=..., limit=...): ... - @property - def dtype(self): ... - def astype(self, dtype, copy: bool = ...): ... - def copy(self): ... - def isna(self): ... - @property - def nbytes(self) -> int: ... - @property - def size(self) -> int: ... - def shift( - self, periods: int = ..., fill_value: object = ... - ) -> ABCExtensionArray: ... - def take( - self, indices, allow_fill: bool = ..., fill_value=..., axis=..., **kwargs - ): ... - def value_counts(self, dropna: bool = ...): ... - @property - def left(self) -> Index: ... - @property - def right(self) -> Index: ... - @property - def closed(self) -> bool: ... - def set_closed(self, closed): ... - @property - def length(self) -> Index: ... - @property - def mid(self) -> Index: ... - @property - def is_non_overlapping_monotonic(self) -> bool: ... - def __array__(self, dtype=...) -> np.ndarray: ... - def __arrow_array__(self, type=...): ... - def to_tuples(self, na_tuple: bool = ...): ... - def repeat(self, repeats, axis: Optional[Axis] = ...): ... - def contains(self, other): ... - def overlaps(self, other: Interval) -> bool: ... - -def maybe_convert_platform_interval(values): ... diff --git a/typings/pandas/core/arrays/masked.pyi b/typings/pandas/core/arrays/masked.pyi deleted file mode 100644 index 11ca483..0000000 --- a/typings/pandas/core/arrays/masked.pyi +++ /dev/null @@ -1,22 +0,0 @@ -import numpy as np -from pandas._typing import Scalar as Scalar -from pandas.core.arrays import ( - ExtensionArray as ExtensionArray, - ExtensionOpsMixin as ExtensionOpsMixin, -) - -class BaseMaskedArray(ExtensionArray, ExtensionOpsMixin): - def __getitem__(self, item): ... - def __iter__(self): ... - def __len__(self) -> int: ... - def __invert__(self): ... - def to_numpy(self, dtype=..., copy=..., na_value: Scalar = ...): ... - __array_priority__: int = ... - def __array__(self, dtype=...) -> np.ndarray: ... - def __arrow_array__(self, type=...): ... - def isna(self): ... - @property - def nbytes(self) -> int: ... - def take(self, indexer, allow_fill: bool = ..., fill_value=...): ... - def copy(self): ... - def value_counts(self, dropna: bool = ...): ... 
diff --git a/typings/pandas/core/arrays/numpy_.pyi b/typings/pandas/core/arrays/numpy_.pyi deleted file mode 100644 index 0917d8e..0000000 --- a/typings/pandas/core/arrays/numpy_.pyi +++ /dev/null @@ -1,116 +0,0 @@ -import numpy as np -from numpy.lib.mixins import NDArrayOperatorsMixin -from pandas.core.arrays.base import ( - ExtensionArray as ExtensionArray, - ExtensionOpsMixin as ExtensionOpsMixin, -) -from pandas.core.dtypes.dtypes import ExtensionDtype as ExtensionDtype -from typing import Union - -class PandasDtype(ExtensionDtype): - def __init__(self, dtype) -> None: ... - @property - def numpy_dtype(self): ... - @property - def name(self): ... - @property - def type(self): ... - @classmethod - def construct_from_string(cls, string): ... - @classmethod - def construct_array_type(cls): ... - @property - def kind(self): ... - @property - def itemsize(self): ... - -class PandasArray(ExtensionArray, ExtensionOpsMixin, NDArrayOperatorsMixin): - __array_priority__: int = ... - def __init__( - self, values: Union[np.ndarray, PandasArray], copy: bool = ... - ) -> None: ... - @property - def dtype(self): ... - def __array__(self, dtype=...) -> np.ndarray: ... - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ... - def __getitem__(self, item): ... - def __setitem__(self, key, value) -> None: ... - def __len__(self) -> int: ... - @property - def nbytes(self) -> int: ... - def isna(self): ... - def fillna(self, value=..., method=..., limit=...): ... - def take(self, indices, allow_fill: bool = ..., fill_value=...): ... - def copy(self): ... - def unique(self): ... - def any(self, axis=..., out=..., keepdims: bool = ..., skipna: bool = ...): ... - def all(self, axis=..., out=..., keepdims: bool = ..., skipna: bool = ...): ... - def min(self, axis=..., out=..., keepdims: bool = ..., skipna: bool = ...): ... - def max(self, axis=..., out=..., keepdims: bool = ..., skipna: bool = ...): ... - def sum( - self, - axis=..., - dtype=..., - out=..., - keepdims: bool = ..., - initial=..., - skipna: bool = ..., - min_count: int = ..., - ): ... - def prod( - self, - axis=..., - dtype=..., - out=..., - keepdims: bool = ..., - initial=..., - skipna: bool = ..., - min_count: int = ..., - ): ... - def mean( - self, axis=..., dtype=..., out=..., keepdims: bool = ..., skipna: bool = ... - ): ... - def median( - self, - axis=..., - out=..., - overwrite_input: bool = ..., - keepdims: bool = ..., - skipna: bool = ..., - ): ... - def std( - self, - axis=..., - dtype=..., - out=..., - ddof: int = ..., - keepdims: bool = ..., - skipna: bool = ..., - ): ... - def var( - self, - axis=..., - dtype=..., - out=..., - ddof: int = ..., - keepdims: bool = ..., - skipna: bool = ..., - ): ... - def sem( - self, - axis=..., - dtype=..., - out=..., - ddof: int = ..., - keepdims: bool = ..., - skipna: bool = ..., - ): ... - def kurt( - self, axis=..., dtype=..., out=..., keepdims: bool = ..., skipna: bool = ... - ): ... - def skew( - self, axis=..., dtype=..., out=..., keepdims: bool = ..., skipna: bool = ... - ): ... - def to_numpy(self, dtype=..., copy: bool = ..., na_value=...): ... - def searchsorted(self, value, side: str = ..., sorter=...): ... - def __invert__(self): ... 
diff --git a/typings/pandas/core/arrays/period.pyi b/typings/pandas/core/arrays/period.pyi deleted file mode 100644 index 7b07eaa..0000000 --- a/typings/pandas/core/arrays/period.pyi +++ /dev/null @@ -1,47 +0,0 @@ -import numpy as np -from pandas._libs.tslibs import Timestamp -from pandas._libs.tslibs.period import Period as Period -from pandas.core.arrays import datetimelike as dtl -from pandas.tseries.offsets import Tick as Tick -from typing import Optional, Sequence, Union - -class PeriodArray(dtl.DatetimeLikeArrayMixin, dtl.DatelikeOps): - __array_priority__: int = ... - def __init__(self, values, freq=..., dtype=..., copy: bool = ...) -> None: ... - def dtype(self): ... - def __array__(self, dtype=...) -> np.ndarray: ... - def __arrow_array__(self, type=...): ... - year: int = ... - month: int = ... - day: int = ... - hour: int = ... - minute: int = ... - second: int = ... - weekofyear: int = ... - week: int = ... - dayofweek: int = ... - weekday: int = ... - dayofyear: int = ... - day_of_year = ... - quarter: int = ... - qyear: int = ... - days_in_month: int = ... - daysinmonth: int = ... - @property - def is_leap_year(self) -> bool: ... - @property - def start_time(self) -> Timestamp: ... - @property - def end_time(self) -> Timestamp: ... - def to_timestamp(self, freq: Optional[str] = ..., how: str = ...) -> Timestamp: ... - def asfreq(self, freq: Optional[str] = ..., how: str = ...) -> Period: ... - def astype(self, dtype, copy: bool = ...): ... - -def raise_on_incompatible(left, right): ... -def period_array( - data: Sequence[Optional[Period]], - freq: Optional[Union[str, Tick]] = ..., - copy: bool = ..., -) -> PeriodArray: ... -def validate_dtype_freq(dtype, freq): ... -def dt64arr_to_periodarr(data, freq, tz=...): ... diff --git a/typings/pandas/core/arrays/sparse/__init__.pyi b/typings/pandas/core/arrays/sparse/__init__.pyi deleted file mode 100644 index 7c05783..0000000 --- a/typings/pandas/core/arrays/sparse/__init__.pyi +++ /dev/null @@ -1,10 +0,0 @@ -from .accessor import ( - SparseAccessor as SparseAccessor, - SparseFrameAccessor as SparseFrameAccessor, -) -from .array import ( - BlockIndex as BlockIndex, - IntIndex as IntIndex, - SparseArray as SparseArray, -) -from .dtype import SparseDtype as SparseDtype diff --git a/typings/pandas/core/arrays/sparse/accessor.pyi b/typings/pandas/core/arrays/sparse/accessor.pyi deleted file mode 100644 index b00bee4..0000000 --- a/typings/pandas/core/arrays/sparse/accessor.pyi +++ /dev/null @@ -1,27 +0,0 @@ -from pandas.compat._optional import ( - import_optional_dependency as import_optional_dependency, -) -from pandas.core.accessor import ( - PandasDelegate as PandasDelegate, - delegate_names as delegate_names, -) -from pandas.core.arrays.sparse.array import SparseArray as SparseArray -from pandas.core.arrays.sparse.dtype import SparseDtype as SparseDtype -from pandas.core.dtypes.cast import find_common_type as find_common_type - -class BaseAccessor: - def __init__(self, data=...) -> None: ... - -class SparseAccessor(BaseAccessor, PandasDelegate): - @classmethod - def from_coo(cls, A, dense_index: bool = ...): ... - def to_coo(self, row_levels=..., column_levels=..., sort_labels: bool = ...): ... - def to_dense(self): ... - -class SparseFrameAccessor(BaseAccessor, PandasDelegate): - @classmethod - def from_spmatrix(cls, data, index=..., columns=...): ... - def to_dense(self): ... - def to_coo(self): ... - @property - def density(self) -> float: ... 
diff --git a/typings/pandas/core/arrays/sparse/array.pyi b/typings/pandas/core/arrays/sparse/array.pyi deleted file mode 100644 index 2fb1e96..0000000 --- a/typings/pandas/core/arrays/sparse/array.pyi +++ /dev/null @@ -1,106 +0,0 @@ -import numpy as np -from pandas._libs.sparse import ( - BlockIndex as BlockIndex, - IntIndex as IntIndex, - SparseIndex as SparseIndex, -) -from pandas._libs.tslibs import NaT as NaT -from pandas.core.arrays import ( - ExtensionArray as ExtensionArray, - ExtensionOpsMixin as ExtensionOpsMixin, -) -from pandas.core.arrays.sparse.dtype import SparseDtype as SparseDtype -from pandas.core.base import PandasObject as PandasObject -from pandas.core.construction import sanitize_array as sanitize_array -from pandas.core.dtypes.cast import ( - astype_nansafe as astype_nansafe, - construct_1d_arraylike_from_scalar as construct_1d_arraylike_from_scalar, - find_common_type as find_common_type, - infer_dtype_from_scalar as infer_dtype_from_scalar, -) -from pandas.core.dtypes.common import ( - is_array_like as is_array_like, - is_bool_dtype as is_bool_dtype, - is_datetime64_any_dtype as is_datetime64_any_dtype, - is_dtype_equal as is_dtype_equal, - is_integer as is_integer, - is_object_dtype as is_object_dtype, - is_scalar as is_scalar, - is_string_dtype as is_string_dtype, - pandas_dtype as pandas_dtype, -) -from pandas.core.dtypes.generic import ( - ABCIndexClass as ABCIndexClass, - ABCSeries as ABCSeries, - ABCSparseArray as ABCSparseArray, -) -from pandas.core.dtypes.missing import ( - isna as isna, - na_value_for_dtype as na_value_for_dtype, - notna as notna, -) -from pandas.core.indexers import check_array_indexer as check_array_indexer -from pandas.core.missing import interpolate_2d as interpolate_2d -from pandas.core.ops.common import unpack_zerodim_and_defer as unpack_zerodim_and_defer -from pandas.errors import PerformanceWarning as PerformanceWarning - -class SparseArray(PandasObject, ExtensionArray, ExtensionOpsMixin): - def __init__( - self, - data, - sparse_index=..., - index=..., - fill_value=..., - kind: str = ..., - dtype=..., - copy: bool = ..., - ) -> None: ... - @classmethod - def from_spmatrix(cls, data): ... - def __array__(self, dtype=..., copy=...) -> np.ndarray: ... - def __setitem__(self, key, value) -> None: ... - @property - def sp_index(self): ... - @property - def sp_values(self): ... - @property - def dtype(self): ... - @property - def fill_value(self): ... - @fill_value.setter - def fill_value(self, value) -> None: ... - @property - def kind(self) -> str: ... - def __len__(self) -> int: ... - @property - def nbytes(self) -> int: ... - @property - def density(self): ... - @property - def npoints(self) -> int: ... - def isna(self): ... - def fillna(self, value=..., method=..., limit=...): ... - def shift(self, periods: int = ..., fill_value=...): ... - def unique(self): ... - def factorize(self, na_sentinel: int = ...): ... - def value_counts(self, dropna: bool = ...): ... - def __getitem__(self, key): ... - def take(self, indices, allow_fill: bool = ..., fill_value=...): ... - def searchsorted(self, v, side: str = ..., sorter=...): ... - def copy(self): ... - def astype(self, dtype=..., copy: bool = ...): ... - def map(self, mapper): ... - def to_dense(self): ... - def nonzero(self): ... - def all(self, axis=..., *args, **kwargs): ... - def any(self, axis: int = ..., *args, **kwargs): ... - def sum(self, axis: int = ..., *args, **kwargs): ... - def cumsum(self, axis: int = ..., *args, **kwargs): ... 
- def mean(self, axis: int = ..., *args, **kwargs): ... - def transpose(self, *axes): ... - @property - def T(self): ... - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ... - def __abs__(self): ... - -def make_sparse(arr, kind: str = ..., fill_value=..., dtype=..., copy: bool = ...): ... diff --git a/typings/pandas/core/arrays/sparse/dtype.pyi b/typings/pandas/core/arrays/sparse/dtype.pyi deleted file mode 100644 index a78a47f..0000000 --- a/typings/pandas/core/arrays/sparse/dtype.pyi +++ /dev/null @@ -1,44 +0,0 @@ -# merged types from pylance - -from pandas._typing import Dtype as Dtype, Scalar -from pandas.core.dtypes.base import ExtensionDtype as ExtensionDtype -from pandas.core.dtypes.cast import astype_nansafe as astype_nansafe -from pandas.core.dtypes.common import ( - is_bool_dtype as is_bool_dtype, - is_object_dtype as is_object_dtype, - is_scalar as is_scalar, - is_string_dtype as is_string_dtype, - pandas_dtype as pandas_dtype, -) -from pandas.core.dtypes.dtypes import ( - register_extension_dtype as register_extension_dtype, -) -from pandas.core.dtypes.missing import ( - isna as isna, - na_value_for_dtype as na_value_for_dtype, -) -from typing import Optional - -class SparseDtype(ExtensionDtype): - def __init__( - self, dtype: Dtype = ..., fill_value: Optional[Scalar] = ... - ) -> None: ... - def __hash__(self): ... - def __eq__(self, other) -> bool: ... - @property - def fill_value(self): ... - @property - def kind(self): ... - @property - def type(self): ... - @property - def subtype(self): ... - @property - def name(self): ... - @classmethod - def construct_array_type(cls): ... - @classmethod - def construct_from_string(cls, string): ... - @classmethod - def is_dtype(cls, dtype): ... - def update_dtype(self, dtype): ... diff --git a/typings/pandas/core/arrays/sparse/scipy_sparse.pyi b/typings/pandas/core/arrays/sparse/scipy_sparse.pyi deleted file mode 100644 index c6fc6b1..0000000 --- a/typings/pandas/core/arrays/sparse/scipy_sparse.pyi +++ /dev/null @@ -1,2 +0,0 @@ -from pandas.core.indexes.api import Index as Index, MultiIndex as MultiIndex -from pandas.core.series import Series as Series diff --git a/typings/pandas/core/arrays/string_.pyi b/typings/pandas/core/arrays/string_.pyi deleted file mode 100644 index 8144321..0000000 --- a/typings/pandas/core/arrays/string_.pyi +++ /dev/null @@ -1,20 +0,0 @@ -from pandas.core.arrays import PandasArray as PandasArray -from pandas.core.dtypes.base import ExtensionDtype as ExtensionDtype -from typing import Type - -class StringDtype(ExtensionDtype): - name: str = ... - na_value = ... - @property - def type(self) -> Type: ... - @classmethod - def construct_array_type(cls) -> Type[StringArray]: ... - def __from_arrow__(self, array): ... - -class StringArray(PandasArray): - def __init__(self, values, copy: bool = ...) -> None: ... - def __arrow_array__(self, type=...): ... - def __setitem__(self, key, value) -> None: ... - def fillna(self, value=..., method=..., limit=...): ... - def astype(self, dtype, copy: bool = ...): ... - def value_counts(self, dropna: bool = ...): ... diff --git a/typings/pandas/core/arrays/timedeltas.pyi b/typings/pandas/core/arrays/timedeltas.pyi deleted file mode 100644 index f8a846a..0000000 --- a/typings/pandas/core/arrays/timedeltas.pyi +++ /dev/null @@ -1,62 +0,0 @@ -from datetime import timedelta -from pandas.core.arrays import datetimelike as dtl -from typing import Sequence - -class TimedeltaArray(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps): - __array_priority__: int = ... 
- @property - def dtype(self): ... - def __init__(self, values, dtype=..., freq=..., copy: bool = ...) -> None: ... - def astype(self, dtype, copy: bool = ...): ... - def sum( - self, - axis=..., - dtype=..., - out=..., - keepdims: bool = ..., - initial=..., - skipna: bool = ..., - min_count: int = ..., - ): ... - def std( - self, - axis=..., - dtype=..., - out=..., - ddof: int = ..., - keepdims: bool = ..., - skipna: bool = ..., - ): ... - def median( - self, - axis=..., - out=..., - overwrite_input: bool = ..., - keepdims: bool = ..., - skipna: bool = ..., - ): ... - def __mul__(self, other): ... - __rmul__ = ... - def __truediv__(self, other): ... - def __rtruediv__(self, other): ... - def __floordiv__(self, other): ... - def __rfloordiv__(self, other): ... - def __mod__(self, other): ... - def __rmod__(self, other): ... - def __divmod__(self, other): ... - def __rdivmod__(self, other): ... - def __neg__(self): ... - def __pos__(self): ... - def __abs__(self): ... - def total_seconds(self) -> int: ... - def to_pytimedelta(self) -> Sequence[timedelta]: ... - days: int = ... - seconds: int = ... - microseconds: int = ... - nanoseconds: int = ... - @property - def components(self) -> int: ... - -def sequence_to_td64ns(data, copy: bool = ..., unit: str = ..., errors: str = ...): ... -def ints_to_td64ns(data, unit: str = ...): ... -def objects_to_td64ns(data, unit: str = ..., errors: str = ...): ... diff --git a/typings/pandas/core/base.pyi b/typings/pandas/core/base.pyi deleted file mode 100644 index 2399146..0000000 --- a/typings/pandas/core/base.pyi +++ /dev/null @@ -1,96 +0,0 @@ -from __future__ import annotations -import numpy as np - -from pandas.core.arrays.categorical import Categorical -from pandas._typing import ( - Scalar, - SeriesAxisType, - T1 as T1, - np_ndarray_int64, - np_ndarray_str, - Index as Index, - Series as Series, - DataFrame as DataFrame, -) - -from pandas.core.accessor import DirNamesMixin as DirNamesMixin -from pandas.core.arrays import ExtensionArray as ExtensionArray -from typing import Callable, Generic, List, Literal, Optional, Tuple, Union, overload - -class PandasObject(DirNamesMixin): - def __sizeof__(self) -> int: ... - -class NoNewAttributesMixin: - def __setattr__(self, key, value) -> None: ... - -class GroupByError(Exception): ... -class DataError(GroupByError): ... -class SpecificationError(GroupByError): ... - -class SelectionMixin: - def ndim(self) -> int: ... - def __getitem__(self, key): ... - def aggregate( - self, func: Optional[Callable] = ..., *args, **kwargs - ) -> Union[Scalar, Series, DataFrame]: ... - agg = aggregate - -class ShallowMixin: ... - -class IndexOpsMixin: - __array_priority__: int = ... - def transpose(self, *args, **kwargs) -> IndexOpsMixin: ... - @property - def T(self) -> IndexOpsMixin: ... - @property - def shape(self) -> tuple: ... - @property - def ndim(self) -> int: ... - def item(self): ... - @property - def nbytes(self) -> int: ... - @property - def size(self) -> int: ... - @property - def array(self) -> ExtensionArray: ... - def to_numpy(self) -> np.ndarray: ... - @property - def empty(self) -> bool: ... - def max(self, axis=..., skipna: bool = ..., **kwargs): ... - def min(self, axis=..., skipna: bool = ..., **kwargs): ... - def argmax( - self, axis: Optional[SeriesAxisType] = ..., skipna: bool = ..., *args, **kwargs - ) -> np.ndarray: ... - def argmin( - self, axis: Optional[SeriesAxisType] = ..., skipna: bool = ..., *args, **kwargs - ) -> np.ndarray: ... - def tolist(self) -> List: ... 
- def to_list(self) -> List: ... - def __iter__(self): ... - def hasnans(self) -> bool: ... - def value_counts( - self, - normalize: bool = ..., - sort: bool = ..., - ascending: bool = ..., - bins=..., - dropna: bool = ..., - ): ... - def nunique(self, dropna: bool = ...) -> int: ... - @property - def is_unique(self) -> bool: ... - @property - def is_monotonic(self) -> bool: ... - @property - def is_monotonic_decreasing(self) -> bool: ... - @property - def is_monotonic_increasing(self) -> bool: ... - def factorize( - self, sort: bool = ..., na_sentinel: int = ... - ) -> Tuple[np.ndarray, Union[np.ndarray, Index, Categorical]]: ... - def searchsorted( - self, value, side: str = ..., sorter=... - ) -> Union[int, List[int]]: ... - def drop_duplicates( - self, keep: Literal["first", "last", False] = ... - ) -> IndexOpsMixin: ... diff --git a/typings/pandas/core/computation/align.pyi b/typings/pandas/core/computation/align.pyi deleted file mode 100644 index 47830ca..0000000 --- a/typings/pandas/core/computation/align.pyi +++ /dev/null @@ -1,2 +0,0 @@ -def align_terms(terms): ... -def reconstruct_object(typ, obj, axes, dtype): ... diff --git a/typings/pandas/core/computation/common.pyi b/typings/pandas/core/computation/common.pyi deleted file mode 100644 index 1b9ef7e..0000000 --- a/typings/pandas/core/computation/common.pyi +++ /dev/null @@ -1,3 +0,0 @@ -def result_type_many(*arrays_and_dtypes): ... - -class NameResolutionError(NameError): ... diff --git a/typings/pandas/core/computation/engines.pyi b/typings/pandas/core/computation/engines.pyi deleted file mode 100644 index 9df5876..0000000 --- a/typings/pandas/core/computation/engines.pyi +++ /dev/null @@ -1,19 +0,0 @@ -import abc - -class NumExprClobberingError(NameError): ... - -class AbstractEngine(metaclass=abc.ABCMeta): - has_neg_frac: bool = ... - expr = ... - aligned_axes = ... - result_type = ... - def __init__(self, expr) -> None: ... - def convert(self) -> str: ... - def evaluate(self) -> object: ... - -class NumExprEngine(AbstractEngine): - has_neg_frac: bool = ... - -class PythonEngine(AbstractEngine): - has_neg_frac: bool = ... - def evaluate(self): ... diff --git a/typings/pandas/core/computation/expressions.pyi b/typings/pandas/core/computation/expressions.pyi deleted file mode 100644 index 90d1243..0000000 --- a/typings/pandas/core/computation/expressions.pyi +++ /dev/null @@ -1,6 +0,0 @@ -def set_use_numexpr(v: bool = ...) -> None: ... -def set_numexpr_threads(n=...) -> None: ... -def evaluate(op, op_str, a, b, use_numexpr: bool = ...): ... -def where(cond, a, b, use_numexpr: bool = ...): ... -def set_test_mode(v: bool = ...) -> None: ... -def get_test_result(): ... diff --git a/typings/pandas/core/computation/parsing.pyi b/typings/pandas/core/computation/parsing.pyi deleted file mode 100644 index 1020ba4..0000000 --- a/typings/pandas/core/computation/parsing.pyi +++ /dev/null @@ -1,12 +0,0 @@ -import tokenize -from typing import Iterator, Tuple - -BACKTICK_QUOTED_STRING: int - -def create_valid_python_identifier(name: str) -> str: ... -def clean_backtick_quoted_toks(tok: Tuple[int, str]) -> Tuple[int, str]: ... -def clean_column_name(name: str) -> str: ... -def tokenize_backtick_quoted_string( - token_generator: Iterator[tokenize.TokenInfo], source: str, string_start: int -) -> Tuple[int, str]: ... -def tokenize_string(source: str) -> Iterator[Tuple[int, str]]: ... 
diff --git a/typings/pandas/core/config_init.pyi b/typings/pandas/core/config_init.pyi deleted file mode 100644 index 773f3f6..0000000 --- a/typings/pandas/core/config_init.pyi +++ /dev/null @@ -1,62 +0,0 @@ -use_bottleneck_doc: str = ... - -def use_bottleneck_cb(key) -> None: ... - -use_numexpr_doc: str = ... - -def use_numexpr_cb(key) -> None: ... - -pc_precision_doc: str = ... -pc_colspace_doc: str = ... -pc_max_rows_doc: str = ... -pc_min_rows_doc: str = ... -pc_max_cols_doc: str = ... -pc_max_categories_doc: str = ... -pc_max_info_cols_doc: str = ... -pc_nb_repr_h_doc: str = ... -pc_pprint_nest_depth: str = ... -pc_multi_sparse_doc: str = ... -float_format_doc: str = ... -max_colwidth_doc: str = ... -colheader_justify_doc: str = ... -pc_expand_repr_doc: str = ... -pc_show_dimensions_doc: str = ... -pc_east_asian_width_doc: str = ... -pc_ambiguous_as_wide_doc: str = ... -pc_latex_repr_doc: str = ... -pc_table_schema_doc: str = ... -pc_html_border_doc: str = ... -pc_html_use_mathjax_doc: str = ... -pc_width_doc: str = ... -pc_chop_threshold_doc: str = ... -pc_max_seq_items: str = ... -pc_max_info_rows_doc: str = ... -pc_large_repr_doc: str = ... -pc_memory_usage_doc: str = ... -pc_latex_escape: str = ... -pc_latex_longtable: str = ... -pc_latex_multicolumn: str = ... -pc_latex_multicolumn_format: str = ... -pc_latex_multirow: str = ... - -def table_schema_cb(key) -> None: ... -def is_terminal() -> bool: ... - -max_cols: int = ... -tc_sim_interactive_doc: str = ... -use_inf_as_null_doc: str = ... -use_inf_as_na_doc: str = ... - -def use_inf_as_na_cb(key) -> None: ... - -chained_assignment: str = ... -reader_engine_doc: str = ... -writer_engine_doc: str = ... -parquet_engine_doc: str = ... -plotting_backend_doc: str = ... - -def register_plotting_backend_cb(key) -> None: ... - -register_converter_doc: str = ... - -def register_converter_cb(key) -> None: ... 
diff --git a/typings/pandas/core/dtypes/__init__.pyi b/typings/pandas/core/dtypes/__init__.pyi deleted file mode 100644 index e69de29..0000000 diff --git a/typings/pandas/core/dtypes/api.pyi b/typings/pandas/core/dtypes/api.pyi deleted file mode 100644 index 470210d..0000000 --- a/typings/pandas/core/dtypes/api.pyi +++ /dev/null @@ -1,43 +0,0 @@ -from pandas.core.dtypes.common import ( - is_array_like as is_array_like, - is_bool as is_bool, - is_bool_dtype as is_bool_dtype, - is_categorical as is_categorical, - is_categorical_dtype as is_categorical_dtype, - is_complex as is_complex, - is_complex_dtype as is_complex_dtype, - is_datetime64_any_dtype as is_datetime64_any_dtype, - is_datetime64_dtype as is_datetime64_dtype, - is_datetime64_ns_dtype as is_datetime64_ns_dtype, - is_datetime64tz_dtype as is_datetime64tz_dtype, - is_dict_like as is_dict_like, - is_dtype_equal as is_dtype_equal, - is_extension_array_dtype as is_extension_array_dtype, - is_extension_type as is_extension_type, - is_file_like as is_file_like, - is_float as is_float, - is_float_dtype as is_float_dtype, - is_hashable as is_hashable, - is_int64_dtype as is_int64_dtype, - is_integer as is_integer, - is_integer_dtype as is_integer_dtype, - is_interval as is_interval, - is_interval_dtype as is_interval_dtype, - is_iterator as is_iterator, - is_list_like as is_list_like, - is_named_tuple as is_named_tuple, - is_number as is_number, - is_numeric_dtype as is_numeric_dtype, - is_object_dtype as is_object_dtype, - is_period_dtype as is_period_dtype, - is_re as is_re, - is_re_compilable as is_re_compilable, - is_scalar as is_scalar, - is_signed_integer_dtype as is_signed_integer_dtype, - is_sparse as is_sparse, - is_string_dtype as is_string_dtype, - is_timedelta64_dtype as is_timedelta64_dtype, - is_timedelta64_ns_dtype as is_timedelta64_ns_dtype, - is_unsigned_integer_dtype as is_unsigned_integer_dtype, - pandas_dtype as pandas_dtype, -) diff --git a/typings/pandas/core/dtypes/base.pyi b/typings/pandas/core/dtypes/base.pyi deleted file mode 100644 index 0b0d5e8..0000000 --- a/typings/pandas/core/dtypes/base.pyi +++ /dev/null @@ -1,23 +0,0 @@ -from typing import List, Optional, Type -from pandas._typing import ExtensionArray - -class ExtensionDtype: - def __eq__(self, other) -> bool: ... - def __hash__(self) -> int: ... - def __ne__(self, other) -> bool: ... - @property - def na_value(self): ... - @property - def type(self) -> Type: ... - @property - def kind(self) -> str: ... - @property - def name(self) -> str: ... - @property - def names(self) -> Optional[List[str]]: ... - @classmethod - def construct_array_type(cls) -> Type[ExtensionArray]: ... - @classmethod - def construct_from_string(cls, string: str): ... - @classmethod - def is_dtype(cls, dtype) -> bool: ... diff --git a/typings/pandas/core/dtypes/cast.pyi b/typings/pandas/core/dtypes/cast.pyi deleted file mode 100644 index 3848323..0000000 --- a/typings/pandas/core/dtypes/cast.pyi +++ /dev/null @@ -1,42 +0,0 @@ -import numpy as np -from pandas._typing import Dtype as Dtype - -def maybe_convert_platform(values): ... -def is_nested_object(obj) -> bool: ... -def maybe_downcast_to_dtype(result, dtype): ... -def maybe_downcast_numeric(result, dtype, do_round: bool = ...): ... -def maybe_upcast_putmask(result: np.ndarray, mask: np.ndarray, other): ... -def maybe_promote(dtype, fill_value=...): ... -def infer_dtype_from(val, pandas_dtype: bool = ...): ... -def infer_dtype_from_scalar(val, pandas_dtype: bool = ...): ... 
-def infer_dtype_from_array(arr, pandas_dtype: bool = ...): ... -def maybe_infer_dtype_type(element): ... -def maybe_upcast(values, fill_value=..., dtype=..., copy: bool = ...): ... -def invalidate_string_dtypes(dtype_set) -> None: ... -def coerce_indexer_dtype(indexer, categories): ... -def coerce_to_dtypes(result, dtypes): ... -def astype_nansafe(arr, dtype, copy: bool = ..., skipna: bool = ...): ... -def maybe_convert_objects(values: np.ndarray, convert_numeric: bool = ...): ... -def soft_convert_objects( - values: np.ndarray, - datetime: bool = ..., - numeric: bool = ..., - timedelta: bool = ..., - coerce: bool = ..., - copy: bool = ..., -): ... -def convert_dtypes( - input_array, - convert_string: bool = ..., - convert_integer: bool = ..., - convert_boolean: bool = ..., -) -> Dtype: ... -def maybe_castable(arr) -> bool: ... -def maybe_infer_to_datetimelike(value, convert_dates: bool = ...): ... -def maybe_cast_to_datetime(value, dtype, errors: str = ...): ... -def find_common_type(types): ... -def cast_scalar_to_array(shape, value, dtype=...): ... -def construct_1d_arraylike_from_scalar(value, length: int, dtype): ... -def construct_1d_object_array_from_listlike(values): ... -def construct_1d_ndarray_preserving_na(values, dtype=..., copy: bool = ...): ... -def maybe_cast_to_integer_array(arr, dtype, copy: bool = ...): ... diff --git a/typings/pandas/core/dtypes/common.pyi b/typings/pandas/core/dtypes/common.pyi deleted file mode 100644 index 4a06888..0000000 --- a/typings/pandas/core/dtypes/common.pyi +++ /dev/null @@ -1,75 +0,0 @@ -import numpy as np -from pandas._typing import ArrayLike as ArrayLike -from pandas.core.dtypes.inference import ( - is_array_like as is_array_like, - is_bool as is_bool, - is_complex as is_complex, - is_dict_like as is_dict_like, - is_file_like as is_file_like, - is_float as is_float, - is_hashable as is_hashable, - is_integer as is_integer, - is_interval as is_interval, - is_iterator as is_iterator, - is_list_like as is_list_like, - is_named_tuple as is_named_tuple, - is_number as is_number, - is_re as is_re, - is_re_compilable as is_re_compilable, - is_scalar as is_scalar, -) -from typing import Callable, Union - -ensure_float64 = ... -ensure_float32 = ... - -def ensure_float(arr): ... - -ensure_uint64 = ... -ensure_int64 = ... -ensure_int32 = ... -ensure_int16 = ... -ensure_int8 = ... -ensure_platform_int = ... -ensure_object = ... - -def ensure_str(value) -> str: ... -def ensure_categorical(arr): ... -def ensure_python_int(value: Union[int, np.integer]) -> int: ... -def classes(*klasses) -> Callable: ... -def classes_and_not_datetimelike(*klasses) -> Callable: ... -def is_object_dtype(arr_or_dtype) -> bool: ... -def is_sparse(arr) -> bool: ... -def is_scipy_sparse(arr) -> bool: ... -def is_categorical(arr) -> bool: ... -def is_datetime64_dtype(arr_or_dtype) -> bool: ... -def is_datetime64tz_dtype(arr_or_dtype) -> bool: ... -def is_timedelta64_dtype(arr_or_dtype) -> bool: ... -def is_period_dtype(arr_or_dtype) -> bool: ... -def is_interval_dtype(arr_or_dtype) -> bool: ... -def is_categorical_dtype(arr_or_dtype) -> bool: ... -def is_string_dtype(arr_or_dtype) -> bool: ... -def is_period_arraylike(arr) -> bool: ... -def is_datetime_arraylike(arr) -> bool: ... -def is_dtype_equal(source, target) -> bool: ... -def is_any_int_dtype(arr_or_dtype) -> bool: ... -def is_integer_dtype(arr_or_dtype) -> bool: ... -def is_signed_integer_dtype(arr_or_dtype) -> bool: ... -def is_unsigned_integer_dtype(arr_or_dtype) -> bool: ... 
-def is_int64_dtype(arr_or_dtype) -> bool: ... -def is_datetime64_any_dtype(arr_or_dtype) -> bool: ... -def is_datetime64_ns_dtype(arr_or_dtype) -> bool: ... -def is_timedelta64_ns_dtype(arr_or_dtype) -> bool: ... -def is_datetime_or_timedelta_dtype(arr_or_dtype) -> bool: ... -def is_numeric_v_string_like(a, b): ... -def is_datetimelike_v_numeric(a, b): ... -def needs_i8_conversion(arr_or_dtype) -> bool: ... -def is_numeric_dtype(arr_or_dtype) -> bool: ... -def is_string_like_dtype(arr_or_dtype) -> bool: ... -def is_float_dtype(arr_or_dtype) -> bool: ... -def is_bool_dtype(arr_or_dtype) -> bool: ... -def is_extension_type(arr) -> bool: ... -def is_extension_array_dtype(arr_or_dtype) -> bool: ... -def is_complex_dtype(arr_or_dtype) -> bool: ... -def infer_dtype_from_object(dtype): ... -def pandas_dtype(dtype): ... diff --git a/typings/pandas/core/dtypes/concat.pyi b/typings/pandas/core/dtypes/concat.pyi deleted file mode 100644 index dc68280..0000000 --- a/typings/pandas/core/dtypes/concat.pyi +++ /dev/null @@ -1,7 +0,0 @@ -def get_dtype_kinds(l): ... -def concat_compat(to_concat, axis: int = ...): ... -def concat_categorical(to_concat, axis: int = ...): ... -def union_categoricals( - to_union, sort_categories: bool = ..., ignore_order: bool = ... -): ... -def concat_datetime(to_concat, axis: int = ..., typs=...): ... diff --git a/typings/pandas/core/dtypes/dtypes.pyi b/typings/pandas/core/dtypes/dtypes.pyi deleted file mode 100644 index 7047acc..0000000 --- a/typings/pandas/core/dtypes/dtypes.pyi +++ /dev/null @@ -1,130 +0,0 @@ -from pandas._typing import Ordered as Ordered -from .base import ExtensionDtype as ExtensionDtype -from pandas._libs.tslibs import ( - NaT as NaT, - Period as Period, - Timestamp as Timestamp, -) # , timezones as timezones -from pandas.core.indexes.base import Index -from typing import Any, Optional, Sequence, Tuple, Type, Union - -_str = str - -def register_extension_dtype(cls: Type[ExtensionDtype]) -> Type[ExtensionDtype]: ... - -class Registry: - dtypes = ... - def __init__(self) -> None: ... - def register(self, dtype: Type[ExtensionDtype]) -> None: ... - def find( - self, dtype: Union[Type[ExtensionDtype], str] - ) -> Optional[Type[ExtensionDtype]]: ... - -registry = ... - -class PandasExtensionDtype(ExtensionDtype): - subdtype = ... - str: Optional[_str] = ... - num: int = ... - shape: Tuple[int, ...] = ... - itemsize: int = ... - base = ... - isbuiltin: int = ... - isnative: int = ... - def __hash__(self) -> int: ... - @classmethod - def reset_cache(cls) -> None: ... - -class CategoricalDtypeType(type): ... - -class CategoricalDtype(PandasExtensionDtype, ExtensionDtype): - name: _str = ... - type: Type[CategoricalDtypeType] = ... - kind: _str = ... - str: _str = ... - base = ... - def __init__( - self, categories: Optional[Sequence[Any]] = ..., ordered: Ordered = ... - ) -> None: ... - @classmethod - def construct_from_string(cls, string: _str) -> CategoricalDtype: ... - def __hash__(self) -> int: ... - def __eq__(self, other) -> bool: ... - @classmethod - def construct_array_type(cls): ... - @staticmethod - def validate_ordered(ordered: Ordered) -> None: ... - @staticmethod - def validate_categories(categories, fastpath: bool = ...): ... - def update_dtype( - self, dtype: Union[_str, CategoricalDtype] - ) -> CategoricalDtype: ... - @property - def categories(self) -> Index: ... - @property - def ordered(self) -> Ordered: ... - -class DatetimeTZDtype(PandasExtensionDtype): - type: Type[Timestamp] = ... - kind: _str = ... - str: _str = ... - num: int = ... 
- base = ... - na_value = ... - def __init__(self, unit: _str = ..., tz=...) -> None: ... - @property - def unit(self): ... - @property - def tz(self): ... - @classmethod - def construct_array_type(cls): ... - @classmethod - def construct_from_string(cls, string: _str): ... - @property - def name(self) -> _str: ... - def __hash__(self) -> int: ... - def __eq__(self, other) -> bool: ... - -class PeriodDtype(PandasExtensionDtype): - type: Type[Period] = ... - kind: _str = ... - str: _str = ... - base = ... - num: int = ... - def __new__(cls, freq=...): ... - @property - def freq(self): ... - @classmethod - def construct_from_string(cls, string: _str): ... - @property - def name(self) -> _str: ... - @property - def na_value(self): ... - def __hash__(self) -> int: ... - def __eq__(self, other) -> bool: ... - @classmethod - def is_dtype(cls, dtype) -> bool: ... - @classmethod - def construct_array_type(cls): ... - def __from_arrow__(self, array): ... - -class IntervalDtype(PandasExtensionDtype): - name: _str = ... - kind: _str = ... - str: _str = ... - base = ... - num: int = ... - def __new__(cls, subtype=...): ... - @property - def subtype(self): ... - @classmethod - def construct_array_type(cls): ... - @classmethod - def construct_from_string(cls, string: _str): ... - @property - def type(self): ... - def __hash__(self) -> int: ... - def __eq__(self, other) -> bool: ... - @classmethod - def is_dtype(cls, dtype) -> bool: ... - def __from_arrow__(self, array): ... diff --git a/typings/pandas/core/dtypes/generic.pyi b/typings/pandas/core/dtypes/generic.pyi deleted file mode 100644 index cdcc2a9..0000000 --- a/typings/pandas/core/dtypes/generic.pyi +++ /dev/null @@ -1,31 +0,0 @@ -def create_pandas_abc_type(name, attr, comp): ... - -class ABCIndex: ... -class ABCInt64Index: ... -class ABCUInt64Index: ... -class ABCRangeIndex: ... -class ABCFloat64Index: ... -class ABCMultiIndex: ... -class ABCDatetimeIndex: ... -class ABCTimedeltaIndex: ... -class ABCPeriodIndex: ... -class ABCCategoricalIndex: ... -class ABCIntervalIndex: ... -class ABCIndexClass: ... -class ABCSeries: ... -class ABCDataFrame: ... -class ABCSparseArray: ... -class ABCCategorical: ... -class ABCDatetimeArray: ... -class ABCTimedeltaArray: ... -class ABCPeriodArray: ... -class ABCPeriod: ... -class ABCDateOffset: ... -class ABCInterval: ... -class ABCExtensionArray: ... -class ABCPandasArray: ... - -class _ABCGeneric(type): - def __instancecheck__(cls, inst) -> bool: ... - -class ABCGeneric: ... diff --git a/typings/pandas/core/dtypes/inference.pyi b/typings/pandas/core/dtypes/inference.pyi deleted file mode 100644 index c20e63a..0000000 --- a/typings/pandas/core/dtypes/inference.pyi +++ /dev/null @@ -1,19 +0,0 @@ -def is_bool(obj) -> bool: ... -def is_integer(obj) -> bool: ... -def is_float(obj) -> bool: ... -def is_complex(obj) -> bool: ... -def is_scalar(obj) -> bool: ... -def is_decimal(obj) -> bool: ... -def is_interval(obj) -> bool: ... -def is_list_like(obj) -> bool: ... -def is_number(obj) -> bool: ... -def is_iterator(obj) -> bool: ... -def is_file_like(obj) -> bool: ... -def is_re(obj) -> bool: ... -def is_re_compilable(obj) -> bool: ... -def is_array_like(obj) -> bool: ... -def is_nested_list_like(obj) -> bool: ... -def is_dict_like(obj) -> bool: ... -def is_named_tuple(obj) -> bool: ... -def is_hashable(obj) -> bool: ... -def is_sequence(obj) -> bool: ... 
diff --git a/typings/pandas/core/dtypes/missing.pyi b/typings/pandas/core/dtypes/missing.pyi deleted file mode 100644 index 590cf77..0000000 --- a/typings/pandas/core/dtypes/missing.pyi +++ /dev/null @@ -1,39 +0,0 @@ -import numpy as np -from typing import Union, overload, List -from pandas._typing import ( - Scalar as Scalar, - Series as Series, - Index as Index, - ArrayLike as ArrayLike, - DataFrame as DataFrame, -) - -isposinf_scalar = ... -isneginf_scalar = ... - -@overload -def isna(obj: DataFrame) -> DataFrame: ... -@overload -def isna(obj: Series) -> Series[bool]: ... -@overload -def isna(obj: Union[Index, List, ArrayLike]) -> np.ndarray: ... -@overload -def isna(obj: Scalar) -> bool: ... - -isnull = isna - -@overload -def notna(obj: DataFrame) -> DataFrame: ... -@overload -def notna(obj: Series) -> Series[bool]: ... -@overload -def notna(obj: Union[Index, List, ArrayLike]) -> np.ndarray: ... -@overload -def notna(obj: Scalar) -> bool: ... - -notnull = notna - -def array_equivalent(left, right, strict_nan: bool = ...) -> bool: ... -def na_value_for_dtype(dtype, compat: bool = ...): ... -def remove_na_arraylike(arr): ... -def is_valid_nat_for_dtype(obj, dtype) -> bool: ... diff --git a/typings/pandas/core/frame.pyi b/typings/pandas/core/frame.pyi deleted file mode 100644 index 3630547..0000000 --- a/typings/pandas/core/frame.pyi +++ /dev/null @@ -1,2253 +0,0 @@ -from __future__ import annotations -import datetime -import numpy as np -import sys - -from pandas.core.indexing import _iLocIndexer, _LocIndexer -from matplotlib.axes import Axes as PlotAxes -from pandas._typing import ( - Axes as Axes, - Axis as Axis, - FilePathOrBuffer as FilePathOrBuffer, - FilePathOrBytesBuffer as FilePathOrBytesBuffer, - Level as Level, - Renamer as Renamer, -) -from pandas._typing import ( - num, - SeriesAxisType, - AxisType, - Dtype, - DtypeNp, - Label, - StrLike, - Scalar as Scalar, - IndexType, - MaskType, - S1, - T as TType, -) -from pandas._typing import ( - ArrayLike as ArrayLike, - np_ndarray_str, - np_ndarray_bool, - Timestamp as Timestamp, - Timedelta as Timedelta, -) -from pandas._typing import IndexLevel as IndexLevel, IgnoreRaise as IgnoreRaise -from pandas.core.arraylike import OpsMixin -from pandas.core.generic import NDFrame as NDFrame -from pandas.core.groupby.generic import DataFrameGroupBy as DataFrameGroupBy -from pandas.core.groupby.grouper import Grouper -from pandas.core.indexes.base import Index as Index -from pandas.core.indexes.multi import MultiIndex as MultiIndex -from pandas.core.resample import Resampler -from pandas.core.window.rolling import Rolling, Window -from pandas.core.series import Series as Series -from pandas.io.formats import console as console, format as fmt -from pandas.io.formats.style import Styler as Styler -from pandas.plotting import PlotAccessor -from typing import ( - Any, - Callable, - Dict, - Hashable, - Iterable, - Iterator, - List, - Mapping, - Optional, - Sequence, - Set, - Tuple, - Type, - Union, - overload, - Pattern, -) - -if sys.version_info >= (3, 8): - from typing import Literal -else: - from typing_extensions import Literal - -import numpy as _np -import datetime as _dt - -_str = str -_bool = bool - -class _iLocIndexerFrame(_iLocIndexer): - @overload - def __getitem__(self, idx: Tuple[int, int]) -> Scalar: ... - @overload - def __getitem__(self, idx: int) -> Series: ... - @overload - def __getitem__(self, idx: Tuple[Union[IndexType, MaskType], int]) -> Series: ... 
- @overload - def __getitem__(self, idx: Tuple[int, Union[IndexType, MaskType]]) -> Series: ... - @overload - def __getitem__( - self, - idx: Union[ - IndexType, - MaskType, - Tuple[Union[IndexType, MaskType], Union[IndexType, MaskType]], - ], - ) -> DataFrame: ... - def __setitem__( - self, - idx: Union[ - int, - IndexType, - Tuple[int, int], - Tuple[IndexType, int], - Tuple[IndexType, IndexType], - Tuple[int, IndexType], - ], - value: Union[float, Series, DataFrame, str], - ) -> None: ... - -class _LocIndexerFrame(_LocIndexer): - @overload - def __getitem__( - self, - idx: Union[ - Union[IndexType, MaskType], - List[StrLike], - Callable, - Tuple[ - Union[ - IndexType, - MaskType, - slice, - List[StrLike], - Tuple[Union[str, int, slice], ...], - ], - Union[List[StrLike], slice, Series[bool], Callable], - ], - ], - ) -> DataFrame: ... - @overload - def __getitem__( - self, - idx: Tuple[Union[StrLike, Tuple[StrLike, ...]], Callable, StrLike], - ) -> Scalar: ... - @overload - def __getitem__( - self, - idx: Union[int, StrLike], - ) -> Series: ... - @overload - def __getitem__( - self, - idx: Tuple[Union[IndexType, MaskType], Callable, StrLike], - ) -> Series: ... - @overload - def __getitem__( - self, - idx: Tuple[Tuple[slice, ...], Callable, StrLike], - ) -> Series: ... - @overload - def __setitem__( - self, - idx: Union[ - MaskType, - StrLike, - Tuple[Union[MaskType, Index, Sequence[Scalar], Scalar, slice], ...], - ], - value: Union[S1, ArrayLike, Series, DataFrame], - ) -> None: ... - @overload - def __setitem__( - self, - idx: Tuple[Tuple[Union[StrLike, Scalar, slice], ...], StrLike], - value: Union[S1, ArrayLike, Series[S1], List], - ) -> None: ... - @overload - def __setitem__( - self, - idx: Tuple[Tuple[Union[StrLike, Scalar, slice], ...], StrLike], - value: Union[S1, ArrayLike, Series[S1], List], - ) -> None: ... - -class DataFrame(NDFrame, OpsMixin): - _ListLike = Union[ - np.ndarray, - List[Dtype], - Dict[_str, _np.ndarray], - Sequence, - Index, - Series, - ] - def __new__( - cls, - data: Optional[Union[_ListLike, DataFrame, Dict[Any, Any]]] = ..., - index: Optional[Union[Index, _ListLike]] = ..., - columns: Optional[_ListLike] = ..., - dtype=..., - copy: _bool = ..., - ) -> DataFrame: ... - @property - def axes(self) -> List[Index]: ... - @property - def shape(self) -> Tuple[int, int]: ... - @property - def style(self) -> Styler: ... - def items(self) -> Iterable[Tuple[Hashable, Series]]: ... - def iteritems(self) -> Iterable[Tuple[Label, Series]]: ... - def iterrows(self) -> Iterable[Tuple[Label, Series]]: ... - def itertuples(self, index: _bool = ..., name: Optional[str] = ...): ... - def __len__(self) -> int: ... - @overload - def dot(self, other: Union[DataFrame, ArrayLike]) -> DataFrame: ... - @overload - def dot(self, other: Series) -> Series: ... - def __matmul__(self, other): ... - def __rmatmul__(self, other): ... - @classmethod - def from_dict(cls, data, orient=..., dtype=..., columns=...) -> DataFrame: ... - def to_numpy( - self, - dtype: Optional[Union[Type[DtypeNp], Dtype]] = ..., - copy: _bool = ..., - na_value: Optional[Any] = ..., - ) -> _np.ndarray: ... - @overload - def to_dict( - self, - orient: Literal["records"], - into: Hashable = ..., - ) -> List[Dict[_str, Any]]: ... - @overload - def to_dict( - self, - orient: Literal["dict", "list", "series", "split", "index"] = ..., - into: Hashable = ..., - ) -> Dict[_str, Any]: ... 
- def to_gbq( - self, - destination_table, - project_id=..., - chunksize=..., - reauth=..., - if_exists=..., - auth_local_webserver=..., - table_schema=..., - location=..., - progress_bar=..., - credentials=..., - ) -> None: ... - @classmethod - def from_records( - cls, data, index=..., exclude=..., columns=..., coerce_float=..., nrows=... - ) -> DataFrame: ... - def to_records( - self, - index: _bool = ..., - columnDTypes: Optional[Union[_str, Dict]] = ..., - indexDTypes: Optional[Union[_str, Dict]] = ..., - ) -> np.recarray: ... - def to_stata( - self, - path: FilePathOrBuffer, - convert_dates: Optional[Dict] = ..., - write_index: _bool = ..., - byteorder: Optional[Union[_str, Literal["<", ">", "little", "big"]]] = ..., - time_stamp=..., - data_label: Optional[_str] = ..., - variable_labels: Optional[Dict] = ..., - version: int = ..., - convert_strl: Optional[List[_str]] = ..., - ) -> None: ... - def to_feather(self, path: FilePathOrBuffer, **kwargs) -> None: ... - @overload - def to_markdown( - self, buf: Optional[FilePathOrBuffer], mode: Optional[_str] = ..., **kwargs - ) -> None: ... - @overload - def to_markdown(self, mode: Optional[_str] = ..., **kwargs) -> _str: ... - @overload - def to_parquet( - self, - path: FilePathOrBytesBuffer, - *, - engine: Union[_str, Literal["auto", "pyarrow", "fastparquet"]] = ..., - compression: Union[_str, Literal["snappy", "gzip", "brotli"]] = ..., - index: Optional[_bool] = ..., - partition_cols: Optional[List] = ..., - **kwargs, - ) -> None: ... - @overload - def to_parquet( - self, - *, - path: None = ..., - engine: Union[_str, Literal["auto", "pyarrow", "fastparquet"]] = ..., - compression: Union[_str, Literal["snappy", "gzip", "brotli"]] = ..., - index: Optional[_bool] = ..., - partition_cols: Optional[List] = ..., - **kwargs, - ) -> bytes: ... - @overload - def to_html( - self, - buf: Optional[FilePathOrBuffer], - columns: Optional[Sequence[_str]] = ..., - col_space: Optional[Union[int, List[int], Dict[Union[_str, int], int]]] = ..., - header: _bool = ..., - index: _bool = ..., - na_rep: _str = ..., - formatters=..., - float_format=..., - sparsify: Optional[_bool] = ..., - index_names: _bool = ..., - justify: Optional[_str] = ..., - max_rows: Optional[int] = ..., - max_cols: Optional[int] = ..., - show_dimensions: _bool = ..., - decimal: _str = ..., - bold_rows: _bool = ..., - classes: Optional[Union[_str, List, Tuple]] = ..., - escape: _bool = ..., - notebook: _bool = ..., - border: Optional[int] = ..., - table_id: Optional[_str] = ..., - render_links: _bool = ..., - encoding: Optional[_str] = ..., - ) -> None: ... - @overload - def to_html( - self, - columns: Optional[Sequence[_str]] = ..., - col_space: Optional[Union[int, List[int], Dict[Union[_str, int], int]]] = ..., - header: _bool = ..., - index: _bool = ..., - na_rep: _str = ..., - formatters=..., - float_format=..., - sparsify: Optional[_bool] = ..., - index_names: _bool = ..., - justify: Optional[_str] = ..., - max_rows: Optional[int] = ..., - max_cols: Optional[int] = ..., - show_dimensions: _bool = ..., - decimal: _str = ..., - bold_rows: _bool = ..., - classes: Optional[Union[_str, List, Tuple]] = ..., - escape: _bool = ..., - notebook: _bool = ..., - border: Optional[int] = ..., - table_id: Optional[_str] = ..., - render_links: _bool = ..., - encoding: Optional[_str] = ..., - ) -> _str: ... - def info( - self, verbose=..., buf=..., max_cols=..., memory_usage=..., null_counts=... - ) -> None: ... - def memory_usage(self, index: _bool = ..., deep: _bool = ...) -> Series: ... 
- def transpose(self, *args, copy: _bool = ...) -> DataFrame: ... - @property - def T(self) -> DataFrame: ... - @overload - def __getitem__(self, idx: Scalar) -> Series: ... - @overload - def __getitem__(self, rows: slice) -> DataFrame: ... - @overload - def __getitem__( - self, - idx: Union[ - Callable, - Tuple, - Series[_bool], - DataFrame, - List[_str], - List[Hashable], - Index, - np_ndarray_str, - np_ndarray_bool, - Sequence[Tuple[Scalar, ...]], - ], - ) -> DataFrame: ... - def __setitem__(self, key, value): ... - @overload - def query(self, expr: _str, *, inplace: Literal[True], **kwargs) -> None: ... - @overload - def query( - self, expr: _str, *, inplace: Literal[False] = ..., **kwargs - ) -> DataFrame: ... - def eval(self, expr: _str, inplace: _bool = ..., **kwargs): ... - def select_dtypes( - self, - include: Optional[Union[_str, List[_str]]] = ..., - exclude: Optional[Union[_str, List[_str]]] = ..., - ) -> DataFrame: ... - def insert( - self, - loc: int, - column, - value: Union[int, _ListLike], - allow_duplicates: _bool = ..., - ) -> None: ... - def assign(self, **kwargs) -> DataFrame: ... - def lookup(self, row_labels: Sequence, col_labels: Sequence) -> np.ndarray: ... - def align( - self, - other: Union[DataFrame, Series], - join: Union[_str, Literal["inner", "outer", "left", "right"]] = ..., - axis: Optional[AxisType] = ..., - level: Optional[Level] = ..., - copy: _bool = ..., - fill_value=..., - method: Optional[ - Union[_str, Literal["backfill", "bfill", "pad", "ffill"]] - ] = ..., - limit: Optional[int] = ..., - fill_axis: AxisType = ..., - broadcast_axis: Optional[AxisType] = ..., - ) -> DataFrame: ... - def reindex(**kwargs) -> DataFrame: ... - @overload - def drop( - self, - labels: Hashable | list[Hashable] = ..., - *, - axis: Axis = ..., - index: Hashable | list[Hashable] = ..., - columns: Hashable | list[Hashable] = ..., - level: Optional[Level] = ..., - inplace: Literal[True], - errors: IgnoreRaise = ..., - ) -> None: ... - @overload - def drop( - self, - labels: Hashable | list[Hashable] = ..., - *, - axis: Axis = ..., - index: Hashable | list[Hashable] = ..., - columns: Hashable | list[Hashable] = ..., - level: Optional[Level] = ..., - inplace: Literal[False] = ..., - errors: IgnoreRaise = ..., - ) -> DataFrame: ... - @overload - def drop( - self, - labels: Hashable | list[Hashable] = ..., - *, - axis: Axis = ..., - index: Hashable | list[Hashable] = ..., - columns: Hashable | list[Hashable] = ..., - level: Optional[Level] = ..., - inplace: bool = ..., - errors: IgnoreRaise = ..., - ) -> DataFrame | None: ... - @overload - def rename( - self, - mapper: Optional[Renamer] = ..., - *, - index: Optional[Renamer] = ..., - columns: Optional[Renamer] = ..., - axis: Optional[Axis] = ..., - copy: bool = ..., - inplace: Literal[True], - level: Optional[Level] = ..., - errors: IgnoreRaise = ..., - ) -> None: ... - @overload - def rename( - self, - mapper: Optional[Renamer] = ..., - *, - index: Optional[Renamer] = ..., - columns: Optional[Renamer] = ..., - axis: Optional[Axis] = ..., - copy: bool = ..., - inplace: Literal[False] = ..., - level: Optional[Level] = ..., - errors: IgnoreRaise = ..., - ) -> DataFrame: ... - @overload - def rename( - self, - mapper: Optional[Renamer] = ..., - *, - index: Optional[Renamer] = ..., - columns: Optional[Renamer] = ..., - axis: Optional[Axis] = ..., - copy: bool = ..., - inplace: bool = ..., - level: Optional[Level] = ..., - errors: IgnoreRaise = ..., - ) -> Optional[DataFrame]: ... 
- @overload - def fillna( - self, - value: Optional[Union[Scalar, Dict, Series, DataFrame]] = ..., - method: Optional[Literal["backfill", "bfill", "ffill", "pad"]] = ..., - axis: Optional[AxisType] = ..., - limit: int = ..., - downcast: Optional[Dict] = ..., - *, - inplace: Literal[True], - ) -> None: ... - @overload - def fillna( - self, - value: Optional[Union[Scalar, Dict, Series, DataFrame]] = ..., - method: Optional[Literal["backfill", "bfill", "ffill", "pad"]] = ..., - axis: Optional[AxisType] = ..., - limit: int = ..., - downcast: Optional[Dict] = ..., - *, - inplace: Literal[False] = ..., - ) -> DataFrame: ... - @overload - def fillna( - self, - value: Optional[Union[Scalar, Dict, Series, DataFrame]] = ..., - method: Optional[ - Union[_str, Literal["backfill", "bfill", "ffill", "pad"]] - ] = ..., - axis: Optional[AxisType] = ..., - *, - limit: int = ..., - downcast: Optional[Dict] = ..., - ) -> Union[None, DataFrame]: ... - @overload - def fillna( - self, - value: Optional[Union[Scalar, Dict, Series, DataFrame]] = ..., - method: Optional[ - Union[_str, Literal["backfill", "bfill", "ffill", "pad"]] - ] = ..., - axis: Optional[AxisType] = ..., - inplace: Optional[_bool] = ..., - limit: int = ..., - downcast: Optional[Dict] = ..., - ) -> Union[None, DataFrame]: ... - @overload - def replace( - self, - to_replace=..., - value: Optional[Union[Scalar, Sequence, Mapping, Pattern]] = ..., - limit: Optional[int] = ..., - regex=..., - method: Optional[_str] = ..., - *, - inplace: Literal[True], - ) -> None: ... - @overload - def replace( - self, - to_replace=..., - value: Optional[Union[Scalar, Sequence, Mapping, Pattern]] = ..., - limit: Optional[int] = ..., - regex=..., - method: Optional[_str] = ..., - *, - inplace: Literal[False], - ) -> DataFrame: ... - @overload - def replace( - self, - to_replace=..., - value: Optional[Union[Scalar, Sequence, Mapping, Pattern]] = ..., - *, - limit: Optional[int] = ..., - regex=..., - method: Optional[_str] = ..., - ) -> DataFrame: ... - @overload - def replace( - self, - to_replace=..., - value: Optional[Union[Scalar, Sequence, Mapping, Pattern]] = ..., - inplace: Optional[_bool] = ..., - limit: Optional[int] = ..., - regex=..., - method: Optional[_str] = ..., - ) -> Union[None, DataFrame]: ... - def shift( - self, - periods: int = ..., - freq=..., - axis: AxisType = ..., - fill_value: Optional[Hashable] = ..., - ) -> DataFrame: ... - @overload - def set_index( - self, - keys: Union[Label, Sequence], - drop: _bool = ..., - append: _bool = ..., - verify_integrity: _bool = ..., - *, - inplace: Literal[True], - ) -> None: ... - @overload - def set_index( - self, - keys: Union[Label, Sequence], - drop: _bool = ..., - append: _bool = ..., - verify_integrity: _bool = ..., - *, - inplace: Literal[False], - ) -> DataFrame: ... - @overload - def set_index( - self, - keys: Union[Label, Sequence], - drop: _bool = ..., - append: _bool = ..., - *, - verify_integrity: _bool = ..., - ) -> DataFrame: ... - @overload - def set_index( - self, - keys: Union[Label, Sequence], - drop: _bool = ..., - append: _bool = ..., - inplace: Optional[_bool] = ..., - verify_integrity: _bool = ..., - ) -> Union[None, DataFrame]: ... - @overload - def reset_index( - self, - level: Union[Level, Sequence[Level]] = ..., - drop: _bool = ..., - col_level: Union[int, _str] = ..., - col_fill: Hashable = ..., - *, - inplace: Literal[True], - ) -> None: ... 
- @overload - def reset_index( - self, - level: Union[Level, Sequence[Level]] = ..., - drop: _bool = ..., - col_level: Union[int, _str] = ..., - col_fill: Hashable = ..., - *, - inplace: Literal[False], - ) -> DataFrame: ... - @overload - def reset_index( - self, - level: Union[Level, Sequence[Level]] = ..., - drop: _bool = ..., - *, - col_level: Union[int, _str] = ..., - col_fill: Hashable = ..., - ) -> DataFrame: ... - @overload - def reset_index( - self, - level: Union[Level, Sequence[Level]] = ..., - drop: _bool = ..., - inplace: Optional[_bool] = ..., - col_level: Union[int, _str] = ..., - col_fill: Hashable = ..., - ) -> Union[None, DataFrame]: ... - def isna(self) -> DataFrame: ... - def isnull(self) -> DataFrame: ... - def notna(self) -> DataFrame: ... - def notnull(self) -> DataFrame: ... - @overload - def dropna( - self, - axis: AxisType = ..., - how: Union[_str, Literal["any", "all"]] = ..., - thresh: Optional[int] = ..., - subset: Optional[List] = ..., - *, - inplace: Literal[True], - ) -> None: ... - @overload - def dropna( - self, - axis: AxisType = ..., - how: Union[_str, Literal["any", "all"]] = ..., - thresh: Optional[int] = ..., - subset: Optional[List] = ..., - *, - inplace: Literal[False], - ) -> DataFrame: ... - @overload - def dropna( - self, - axis: AxisType = ..., - how: Union[_str, Literal["any", "all"]] = ..., - thresh: Optional[int] = ..., - subset: Optional[List] = ..., - ) -> DataFrame: ... - @overload - def dropna( - self, - axis: AxisType = ..., - how: Union[_str, Literal["any", "all"]] = ..., - thresh: Optional[int] = ..., - subset: Optional[List] = ..., - inplace: Optional[_bool] = ..., - ) -> Union[None, DataFrame]: ... - def drop_duplicates( - self, - subset=..., - keep: Union[_str, Literal["first", "last"], _bool] = ..., - inplace: _bool = ..., - ignore_index: _bool = ..., - ) -> DataFrame: ... - def duplicated( - self, - subset: Optional[Union[Hashable, Sequence[Hashable]]] = ..., - keep: Union[_str, Literal["first", "last"], _bool] = ..., - ) -> Series: ... - @overload - def sort_values( - self, - by: Union[_str, Sequence[_str]], - axis: AxisType = ..., - ascending: Union[_bool, Sequence[_bool]] = ..., - kind: Union[_str, Literal["quicksort", "mergesort", "heapsort"]] = ..., - na_position: Union[_str, Literal["first", "last"]] = ..., - ignore_index: _bool = ..., - *, - inplace: Literal[True], - key: Optional[Callable] = ..., - ) -> None: ... - @overload - def sort_values( - self, - by: Union[_str, Sequence[_str]], - axis: AxisType = ..., - ascending: Union[_bool, Sequence[_bool]] = ..., - kind: Union[_str, Literal["quicksort", "mergesort", "heapsort"]] = ..., - na_position: Union[_str, Literal["first", "last"]] = ..., - ignore_index: _bool = ..., - *, - inplace: Literal[False], - key: Optional[Callable] = ..., - ) -> DataFrame: ... - @overload - def sort_values( - self, - by: Union[_str, Sequence[_str]], - axis: AxisType = ..., - ascending: Union[_bool, Sequence[_bool]] = ..., - *, - kind: Union[_str, Literal["quicksort", "mergesort", "heapsort"]] = ..., - na_position: Union[_str, Literal["first", "last"]] = ..., - ignore_index: _bool = ..., - key: Optional[Callable] = ..., - ) -> DataFrame: ... 
- @overload - def sort_values( - self, - by: Union[_str, Sequence[_str]], - axis: AxisType = ..., - ascending: Union[_bool, Sequence[_bool]] = ..., - inplace: Optional[_bool] = ..., - kind: Union[_str, Literal["quicksort", "mergesort", "heapsort"]] = ..., - na_position: Union[_str, Literal["first", "last"]] = ..., - ignore_index: _bool = ..., - key: Optional[Callable] = ..., - ) -> Union[None, DataFrame]: ... - @overload - def sort_index( - self, - axis: AxisType = ..., - level: Optional[Level] = ..., - ascending: Union[_bool, Sequence[_bool]] = ..., - kind: Union[_str, Literal["quicksort", "mergesort", "heapsort"]] = ..., - na_position: Union[_str, Literal["first", "last"]] = ..., - sort_remaining: _bool = ..., - ignore_index: _bool = ..., - *, - inplace: Literal[True], - key: Optional[Callable] = ..., - ) -> None: ... - @overload - def sort_index( - self, - axis: AxisType = ..., - level: Optional[Union[Level, List[int], List[_str]]] = ..., - ascending: Union[_bool, Sequence[_bool]] = ..., - kind: Union[_str, Literal["quicksort", "mergesort", "heapsort"]] = ..., - na_position: Union[_str, Literal["first", "last"]] = ..., - sort_remaining: _bool = ..., - ignore_index: _bool = ..., - *, - inplace: Literal[False], - key: Optional[Callable] = ..., - ) -> DataFrame: ... - @overload - def sort_index( - self, - axis: AxisType = ..., - level: Optional[Union[Level, List[int], List[_str]]] = ..., - ascending: Union[_bool, Sequence[_bool]] = ..., - *, - kind: Union[_str, Literal["quicksort", "mergesort", "heapsort"]] = ..., - na_position: Union[_str, Literal["first", "last"]] = ..., - sort_remaining: _bool = ..., - ignore_index: _bool = ..., - key: Optional[Callable] = ..., - ) -> DataFrame: ... - @overload - def sort_index( - self, - axis: AxisType = ..., - level: Optional[Union[Level, List[int], List[_str]]] = ..., - ascending: Union[_bool, Sequence[_bool]] = ..., - inplace: Optional[_bool] = ..., - kind: Union[_str, Literal["quicksort", "mergesort", "heapsort"]] = ..., - na_position: Union[_str, Literal["first", "last"]] = ..., - sort_remaining: _bool = ..., - ignore_index: _bool = ..., - key: Optional[Callable] = ..., - ) -> Union[None, DataFrame]: ... - def value_counts( - self, - subset: Optional[Sequence[Hashable]] = ..., - normalize: _bool = ..., - sort: _bool = ..., - ascending: _bool = ..., - dropna: _bool = ..., - ) -> Series[int]: ... - def nlargest( - self, - n: int, - columns: Union[_str, List[_str]], - keep: Union[_str, Literal["first", "last", "all"]] = ..., - ) -> DataFrame: ... - def nsmallest( - self, - n: int, - columns: Union[_str, List[_str]], - keep: Union[_str, Literal["first", "last", "all"]] = ..., - ) -> DataFrame: ... - def swaplevel( - self, i: Level = ..., j: Level = ..., axis: AxisType = ... - ) -> DataFrame: ... - def reorder_levels(self, order: List, axis: AxisType = ...) -> DataFrame: ... - def compare( - self, - other: DataFrame, - align_axis: Axis = ..., - keep_shape: bool = ..., - keep_equal: bool = ..., - ) -> DataFrame: ... - def combine( - self, - other: DataFrame, - func: Callable, - fill_value=..., - overwrite: _bool = ..., - ) -> DataFrame: ... - def combine_first(self, other: DataFrame) -> DataFrame: ... - def update( - self, - other: Union[DataFrame, Series], - join: _str = ..., - overwrite: _bool = ..., - filter_func: Optional[Callable] = ..., - errors: Union[_str, Literal["raise", "ignore"]] = ..., - ) -> None: ... 
- def groupby( - self, - by: Optional[Union[List[_str], _str]] = ..., - axis: AxisType = ..., - level: Optional[Level] = ..., - as_index: _bool = ..., - sort: _bool = ..., - group_keys: _bool = ..., - squeeze: _bool = ..., - observed: _bool = ..., - dropna: _bool = ..., - ) -> DataFrameGroupBy: ... - def pivot( - self, - index=..., - columns=..., - values=..., - ) -> DataFrame: ... - def pivot_table( - self, - values: Optional[Union[_str, Grouper, Sequence]] = ..., - index: Optional[Union[_str, Grouper, Sequence]] = ..., - columns: Optional[Union[_str, Grouper, Sequence]] = ..., - aggfunc=..., - fill_value: Optional[Scalar] = ..., - margins: _bool = ..., - dropna: _bool = ..., - margins_name: _str = ..., - observed: _bool = ..., - ) -> DataFrame: ... - def stack( - self, level: Level = ..., dropna: _bool = ... - ) -> Union[DataFrame, Series]: ... - def explode( - self, column: Union[_str, Tuple], ignore_index: _bool = ... - ) -> DataFrame: ... - def unstack( - self, - level: Level = ..., - fill_value: Optional[Union[int, _str, Dict]] = ..., - ) -> Union[DataFrame, Series]: ... - def melt( - self, - id_vars: Optional[Union[Tuple, Sequence, np.ndarray]] = ..., - value_vars: Optional[Union[Tuple, Sequence, np.ndarray]] = ..., - var_name: Optional[Scalar] = ..., - value_name: Scalar = ..., - col_level: Optional[Union[int, _str]] = ..., - ignore_index: _bool = ..., - ) -> DataFrame: ... - def diff(self, periods: int = ..., axis: AxisType = ...) -> DataFrame: ... - @overload - def agg( - self, func: Union[Callable, _str], axis: AxisType = ..., **kwargs - ) -> Series: ... - @overload - def agg( - self, - func: Union[List[Callable], Dict[_str, Callable]] = ..., - axis: AxisType = ..., - **kwargs, - ) -> DataFrame: ... - @overload - def aggregate( - self, func: Union[Callable, _str], axis: AxisType = ..., **kwargs - ) -> Series: ... - @overload - def aggregate( - self, - func: Union[List[Callable], Dict[_str, Callable]], - axis: AxisType = ..., - **kwargs, - ) -> DataFrame: ... - def transform( - self, - func: Union[List[Callable], Dict[_str, Callable]], - axis: AxisType = ..., - *args, - **kwargs, - ) -> DataFrame: ... - @overload - def apply(self, f: Callable) -> Series: ... - @overload - def apply( - self, - f: Callable, - axis: AxisType, - raw: _bool = ..., - result_type: Optional[_str] = ..., - args=..., - **kwargs, - ) -> DataFrame: ... - def applymap( - self, func: Callable, na_action: Optional[Literal["ignore"]] = None, **kwargs - ) -> DataFrame: ... - def append( - self, - other: Union[ - DataFrame, Series, Dict[Any, Any], Sequence[Scalar], Sequence[_ListLike] - ], - ignore_index: _bool = ..., - verify_integrity: _bool = ..., - sort: _bool = ..., - ) -> DataFrame: ... - def join( - self, - other: Union[DataFrame, Series, List[DataFrame]], - on: Optional[Union[_str, List[_str]]] = ..., - how: Union[_str, Literal["left", "right", "outer", "inner"]] = ..., - lsuffix: _str = ..., - rsuffix: _str = ..., - sort: _bool = ..., - ) -> DataFrame: ... - def merge( - self, - right: Union[DataFrame, Series], - how: Union[_str, Literal["left", "right", "inner", "outer"]] = ..., - on: Optional[IndexLevel] = ..., - left_on: Optional[Union[Level, Sequence[Level]]] = ..., - right_on: Optional[Union[Level, Sequence[Level]]] = ..., - left_index: _bool = ..., - right_index: _bool = ..., - sort: _bool = ..., - suffixes: Tuple[Optional[_str], Optional[_str]] = ..., - copy: _bool = ..., - indicator: Union[_bool, _str] = ..., - validate: Optional[_str] = ..., - ) -> DataFrame: ... 
- def round( - self, decimals: Union[int, Dict, Series] = ..., *args, **kwargs - ) -> DataFrame: ... - def corr( - self, - method: Union[_str, Literal["pearson", "kendall", "spearman"]] = ..., - min_periods: int = ..., - ) -> DataFrame: ... - def cov(self, min_periods: Optional[int] = ..., ddof: int = 1) -> DataFrame: ... - def corrwith( - self, - other: Union[DataFrame, Series], - axis: Optional[AxisType] = ..., - drop: _bool = ..., - method: Union[_str, Literal["pearson", "kendall", "spearman"]] = ..., - ) -> Series: ... - @overload - def count( - self, axis: AxisType = ..., numeric_only: _bool = ..., *, level: Level - ) -> DataFrame: ... - @overload - def count( - self, axis: AxisType = ..., level: None = ..., numeric_only: _bool = ... - ) -> Series: ... - def nunique(self, axis: AxisType = ..., dropna=True) -> Series: ... - def idxmax(self, axis: AxisType = ..., skipna: _bool = ...) -> Series: ... - def idxmin(self, axis: AxisType = ..., skipna: _bool = ...) -> Series: ... - @overload - def mode( - self, - axis: AxisType = ..., - skipna: _bool = ..., - numeric_only: _bool = ..., - *, - level: Level, - **kwargs, - ) -> DataFrame: ... - @overload - def mode( - self, - axis: AxisType = ..., - skipna: _bool = ..., - level: None = ..., - numeric_only: _bool = ..., - **kwargs, - ) -> Series: ... - @overload - def quantile( - self, - q: float = ..., - axis: AxisType = ..., - numeric_only: _bool = ..., - interpolation: Union[ - _str, Literal["linear", "lower", "higher", "midpoint", "nearest"] - ] = ..., - ) -> Series: ... - @overload - def quantile( - self, - q: List[float], - axis: AxisType = ..., - numeric_only: _bool = ..., - interpolation: Union[ - _str, Literal["linear", "lower", "higher", "midpoint", "nearest"] - ] = ..., - ) -> DataFrame: ... - def to_timestamp( - self, - freq=..., - how: Union[_str, Literal["start", "end", "s", "e"]] = ..., - axis: AxisType = ..., - copy: _bool = ..., - ) -> DataFrame: ... - def to_period( - self, freq: Optional[_str] = ..., axis: AxisType = ..., copy: _bool = ... - ) -> DataFrame: ... - def isin(self, values: Union[Iterable, Series, DataFrame, Dict]) -> DataFrame: ... - def plot(self, *args, **kwargs) -> PlotAccessor: ... - def hist( - self, - column: Optional[Union[_str, List[_str]]] = ..., - by: Optional[Union[_str, _ListLike]] = ..., - grid: _bool = ..., - xlabelsize: Optional[int] = ..., - xrot: Optional[float] = ..., - ylabelsize: Optional[int] = ..., - yrot: Optional[float] = ..., - ax: Optional[PlotAxes] = ..., - sharex: _bool = ..., - sharey: _bool = ..., - figsize: Optional[Tuple[float, float]] = ..., - layout: Optional[Tuple[int, int]] = ..., - bins: Union[int, List] = ..., - backend: Optional[_str] = ..., - **kwargs, - ): ... - def boxplot( - self, - column: Optional[Union[_str, List[_str]]] = ..., - by: Optional[Union[_str, _ListLike]] = ..., - ax: Optional[PlotAxes] = ..., - fontsize: Optional[Union[float, _str]] = ..., - rot: int = ..., - grid: _bool = ..., - figsize: Optional[Tuple[float, float]] = ..., - layout: Optional[Tuple[int, int]] = ..., - return_type: Optional[Union[_str, Literal["axes", "dict", "both"]]] = ..., - backend: Optional[_str] = ..., - **kwargs, - ): ... - sparse = ... - - # The rest of these are remnants from the - # stubs shipped at preview. They may belong in - # base classes, or stubgen just failed to generate - # these. 
- - Name: _str - # - # dunder methods - def __exp__( - self, - other: Union[num, _ListLike, DataFrame], - axis: AxisType = ..., - level: Level = ..., - fill_value: Union[None, float] = ..., - ) -> DataFrame: ... - def __iter__(self) -> Iterator: ... - # properties - @property - def at(self): ... # Not sure what to do with this yet; look at source - @property - def bool(self) -> _bool: ... - @property - def columns(self) -> Index: ... - @columns.setter # setter needs to be right next to getter; otherwise mypy complains - def columns(self, cols: Union[List[_str], Index[_str]]) -> None: ... # type:ignore - @property - def dtypes(self) -> Series[Any]: ... - @property - def empty(self) -> _bool: ... - @property - def iat(self): ... # Not sure what to do with this yet; look at source - @property - def iloc(self) -> _iLocIndexerFrame: ... - @property - def index(self) -> Index: ... - @index.setter - def index(self, idx: Index) -> None: ... - @property - def loc(self) -> _LocIndexerFrame: ... - @property - def ndim(self) -> int: ... - @property - def size(self) -> int: ... - @property - def values(self) -> _np.ndarray: ... - # methods - def abs(self) -> DataFrame: ... - def add( - self, - other: Union[num, _ListLike, DataFrame], - axis: Optional[AxisType] = ..., - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - ) -> DataFrame: ... - def add_prefix(self, prefix: _str) -> DataFrame: ... - def add_suffix(self, suffix: _str) -> DataFrame: ... - @overload - def all( - self, - axis: AxisType = ..., - bool_only: Optional[_bool] = ..., - skipna: _bool = ..., - level: None = ..., - **kwargs, - ) -> Series: ... - @overload - def all( - self, - axis: AxisType = ..., - bool_only: Optional[_bool] = ..., - skipna: _bool = ..., - *, - level: Level, - **kwargs, - ) -> DataFrame: ... - @overload - def any( - self, - axis: AxisType = ..., - bool_only: Optional[_bool] = ..., - skipna: _bool = ..., - level: None = ..., - **kwargs, - ) -> Series: ... - @overload - def any( - self, - axis: AxisType = ..., - bool_only: _bool = ..., - skipna: _bool = ..., - *, - level: Level, - **kwargs, - ) -> DataFrame: ... - def asof( - self, where, subset: Optional[Union[_str, List[_str]]] = ... - ) -> DataFrame: ... - def asfreq( - self, - freq, - method: Optional[ - Union[_str, Literal["backfill", "bfill", "pad", "ffill"]] - ] = ..., - how: Optional[Union[_str, Literal["start", "end"]]] = ..., - normalize: _bool = ..., - fill_value: Optional[Scalar] = ..., - ) -> DataFrame: ... - def astype( - self, - dtype: Union[_str, Dtype, Dict[_str, Union[_str, Dtype]]], - copy: _bool = ..., - errors: _str = ..., - ) -> DataFrame: ... - def at_time( - self, - time: Union[_str, datetime.time], - asof: _bool = ..., - axis: Optional[AxisType] = ..., - ) -> DataFrame: ... - def between_time( - self, - start_time: Union[_str, datetime.time], - end_time: Union[_str, datetime.time], - include_start: _bool = ..., - include_end: _bool = ..., - axis: Optional[AxisType] = ..., - ) -> DataFrame: ... - @overload - def bfill( - self, - axis: Optional[AxisType] = ..., - *, - inplace: Literal[True], - limit: Optional[int] = ..., - downcast: Optional[Dict] = ..., - ) -> None: ... - @overload - def bfill( - self, - axis: Optional[AxisType] = ..., - *, - inplace: Literal[False], - limit: Optional[int] = ..., - downcast: Optional[Dict] = ..., - ) -> DataFrame: ... 
- def clip( - self, - lower: Optional[float] = ..., - upper: Optional[float] = ..., - axis: Optional[AxisType] = ..., - inplace: _bool = ..., - *args, - **kwargs, - ) -> DataFrame: ... - def copy(self, deep: _bool = ...) -> DataFrame: ... - def cummax( - self, axis: Optional[AxisType] = ..., skipna: _bool = ..., *args, **kwargs - ) -> DataFrame: ... - def cummin( - self, axis: Optional[AxisType] = ..., skipna: _bool = ..., *args, **kwargs - ) -> DataFrame: ... - def cumprod( - self, axis: Optional[AxisType] = ..., skipna: _bool = ..., *args, **kwargs - ) -> DataFrame: ... - def cumsum( - self, axis: Optional[AxisType] = ..., skipna: _bool = ..., *args, **kwargs - ) -> DataFrame: ... - def describe( - self, - percentiles: Optional[List[float]] = ..., - include: Optional[Union[_str, Literal["all"], List[Dtype]]] = ..., - exclude: Optional[List[Dtype]] = ..., - datetime_is_numeric: Optional[_bool] = ..., - ) -> DataFrame: ... - def div( - self, - other: Union[num, _ListLike, DataFrame], - axis: Optional[AxisType] = ..., - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - ) -> DataFrame: ... - def divide( - self, - other: Union[num, _ListLike, DataFrame], - axis: Optional[AxisType] = ..., - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - ) -> DataFrame: ... - def droplevel( - self, level: Union[Level, List[Level]] = ..., axis: AxisType = ... - ) -> DataFrame: ... - def eq( - self, other, axis: AxisType = ..., level: Optional[Level] = ... - ) -> DataFrame: ... - def equals(self, other: Union[Series, DataFrame]) -> _bool: ... - def ewm( - self, - com: Optional[float] = ..., - span: Optional[float] = ..., - halflife: Optional[float] = ..., - alpha: Optional[float] = ..., - min_periods: int = ..., - adjust: _bool = ..., - ignore_na: _bool = ..., - axis: AxisType = ..., - ) -> DataFrame: ... - def exp( - self, - other: Union[num, _ListLike, DataFrame], - axis: Optional[AxisType] = ..., - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - ) -> DataFrame: ... - def expanding( - self, min_periods: int = ..., center: _bool = ..., axis: AxisType = ... - ): ... # for now - @overload - def ffill( - self, - axis: Optional[AxisType] = ..., - *, - inplace: Literal[True], - limit: Optional[int] = ..., - downcast: Optional[Dict] = ..., - ) -> None: ... - @overload - def ffill( - self, - axis: Optional[AxisType] = ..., - *, - inplace: Literal[False], - limit: Optional[int] = ..., - downcast: Optional[Dict] = ..., - ) -> DataFrame: ... - def filter( - self, - items: Optional[List] = ..., - like: Optional[_str] = ..., - regex: Optional[_str] = ..., - axis: Optional[AxisType] = ..., - ) -> DataFrame: ... - def first(self, offset) -> DataFrame: ... - def first_valid_index(self) -> Scalar: ... - def floordiv( - self, - other: Union[num, _ListLike, DataFrame], - axis: Optional[AxisType] = ..., - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - ) -> DataFrame: ... - # def from_dict - # def from_records - def fulldiv( - self, - other: Union[num, _ListLike, DataFrame], - axis: Optional[AxisType] = ..., - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - ) -> DataFrame: ... - def ge( - self, other, axis: AxisType = ..., level: Optional[Level] = ... - ) -> DataFrame: ... - # def get - def gt( - self, other, axis: AxisType = ..., level: Optional[Level] = ... - ) -> DataFrame: ... - def head(self, n: int = ...) -> DataFrame: ... - def infer_objects(self) -> DataFrame: ... 
- # def info - @overload - def interpolate( - self, - method: _str = ..., - axis: AxisType = ..., - limit: Optional[int] = ..., - limit_direction: Union[_str, Literal["forward", "backward", "both"]] = ..., - limit_area: Union[_str, Optional[Literal["inside", "outside"]]] = ..., - downcast: Optional[Union[_str, Literal["infer"]]] = ..., - *, - inplace: Literal[True], - **kwargs, - ) -> None: ... - @overload - def interpolate( - self, - method: _str = ..., - axis: AxisType = ..., - limit: Optional[int] = ..., - limit_direction: Union[_str, Literal["forward", "backward", "both"]] = ..., - limit_area: Union[_str, Optional[Literal["inside", "outside"]]] = ..., - downcast: Optional[Union[_str, Literal["infer"]]] = ..., - *, - inplace: Literal[False], - **kwargs, - ) -> DataFrame: ... - @overload - def interpolate( - self, - method: _str = ..., - axis: AxisType = ..., - limit: Optional[int] = ..., - limit_direction: Union[_str, Literal["forward", "backward", "both"]] = ..., - limit_area: Union[_str, Optional[Literal["inside", "outside"]]] = ..., - downcast: Optional[Union[_str, Literal["infer"]]] = ..., - ) -> DataFrame: ... - @overload - def interpolate( - self, - method: _str = ..., - axis: AxisType = ..., - limit: Optional[int] = ..., - inplace: Optional[_bool] = ..., - limit_direction: Union[_str, Literal["forward", "backward", "both"]] = ..., - limit_area: Optional[Union[_str, Literal["inside", "outside"]]] = ..., - downcast: Optional[Union[_str, Literal["infer"]]] = ..., - **kwargs, - ) -> DataFrame: ... - def keys(self) -> Index: ... - @overload - def kurt( - self, - axis: Optional[AxisType] = ..., - skipna: Optional[_bool] = ..., - numeric_only: Optional[_bool] = ..., - *, - level: Level, - **kwargs, - ) -> DataFrame: ... - @overload - def kurt( - self, - axis: Optional[AxisType] = ..., - skipna: Optional[_bool] = ..., - level: None = ..., - numeric_only: Optional[_bool] = ..., - **kwargs, - ) -> Series: ... - @overload - def kurtosis( - self, - axis: Optional[AxisType] = ..., - skipna: Optional[_bool] = ..., - numeric_only: Optional[_bool] = ..., - *, - level: Level, - **kwargs, - ) -> DataFrame: ... - @overload - def kurtosis( - self, - axis: Optional[AxisType] = ..., - skipna: Optional[_bool] = ..., - level: None = ..., - numeric_only: Optional[_bool] = ..., - **kwargs, - ) -> Series: ... - def last(self, offset) -> DataFrame: ... - def last_valid_index(self) -> Scalar: ... - def le( - self, other, axis: AxisType = ..., level: Optional[Level] = ... - ) -> DataFrame: ... - def lt( - self, other, axis: AxisType = ..., level: Optional[Level] = ... - ) -> DataFrame: ... - @overload - def mad( - self, - axis: Optional[AxisType] = ..., - skipna: Optional[_bool] = ..., - level: None = ..., - ) -> Series: ... - @overload - def mad( - self, - axis: Optional[AxisType] = ..., - skipna: Optional[_bool] = ..., - *, - level: Level, - **kwargs, - ) -> DataFrame: ... - def mask( - self, - cond: Union[Series, DataFrame, _np.ndarray], - other=..., - inplace: _bool = ..., - axis: Optional[AxisType] = ..., - level: Optional[Level] = ..., - errors: _str = ..., - try_cast: _bool = ..., - ) -> DataFrame: ... - @overload - def max( - self, - axis: Optional[AxisType] = ..., - skipna: Optional[_bool] = ..., - numeric_only: Optional[_bool] = ..., - *, - level: Level, - **kwargs, - ) -> DataFrame: ... - @overload - def max( - self, - axis: Optional[AxisType] = ..., - skipna: Optional[_bool] = ..., - level: None = ..., - numeric_only: Optional[_bool] = ..., - **kwargs, - ) -> Series: ... 
- @overload - def mean( - self, - axis: Optional[AxisType] = ..., - skipna: Optional[_bool] = ..., - numeric_only: Optional[_bool] = ..., - *, - level: Level, - **kwargs, - ) -> DataFrame: ... - @overload - def mean( - self, - axis: Optional[AxisType] = ..., - skipna: Optional[_bool] = ..., - level: None = ..., - numeric_only: Optional[_bool] = ..., - **kwargs, - ) -> Series: ... - @overload - def median( - self, - axis: Optional[AxisType] = ..., - skipna: Optional[_bool] = ..., - numeric_only: Optional[_bool] = ..., - *, - level: Level, - **kwargs, - ) -> DataFrame: ... - @overload - def median( - self, - axis: Optional[AxisType] = ..., - skipna: Optional[_bool] = ..., - level: None = ..., - numeric_only: Optional[_bool] = ..., - **kwargs, - ) -> Series: ... - @overload - def min( - self, - axis: Optional[AxisType] = ..., - skipna: Optional[_bool] = ..., - numeric_only: Optional[_bool] = ..., - *, - level: Level, - **kwargs, - ) -> DataFrame: ... - @overload - def min( - self, - axis: Optional[AxisType] = ..., - skipna: Optional[_bool] = ..., - level: None = ..., - numeric_only: Optional[_bool] = ..., - **kwargs, - ) -> Series: ... - def mod( - self, - other: Union[num, _ListLike, DataFrame], - axis: Optional[AxisType] = ..., - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - ) -> DataFrame: ... - def mul( - self, - other: Union[num, _ListLike, DataFrame], - axis: Optional[AxisType] = ..., - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - ) -> DataFrame: ... - def multiply( - self, - other: Union[num, _ListLike, DataFrame], - axis: Optional[AxisType] = ..., - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - ) -> DataFrame: ... - def ne( - self, other, axis: AxisType = ..., level: Optional[Level] = ... - ) -> DataFrame: ... - def pct_change( - self, - periods: int = ..., - fill_method: _str = ..., - limit: Optional[int] = ..., - freq=..., - **kwargs, - ) -> DataFrame: ... - def pipe( - self, - func: Callable[..., TType] | tuple[Callable[..., TType], str], - *args, - **kwargs, - ) -> TType: ... - def pop(self, item: _str) -> Series: ... - def pow( - self, - other: Union[num, _ListLike, DataFrame], - axis: Optional[AxisType] = ..., - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - ) -> DataFrame: ... - @overload - def prod( - self, - axis: Optional[AxisType] = ..., - skipna: Optional[_bool] = ..., - numeric_only: Optional[_bool] = ..., - min_count: int = ..., - *, - level: Level, - **kwargs, - ) -> DataFrame: ... - @overload - def prod( - self, - axis: Optional[AxisType] = ..., - skipna: Optional[_bool] = ..., - level: None = ..., - numeric_only: Optional[_bool] = ..., - min_count: int = ..., - **kwargs, - ) -> Series: ... - def product( - self, - axis: Optional[AxisType] = ..., - skipna: _bool = ..., - level: Optional[Level] = ..., - numeric_only: Optional[_bool] = ..., - min_count: int = ..., - **kwargs, - ) -> DataFrame: ... - def radd( - self, - other, - axis: AxisType = ..., - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - ) -> DataFrame: ... - def rank( - self, - axis: AxisType = ..., - method: Union[_str, Literal["average", "min", "max", "first", "dense"]] = ..., - numeric_only: Optional[_bool] = ..., - na_option: Union[_str, Literal["keep", "top", "bottom"]] = ..., - ascending: _bool = ..., - pct: _bool = ..., - ) -> DataFrame: ... - def rdiv( - self, - other, - axis: AxisType = ..., - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - ) -> DataFrame: ... 
- def reindex_like( - self, - other: DataFrame, - method: Optional[ - Union[_str, Literal["backfill", "bfill", "pad", "ffill", "nearest"]] - ] = ..., - copy: _bool = ..., - limit: Optional[int] = ..., - tolerance=..., - ) -> DataFrame: ... - @overload - def rename_axis( - self, - mapper=..., - *, - inplace: Literal[True], - axis: Optional[AxisType] = ..., - copy: _bool = ..., - ) -> None: ... - @overload - def rename_axis( - self, - mapper=..., - *, - inplace: Literal[False] = ..., - axis: Optional[AxisType] = ..., - copy: _bool = ..., - ) -> DataFrame: ... - @overload - def rename_axis( - self, - *, - inplace: Literal[True], - index: Optional[ - Union[_str, Sequence[_str], Dict[Union[_str, int], _str], Callable] - ] = ..., - columns: Optional[ - Union[_str, Sequence[_str], Dict[Union[_str, int], _str], Callable] - ] = ..., - copy: _bool = ..., - ) -> None: ... - @overload - def rename_axis( - self, - *, - inplace: Literal[False] = ..., - index: Optional[ - Union[_str, Sequence[_str], Dict[Union[_str, int], _str], Callable] - ] = ..., - columns: Optional[ - Union[_str, Sequence[_str], Dict[Union[_str, int], _str], Callable] - ] = ..., - copy: _bool = ..., - ) -> DataFrame: ... - def resample( - self, - rule, - axis: AxisType = ..., - closed: Optional[_str] = ..., - label: Optional[_str] = ..., - convention: Union[_str, Literal["start", "end", "s", "e"]] = ..., - kind: Union[_str, Optional[Literal["timestamp", "period"]]] = ..., - loffset=..., - base: int = ..., - on: Optional[_str] = ..., - level: Optional[Level] = ..., - origin: Union[ - Timestamp, Literal["epoch", "start", "start_day", "end", "end_day"] - ] = ..., - offset: Optional[Union[Timedelta, _str]] = None, - ) -> Resampler: ... - def rfloordiv( - self, - other, - axis: AxisType = ..., - level: Optional[Level] = ..., - fill_value: Optional[Union[float, None]] = ..., - ) -> DataFrame: ... - def rmod( - self, - other, - axis: AxisType = ..., - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - ) -> DataFrame: ... - def rmul( - self, - other, - axis: AxisType = ..., - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - ) -> DataFrame: ... - @overload - def rolling( - self, - window, - min_periods: Optional[int] = ..., - center: _bool = ..., - *, - win_type: _str, - on: Optional[_str] = ..., - axis: AxisType = ..., - closed: Optional[_str] = ..., - ) -> Window: ... - @overload - def rolling( - self, - window, - min_periods: Optional[int] = ..., - center: _bool = ..., - *, - on: Optional[_str] = ..., - axis: AxisType = ..., - closed: Optional[_str] = ..., - ) -> Rolling: ... - def rpow( - self, - other, - axis: AxisType = ..., - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - ) -> DataFrame: ... - def rsub( - self, - other, - axis: AxisType = ..., - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - ) -> DataFrame: ... - def rtruediv( - self, - other, - axis: AxisType = ..., - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - ) -> DataFrame: ... - # sample is missing a weights arg - def sample( - self, - n: Optional[int] = ..., - frac: Optional[float] = ..., - replace: _bool = ..., - weights: Optional[Union[_str, _ListLike, np.ndarray]] = ..., - random_state: Optional[int] = ..., - axis: Optional[SeriesAxisType] = ..., - ignore_index: _bool = ..., - ) -> Series[S1]: ... 
- @overload - def sem( - self, - axis: Optional[AxisType] = ..., - skipna: Optional[_bool] = ..., - ddof: int = ..., - numeric_only: Optional[_bool] = ..., - *, - level: Level, - **kwargs, - ) -> DataFrame: ... - @overload - def sem( - self, - axis: Optional[AxisType] = ..., - skipna: Optional[_bool] = ..., - level: None = ..., - ddof: int = ..., - numeric_only: Optional[_bool] = ..., - **kwargs, - ) -> Series: ... - @overload - def set_axis( - self, labels, inplace: Literal[True], axis: AxisType = ... - ) -> None: ... - @overload - def set_axis( - self, labels, inplace: Literal[False], axis: AxisType = ... - ) -> DataFrame: ... - @overload - def set_axis(self, labels, *, axis: AxisType = ...) -> DataFrame: ... - @overload - def set_axis( - self, - labels, - axis: AxisType = ..., - inplace: Optional[_bool] = ..., - ) -> Union[None, DataFrame]: ... - @overload - def skew( - self, - axis: Optional[AxisType] = ..., - skipna: Optional[_bool] = ..., - numeric_only: Optional[_bool] = ..., - *, - level: Level, - **kwargs, - ) -> DataFrame: ... - @overload - def skew( - self, - axis: Optional[AxisType] = ..., - skipna: Optional[_bool] = ..., - level: None = ..., - numeric_only: Optional[_bool] = ..., - **kwargs, - ) -> Series: ... - def slice_shift(self, periods: int = ..., axis: AxisType = ...) -> DataFrame: ... - def squeeze(self, axis: Optional[AxisType] = ...): ... - @overload - def std( - self, - axis: AxisType = ..., - skipna: _bool = ..., - ddof: int = ..., - numeric_only: _bool = ..., - *, - level: Level, - **kwargs, - ) -> DataFrame: ... - @overload - def std( - self, - axis: AxisType = ..., - skipna: _bool = ..., - level: None = ..., - ddof: int = ..., - numeric_only: _bool = ..., - **kwargs, - ) -> Series: ... - def sub( - self, - other: Union[num, _ListLike, DataFrame], - axis: Optional[AxisType] = ..., - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - ) -> DataFrame: ... - def subtract( - self, - other: Union[num, _ListLike, DataFrame], - axis: Optional[AxisType] = ..., - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - ) -> DataFrame: ... - @overload - def sum( - self, - axis: Optional[AxisType] = ..., - skipna: Optional[_bool] = ..., - numeric_only: Optional[_bool] = ..., - min_count: int = ..., - *, - level: Level, - **kwargs, - ) -> DataFrame: ... - @overload - def sum( - self, - axis: Optional[AxisType] = ..., - skipna: Optional[_bool] = ..., - level: None = ..., - numeric_only: Optional[_bool] = ..., - min_count: int = ..., - **kwargs, - ) -> Series: ... - def swapaxes( - self, axis1: AxisType, axis2: AxisType, copy: _bool = ... - ) -> DataFrame: ... - def tail(self, n: int = ...) -> DataFrame: ... - def take( - self, - indices: List, - axis: AxisType = ..., - is_copy: Optional[_bool] = ..., - **kwargs, - ) -> DataFrame: ... - def tshift( - self, periods: int = ..., freq=..., axis: AxisType = ... - ) -> DataFrame: ... - def to_clipboard( - self, excel: _bool = ..., sep: Optional[_str] = ..., **kwargs - ) -> None: ... 
- @overload - def to_csv( - self, - path_or_buf: Optional[FilePathOrBuffer], - sep: _str = ..., - na_rep: _str = ..., - float_format: Optional[_str] = ..., - columns: Optional[Sequence[Hashable]] = ..., - header: Union[_bool, List[_str]] = ..., - index: _bool = ..., - index_label: Optional[Union[_bool, _str, Sequence[Hashable]]] = ..., - mode: _str = ..., - encoding: Optional[_str] = ..., - compression: Union[_str, Mapping[_str, _str]] = ..., - quoting: Optional[int] = ..., - quotechar: _str = ..., - line_terminator: Optional[_str] = ..., - chunksize: Optional[int] = ..., - date_format: Optional[_str] = ..., - doublequote: _bool = ..., - escapechar: Optional[_str] = ..., - decimal: _str = ..., - errors: _str = ..., - storage_options: Optional[Dict[_str, Any]] = ..., - ) -> None: ... - @overload - def to_csv( - self, - sep: _str = ..., - na_rep: _str = ..., - float_format: Optional[_str] = ..., - columns: Optional[Sequence[Hashable]] = ..., - header: Union[_bool, List[_str]] = ..., - index: _bool = ..., - index_label: Optional[Union[_bool, _str, Sequence[Hashable]]] = ..., - mode: _str = ..., - encoding: Optional[_str] = ..., - compression: Union[_str, Mapping[_str, _str]] = ..., - quoting: Optional[int] = ..., - quotechar: _str = ..., - line_terminator: Optional[_str] = ..., - chunksize: Optional[int] = ..., - date_format: Optional[_str] = ..., - doublequote: _bool = ..., - escapechar: Optional[_str] = ..., - decimal: _str = ..., - errors: _str = ..., - storage_options: Optional[Dict[_str, Any]] = ..., - ) -> _str: ... - def to_excel( - self, - excel_writer, - sheet_name: _str = ..., - na_rep: _str = ..., - float_format: Optional[_str] = ..., - columns: Optional[Union[_str, Sequence[_str]]] = ..., - header: _bool = ..., - index: _bool = ..., - index_label: Optional[Union[_str, Sequence[_str]]] = ..., - startrow: int = ..., - startcol: int = ..., - engine: Optional[_str] = ..., - merge_cells: _bool = ..., - encoding: Optional[_str] = ..., - inf_rep: _str = ..., - verbose: _bool = ..., - freeze_panes: Optional[Tuple[int, int]] = ..., - ) -> None: ... - def to_hdf( - self, - path_or_buf: FilePathOrBuffer, - key: _str, - mode: _str = ..., - complevel: Optional[int] = ..., - complib: Optional[_str] = ..., - append: _bool = ..., - format: Optional[_str] = ..., - index: _bool = ..., - min_itemsize: Optional[Union[int, Dict[_str, int]]] = ..., - nan_rep=..., - dropna: Optional[_bool] = ..., - data_columns: Optional[List[_str]] = ..., - errors: _str = ..., - encoding: _str = ..., - ) -> None: ... - @overload - def to_json( - self, - path_or_buf: Optional[FilePathOrBuffer], - orient: Optional[ - Union[ - _str, Literal["split", "records", "index", "columns", "values", "table"] - ] - ] = ..., - date_format: Optional[Union[_str, Literal["epoch", "iso"]]] = ..., - double_precision: int = ..., - force_ascii: _bool = ..., - date_unit: Union[_str, Literal["s", "ms", "us", "ns"]] = ..., - default_handler: Optional[ - Callable[[Any], Union[_str, int, float, _bool, List, Dict]] - ] = ..., - lines: _bool = ..., - compression: Union[ - _str, None, Literal["infer", "gzip", "bz2", "zip", "xz"] - ] = ..., - index: _bool = ..., - indent: Optional[int] = ..., - ) -> None: ... 
- @overload - def to_json( - self, - orient: Optional[ - Union[ - _str, Literal["split", "records", "index", "columns", "values", "table"] - ] - ] = ..., - date_format: Optional[Union[_str, Literal["epoch", "iso"]]] = ..., - double_precision: int = ..., - force_ascii: _bool = ..., - date_unit: Union[_str, Literal["s", "ms", "us", "ns"]] = ..., - default_handler: Optional[ - Callable[[Any], Union[_str, int, float, _bool, List, Dict]] - ] = ..., - lines: _bool = ..., - compression: Union[ - _str, None, Literal["infer", "gzip", "bz2", "zip", "xz"] - ] = ..., - index: _bool = ..., - indent: Optional[int] = ..., - ) -> _str: ... - @overload - def to_latex( - self, - buf: Optional[FilePathOrBuffer], - columns: Optional[List[_str]] = ..., - col_space: Optional[int] = ..., - header: _bool = ..., - index: _bool = ..., - na_rep: _str = ..., - formatters=..., - float_format=..., - sparsify: Optional[_bool] = ..., - index_names: _bool = ..., - bold_rows: _bool = ..., - column_format: Optional[_str] = ..., - longtable: Optional[_bool] = ..., - escape: Optional[_bool] = ..., - encoding: Optional[_str] = ..., - decimal: _str = ..., - multicolumn: Optional[_bool] = ..., - multicolumn_format: Optional[_str] = ..., - multirow: Optional[_bool] = ..., - caption: Optional[Union[_str, Tuple[_str, _str]]] = ..., - label: Optional[_str] = ..., - position: Optional[str] = ..., - ) -> None: ... - @overload - def to_latex( - self, - columns: Optional[List[_str]] = ..., - col_space: Optional[int] = ..., - header: _bool = ..., - index: _bool = ..., - na_rep: _str = ..., - formatters=..., - float_format=..., - sparsify: Optional[_bool] = ..., - index_names: _bool = ..., - bold_rows: _bool = ..., - column_format: Optional[_str] = ..., - longtable: Optional[_bool] = ..., - escape: Optional[_bool] = ..., - encoding: Optional[_str] = ..., - decimal: _str = ..., - multicolumn: Optional[_bool] = ..., - multicolumn_format: Optional[_str] = ..., - multirow: Optional[_bool] = ..., - caption: Optional[Union[_str, Tuple[_str, _str]]] = ..., - label: Optional[_str] = ..., - position: Optional[str] = ..., - ) -> _str: ... - def to_pickle( - self, - path: _str, - compression: Union[_str, Literal["infer", "gzip", "bz2", "zip", "xz"]] = ..., - protocol: int = ..., - ) -> None: ... - def to_sql( - self, - name: _str, - con, - schema: Optional[_str] = ..., - if_exists: _str = ..., - index: _bool = ..., - index_label: Optional[Union[_str, Sequence[_str]]] = ..., - chunksize: Optional[int] = ..., - dtype: Optional[Union[Dict, Scalar]] = ..., - method: Optional[Union[_str, Callable]] = ..., - ) -> None: ... - @overload - def to_string( - self, - buf: Optional[FilePathOrBuffer], - columns: Optional[Sequence[_str]] = ..., - col_space: Optional[Union[int, List[int], Dict[Union[_str, int], int]]] = ..., - header: Union[_bool, Sequence[_str]] = ..., - index: _bool = ..., - na_rep: _str = ..., - formatters=..., - float_format=..., - sparsify: Optional[_bool] = ..., - index_names: _bool = ..., - justify: Optional[_str] = ..., - max_rows: Optional[int] = ..., - min_rows: Optional[int] = ..., - max_cols: Optional[int] = ..., - show_dimensions: _bool = ..., - decimal: _str = ..., - line_width: Optional[int] = ..., - max_colwidth: Optional[int] = ..., - encoding: Optional[_str] = ..., - ) -> None: ... 
- @overload - def to_string( - self, - columns: Optional[Sequence[_str]] = ..., - col_space: Optional[Union[int, List[int], Dict[Union[_str, int], int]]] = ..., - header: Union[_bool, Sequence[_str]] = ..., - index: _bool = ..., - na_rep: _str = ..., - formatters=..., - float_format=..., - sparsify: Optional[_bool] = ..., - index_names: _bool = ..., - justify: Optional[_str] = ..., - max_rows: Optional[int] = ..., - min_rows: Optional[int] = ..., - max_cols: Optional[int] = ..., - show_dimensions: _bool = ..., - decimal: _str = ..., - line_width: Optional[int] = ..., - max_colwidth: Optional[int] = ..., - encoding: Optional[_str] = ..., - ) -> _str: ... - def to_xarray(self): ... - def truediv( - self, - other: Union[num, _ListLike, DataFrame], - axis: Optional[AxisType] = ..., - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - ) -> DataFrame: ... - def truncate( - self, - before: Optional[Union[datetime.date, _str, int]] = ..., - after: Optional[Union[datetime.date, _str, int]] = ..., - axis: Optional[AxisType] = ..., - copy: _bool = ..., - ) -> DataFrame: ... - # def tshift - def tz_convert( - self, - tz, - axis: AxisType = ..., - level: Optional[Level] = ..., - copy: _bool = ..., - ) -> DataFrame: ... - def tz_localize( - self, - tz, - axis: AxisType = ..., - level: Optional[Level] = ..., - copy: _bool = ..., - ambiguous=..., - nonexistent: _str = ..., - ) -> DataFrame: ... - def unique(self) -> DataFrame: ... - @overload - def var( - self, - axis: Optional[AxisType] = ..., - skipna: Optional[_bool] = ..., - ddof: int = ..., - numeric_only: Optional[_bool] = ..., - *, - level: Level, - **kwargs, - ) -> DataFrame: ... - @overload - def var( - self, - axis: Optional[AxisType] = ..., - skipna: Optional[_bool] = ..., - level: None = ..., - ddof: int = ..., - numeric_only: Optional[_bool] = ..., - **kwargs, - ) -> Series: ... - def where( - self, - cond: Union[Series, DataFrame, _np.ndarray], - other=..., - inplace: _bool = ..., - axis: Optional[AxisType] = ..., - level: Optional[Level] = ..., - errors: _str = ..., - try_cast: _bool = ..., - ) -> DataFrame: ... - # Move from generic because Series is Generic and it returns Series[bool] there - def __invert__(self) -> DataFrame: ... 
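The frame.pyi stubs deleted above lean heavily on @overload with Literal[True]/Literal[False] to distinguish inplace calls (typed as returning None) from copying calls (typed as returning DataFrame), and on level-keyword overloads to choose Series versus DataFrame returns. Below is a minimal sketch of that pattern only; FrameLike is a hypothetical stand-in class, not the real pandas DataFrame, and the method body is a placeholder — the real stubs carry no implementation at all.

    from __future__ import annotations

    from typing import Literal, Optional, overload


    class FrameLike:
        """Hypothetical stand-in for DataFrame; only the typing pattern matters."""

        @overload
        def bfill(self, *, inplace: Literal[True], limit: Optional[int] = ...) -> None: ...
        @overload
        def bfill(self, *, inplace: Literal[False] = ..., limit: Optional[int] = ...) -> FrameLike: ...
        def bfill(self, *, inplace: bool = False, limit: Optional[int] = None) -> Optional[FrameLike]:
            # Placeholder runtime behaviour; the stubs only describe return shapes.
            return None if inplace else self


    frame = FrameLike()
    copied = frame.bfill()               # a checker infers FrameLike here
    nothing = frame.bfill(inplace=True)  # a checker infers None here

The external pandas-stubs dependency that pyproject.toml now points at is expected to express the same kind of distinction, which is why dropping these vendored copies should leave such call shapes type-checkable.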
diff --git a/typings/pandas/core/generic.pyi b/typings/pandas/core/generic.pyi deleted file mode 100644 index 818ae35..0000000 --- a/typings/pandas/core/generic.pyi +++ /dev/null @@ -1,513 +0,0 @@ -import numpy as np -import sys -import pandas.core.indexing as indexing -from pandas._typing import ( - ArrayLike as ArrayLike, - Axis as Axis, - AxisType as AxisType, - Dtype as Dtype, - FilePathOrBuffer as FilePathOrBuffer, - FrameOrSeriesUnion as FrameOrSeriesUnion, - IgnoreRaise as IgnoreRaise, - JSONSerializable as JSONSerializable, - Level as Level, - Renamer as Renamer, - ListLike as ListLike, - Scalar as Scalar, - SeriesAxisType as SeriesAxisType, - FrameOrSeries as FrameOrSeries, - S1 as S1, - Timestamp as Timestamp, - Timedelta as Timedelta, - T, -) -from pandas.core.base import PandasObject as PandasObject -from pandas.core.indexes.base import Index as Index -from pandas.core.internals import BlockManager as BlockManager -from pandas.core.resample import Resampler -from typing import ( - Any, - Callable, - Dict, - Hashable, - Iterator, - List, - Mapping, - Optional, - Sequence, - Tuple, - TypeVar, - Union, - overload, -) - -if sys.version_info >= (3, 8): - from typing import Literal -else: - from typing_extensions import Literal - -_bool = bool -_str = str - -class NDFrame(PandasObject, indexing.IndexingMixin): - def __new__( - cls, - data: BlockManager, - axes: Optional[List[Index]] = ..., - copy: _bool = ..., - dtype: Optional[Dtype] = ..., - attrs: Optional[Mapping[Optional[Hashable], Any]] = ..., - fastpath: _bool = ..., - ) -> NDFrame: ... - def set_flags( - self: FrameOrSeries, - *, - copy: bool = ..., - allows_duplicate_labels: Optional[bool] = ..., - ) -> FrameOrSeries: ... - @property - def attrs(self) -> Dict[Optional[Hashable], Any]: ... - @attrs.setter - def attrs(self, value: Mapping[Optional[Hashable], Any]) -> None: ... - @property - def shape(self) -> Tuple[int, ...]: ... - @property - def axes(self) -> List[Index]: ... - @property - def ndim(self) -> int: ... - @property - def size(self) -> int: ... - def swapaxes( - self, axis1: SeriesAxisType, axis2: SeriesAxisType, copy: _bool = ... - ) -> NDFrame: ... - def droplevel(self, level: Level, axis: SeriesAxisType = ...) -> NDFrame: ... - def pop(self, item: _str) -> NDFrame: ... - def squeeze(self, axis=...): ... - def swaplevel(self, i=..., j=..., axis=...) -> NDFrame: ... - def equals(self, other: Series[S1]) -> _bool: ... - def __neg__(self) -> None: ... - def __pos__(self) -> None: ... - def __nonzero__(self) -> None: ... - def bool(self) -> _bool: ... - def __abs__(self) -> NDFrame: ... - def __round__(self, decimals: int = ...) -> NDFrame: ... - def __hash__(self): ... - def __iter__(self) -> Iterator: ... - def keys(self): ... - def iteritems(self): ... - def __len__(self) -> int: ... - def __contains__(self, key) -> _bool: ... - @property - def empty(self) -> _bool: ... - __array_priority__: int = ... - def __array__(self, dtype=...) -> np.ndarray: ... - def __array_wrap__(self, result, context=...): ... 
- def to_excel( - self, - excel_writer, - sheet_name: _str = ..., - na_rep: _str = ..., - float_format: Optional[_str] = ..., - columns: Optional[Union[_str, Sequence[_str]]] = ..., - header: _bool = ..., - index: _bool = ..., - index_label: Optional[Union[_str, Sequence[_str]]] = ..., - startrow: int = ..., - startcol: int = ..., - engine: Optional[_str] = ..., - merge_cells: _bool = ..., - encoding: Optional[_str] = ..., - inf_rep: _str = ..., - verbose: _bool = ..., - freeze_panes: Optional[Tuple[int, int]] = ..., - ) -> None: ... - @overload - def to_json( - self, - path_or_buf: Optional[FilePathOrBuffer], - orient: Optional[ - Union[ - _str, Literal["split", "records", "index", "columns", "values", "table"] - ] - ] = ..., - date_format: Optional[Union[_str, Literal["epoch", "iso"]]] = ..., - double_precision: int = ..., - force_ascii: _bool = ..., - date_unit: Union[_str, Literal["s", "ms", "us", "ns"]] = ..., - default_handler: Optional[ - Callable[[Any], Union[_str, int, float, _bool, List, Dict]] - ] = ..., - lines: _bool = ..., - compression: Union[_str, Literal["infer", "gzip", "bz2", "zip", "xz"]] = ..., - index: _bool = ..., - indent: Optional[int] = ..., - ) -> None: ... - @overload - def to_json( - self, - orient: Optional[ - Union[ - _str, Literal["split", "records", "index", "columns", "values", "table"] - ] - ] = ..., - date_format: Optional[Union[_str, Literal["epoch", "iso"]]] = ..., - double_precision: int = ..., - force_ascii: _bool = ..., - date_unit: Union[_str, Literal["s", "ms", "us", "ns"]] = ..., - default_handler: Optional[ - Callable[[Any], Union[_str, int, float, _bool, List, Dict]] - ] = ..., - lines: _bool = ..., - compression: Optional[ - Union[_str, Literal["infer", "gzip", "bz2", "zip", "xz"]] - ] = ..., - index: _bool = ..., - indent: Optional[int] = ..., - ) -> _str: ... - def to_hdf( - self, - path_or_buf: FilePathOrBuffer, - key: _str, - mode: _str = ..., - complevel: Optional[int] = ..., - complib: Optional[_str] = ..., - append: _bool = ..., - format: Optional[_str] = ..., - index: _bool = ..., - min_itemsize: Optional[Union[int, Dict[_str, int]]] = ..., - nan_rep=..., - dropna: Optional[_bool] = ..., - data_columns: Optional[List[_str]] = ..., - errors: _str = ..., - encoding: _str = ..., - ) -> None: ... - def to_sql( - self, - name: _str, - con, - schema: Optional[_str] = ..., - if_exists: _str = ..., - index: _bool = ..., - index_label: Optional[Union[_str, Sequence[_str]]] = ..., - chunksize: Optional[int] = ..., - dtype: Optional[Union[Dict, Scalar]] = ..., - method: Optional[Union[_str, Callable]] = ..., - ) -> None: ... - def to_pickle( - self, - path: _str, - compression: Union[_str, Literal["infer", "gzip", "bz2", "zip", "xz"]] = ..., - protocol: int = ..., - ) -> None: ... - def to_clipboard( - self, excel: _bool = ..., sep: Optional[_str] = ..., **kwargs - ) -> None: ... - def to_xarray(self): ... 
- @overload - def to_latex( - self, - buf: Optional[FilePathOrBuffer], - columns: Optional[List[_str]] = ..., - col_space: Optional[int] = ..., - header: _bool = ..., - index: _bool = ..., - na_rep: _str = ..., - formatters=..., - float_format=..., - sparsify: Optional[_bool] = ..., - index_names: _bool = ..., - bold_rows: _bool = ..., - column_format: Optional[_str] = ..., - longtable: Optional[_bool] = ..., - escape: Optional[_bool] = ..., - encoding: Optional[_str] = ..., - decimal: _str = ..., - multicolumn: Optional[_bool] = ..., - multicolumn_format: Optional[_str] = ..., - multirow: Optional[_bool] = ..., - caption: Optional[Union[_str, Tuple[_str, _str]]] = ..., - label: Optional[_str] = ..., - position: Optional[_str] = ..., - ) -> None: ... - @overload - def to_latex( - self, - columns: Optional[List[_str]] = ..., - col_space: Optional[int] = ..., - header: _bool = ..., - index: _bool = ..., - na_rep: _str = ..., - formatters=..., - float_format=..., - sparsify: Optional[_bool] = ..., - index_names: _bool = ..., - bold_rows: _bool = ..., - column_format: Optional[_str] = ..., - longtable: Optional[_bool] = ..., - escape: Optional[_bool] = ..., - encoding: Optional[_str] = ..., - decimal: _str = ..., - multicolumn: Optional[_bool] = ..., - multicolumn_format: Optional[_str] = ..., - multirow: Optional[_bool] = ..., - caption: Optional[Union[_str, Tuple[_str, _str]]] = ..., - label: Optional[_str] = ..., - position: Optional[_str] = ..., - ) -> _str: ... - @overload - def to_csv( - self, - path_or_buf: Optional[FilePathOrBuffer], - sep: _str = ..., - na_rep: _str = ..., - float_format: Optional[_str] = ..., - columns: Optional[Sequence[Hashable]] = ..., - header: Union[_bool, List[_str]] = ..., - index: _bool = ..., - index_label: Optional[Union[_bool, _str, Sequence[Hashable]]] = ..., - mode: _str = ..., - encoding: Optional[_str] = ..., - compression: Union[_str, Mapping[_str, _str]] = ..., - quoting: Optional[int] = ..., - quotechar: _str = ..., - line_terminator: Optional[_str] = ..., - chunksize: Optional[int] = ..., - date_format: Optional[_str] = ..., - doublequote: _bool = ..., - escapechar: Optional[_str] = ..., - decimal: _str = ..., - errors: _str = ..., - storage_options: Optional[Dict[_str, Any]] = ..., - ) -> None: ... - @overload - def to_csv( - self, - sep: _str = ..., - na_rep: _str = ..., - float_format: Optional[_str] = ..., - columns: Optional[Sequence[Hashable]] = ..., - header: Union[_bool, List[_str]] = ..., - index: _bool = ..., - index_label: Optional[Union[_bool, _str, Sequence[Hashable]]] = ..., - mode: _str = ..., - encoding: Optional[_str] = ..., - compression: Union[_str, Mapping[_str, _str]] = ..., - quoting: Optional[int] = ..., - quotechar: _str = ..., - line_terminator: Optional[_str] = ..., - chunksize: Optional[int] = ..., - date_format: Optional[_str] = ..., - doublequote: _bool = ..., - escapechar: Optional[_str] = ..., - decimal: _str = ..., - errors: _str = ..., - storage_options: Optional[Dict[_str, Any]] = ..., - ) -> _str: ... - def take( - self, indices, axis=..., is_copy: Optional[_bool] = ..., **kwargs - ) -> NDFrame: ... - def xs( - self, - key: Union[_str, Tuple[_str]], - axis: SeriesAxisType = ..., - level: Optional[Level] = ..., - drop_level: _bool = ..., - ) -> FrameOrSeriesUnion: ... - def __delitem__(self, idx: Hashable): ... - def get(self, key: object, default: Optional[Dtype] = ...) -> Dtype: ... 
- def reindex_like( - self, - other, - method: Optional[_str] = ..., - copy: _bool = ..., - limit=..., - tolerance=..., - ) -> NDFrame: ... - @overload - def drop( - self, - labels: Hashable | list[Hashable] = ..., - *, - axis: Axis = ..., - index: Hashable | list[Hashable] = ..., - columns: Hashable | list[Hashable] = ..., - level: Level | None = ..., - inplace: Literal[True], - errors: IgnoreRaise = ..., - ) -> None: ... - @overload - def drop( - self: NDFrame, - labels: Hashable | list[Hashable] = ..., - *, - axis: Axis = ..., - index: Hashable | list[Hashable] = ..., - columns: Hashable | list[Hashable] = ..., - level: Level | None = ..., - inplace: Literal[False] = ..., - errors: IgnoreRaise = ..., - ) -> NDFrame: ... - @overload - def drop( - self: NDFrame, - labels: Hashable | list[Hashable] = ..., - *, - axis: Axis = ..., - index: Hashable | list[Hashable] = ..., - columns: Hashable | list[Hashable] = ..., - level: Level | None = ..., - inplace: _bool = ..., - errors: IgnoreRaise = ..., - ) -> NDFrame | None: ... - def add_prefix(self, prefix: _str) -> NDFrame: ... - def add_suffix(self, suffix: _str) -> NDFrame: ... - def sort_index( - self, - axis=..., - level=..., - ascending: _bool = ..., - inplace: _bool = ..., - kind: _str = ..., - na_position: _str = ..., - sort_remaining: _bool = ..., - ignore_index: _bool = ..., - ): ... - def filter( - self, - items=..., - like: Optional[_str] = ..., - regex: Optional[_str] = ..., - axis=..., - ) -> NDFrame: ... - def head(self: FrameOrSeries, n: int = ...) -> FrameOrSeries: ... - def tail(self: FrameOrSeries, n: int = ...) -> FrameOrSeries: ... - def pipe( - self, func: Callable[..., T] | tuple[Callable[..., T], str], *args, **kwargs - ) -> T: ... - def __finalize__(self, other, method=..., **kwargs) -> NDFrame: ... - def __getattr__(self, name: _str): ... - def __setattr__(self, name: _str, value) -> None: ... - @property - def values(self) -> ArrayLike: ... - @property - def dtypes(self): ... - def astype( - self: FrameOrSeries, dtype, copy: _bool = ..., errors: str = ... - ) -> FrameOrSeries: ... - def copy(self: FrameOrSeries, deep: _bool = ...) -> FrameOrSeries: ... - def __copy__(self, deep: _bool = ...) -> NDFrame: ... - def __deepcopy__(self, memo=...) -> NDFrame: ... - def infer_objects(self) -> NDFrame: ... - def convert_dtypes( - self: FrameOrSeries, - infer_objects: _bool = ..., - convert_string: _bool = ..., - convert_integer: _bool = ..., - convert_boolean: _bool = ..., - ) -> FrameOrSeries: ... - def fillna( - self, - value=..., - method=..., - axis=..., - inplace: _bool = ..., - limit=..., - downcast=..., - ) -> Optional[NDFrame]: ... - def replace( - self, - to_replace=..., - value=..., - inplace: _bool = ..., - limit=..., - regex: _bool = ..., - method: _str = ..., - ): ... - def asof(self, where, subset=...): ... - def isna(self) -> NDFrame: ... - def isnull(self) -> NDFrame: ... - def notna(self) -> NDFrame: ... - def notnull(self) -> NDFrame: ... - def clip( - self, lower=..., upper=..., axis=..., inplace: _bool = ..., *args, **kwargs - ) -> NDFrame: ... - def asfreq( - self, - freq, - method=..., - how: Optional[_str] = ..., - normalize: _bool = ..., - fill_value=..., - ) -> NDFrame: ... - def at_time(self, time, asof: _bool = ..., axis=...) -> NDFrame: ... - def between_time( - self, - start_time, - end_time, - include_start: _bool = ..., - include_end: _bool = ..., - axis=..., - ) -> NDFrame: ... - def first(self, offset) -> NDFrame: ... - def last(self, offset) -> NDFrame: ... 
- def rank( - self, - axis=..., - method: _str = ..., - numeric_only: Optional[_bool] = ..., - na_option: _str = ..., - ascending: _bool = ..., - pct: _bool = ..., - ) -> NDFrame: ... - def where( - self, - cond, - other=..., - inplace: _bool = ..., - axis=..., - level=..., - errors: _str = ..., - try_cast: _bool = ..., - ): ... - def mask( - self, - cond, - other=..., - inplace: _bool = ..., - axis=..., - level=..., - errors: _str = ..., - try_cast: _bool = ..., - ): ... - def shift(self, periods=..., freq=..., axis=..., fill_value=...) -> NDFrame: ... - def slice_shift(self, periods: int = ..., axis=...) -> NDFrame: ... - def tshift(self, periods: int = ..., freq=..., axis=...) -> NDFrame: ... - def truncate( - self, before=..., after=..., axis=..., copy: _bool = ... - ) -> NDFrame: ... - def tz_convert(self, tz, axis=..., level=..., copy: _bool = ...) -> NDFrame: ... - def tz_localize( - self, - tz, - axis=..., - level=..., - copy: _bool = ..., - ambiguous=..., - nonexistent: str = ..., - ) -> NDFrame: ... - def abs(self) -> NDFrame: ... - def describe( - self, - percentiles=..., - include=..., - exclude=..., - datetime_is_numeric: Optional[_bool] = ..., - ) -> NDFrame: ... - def pct_change( - self, periods=..., fill_method=..., limit=..., freq=..., **kwargs - ) -> NDFrame: ... - def transform(self, func, *args, **kwargs): ... - def first_valid_index(self): ... - def last_valid_index(self): ... - -from pandas.core.series import Series as Series diff --git a/typings/pandas/core/groupby/__init__.pyi b/typings/pandas/core/groupby/__init__.pyi deleted file mode 100644 index 178542b..0000000 --- a/typings/pandas/core/groupby/__init__.pyi +++ /dev/null @@ -1,2 +0,0 @@ -from pandas.core.groupby.generic import NamedAgg as NamedAgg -from pandas.core.groupby.grouper import Grouper as Grouper diff --git a/typings/pandas/core/groupby/categorical.pyi b/typings/pandas/core/groupby/categorical.pyi deleted file mode 100644 index fd76c4d..0000000 --- a/typings/pandas/core/groupby/categorical.pyi +++ /dev/null @@ -1,6 +0,0 @@ -from pandas.core.arrays.categorical import ( - Categorical as Categorical, -) # , CategoricalDtype as CategoricalDtype - -def recode_for_groupby(c: Categorical, sort: bool, observed: bool): ... -def recode_from_groupby(c: Categorical, sort: bool, ci): ... diff --git a/typings/pandas/core/groupby/generic.pyi b/typings/pandas/core/groupby/generic.pyi deleted file mode 100644 index d4756da..0000000 --- a/typings/pandas/core/groupby/generic.pyi +++ /dev/null @@ -1,295 +0,0 @@ -from matplotlib.axes import Axes as PlotAxes, SubplotBase as AxesSubplot -import numpy as np -import sys -from pandas._typing import ( - FrameOrSeries as FrameOrSeries, - AxisType, - Dtype, - Level, - F, - AggFuncType, - S1, -) -from pandas.core.frame import DataFrame as DataFrame -from pandas.core.groupby.groupby import ( - GroupBy as GroupBy, -) # , get_groupby as get_groupby -from pandas.core.groupby.grouper import Grouper as Grouper -from pandas.core.series import Series as Series -from typing import ( - Any, - Callable, - Dict, - FrozenSet, - List, - NamedTuple, - Optional, - Sequence, - Tuple, - Type, - Union, - overload, -) - -if sys.version_info >= (3, 8): - from typing import Literal -else: - from typing_extensions import Literal - -AggScalar = Union[str, Callable[..., Any]] -ScalarResult = ... - -class NamedAgg(NamedTuple): - column: str = ... - aggfunc: AggScalar = ... - -def generate_property(name: str, klass: Type[FrameOrSeries]): ... 
-def pin_whitelisted_properties( - klass: Type[FrameOrSeries], whitelist: FrozenSet[str] -): ... - -class SeriesGroupBy(GroupBy): - def any(self, skipna: bool = ...) -> Series[bool]: ... - def all(self, skipna: bool = ...) -> Series[bool]: ... - def apply(self, func, *args, **kwargs) -> Series: ... - @overload - def aggregate(self, func: Union[List, Dict], *args, **kwargs) -> DataFrame: ... - @overload - def aggregate(self, func: Union[str, Callable], *args, **kwargs) -> Series: ... - agg = aggregate - def transform(self, func, *args, **kwargs): ... - def filter(self, func, dropna: bool = ..., *args, **kwargs): ... - def nunique(self, dropna: bool = ...) -> Series: ... - def describe(self, **kwargs) -> DataFrame: ... - def value_counts( - self, - normalize: bool = ..., - sort: bool = ..., - ascending: bool = ..., - bins=..., - dropna: bool = ..., - ) -> DataFrame: ... - def count(self) -> Series[int]: ... - def pct_change( - self, - periods: int = ..., - fill_method: str = ..., - limit=..., - freq=..., - axis: AxisType = ..., - ) -> Series[float]: ... - # Overrides and others from original pylance stubs - @property - def is_monotonic_increasing(self) -> bool: ... - @property - def is_monotonic_decreasing(self) -> bool: ... - def bfill(self, limit: Optional[int] = ...) -> Series[S1]: ... - def cummax(self, axis: AxisType = ..., **kwargs) -> Series[S1]: ... - def cummin(self, axis: AxisType = ..., **kwargs) -> Series[S1]: ... - def cumprod(self, axis: AxisType = ..., **kwargs) -> Series[S1]: ... - def cumsum(self, axis: AxisType = ..., **kwargs) -> Series[S1]: ... - def ffill(self, limit: Optional[int] = ...) -> Series[S1]: ... - def first(self, **kwargs) -> Series[S1]: ... - def head(self, n: int = ...) -> Series[S1]: ... - def last(self, **kwargs) -> Series[S1]: ... - def max(self, **kwargs) -> Series[S1]: ... - def mean(self, **kwargs) -> Series[S1]: ... - def median(self, **kwargs) -> Series[S1]: ... - def min(self, **kwargs) -> Series[S1]: ... - def nlargest(self, n: int = ..., keep: str = ...) -> Series[S1]: ... - def nsmallest(self, n: int = ..., keep: str = ...) -> Series[S1]: ... - def nth( - self, n: Union[int, Sequence[int]], dropna: Optional[str] = ... - ) -> Series[S1]: ... - -class DataFrameGroupBy(GroupBy): - def any(self, skipna: bool = ...) -> DataFrame: ... - def all(self, skipna: bool = ...) -> DataFrame: ... - def apply(self, func, *args, **kwargs) -> DataFrame: ... - @overload - def aggregate(self, arg: str, *args, **kwargs) -> DataFrame: ... - @overload - def aggregate(self, arg: Dict, *args, **kwargs) -> DataFrame: ... - @overload - def aggregate(self, arg: Callable[[], Any], *args, **kwargs) -> DataFrame: ... - @overload - def agg(self, arg: str, *args, **kwargs) -> DataFrame: ... - @overload - def agg(self, arg: Dict, *args, **kwargs) -> DataFrame: ... - @overload - def agg(self, arg: F, *args, **kwargs) -> DataFrame: ... - def transform(self, func, *args, **kwargs): ... - def filter( - self, func: Callable, dropna: bool = ..., *args, **kwargs - ) -> DataFrame: ... - def nunique(self, dropna: bool = ...) -> DataFrame: ... - @overload - def __getitem__(self, item: str) -> SeriesGroupBy: ... - @overload - def __getitem__(self, item: List[str]) -> DataFrameGroupBy: ... - def count(self) -> DataFrame: ... 
- def boxplot( - self, - grouped: DataFrame, - subplots: bool = ..., - column: Optional[Union[str, Sequence]] = ..., - fontsize: Union[int, str] = ..., - rot: float = ..., - grid: bool = ..., - ax: Optional[PlotAxes] = ..., - figsize: Optional[Tuple[float, float]] = ..., - layout: Optional[Tuple[int, int]] = ..., - sharex: bool = ..., - sharey: bool = ..., - bins: Union[int, Sequence] = ..., - backend: Optional[str] = ..., - **kwargs, - ) -> Union[AxesSubplot, Sequence[AxesSubplot]]: ... - # Overrides and others from original pylance stubs - ## These are "properties" but properties can't have all these arguments?! - def corr( - self, method: Union[str, Callable], min_periods: int = ... - ) -> DataFrame: ... - def cov(self, min_periods: int = ...) -> DataFrame: ... - def diff(self, periods: int = ..., axis: AxisType = ...) -> DataFrame: ... - def bfill(self, limit: Optional[int] = ...) -> DataFrame: ... - def corrwith( - self, - other: DataFrame, - axis: AxisType = ..., - drop: bool = ..., - method: str = ..., - ) -> Series: ... - def cummax(self, axis: AxisType = ..., **kwargs) -> DataFrame: ... - def cummin(self, axis: AxisType = ..., **kwargs) -> DataFrame: ... - def cumprod(self, axis: AxisType = ..., **kwargs) -> DataFrame: ... - def cumsum(self, axis: AxisType = ..., **kwargs) -> DataFrame: ... - def describe(self, **kwargs) -> DataFrame: ... - def ffill(self, limit: Optional[int] = ...) -> DataFrame: ... - @overload - def fillna( - self, - value, - method: Optional[str] = ..., - axis: AxisType = ..., - limit: Optional[int] = ..., - downcast: Optional[Dict] = ..., - *, - inplace: Literal[True], - ) -> None: ... - @overload - def fillna( - self, - value, - method: Optional[str] = ..., - axis: AxisType = ..., - limit: Optional[int] = ..., - downcast: Optional[Dict] = ..., - *, - inplace: Literal[False], - ) -> DataFrame: ... - @overload - def fillna( - self, - value, - method: Optional[str] = ..., - axis: AxisType = ..., - inplace: bool = ..., - limit: Optional[int] = ..., - downcast: Optional[Dict] = ..., - ) -> Union[None, DataFrame]: ... - def first(self, **kwargs) -> DataFrame: ... - def head(self, n: int = ...) -> DataFrame: ... - def hist( - self, - data: DataFrame, - column: Optional[Union[str, Sequence]] = ..., - by=..., - grid: bool = ..., - xlabelsize: Optional[int] = ..., - xrot: Optional[float] = ..., - ylabelsize: Optional[int] = ..., - yrot: Optional[float] = ..., - ax: Optional[PlotAxes] = ..., - sharex: bool = ..., - sharey: bool = ..., - figsize: Optional[Tuple[float, float]] = ..., - layout: Optional[Tuple[int, int]] = ..., - bins: Union[int, Sequence] = ..., - backend: Optional[str] = ..., - **kwargs, - ) -> Union[AxesSubplot, Sequence[AxesSubplot]]: ... - def idxmax(self, axis: AxisType = ..., skipna: bool = ...) -> Series: ... - def idxmin(self, axis: AxisType = ..., skipna: bool = ...) -> Series: ... - def last(self, **kwargs) -> DataFrame: ... - @overload - def mad( - self, - axis: AxisType = ..., - skipna: bool = ..., - numeric_only: Optional[bool] = ..., - *, - level: Level, - **kwargs, - ) -> DataFrame: ... - @overload - def mad( - self, - axis: AxisType = ..., - skipna: bool = ..., - level: None = ..., - numeric_only: Optional[bool] = ..., - **kwargs, - ) -> Series: ... - def max(self, **kwargs) -> DataFrame: ... - def mean(self, **kwargs) -> DataFrame: ... - def median(self, **kwargs) -> DataFrame: ... - def min(self, **kwargs) -> DataFrame: ... - def nth( - self, n: Union[int, Sequence[int]], dropna: Optional[str] = ... - ) -> DataFrame: ... 
- def pct_change( - self, - periods: int = ..., - fill_method: str = ..., - limit=..., - freq=..., - axis: AxisType = ..., - ) -> DataFrame: ... - def prod(self, **kwargs) -> DataFrame: ... - def quantile(self, q: float = ..., interpolation: str = ...) -> DataFrame: ... - def resample(self, rule, *args, **kwargs) -> Grouper: ... - def sem(self, ddof: int = ...) -> DataFrame: ... - def shift( - self, - periods: int = ..., - freq: str = ..., - axis: AxisType = ..., - fill_value=..., - ) -> DataFrame: ... - def size(self) -> Series[int]: ... - @overload - def skew( - self, - axis: AxisType = ..., - skipna: bool = ..., - numeric_only: bool = ..., - *, - level: Level, - **kwargs, - ) -> DataFrame: ... - @overload - def skew( - self, - axis: AxisType = ..., - skipna: bool = ..., - level: None = ..., - numeric_only: bool = ..., - **kwargs, - ) -> Series: ... - def std(self, ddof: int = ...) -> DataFrame: ... - def sum(self, **kwargs) -> DataFrame: ... - def tail(self, n: int = ...) -> DataFrame: ... - def take(self, indices: Sequence, axis: AxisType = ..., **kwargs) -> DataFrame: ... - def tshift(self, periods: int, freq=..., axis: AxisType = ...) -> DataFrame: ... - def var(self, ddof: int = ...) -> DataFrame: ... diff --git a/typings/pandas/core/groupby/groupby.pyi b/typings/pandas/core/groupby/groupby.pyi deleted file mode 100644 index e65c80d..0000000 --- a/typings/pandas/core/groupby/groupby.pyi +++ /dev/null @@ -1,138 +0,0 @@ -from pandas._typing import ( - FrameOrSeries as FrameOrSeries, - FrameOrSeriesUnion as FrameOrSeriesUnion, - Scalar as Scalar, - AxisType as AxisType, - KeysArgType, -) -from pandas.core.base import ( - PandasObject as PandasObject, - SelectionMixin as SelectionMixin, -) -from pandas.core.frame import DataFrame as DataFrame -from pandas.core.generic import NDFrame as NDFrame - -from pandas.core.groupby import ops as ops -from pandas.core.indexes.api import Index as Index -from pandas.core.series import Series as Series -from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union - -class GroupByPlot(PandasObject): - def __init__(self, groupby) -> None: ... - def __call__(self, *args, **kwargs): ... - def __getattr__(self, name: str): ... - -class _GroupBy(PandasObject): - level = ... - as_index = ... - keys = ... - sort = ... - group_keys = ... - squeeze = ... - observed = ... - mutated = ... - obj = ... - axis = ... - grouper = ... - exclusions = ... - def __init__( - self, - obj: NDFrame, - keys: Optional[KeysArgType] = ..., - axis: int = ..., - level=..., - grouper: Optional[ops.BaseGrouper] = ..., - exclusions=..., - selection=..., - as_index: bool = ..., - sort: bool = ..., - group_keys: bool = ..., - squeeze: bool = ..., - observed: bool = ..., - mutated: bool = ..., - ) -> None: ... - def __len__(self) -> int: ... - @property - def groups(self) -> Dict[str, str]: ... - @property - def ngroups(self): ... - @property - def indices(self) -> Dict[str, Index]: ... - def __getattr__(self, attr: str): ... - def pipe(self, func: Callable, *args, **kwargs): ... - plot = ... - def get_group(self, name, obj: Optional[DataFrame] = ...) -> DataFrame: ... - def __iter__(self) -> Generator[Tuple[str, Any], None, None]: ... - def apply(self, func: Callable, *args, **kwargs) -> FrameOrSeriesUnion: ... - -class GroupBy(_GroupBy): - def count(self) -> FrameOrSeriesUnion: ... - def mean(self, **kwargs) -> FrameOrSeriesUnion: ... - def median(self, **kwargs) -> FrameOrSeriesUnion: ... - def std(self, ddof: int = ...) -> FrameOrSeriesUnion: ... 
- def var(self, ddof: int = ...) -> FrameOrSeriesUnion: ... - def sem(self, ddof: int = ...) -> FrameOrSeriesUnion: ... - def size(self) -> Series: ... - def ohlc(self) -> DataFrame: ... - def describe(self, **kwargs) -> FrameOrSeriesUnion: ... - def resample(self, rule, *args, **kwargs): ... - def rolling(self, *args, **kwargs): ... - def expanding(self, *args, **kwargs): ... - def pad(self, limit: Optional[int] = ...): ... - def ffill(self, limit: Optional[int] = ...) -> FrameOrSeriesUnion: ... - def backfill(self, limit: Optional[int] = ...) -> FrameOrSeriesUnion: ... - def bfill(self, limit: Optional[int] = ...) -> FrameOrSeriesUnion: ... - def nth( - self, n: Union[int, List[int]], dropna: Optional[str] = ... - ) -> FrameOrSeriesUnion: ... - def quantile(self, q=..., interpolation: str = ...): ... - def ngroup(self, ascending: bool = ...) -> Series: ... - def cumcount(self, ascending: bool = ...) -> Series: ... - def rank( - self, - method: str = ..., - ascending: bool = ..., - na_option: str = ..., - pct: bool = ..., - axis: int = ..., - ) -> DataFrame: ... - def cummax(self, axis: AxisType = ..., **kwargs) -> FrameOrSeriesUnion: ... - def cummin(self, axis: AxisType = ..., **kwargs) -> FrameOrSeriesUnion: ... - def cumprod(self, axis: AxisType = ..., **kwargs) -> FrameOrSeriesUnion: ... - def cumsum(self, axis: AxisType = ..., **kwargs) -> FrameOrSeriesUnion: ... - def shift( - self, periods: int = ..., freq=..., axis: AxisType = ..., fill_value=... - ): ... - def pct_change( - self, - periods: int = ..., - fill_method: str = ..., - limit=..., - freq=..., - axis: AxisType = ..., - ) -> FrameOrSeriesUnion: ... - def head(self, n: int = ...) -> FrameOrSeriesUnion: ... - def tail(self, n: int = ...) -> FrameOrSeriesUnion: ... - # Surplus methodss from original pylance stubs; should they go away? - def first(self, **kwargs) -> FrameOrSeriesUnion: ... - def last(self, **kwargs) -> FrameOrSeriesUnion: ... - def max(self, **kwargs) -> FrameOrSeriesUnion: ... - def min(self, **kwargs) -> FrameOrSeriesUnion: ... - def prod(self, **kwargs) -> FrameOrSeriesUnion: ... - def sum(self, **kwargs) -> FrameOrSeriesUnion: ... - -def get_groupby( - obj: NDFrame, - by: Optional[KeysArgType] = ..., - axis: int = ..., - level=..., - grouper: Optional[ops.BaseGrouper] = ..., - exclusions=..., - selection=..., - as_index: bool = ..., - sort: bool = ..., - group_keys: bool = ..., - squeeze: bool = ..., - observed: bool = ..., - mutated: bool = ..., -) -> GroupBy: ... diff --git a/typings/pandas/core/groupby/ops.pyi b/typings/pandas/core/groupby/ops.pyi deleted file mode 100644 index 8ab91d8..0000000 --- a/typings/pandas/core/groupby/ops.pyi +++ /dev/null @@ -1,101 +0,0 @@ -import numpy as np -from pandas._typing import FrameOrSeries as FrameOrSeries -from pandas.core.groupby import grouper as grouper -from pandas.core.indexes.api import Index as Index -from pandas.core.series import Series as Series -from typing import List, Optional, Sequence, Tuple - -class BaseGrouper: - axis = ... - sort = ... - group_keys = ... - mutated = ... - indexer = ... - def __init__( - self, - axis: Index, - groupings: Sequence[grouper.Grouping], - sort: bool = ..., - group_keys: bool = ..., - mutated: bool = ..., - indexer: Optional[np.ndarray] = ..., - ) -> None: ... - @property - def groupings(self) -> List[grouper.Grouping]: ... - @property - def shape(self): ... - def __iter__(self): ... - @property - def nkeys(self) -> int: ... - def get_iterator(self, data: FrameOrSeries, axis: int = ...): ... 
- def apply(self, f, data: FrameOrSeries, axis: int = ...): ... - def indices(self): ... - @property - def codes(self) -> List[np.ndarray]: ... - @property - def levels(self) -> List[Index]: ... - @property - def names(self): ... - def size(self) -> Series: ... - def groups(self): ... - def is_monotonic(self) -> bool: ... - def group_info(self): ... - def codes_info(self) -> np.ndarray: ... - def ngroups(self) -> int: ... - @property - def reconstructed_codes(self) -> List[np.ndarray]: ... - def result_index(self) -> Index: ... - def get_group_levels(self): ... - def aggregate( - self, values, how: str, axis: int = ..., min_count: int = ... - ) -> Tuple[np.ndarray, Optional[List[str]]]: ... - def transform(self, values, how: str, axis: int = ..., **kwargs): ... - def agg_series(self, obj: Series, func): ... - -class BinGrouper(BaseGrouper): - bins = ... - binlabels = ... - mutated = ... - indexer = ... - def __init__( - self, - bins, - binlabels, - filter_empty: bool = ..., - mutated: bool = ..., - indexer=..., - ) -> None: ... - def groups(self): ... - @property - def nkeys(self) -> int: ... - def get_iterator(self, data: FrameOrSeries, axis: int = ...): ... - def indices(self): ... - def group_info(self): ... - def reconstructed_codes(self) -> List[np.ndarray]: ... - def result_index(self): ... - @property - def levels(self): ... - @property - def names(self): ... - @property - def groupings(self) -> List[grouper.Grouping]: ... - def agg_series(self, obj: Series, func): ... - -class DataSplitter: - data = ... - labels = ... - ngroups = ... - axis = ... - def __init__( - self, data: FrameOrSeries, labels, ngroups: int, axis: int = ... - ) -> None: ... - def slabels(self): ... - def sort_idx(self): ... - def __iter__(self): ... - -class SeriesSplitter(DataSplitter): ... - -class FrameSplitter(DataSplitter): - def fast_apply(self, f, names): ... - -def get_splitter(data: FrameOrSeries, *args, **kwargs) -> DataSplitter: ... diff --git a/typings/pandas/core/indexes/__init__.pyi b/typings/pandas/core/indexes/__init__.pyi deleted file mode 100644 index e69de29..0000000 diff --git a/typings/pandas/core/indexes/accessors.pyi b/typings/pandas/core/indexes/accessors.pyi deleted file mode 100644 index bfc00bb..0000000 --- a/typings/pandas/core/indexes/accessors.pyi +++ /dev/null @@ -1,30 +0,0 @@ -from pandas.core.accessor import PandasDelegate as PandasDelegate -from pandas.core.base import ( - NoNewAttributesMixin as NoNewAttributesMixin, - PandasObject as PandasObject, -) -from pandas.core.series import Series - -class Properties(PandasDelegate, PandasObject, NoNewAttributesMixin): - orig = ... - name = ... - def __init__(self, data: Series, orig) -> None: ... - -class DatetimeProperties(Properties): - def to_pydatetime(self): ... - @property - def freq(self): ... - -class TimedeltaProperties(Properties): - def to_pytimedelta(self): ... - @property - def components(self): ... - @property - def freq(self): ... - -class PeriodProperties(Properties): ... - -class CombinedDatetimelikeProperties( - DatetimeProperties, TimedeltaProperties, PeriodProperties -): - def __new__(cls, data: Series): ... 
diff --git a/typings/pandas/core/indexes/base.pyi b/typings/pandas/core/indexes/base.pyi deleted file mode 100644 index f49f7b6..0000000 --- a/typings/pandas/core/indexes/base.pyi +++ /dev/null @@ -1,221 +0,0 @@ -import numpy as np -from pandas._typing import ( - Dtype as Dtype, - DtypeArg as DtypeArg, - Label as Label, - Level as Level, - Scalar as Scalar, - T1 as T1, - np_ndarray_str, - np_ndarray_int64, - np_ndarray_bool, -) - -from pandas._typing import ( - Series as Series, - DataFrame as DataFrame, - DtypeObj as DtypeObj, -) - -from pandas.core.arrays import ExtensionArray - -from pandas.core.base import IndexOpsMixin, PandasObject - -from pandas.core.strings import StringMethods - -from typing import ( - Callable, - Dict, - Generic, - Hashable, - Iterable, - Iterator, - List, - Literal, - Optional, - Sequence, - Tuple, - Union, - overload, -) - -class InvalidIndexError(Exception): ... - -_str = str - -class Index(IndexOpsMixin, PandasObject): - def __new__( - cls, - data: Iterable = ..., - dtype=..., - copy: bool = ..., - name=..., - tupleize_cols: bool = ..., - **kwargs - ) -> Index: ... - def __init__( - self, - data: Iterable, - dtype=..., - copy: bool = ..., - name=..., - tupleize_cols: bool = ..., - ): ... - @property - def str(self) -> StringMethods[Index]: ... - @property - def asi8(self) -> np_ndarray_int64: ... - def is_(self, other) -> bool: ... - def __len__(self) -> int: ... - def __array__(self, dtype=...) -> np.ndarray: ... - def __array_wrap__(self, result, context=...): ... - @property - def dtype(self) -> DtypeObj: ... - def ravel(self, order: _str = ...): ... - def view(self, cls=...): ... - @overload - def astype(self, dtype: DtypeArg) -> Index: ... - @overload - def astype(self, dtype: T1) -> Index: ... - def take( - self, indices, axis: int = ..., allow_fill: bool = ..., fill_value=..., **kwargs - ): ... - def repeat(self, repeats, axis=...): ... - def copy(self, name=..., deep: bool = ...) -> Index: ... - def __copy__(self, **kwargs): ... - def __deepcopy__(self, memo=...): ... - def format( - self, name: bool = ..., formatter: Optional[Callable] = ..., na_rep: _str = ... - ) -> List[_str]: ... - def to_native_types(self, slicer=..., **kwargs): ... - def to_flat_index(self): ... - def to_series(self, index=..., name=...): ... - def to_frame(self, index: bool = ..., name=...) -> DataFrame: ... - @property - def name(self): ... - @name.setter - def name(self, value) -> None: ... - @property - def names(self) -> List[_str]: ... - @names.setter - def names(self, names: List[_str]): ... - def set_names(self, names, level=..., inplace: bool = ...): ... - def rename(self, name, inplace: bool = ...): ... - @property - def nlevels(self) -> int: ... - def sortlevel(self, level=..., ascending: bool = ..., sort_remaining=...): ... - def get_level_values(self, level: Union[int, _str]) -> Index: ... - def droplevel(self, level: Union[Level, List[Level]] = ...): ... - @property - def is_monotonic(self) -> bool: ... - @property - def is_monotonic_increasing(self) -> bool: ... - @property - def is_monotonic_decreasing(self) -> bool: ... - def is_unique(self) -> bool: ... - @property - def has_duplicates(self) -> bool: ... - def is_boolean(self) -> bool: ... - def is_integer(self) -> bool: ... - def is_floating(self) -> bool: ... - def is_numeric(self) -> bool: ... - def is_object(self) -> bool: ... - def is_categorical(self) -> bool: ... - def is_interval(self) -> bool: ... - def is_mixed(self) -> bool: ... - def holds_integer(self): ... - def inferred_type(self): ... 
- def is_all_dates(self) -> bool: ... - def __reduce__(self): ... - def hasnans(self) -> bool: ... - def isna(self): ... - isnull = ... - def notna(self): ... - notnull = ... - def fillna(self, value=..., downcast=...): ... - def dropna(self, how: _str = ...): ... - def unique(self, level=...) -> Index: ... - def drop_duplicates( - self, keep: Literal["first", "last", False] = ... - ) -> IndexOpsMixin: ... - def duplicated(self, keep: _str = ...): ... - def __add__(self, other) -> Index: ... - def __radd__(self, other) -> Index: ... - def __iadd__(self, other) -> Index: ... - def __sub__(self, other) -> Index: ... - def __rsub__(self, other) -> Index: ... - def __and__(self, other) -> Index: ... - def __or__(self, other) -> Index: ... - def __xor__(self, other) -> Index: ... - def __nonzero__(self) -> None: ... - __bool__ = ... - def union(self, other: Union[List[T1], Index], sort=...) -> Index: ... - def intersection( - self, other: Union[List[T1], Index], sort: bool = ... - ) -> Index: ... - def difference(self, other: Union[List[T1], Index]) -> Index: ... - def symmetric_difference( - self, other: Union[List[T1], Index], result_name=..., sort=... - ) -> Index: ... - def get_loc(self, key, method=..., tolerance=...): ... - def get_indexer(self, target, method=..., limit=..., tolerance=...): ... - def reindex(self, target, method=..., level=..., limit=..., tolerance=...): ... - def join( - self, - other, - how: _str = ..., - level=..., - return_indexers: bool = ..., - sort: bool = ..., - ): ... - @property - def values(self) -> np.ndarray: ... - def array(self) -> ExtensionArray: ... - def memory_usage(self, deep: bool = ...): ... - def where(self, cond, other=...): ... - def is_type_compatible(self, kind) -> bool: ... - def __contains__(self, key) -> bool: ... - def __hash__(self) -> int: ... - def __setitem__(self, key, value) -> None: ... - @overload - def __getitem__(self, idx: Union[slice, np_ndarray_int64, Index]) -> Index: ... - @overload - def __getitem__( - self, idx: Union[int, Tuple[np_ndarray_int64, ...]] - ) -> Hashable: ... - def append(self, other): ... - def putmask(self, mask, value): ... - def equals(self, other) -> bool: ... - def identical(self, other) -> bool: ... - def asof(self, label): ... - def asof_locs(self, where, mask): ... - def sort_values(self, return_indexer: bool = ..., ascending: bool = ...): ... - def sort(self, *args, **kwargs) -> None: ... - def shift(self, periods: int = ..., freq=...) -> None: ... - def argsort(self, *args, **kwargs): ... - def get_value(self, series, key): ... - def set_value(self, arr, key, value) -> None: ... - def get_indexer_non_unique(self, target): ... - def get_indexer_for(self, target, **kwargs): ... - def groupby(self, values) -> Dict[Hashable, np.ndarray]: ... - def map(self, mapper, na_action=...) -> Index: ... - def isin(self, values, level=...) -> np_ndarray_bool: ... - def slice_indexer(self, start=..., end=..., step=..., kind=...): ... - def get_slice_bound(self, label, side, kind): ... - def slice_locs(self, start=..., end=..., step=..., kind=...): ... - def delete(self, loc): ... - def insert(self, loc, item): ... - def drop(self, labels, *, errors: _str = ...) -> Index: ... - @property - def shape(self) -> Tuple[int, ...]: ... - # Extra methods from old stubs - def __eq__(self, other: object) -> bool: ... # Series: ... # type: ignore - def __iter__(self) -> Iterator: ... - def __ne__(self, other: _str) -> Index: ... # type: ignore - def to_numpy(self) -> np.ndarray: ... 
- -def ensure_index_from_sequences( - sequences: Sequence[Sequence[Dtype]], names: Sequence[str] = ... -) -> Index: ... -def ensure_index(index_like: Union[Sequence, Index], copy: bool = ...) -> Index: ... -def maybe_extract_name(name, obj, cls) -> Label: ... diff --git a/typings/pandas/core/indexes/datetimelike.pyi b/typings/pandas/core/indexes/datetimelike.pyi deleted file mode 100644 index 7271535..0000000 --- a/typings/pandas/core/indexes/datetimelike.pyi +++ /dev/null @@ -1,36 +0,0 @@ -from pandas.core.accessor import PandasDelegate as PandasDelegate -from pandas.core.indexes.extension import ExtensionIndex as ExtensionIndex -from pandas.core.indexes.numeric import Int64Index as Int64Index -from pandas.tseries.frequencies import DateOffset as DateOffset -from typing import List, Optional - -class DatetimeIndexOpsMixin(ExtensionIndex): - freq: Optional[DateOffset] - freqstr: Optional[str] - @property - def is_all_dates(self) -> bool: ... - @property - def values(self): ... - def __array_wrap__(self, result, context=...): ... - def equals(self, other) -> bool: ... - def __contains__(self, key): ... - def sort_values(self, return_indexer: bool = ..., ascending: bool = ...): ... - def take( - self, indices, axis: int = ..., allow_fill: bool = ..., fill_value=..., **kwargs - ): ... - def tolist(self) -> List: ... - def min(self, axis=..., skipna: bool = ..., *args, **kwargs): ... - def argmin(self, axis=..., skipna: bool = ..., *args, **kwargs): ... - def max(self, axis=..., skipna: bool = ..., *args, **kwargs): ... - def argmax(self, axis=..., skipna: bool = ..., *args, **kwargs): ... - def isin(self, values, level=...): ... - def where(self, cond, other=...): ... - def shift(self, periods: int = ..., freq=...): ... - def delete(self, loc): ... - -class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin, Int64Index): - def difference(self, other, sort=...): ... - def intersection(self, other, sort: bool = ...): ... - def join(self, other, how: str = ..., level=..., return_indexers=..., sort=...): ... - -class DatetimelikeDelegateMixin(PandasDelegate): ... diff --git a/typings/pandas/core/indexes/datetimes.pyi b/typings/pandas/core/indexes/datetimes.pyi deleted file mode 100644 index 7822ad8..0000000 --- a/typings/pandas/core/indexes/datetimes.pyi +++ /dev/null @@ -1,76 +0,0 @@ -import numpy as np -from datetime import tzinfo as tzinfo -from pandas.core.indexes.datetimelike import ( - DatetimeTimedeltaMixin as DatetimeTimedeltaMixin, - DatetimelikeDelegateMixin as DatetimelikeDelegateMixin, -) -from pandas.core.indexes.timedeltas import TimedeltaIndex as TimedeltaIndex -from pandas.core.series import Series as Series -from pandas._typing import Timestamp as Timestamp, Timedelta as Timedelta -from typing import Optional, Union, overload - -class DatetimeDelegateMixin(DatetimelikeDelegateMixin): ... - -class DatetimeIndex(DatetimeTimedeltaMixin, DatetimeDelegateMixin): - tz: Optional[tzinfo] - def __init__( - self, - data=..., - freq=..., - tz=..., - normalize: bool = ..., - closed=..., - ambiguous: str = ..., - dayfirst: bool = ..., - yearfirst: bool = ..., - dtype=..., - copy: bool = ..., - name=..., - ): ... - def __array__(self, dtype=...) -> np.ndarray: ... - def __reduce__(self): ... - @overload - def __add__(self, other: Series[Timedelta]) -> Series[Timestamp]: ... - @overload - def __add__(self, other: Union[Timedelta, TimedeltaIndex]) -> DatetimeIndex: ... - def union_many(self, others): ... - def to_series(self, keep_tz=..., index=..., name=...): ... 
- def snap(self, freq: str = ...): ... - def get_value(self, series, key): ... - def get_value_maybe_box(self, series, key): ... - def get_loc(self, key, method=..., tolerance=...): ... - def slice_indexer(self, start=..., end=..., step=..., kind=...): ... - def searchsorted(self, value, side: str = ..., sorter=...): ... - def is_type_compatible(self, typ) -> bool: ... - @property - def inferred_type(self) -> str: ... - def insert(self, loc, item): ... - def indexer_at_time(self, time, asof: bool = ...): ... - def indexer_between_time( - self, start_time, end_time, include_start: bool = ..., include_end: bool = ... - ): ... - def strftime(self, date_format: str = ...) -> np.ndarray: ... - -def date_range( - start=..., - end=..., - periods=..., - freq=..., - tz=..., - normalize=..., - name=..., - closed=..., - **kwargs -) -> DatetimeIndex: ... -def bdate_range( - start=..., - end=..., - periods=..., - freq: str = ..., - tz=..., - normalize: bool = ..., - name=..., - weekmask=..., - holidays=..., - closed=..., -) -> DatetimeIndex: ... diff --git a/typings/pandas/core/indexes/extension.pyi b/typings/pandas/core/indexes/extension.pyi deleted file mode 100644 index 17c68a4..0000000 --- a/typings/pandas/core/indexes/extension.pyi +++ /dev/null @@ -1,18 +0,0 @@ -from pandas.core.indexes.base import Index as Index -from typing import List - -def inherit_from_data(name: str, delegate, cache: bool = ..., wrap: bool = ...): ... -def inherit_names(names: List[str], delegate, cache: bool = ..., wrap: bool = ...): ... -def make_wrapped_arith_op(opname): ... - -class ExtensionIndex(Index): - def __getitem__(self, key): ... - def __iter__(self): ... - def dropna(self, how: str = ...): ... - def repeat(self, repeats, axis=...): ... - def take( - self, indices, axis: int = ..., allow_fill: bool = ..., fill_value=..., **kwargs - ): ... - def unique(self, level=...): ... - def map(self, mapper, na_action=...): ... - def astype(self, dtype, copy: bool = ...): ... diff --git a/typings/pandas/core/indexes/frozen.pyi b/typings/pandas/core/indexes/frozen.pyi deleted file mode 100644 index 93d9d51..0000000 --- a/typings/pandas/core/indexes/frozen.pyi +++ /dev/null @@ -1,25 +0,0 @@ -from pandas.core.base import PandasObject as PandasObject - -class FrozenList(PandasObject, list): - def union(self, other) -> FrozenList: ... - def difference(self, other) -> FrozenList: ... - __add__ = ... - __iadd__ = ... - def __getitem__(self, n): ... - def __radd__(self, other): ... - def __eq__(self, other) -> bool: ... - __req__ = ... - def __mul__(self, other): ... - __imul__ = ... - def __reduce__(self): ... - def __hash__(self): ... - __setitem__ = ... - __setslice__ = ... - __delitem__ = ... - __delslice__ = ... - pop = ... - append = ... - extend = ... - remove = ... - sort = ... - insert = ... diff --git a/typings/pandas/core/indexes/multi.pyi b/typings/pandas/core/indexes/multi.pyi deleted file mode 100644 index f7c604c..0000000 --- a/typings/pandas/core/indexes/multi.pyi +++ /dev/null @@ -1,127 +0,0 @@ -import numpy as np -from pandas.core.indexes.base import Index as Index -from typing import Callable, Hashable, List, Optional, Sequence, Union -from pandas._typing import np_ndarray_bool, DtypeArg as DtypeArg, T1 as T1 - -class MultiIndex(Index): - def __new__( - cls, - levels=..., - codes=..., - sortorder=..., - names=..., - dtype=..., - copy=..., - name=..., - verify_integrity: bool = ..., - _set_identity: bool = ..., - ) -> MultiIndex: ... 
- def __init__( - self, - levels=..., - codes=..., - sortorder=..., - names=..., - dtype=..., - copy=..., - name=..., - verify_integrity: bool = ..., - _set_identity: bool = ..., - ) -> None: ... - @classmethod - def from_arrays(cls, arrays, sortorder=..., names=...) -> MultiIndex: ... - @classmethod - def from_tuples(cls, tuples, sortorder=..., names=...) -> MultiIndex: ... - @classmethod - def from_product(cls, iterables, sortorder=..., names=...) -> MultiIndex: ... - @classmethod - def from_frame(cls, df, sortorder=..., names=...) -> MultiIndex: ... - @property - def shape(self): ... - @property # Should be read-only - def levels(self) -> List[Index]: ... - def set_levels( - self, levels, level=..., inplace: bool = ..., verify_integrity: bool = ... - ): ... - @property - def codes(self): ... - def set_codes( - self, codes, level=..., inplace: bool = ..., verify_integrity: bool = ... - ): ... - def copy(self, names=..., deep: bool = ...) -> MultiIndex: ... - def __array__(self, dtype=...) -> np.ndarray: ... - def view(self, cls=...): ... - def __contains__(self, key) -> bool: ... - def dtype(self) -> np.dtype: ... - def memory_usage(self, deep: bool = ...) -> int: ... - def nbytes(self) -> int: ... - def format( - self, - name: Optional[bool] = ..., - formatter: Optional[Callable] = ..., - na_rep: Optional[str] = ..., - names: bool = ..., - space: int = ..., - sparsify: Optional[bool] = ..., - adjoin: bool = ..., - ) -> List: ... - def __len__(self) -> int: ... - def inferred_type(self) -> str: ... - @property - def values(self): ... - def is_monotonic_increasing(self) -> bool: ... - def is_monotonic_decreasing(self) -> bool: ... - def duplicated(self, keep: str = ...): ... - def fillna(self, value=..., downcast=...) -> None: ... - def dropna(self, how: str = ...): ... - def get_value(self, series, key): ... - def get_level_values(self, level: Union[str, int]) -> Index: ... - def unique(self, level=...): ... - def to_frame(self, index: bool = ..., name=...): ... - def to_flat_index(self): ... - @property - def is_all_dates(self) -> bool: ... - def is_lexsorted(self) -> bool: ... - def lexsort_depth(self): ... - def remove_unused_levels(self): ... - @property - def nlevels(self) -> int: ... - @property - def levshape(self): ... - def __reduce__(self): ... - def __getitem__(self, key): ... - def take( - self, indices, axis: int = ..., allow_fill: bool = ..., fill_value=..., **kwargs - ): ... - def append(self, other): ... - def argsort(self, *args, **kwargs): ... - def repeat(self, repeats, axis=...): ... - def where(self, cond, other=...) -> None: ... - def drop(self, codes, *, level=..., errors: str = ...) -> MultiIndex: ... - def swaplevel(self, i: int = ..., j: int = ...): ... - def reorder_levels(self, order): ... - def sortlevel( - self, level: int = ..., ascending: bool = ..., sort_remaining: bool = ... - ): ... - def get_indexer(self, target, method=..., limit=..., tolerance=...): ... - def get_indexer_non_unique(self, target): ... - def reindex(self, target, method=..., level=..., limit=..., tolerance=...): ... - def get_slice_bound( - self, label: Union[Hashable, Sequence[Hashable]], side: str, kind: str - ) -> int: ... - def slice_locs(self, start=..., end=..., step=..., kind=...): ... - def get_loc(self, key, method=...): ... - def get_loc_level(self, key, level=..., drop_level: bool = ...): ... - def get_locs(self, seq): ... - def truncate(self, before=..., after=...): ... - def equals(self, other) -> bool: ... - def equal_levels(self, other): ... 
- def union(self, other, sort=...): ... - def intersection(self, other, sort: bool = ...): ... - def difference(self, other, sort=...): ... - def astype(self, dtype: Union[DtypeArg, T1], copy: bool = ...) -> MultiIndex: ... - def insert(self, loc, item): ... - def delete(self, loc): ... - def isin(self, values, level=...) -> np_ndarray_bool: ... - -def maybe_droplevels(index, key): ... diff --git a/typings/pandas/core/indexes/numeric.pyi b/typings/pandas/core/indexes/numeric.pyi deleted file mode 100644 index d777c08..0000000 --- a/typings/pandas/core/indexes/numeric.pyi +++ /dev/null @@ -1,36 +0,0 @@ -import numpy as np -from pandas.core.indexes.base import Index as Index -from typing import Iterable, TypeVar -from pandas._typing import T1 as T1, np_ndarray_int64 - -class NumericIndex(Index): - def __init__(self, data: Iterable = ..., dtype=..., copy: bool = ..., name=...): ... - @property - def is_all_dates(self) -> bool: ... - def insert(self, loc, item): ... - -class IntegerIndex(NumericIndex): - def __contains__(self, key) -> bool: ... - -class Int64Index(IntegerIndex): - @property - def inferredT1ype(self) -> str: ... - @property - def asi8(self) -> np_ndarray_int64: ... - -class UInt64Index(IntegerIndex): - @property - def inferredT1ype(self) -> str: ... - @property - def asi8(self) -> np_ndarray_int64: ... - -class Float64Index(NumericIndex): - @property - def inferredT1ype(self) -> str: ... - def astype(self, dtype, copy: bool = ...): ... - def get_value(self, series, key): ... - def equals(self, other) -> bool: ... - def __contains__(self, other) -> bool: ... - def get_loc(self, key, method=..., tolerance=...): ... - def is_unique(self) -> bool: ... - def isin(self, values, level=...): ... diff --git a/typings/pandas/core/indexes/timedeltas.pyi b/typings/pandas/core/indexes/timedeltas.pyi deleted file mode 100644 index 76dd8d8..0000000 --- a/typings/pandas/core/indexes/timedeltas.pyi +++ /dev/null @@ -1,44 +0,0 @@ -from typing import Union, overload -from pandas._libs import Timedelta as Timedelta -from pandas.core.arrays import datetimelike as dtl -from pandas.core.indexes.datetimelike import ( - DatetimeIndexOpsMixin as DatetimeIndexOpsMixin, - DatetimeTimedeltaMixin as DatetimeTimedeltaMixin, - DatetimelikeDelegateMixin as DatetimelikeDelegateMixin, -) -from pandas.core.indexes.datetimes import DatetimeIndex as DatetimeIndex -from pandas._typing import num - -class TimedeltaDelegateMixin(DatetimelikeDelegateMixin): ... - -class TimedeltaIndex(DatetimeTimedeltaMixin, dtl.TimelikeOps, TimedeltaDelegateMixin): - def __new__( - cls, - data=..., - unit=..., - freq=..., - closed=..., - dtype=..., - copy: bool = ..., - name=..., - ): ... - @overload - def __add__(self, other: DatetimeIndex) -> DatetimeIndex: ... - @overload - def __add__(self, other: Union[Timedelta, TimedeltaIndex]) -> TimedeltaIndex: ... - def __sub__(self, other: Union[Timedelta, TimedeltaIndex]) -> TimedeltaIndex: ... - def __mul__(self, other: num) -> TimedeltaIndex: ... - def __truediv__(self, other: num) -> TimedeltaIndex: ... - def astype(self, dtype, copy: bool = ...): ... - def get_value(self, series, key): ... - def get_value_maybe_box(self, series, key: Timedelta): ... - def get_loc(self, key, method=..., tolerance=...): ... - def searchsorted(self, value, side: str = ..., sorter=...): ... - def is_type_compatible(self, typ) -> bool: ... - @property - def inferred_type(self) -> str: ... - def insert(self, loc, item): ... 
- -def timedelta_range( - start=..., end=..., periods=..., freq=..., name=..., closed=... -) -> TimedeltaIndex: ... diff --git a/typings/pandas/core/indexing.pyi b/typings/pandas/core/indexing.pyi deleted file mode 100644 index c5762e3..0000000 --- a/typings/pandas/core/indexing.pyi +++ /dev/null @@ -1,50 +0,0 @@ -import numpy as np -from pandas._libs.indexing import _NDFrameIndexerBase -from pandas.core.indexes.api import Index as Index -from pandas._typing import StrLike, Scalar -from typing import Tuple, Union - -class _IndexSlice: - def __getitem__(self, arg) -> Tuple[Union[StrLike, Scalar, slice], ...]: ... - -IndexSlice: _IndexSlice - -class IndexingError(Exception): ... - -class IndexingMixin: - @property - def iloc(self) -> _iLocIndexer: ... - @property - def loc(self) -> _LocIndexer: ... - @property - def at(self) -> _AtIndexer: ... - @property - def iat(self) -> _iAtIndexer: ... - -class _NDFrameIndexer(_NDFrameIndexerBase): - axis = ... - def __call__(self, axis=...): ... - def __getitem__(self, key): ... - def __setitem__(self, key, value) -> None: ... - -class _LocationIndexer(_NDFrameIndexer): - def __getitem__(self, key): ... - -class _LocIndexer(_LocationIndexer): ... -class _iLocIndexer(_LocationIndexer): ... - -class _ScalarAccessIndexer(_NDFrameIndexerBase): - def __getitem__(self, key): ... - def __setitem__(self, key, value) -> None: ... - -class _AtIndexer(_ScalarAccessIndexer): ... -class _iAtIndexer(_ScalarAccessIndexer): ... - -def convert_to_index_sliceable(obj, key): ... -def check_bool_indexer(index: Index, key) -> np.ndarray: ... -def convert_missing_indexer(indexer): ... -def convert_from_missing_indexer_tuple(indexer, axes): ... -def maybe_convert_ix(*args): ... -def is_nested_tuple(tup, labels) -> bool: ... -def is_label_like(key) -> bool: ... -def need_slice(obj) -> bool: ... diff --git a/typings/pandas/core/internals/__init__.pyi b/typings/pandas/core/internals/__init__.pyi deleted file mode 100644 index 296698c..0000000 --- a/typings/pandas/core/internals/__init__.pyi +++ /dev/null @@ -1,17 +0,0 @@ -from .blocks import ( - Block as Block, - BoolBlock as BoolBlock, - CategoricalBlock as CategoricalBlock, - DatetimeBlock as DatetimeBlock, - DatetimeTZBlock as DatetimeTZBlock, - ExtensionBlock as ExtensionBlock, - ObjectBlock as ObjectBlock, - make_block as make_block, -) -from .managers import ( - BlockManager as BlockManager, - SingleBlockManager as SingleBlockManager, - concatenate_block_managers as concatenate_block_managers, - create_block_manager_from_arrays as create_block_manager_from_arrays, - create_block_manager_from_blocks as create_block_manager_from_blocks, -) diff --git a/typings/pandas/core/internals/blocks.pyi b/typings/pandas/core/internals/blocks.pyi deleted file mode 100644 index 17aaa77..0000000 --- a/typings/pandas/core/internals/blocks.pyi +++ /dev/null @@ -1,261 +0,0 @@ -from pandas.core.arrays import ExtensionArray as ExtensionArray -from pandas.core.base import PandasObject as PandasObject -from typing import List - -class Block(PandasObject): - is_numeric: bool = ... - is_float: bool = ... - is_integer: bool = ... - is_complex: bool = ... - is_datetime: bool = ... - is_datetimetz: bool = ... - is_timedelta: bool = ... - is_bool: bool = ... - is_object: bool = ... - is_categorical: bool = ... - is_extension: bool = ... - ndim = ... - values = ... - def __init__(self, values, placement, ndim=...) -> None: ... - @property - def is_view(self): ... - @property - def is_datelike(self): ... - def is_categorical_astype(self, dtype): ... 
- def external_values(self, dtype=...): ... - def internal_values(self, dtype=...): ... - def array_values(self) -> ExtensionArray: ... - def get_values(self, dtype=...): ... - def get_block_values(self, dtype=...): ... - def to_dense(self): ... - @property - def fill_value(self): ... - @property - def mgr_locs(self): ... - @mgr_locs.setter - def mgr_locs(self, new_mgr_locs) -> None: ... - @property - def array_dtype(self): ... - def make_block(self, values, placement=...) -> Block: ... - def make_block_same_class(self, values, placement=..., ndim=...): ... - def __len__(self) -> int: ... - def getitem_block(self, slicer, new_mgr_locs=...): ... - @property - def shape(self): ... - @property - def dtype(self): ... - @property - def ftype(self): ... - def merge(self, other): ... - def concat_same_type(self, to_concat, placement=...): ... - def iget(self, i): ... - def set(self, locs, values) -> None: ... - def delete(self, loc) -> None: ... - def apply(self, func, **kwargs): ... - def fillna(self, value, limit=..., inplace: bool = ..., downcast=...): ... - def split_and_operate(self, mask, f, inplace: bool): ... - def downcast(self, dtypes=...): ... - def astype(self, dtype, copy: bool = ..., errors: str = ...): ... - def convert( - self, - copy: bool = ..., - datetime: bool = ..., - numeric: bool = ..., - timedelta: bool = ..., - coerce: bool = ..., - ): ... - def to_native_types(self, slicer=..., na_rep: str = ..., quoting=..., **kwargs): ... - def copy(self, deep: bool = ...): ... - def replace( - self, - to_replace, - value, - inplace: bool = ..., - filter=..., - regex: bool = ..., - convert: bool = ..., - ): ... - def setitem(self, indexer, value): ... - def putmask( - self, - mask, - new, - align: bool = ..., - inplace: bool = ..., - axis: int = ..., - transpose: bool = ..., - ): ... - def coerce_to_target_dtype(self, other): ... - def interpolate( - self, - *, - method: str = ..., - axis: int = ..., - index=..., - inplace: bool = ..., - limit=..., - limit_direction: str = ..., - limit_area=..., - fill_value=..., - downcast=..., - **kwargs, - ): ... - def take_nd(self, indexer, axis, new_mgr_locs=..., fill_tuple=...): ... - def diff(self, n: int, axis: int = ...) -> List[Block]: ... - def shift(self, periods, axis: int = ..., fill_value=...): ... - def where( - self, other, cond, align=..., errors=..., try_cast: bool = ..., axis: int = ... - ) -> List[Block]: ... - def equals(self, other) -> bool: ... - def quantile(self, qs, interpolation: str = ..., axis: int = ...): ... - -class NonConsolidatableMixIn: - def __init__(self, values, placement, ndim=...) -> None: ... - @property - def shape(self): ... - def iget(self, col): ... - def should_store(self, value): ... - values = ... - def set(self, locs, values, check: bool = ...) -> None: ... - def putmask( - self, - mask, - new, - align: bool = ..., - inplace: bool = ..., - axis: int = ..., - transpose: bool = ..., - ): ... - -class ExtensionBlock(NonConsolidatableMixIn, Block): - is_extension: bool = ... - def __init__(self, values, placement, ndim=...) -> None: ... - @property - def fill_value(self): ... - @property - def is_view(self): ... - @property - def is_numeric(self): ... - def setitem(self, indexer, value): ... - def get_values(self, dtype=...): ... - def array_values(self) -> ExtensionArray: ... - def to_dense(self): ... - def to_native_types(self, slicer=..., na_rep: str = ..., quoting=..., **kwargs): ... - def take_nd(self, indexer, axis: int = ..., new_mgr_locs=..., fill_tuple=...): ... 
- def concat_same_type(self, to_concat, placement=...): ... - def fillna(self, value, limit=..., inplace: bool = ..., downcast=...): ... - def interpolate( - self, - *, - method: str = ..., - axis: int = ..., - inplace: bool = ..., - limit=..., - fill_value=..., - **kwargs, - ): ... - def diff(self, n: int, axis: int = ...) -> List[Block]: ... - def shift( - self, periods: int, axis: int = ..., fill_value=... - ) -> List[ExtensionBlock]: ... - def where( - self, other, cond, align=..., errors=..., try_cast: bool = ..., axis: int = ... - ) -> List[Block]: ... - -class ObjectValuesExtensionBlock(ExtensionBlock): - def external_values(self, dtype=...): ... - -class NumericBlock(Block): - is_numeric: bool = ... - -class DatetimeLikeBlockMixin: - @property - def fill_value(self): ... - def get_values(self, dtype=...): ... - def iget(self, key): ... - def shift(self, periods, axis: int = ..., fill_value=...): ... - -class DatetimeBlock(DatetimeLikeBlockMixin, Block): - is_datetime: bool = ... - def __init__(self, values, placement, ndim=...) -> None: ... - def astype(self, dtype, copy: bool = ..., errors: str = ...): ... - def to_native_types( - self, slicer=..., na_rep=..., date_format=..., quoting=..., **kwargs - ): ... - def should_store(self, value): ... - def set(self, locs, values) -> None: ... - def external_values(self): ... - def array_values(self) -> ExtensionArray: ... - -class DatetimeTZBlock(DatetimeBlock): - is_datetimetz: bool = ... - is_extension: bool = ... - fill_value = ... - @property - def is_view(self): ... - def get_values(self, dtype=...): ... - def to_dense(self): ... - def diff(self, n: int, axis: int = ...) -> List[Block]: ... - def concat_same_type(self, to_concat, placement=...): ... - def fillna(self, value, limit=..., inplace: bool = ..., downcast=...): ... - def setitem(self, indexer, value): ... - def equals(self, other) -> bool: ... - def quantile(self, qs, interpolation: str = ..., axis: int = ...): ... - -class BoolBlock(NumericBlock): - is_bool: bool = ... - def should_store(self, value): ... - def replace( - self, - to_replace, - value, - inplace: bool = ..., - filter=..., - regex: bool = ..., - convert: bool = ..., - ): ... - -class ObjectBlock(Block): - is_object: bool = ... - def __init__(self, values, placement=..., ndim: int = ...) -> None: ... - @property - def is_bool(self): ... - def convert( - self, - copy: bool = ..., - datetime: bool = ..., - numeric: bool = ..., - timedelta: bool = ..., - coerce: bool = ..., - ): ... - def should_store(self, value): ... - def replace( - self, - to_replace, - value, - inplace: bool = ..., - filter=..., - regex: bool = ..., - convert: bool = ..., - ): ... - -class CategoricalBlock(ExtensionBlock): - is_categorical: bool = ... - def __init__(self, values, placement, ndim=...) -> None: ... - @property - def array_dtype(self): ... - def to_dense(self): ... - def to_native_types(self, slicer=..., na_rep: str = ..., quoting=..., **kwargs): ... - def concat_same_type(self, to_concat, placement=...): ... - def replace( - self, - to_replace, - value, - inplace: bool = ..., - filter=..., - regex: bool = ..., - convert: bool = ..., - ): ... - -def get_block_type(values, dtype=...): ... -def make_block(values, placement, klass=..., ndim=..., dtype=...): ... 
diff --git a/typings/pandas/core/internals/concat.pyi b/typings/pandas/core/internals/concat.pyi deleted file mode 100644 index ccdc47d..0000000 --- a/typings/pandas/core/internals/concat.pyi +++ /dev/null @@ -1,15 +0,0 @@ -def get_mgr_concatenation_plan(mgr, indexers): ... - -class JoinUnit: - block = ... - indexers = ... - shape = ... - def __init__(self, block, shape, indexers=...) -> None: ... - def needs_filling(self): ... - def dtype(self): ... - def is_na(self): ... - def get_reindexed_values(self, empty_dtype, upcasted_na): ... - -def concatenate_join_units(join_units, concat_axis, copy): ... -def is_uniform_join_units(join_units): ... -def combine_concat_plans(plans, concat_axis): ... diff --git a/typings/pandas/core/internals/construction.pyi b/typings/pandas/core/internals/construction.pyi deleted file mode 100644 index 2bb94f8..0000000 --- a/typings/pandas/core/internals/construction.pyi +++ /dev/null @@ -1,12 +0,0 @@ -import numpy as np - -def arrays_to_mgr(arrays, arr_names, index, columns, dtype=...): ... -def masked_rec_array_to_mgr(data, index, columns, dtype, copy): ... -def init_ndarray(values, index, columns, dtype=..., copy: bool = ...): ... -def init_dict(data, index, columns, dtype=...): ... -def prep_ndarray(values, copy=...) -> np.ndarray: ... -def extract_index(data): ... -def reorder_arrays(arrays, arr_columns, columns): ... -def get_names_from_index(data): ... -def to_arrays(data, columns, coerce_float: bool = ..., dtype=...): ... -def sanitize_index(data, index, copy: bool = ...): ... diff --git a/typings/pandas/core/internals/managers.pyi b/typings/pandas/core/internals/managers.pyi deleted file mode 100644 index 78f79ee..0000000 --- a/typings/pandas/core/internals/managers.pyi +++ /dev/null @@ -1,133 +0,0 @@ -from pandas.core.base import PandasObject as PandasObject -from pandas.core.indexes.api import Index as Index -from pandas.core.internals.blocks import Block as Block -from typing import List, Sequence, Union - -class BlockManager(PandasObject): - axes = ... - blocks = ... - def __init__( - self, - blocks: Sequence[Block], - axes: Sequence[Index], - do_integrity_check: bool = ..., - ) -> None: ... - def make_empty(self, axes=...): ... - def __nonzero__(self): ... - __bool__ = ... - @property - def shape(self): ... - @property - def ndim(self) -> int: ... - def set_axis(self, axis, new_labels) -> None: ... - def rename_axis(self, mapper, axis, copy: bool = ..., level=...): ... - @property - def items(self): ... - def get_dtype_counts(self): ... - def get_dtypes(self): ... - def __len__(self) -> int: ... - def reduce(self, func, *args, **kwargs): ... - def apply(self, f, filter=..., **kwargs): ... - def quantile( - self, - axis: int = ..., - consolidate: bool = ..., - transposed: bool = ..., - interpolation: str = ..., - qs=..., - numeric_only=..., - ): ... - def isna(self, func): ... - def where(self, **kwargs): ... - def setitem(self, **kwargs): ... - def putmask(self, **kwargs): ... - def diff(self, **kwargs): ... - def interpolate(self, **kwargs): ... - def shift(self, **kwargs): ... - def fillna(self, **kwargs): ... - def downcast(self, **kwargs): ... - def astype(self, dtype, copy: bool = ..., errors: str = ...): ... - def convert(self, **kwargs): ... - def replace(self, value, **kwargs): ... - def replace_list( - self, src_list, dest_list, inplace: bool = ..., regex: bool = ... - ): ... - def is_consolidated(self): ... - @property - def is_mixed_type(self): ... - @property - def is_numeric_mixed_type(self): ... 
- @property - def is_datelike_mixed_type(self): ... - @property - def any_extension_types(self): ... - @property - def is_view(self): ... - def get_bool_data(self, copy: bool = ...): ... - def get_numeric_data(self, copy: bool = ...): ... - def combine(self, blocks, copy: bool = ...): ... - def get_slice(self, slobj: slice, axis: int = ...): ... - def __contains__(self, item) -> bool: ... - @property - def nblocks(self) -> int: ... - def copy(self, deep: bool = ...): ... - def as_array(self, transpose: bool = ..., items=...): ... - def to_dict(self, copy: bool = ...): ... - def fast_xs(self, loc): ... - def consolidate(self): ... - def get(self, item): ... - def iget(self, i): ... - def delete(self, item) -> None: ... - def set(self, item, value): ... - def insert(self, loc: int, item, value, allow_duplicates: bool = ...): ... - def reindex_axis( - self, new_index, axis, method=..., limit=..., fill_value=..., copy: bool = ... - ): ... - def reindex_indexer( - self, - new_axis, - indexer, - axis, - fill_value=..., - allow_dups: bool = ..., - copy: bool = ..., - ): ... - def take( - self, indexer, axis: int = ..., verify: bool = ..., convert: bool = ... - ): ... - def equals(self, other): ... - def unstack(self, unstacker_func, fill_value): ... - -class SingleBlockManager(BlockManager): - ndim: int = ... - axes = ... - blocks = ... - def __init__( - self, - block: Block, - axis: Union[Index, List[Index]], - do_integrity_check: bool = ..., - fastpath: bool = ..., - ) -> None: ... - def get_slice(self, slobj, axis: int = ...): ... - @property - def index(self): ... - @property - def dtype(self): ... - @property - def array_dtype(self): ... - def get_dtype_counts(self): ... - def get_dtypes(self): ... - def external_values(self): ... - def internal_values(self): ... - def get_values(self): ... - def is_consolidated(self): ... - def delete(self, item) -> None: ... - def fast_xs(self, loc): ... - def concat(self, to_concat, new_axis): ... - -def create_block_manager_from_blocks(blocks, axes): ... -def create_block_manager_from_arrays(arrays, names, axes): ... -def construction_error(tot_items, block_shape, axes, e=...) -> None: ... -def form_blocks(arrays, names, axes): ... -def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy): ... diff --git a/typings/pandas/core/ops/__init__.pyi b/typings/pandas/core/ops/__init__.pyi deleted file mode 100644 index 23e14ed..0000000 --- a/typings/pandas/core/ops/__init__.pyi +++ /dev/null @@ -1,15 +0,0 @@ -from typing import Any, Optional, Set, Tuple - -ARITHMETIC_BINOPS: Set[str] = ... -COMPARISON_BINOPS: Set[str] = ... - -def get_op_result_name(left: Any, right: Any): ... -def maybe_upcast_for_op(obj: Any, shape: Tuple[int, ...]) -> Any: ... -def fill_binop(left: Any, right: Any, fill_value: Any): ... -def dispatch_to_series( - left: Any, - right: Any, - func: Any, - str_rep: Optional[Any] = ..., - axis: Optional[Any] = ..., -): ... diff --git a/typings/pandas/core/ops/array_ops.pyi b/typings/pandas/core/ops/array_ops.pyi deleted file mode 100644 index fa38241..0000000 --- a/typings/pandas/core/ops/array_ops.pyi +++ /dev/null @@ -1,19 +0,0 @@ -import numpy as np -from pandas.core.dtypes.generic import ABCExtensionArray as ABCExtensionArray -from typing import Optional, Union - -def comp_method_OBJECT_ARRAY(op, x, y): ... -def masked_arith_op(x, y, op): ... -def define_na_arithmetic_op(op, str_rep: str): ... -def na_arithmetic_op(left, right, op, str_rep: str): ... 
-def arithmetic_op( - left: Union[np.ndarray, ABCExtensionArray], right, op, str_rep: str -): ... -def comparison_op( - left: Union[np.ndarray, ABCExtensionArray], right, op -) -> Union[np.ndarray, ABCExtensionArray]: ... -def na_logical_op(x: np.ndarray, y, op): ... -def logical_op( - left: Union[np.ndarray, ABCExtensionArray], right, op -) -> Union[np.ndarray, ABCExtensionArray]: ... -def get_array_op(op, str_rep: Optional[str] = ...): ... diff --git a/typings/pandas/core/ops/dispatch.pyi b/typings/pandas/core/ops/dispatch.pyi deleted file mode 100644 index 66b066c..0000000 --- a/typings/pandas/core/ops/dispatch.pyi +++ /dev/null @@ -1,10 +0,0 @@ -import numpy as np -from pandas.core.dtypes.generic import ( - ABCExtensionArray as ABCExtensionArray, - ABCSeries as ABCSeries, -) -from typing import Union - -def should_extension_dispatch(left: ABCSeries, right) -> bool: ... -def should_series_dispatch(left, right, op): ... -def dispatch_to_extension_op(op, left: Union[ABCExtensionArray, np.ndarray], right): ... diff --git a/typings/pandas/core/ops/docstrings.pyi b/typings/pandas/core/ops/docstrings.pyi deleted file mode 100644 index af4811e..0000000 --- a/typings/pandas/core/ops/docstrings.pyi +++ /dev/null @@ -1 +0,0 @@ -reverse_op = ... diff --git a/typings/pandas/core/ops/invalid.pyi b/typings/pandas/core/ops/invalid.pyi deleted file mode 100644 index 9c40d55..0000000 --- a/typings/pandas/core/ops/invalid.pyi +++ /dev/null @@ -1,2 +0,0 @@ -def invalid_comparison(left, right, op): ... -def make_invalid_op(name: str): ... diff --git a/typings/pandas/core/ops/mask_ops.pyi b/typings/pandas/core/ops/mask_ops.pyi deleted file mode 100644 index 2c86116..0000000 --- a/typings/pandas/core/ops/mask_ops.pyi +++ /dev/null @@ -1,23 +0,0 @@ -import numpy as np -from pandas._libs import lib as lib, missing as libmissing -from typing import Optional, Union - -def kleene_or( - left: Union[bool, np.ndarray], - right: Union[bool, np.ndarray], - left_mask: Optional[np.ndarray], - right_mask: Optional[np.ndarray], -): ... -def kleene_xor( - left: Union[bool, np.ndarray], - right: Union[bool, np.ndarray], - left_mask: Optional[np.ndarray], - right_mask: Optional[np.ndarray], -): ... -def kleene_and( - left: Union[bool, libmissing.NAType, np.ndarray], - right: Union[bool, libmissing.NAType, np.ndarray], - left_mask: Optional[np.ndarray], - right_mask: Optional[np.ndarray], -): ... -def raise_for_nan(value, method) -> None: ... diff --git a/typings/pandas/core/ops/methods.pyi b/typings/pandas/core/ops/methods.pyi deleted file mode 100644 index 2ffac10..0000000 --- a/typings/pandas/core/ops/methods.pyi +++ /dev/null @@ -1,2 +0,0 @@ -def add_special_arithmetic_methods(cls): ... -def add_flex_arithmetic_methods(cls) -> None: ... diff --git a/typings/pandas/core/ops/missing.pyi b/typings/pandas/core/ops/missing.pyi deleted file mode 100644 index 9810e74..0000000 --- a/typings/pandas/core/ops/missing.pyi +++ /dev/null @@ -1,3 +0,0 @@ -def fill_zeros(result, x, y): ... -def mask_zero_div_zero(x, y, result): ... -def dispatch_fill_zeros(op, left, right, result): ... diff --git a/typings/pandas/core/ops/roperator.pyi b/typings/pandas/core/ops/roperator.pyi deleted file mode 100644 index ec88904..0000000 --- a/typings/pandas/core/ops/roperator.pyi +++ /dev/null @@ -1,12 +0,0 @@ -def radd(left, right): ... -def rsub(left, right): ... -def rmul(left, right): ... -def rdiv(left, right): ... -def rtruediv(left, right): ... -def rfloordiv(left, right): ... -def rmod(left, right): ... -def rdivmod(left, right): ... 
-def rpow(left, right): ... -def rand_(left, right): ... -def ror_(left, right): ... -def rxor(left, right): ... diff --git a/typings/pandas/core/resample.pyi b/typings/pandas/core/resample.pyi deleted file mode 100644 index 4ddeea7..0000000 --- a/typings/pandas/core/resample.pyi +++ /dev/null @@ -1,74 +0,0 @@ -from pandas.core.base import ShallowMixin as ShallowMixin -from pandas.core.groupby.base import GroupByMixin as GroupByMixin -from pandas.core.groupby.groupby import _GroupBy -from pandas.core.groupby.grouper import Grouper as Grouper -from pandas._typing import FrameOrSeriesUnion - -class Resampler(_GroupBy, ShallowMixin): - def __init__( - self, obj, groupby=..., axis: int = ..., kind=..., **kwargs - ) -> None: ... - def __getattr__(self, attr: str): ... - def __iter__(self): ... - @property - def obj(self): ... - @property - def ax(self): ... - def pipe(self, func, *args, **kwargs): ... - def aggregate(self, func, *args, **kwargs): ... - agg = aggregate - def transform(self, arg, *args, **kwargs): ... - def pad(self, limit=...): ... - def nearest(self, limit=...): ... - def backfill(self, limit=...): ... - bfill = backfill - def fillna(self, method, limit=...): ... - def interpolate( - self, - method: str = ..., - axis: int = ..., - limit=..., - inplace: bool = ..., - limit_direction: str = ..., - limit_area=..., - downcast=..., - **kwargs, - ): ... - def asfreq(self, fill_value=...): ... - def std(self, ddof: int = ..., *args, **kwargs): ... - def var(self, ddof: int = ..., *args, **kwargs): ... - def size(self): ... - def count(self): ... - def quantile(self, q: float = ..., **kwargs): ... - def sum( - self, _method=..., min_count: int = ..., *args, **kwargs - ) -> FrameOrSeriesUnion: ... - def prod( - self, _method=..., min_count: int = ..., *args, **kwargs - ) -> FrameOrSeriesUnion: ... - def min( - self, _method=..., min_count: int = ..., *args, **kwargs - ) -> FrameOrSeriesUnion: ... - def max( - self, _method=..., min_count: int = ..., *args, **kwargs - ) -> FrameOrSeriesUnion: ... - def first( - self, _method=..., min_count: int = ..., *args, **kwargs - ) -> FrameOrSeriesUnion: ... - def last( - self, _method=..., min_count: int = ..., *args, **kwargs - ) -> FrameOrSeriesUnion: ... - def mean(self, _method=..., *args, **kwargs) -> FrameOrSeriesUnion: ... - def sem(self, _method=..., *args, **kwargs) -> FrameOrSeriesUnion: ... - def median(self, _method=..., *args, **kwargs) -> FrameOrSeriesUnion: ... - def ohlc(self, _method=..., *args, **kwargs) -> FrameOrSeriesUnion: ... - -class _GroupByMixin(GroupByMixin): - groupby = ... - def __init__(self, obj, *args, **kwargs) -> None: ... - -def resample(obj, kind=..., **kwds): ... -def get_resampler_for_grouping( - groupby, rule, how=..., fill_method=..., limit=..., kind=..., **kwargs -): ... -def asfreq(obj, freq, method=..., how=..., normalize: bool = ..., fill_value=...): ... 
diff --git a/typings/pandas/core/reshape/__init__.pyi b/typings/pandas/core/reshape/__init__.pyi deleted file mode 100644 index e69de29..0000000 diff --git a/typings/pandas/core/reshape/api.pyi b/typings/pandas/core/reshape/api.pyi deleted file mode 100644 index c2ad0ec..0000000 --- a/typings/pandas/core/reshape/api.pyi +++ /dev/null @@ -1,18 +0,0 @@ -from pandas.core.reshape.concat import concat as concat -from pandas.core.reshape.melt import ( - lreshape as lreshape, - melt as melt, - wide_to_long as wide_to_long, -) -from pandas.core.reshape.merge import ( - merge as merge, - merge_asof as merge_asof, - merge_ordered as merge_ordered, -) -from pandas.core.reshape.pivot import ( - crosstab as crosstab, - pivot as pivot, - pivot_table as pivot_table, -) -from pandas.core.reshape.reshape import get_dummies as get_dummies -from pandas.core.reshape.tile import cut as cut, qcut as qcut diff --git a/typings/pandas/core/reshape/concat.pyi b/typings/pandas/core/reshape/concat.pyi deleted file mode 100644 index ee00c35..0000000 --- a/typings/pandas/core/reshape/concat.pyi +++ /dev/null @@ -1,56 +0,0 @@ -from pandas import DataFrame as DataFrame, Series as Series -from typing import ( - Hashable, - Iterable, - Mapping, - Optional, - Union, - overload, - Literal, - TypeVar, -) - -HashableT = TypeVar("HashableT", bound=Hashable) - -@overload -def concat( - objs: Union[Iterable[Optional[Series]], Mapping[HashableT, Optional[Series]]], - join: str = ..., - ignore_index: bool = ..., - keys=..., - levels=..., - names=..., - verify_integrity: bool = ..., - sort: bool = ..., - copy: bool = ..., - axis: Literal[0, "index"] = ..., -) -> Series: ... -@overload -def concat( - objs: Union[Iterable[Optional[Series]], Mapping[HashableT, Optional[Series]]], - axis: Literal[1, "columns"], - join: str = ..., - ignore_index: bool = ..., - keys=..., - levels=..., - names=..., - verify_integrity: bool = ..., - sort: bool = ..., - copy: bool = ..., -) -> DataFrame: ... -@overload -def concat( - objs: Union[ - Iterable[Optional[Union[DataFrame, Series]]], - Mapping[HashableT, Optional[Union[DataFrame, Series]]], - ], - axis: Literal[0, "index", 1, "columns"] = ..., - join: str = ..., - ignore_index: bool = ..., - keys=..., - levels=..., - names=..., - verify_integrity: bool = ..., - sort: bool = ..., - copy: bool = ..., -) -> DataFrame: ... diff --git a/typings/pandas/core/reshape/melt.pyi b/typings/pandas/core/reshape/melt.pyi deleted file mode 100644 index 2b1eca9..0000000 --- a/typings/pandas/core/reshape/melt.pyi +++ /dev/null @@ -1,17 +0,0 @@ -import numpy as np -from pandas.core.frame import DataFrame as DataFrame -from typing import List, Optional, Tuple, Union - -def melt( - frame: DataFrame, - id_vars: Optional[Union[Tuple, List, np.ndarray]] = ..., - value_vars: Optional[Union[Tuple, List, np.ndarray]] = ..., - var_name: Optional[str] = ..., - value_name: str = ..., - col_level: Optional[Union[int, str]] = ..., - ignore_index: bool = ..., -) -> DataFrame: ... -def lreshape(data: DataFrame, groups, dropna: bool = ..., label=...) -> DataFrame: ... -def wide_to_long( - df: DataFrame, stubnames, i, j, sep: str = ..., suffix: str = ... -) -> DataFrame: ... 
diff --git a/typings/pandas/core/reshape/merge.pyi b/typings/pandas/core/reshape/merge.pyi deleted file mode 100644 index 7f422a0..0000000 --- a/typings/pandas/core/reshape/merge.pyi +++ /dev/null @@ -1,130 +0,0 @@ -from pandas._libs.tslibs import Timedelta -from pandas import DataFrame as DataFrame, Series as Series -from pandas._typing import Label -from typing import Optional, Sequence, Union - -def merge( - left: Union[DataFrame, Series], - right: Union[DataFrame, Series], - how: str = ..., - on: Optional[Union[Label, Sequence]] = ..., - left_on: Optional[Union[Label, Sequence]] = ..., - right_on: Optional[Union[Label, Sequence]] = ..., - left_index: bool = ..., - right_index: bool = ..., - sort: bool = ..., - suffixes: Sequence[Union[str, None]] = ..., - copy: bool = ..., - indicator: Union[bool, str] = ..., - validate: str = ..., -) -> DataFrame: ... -def merge_ordered( - left: Union[DataFrame, Series], - right: Union[DataFrame, Series], - on: Optional[Union[Label, Sequence]] = ..., - left_on: Optional[Union[Label, Sequence]] = ..., - right_on: Optional[Union[Label, Sequence]] = ..., - left_by: Optional[Union[str, Sequence[str]]] = ..., - right_by: Optional[Union[str, Sequence[str]]] = ..., - fill_method: Optional[str] = ..., - suffixes: Sequence[Union[str, None]] = ..., - how: str = ..., -) -> DataFrame: ... -def merge_asof( - left: Union[DataFrame, Series], - right: Union[DataFrame, Series], - on: Optional[Label] = ..., - left_on: Optional[Label] = ..., - right_on: Optional[Label] = ..., - left_index: bool = ..., - right_index: bool = ..., - by: Optional[Union[str, Sequence[str]]] = ..., - left_by: Optional[str] = ..., - right_by: Optional[str] = ..., - suffixes: Sequence[Union[str, None]] = ..., - tolerance: Optional[Union[int, Timedelta]] = ..., - allow_exact_matches: bool = ..., - direction: str = ..., -) -> DataFrame: ... - -class _MergeOperation: - left = ... - right = ... - how = ... - axis = ... - on = ... - left_on = ... - right_on = ... - copy = ... - suffixes = ... - sort = ... - left_index = ... - right_index = ... - indicator = ... - indicator_name = ... - def __init__( - self, - left: Union[Series, DataFrame], - right: Union[Series, DataFrame], - how: str = ..., - on=..., - left_on=..., - right_on=..., - axis=..., - left_index: bool = ..., - right_index: bool = ..., - sort: bool = ..., - suffixes=..., - copy: bool = ..., - indicator: bool = ..., - validate=..., - ) -> None: ... - def get_result(self): ... - -class _OrderedMerge(_MergeOperation): - fill_method = ... - def __init__( - self, - left, - right, - on=..., - left_on=..., - right_on=..., - left_index: bool = ..., - right_index: bool = ..., - axis=..., - suffixes=..., - copy: bool = ..., - fill_method=..., - how: str = ..., - ) -> None: ... - def get_result(self): ... - -class _AsOfMerge(_OrderedMerge): - by = ... - left_by = ... - right_by = ... - tolerance = ... - allow_exact_matches = ... - direction = ... - def __init__( - self, - left, - right, - on=..., - left_on=..., - right_on=..., - left_index: bool = ..., - right_index: bool = ..., - by=..., - left_by=..., - right_by=..., - axis=..., - suffixes=..., - copy: bool = ..., - fill_method=..., - how: str = ..., - tolerance=..., - allow_exact_matches: bool = ..., - direction: str = ..., - ) -> None: ... 
diff --git a/typings/pandas/core/reshape/pivot.pyi b/typings/pandas/core/reshape/pivot.pyi deleted file mode 100644 index d7b0b5f..0000000 --- a/typings/pandas/core/reshape/pivot.pyi +++ /dev/null @@ -1,36 +0,0 @@ -from pandas.core.series import Series -from pandas.core.frame import DataFrame -from pandas.core.groupby.grouper import Grouper -from pandas._typing import Scalar -from typing import Callable, Optional, Sequence, Union - -def pivot_table( - data: DataFrame, - values: Optional[str] = ..., - index: Optional[Union[str, Sequence, Grouper]] = ..., - columns: Optional[Union[str, Sequence, Grouper]] = ..., - aggfunc=..., - fill_value: Optional[Scalar] = ..., - margins: bool = ..., - dropna: bool = ..., - margins_name: str = ..., - observed: bool = ..., -) -> DataFrame: ... -def pivot( - data: DataFrame, - index: Optional[str] = ..., - columns: Optional[str] = ..., - values: Optional[Union[str, Sequence[str]]] = ..., -) -> DataFrame: ... -def crosstab( - index: Union[Sequence, Series], - columns: Union[Sequence, Series], - values: Optional[Sequence] = ..., - rownames: Optional[Sequence] = ..., - colnames: Optional[Sequence] = ..., - aggfunc: Optional[Callable] = ..., - margins: bool = ..., - margins_name: str = ..., - dropna: bool = ..., - normalize: bool = ..., -) -> DataFrame: ... diff --git a/typings/pandas/core/reshape/util.pyi b/typings/pandas/core/reshape/util.pyi deleted file mode 100644 index 4ce5844..0000000 --- a/typings/pandas/core/reshape/util.pyi +++ /dev/null @@ -1 +0,0 @@ -def cartesian_product(X): ... diff --git a/typings/pandas/core/series.pyi b/typings/pandas/core/series.pyi deleted file mode 100644 index 6718865..0000000 --- a/typings/pandas/core/series.pyi +++ /dev/null @@ -1,1763 +0,0 @@ -import sys -from datetime import date, time -from typing import ( - Any, - Callable, - Dict, - Generic, - Hashable, - Iterable, - List, - Mapping, - Optional, - Sequence, - Tuple, - Type, - Union, - overload, -) - -import numpy as np -from matplotlib.axes import Axes as PlotAxes -from matplotlib.axes import SubplotBase as SubplotBase -from pandas._typing import S1 as S1 -from pandas._typing import ArrayLike as ArrayLike -from pandas._typing import Axis as Axis -from pandas._typing import AxisType as AxisType -from pandas._typing import Dtype as Dtype -from pandas._typing import DtypeNp as DtypeNp -from pandas._typing import FilePathOrBuffer as FilePathOrBuffer -from pandas._typing import IgnoreRaise as IgnoreRaise -from pandas._typing import Label as Label -from pandas._typing import Level as Level -from pandas._typing import ListLike as ListLike -from pandas._typing import MaskType as MaskType -from pandas._typing import Renamer as Renamer -from pandas._typing import Scalar as Scalar -from pandas._typing import SeriesAxisType as SeriesAxisType -from pandas._typing import Timedelta as Timedelta -from pandas._typing import Timestamp as Timestamp -from pandas._typing import num as num -from pandas.core.arrays.base import ExtensionArray -from pandas.core.groupby.generic import SeriesGroupBy -from pandas.core.indexes.base import Index -from pandas.core.indexes.datetimes import DatetimeIndex -from pandas.core.resample import Resampler -from pandas.core.strings import StringMethods -from pandas.core.window import ExponentialMovingWindow -from pandas.core.window.rolling import Rolling, Window - -from .base import IndexOpsMixin -from .frame import DataFrame -from .generic import NDFrame -from .indexes.multi import MultiIndex -from .indexing import _iLocIndexer, _LocIndexer - -if 
sys.version_info >= (3, 8): - from typing import Literal -else: - from typing_extensions import Literal - -_bool = bool -_str = str - -class _iLocIndexerSeries(_iLocIndexer, Generic[S1]): - # get item - @overload - def __getitem__(self, idx: int) -> S1: ... - @overload - def __getitem__(self, idx: Union[Index, slice]) -> Series[S1]: ... - # set item - @overload - def __setitem__(self, idx: int, value: S1) -> None: ... - @overload - def __setitem__(self, idx: Index, value: Union[S1, Series[S1]]) -> None: ... - -class _LocIndexerSeries(_LocIndexer, Generic[S1]): - @overload - def __getitem__( - self, - idx: Union[ - MaskType, - Index, - Sequence[Union[int, float]], - List[_str], - slice, - Tuple[Union[int, str, float, slice, Index], ...], - ], - ) -> Series[S1]: ... - @overload - def __getitem__( - self, - idx: Union[int, _str, float], - ) -> S1: ... - @overload - def __setitem__( - self, - idx: Union[Index, MaskType], - value: Union[S1, ArrayLike, Series[S1]], - ) -> None: ... - @overload - def __setitem__( - self, - idx: str, - value: S1, - ) -> None: ... - @overload - def __setitem__( - self, - idx: Union[List[int], List[str], List[Union[str, int]]], - value: Union[S1, ArrayLike, Series[S1]], - ) -> None: ... - -class Series(IndexOpsMixin, NDFrame, Generic[S1]): - - _ListLike = Union[ArrayLike, Dict[_str, np.ndarray], List, Tuple, Index] - @overload - def __new__( - cls, - data: DatetimeIndex, - index: Union[_str, int, Series, List, Index] = ..., - dtype=..., - name: Optional[Hashable] = ..., - copy: bool = ..., - fastpath: bool = ..., - ) -> Series[Timestamp]: ... - @overload - def __new__( - cls, - data: Optional[ - Union[object, _ListLike, Series[S1], Dict[int, S1], Dict[_str, S1]] - ], - dtype: Type[S1], - index: Union[_str, int, Series, List, Index] = ..., - name: Optional[Hashable] = ..., - copy: bool = ..., - fastpath: bool = ..., - ) -> Series[S1]: ... - @overload - def __new__( - cls, - data: Optional[ - Union[object, _ListLike, Series[S1], Dict[int, S1], Dict[_str, S1]] - ] = ..., - index: Union[_str, int, Series, List, Index] = ..., - dtype=..., - name: Optional[Hashable] = ..., - copy: bool = ..., - fastpath: bool = ..., - ) -> Series: ... - @property - def hasnans(self) -> bool: ... - def div( - self, - other: Union[num, _ListLike, Series[S1]], - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - axis: SeriesAxisType = ..., - ) -> Series[float]: ... - def rdiv( - self, - other: Union[Series[S1], Scalar], - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - axis: SeriesAxisType = ..., - ) -> Series[S1]: ... - @property - def dtype(self) -> Dtype: ... - @property - def dtypes(self) -> Dtype: ... - @property - def name(self) -> Optional[Hashable]: ... - @name.setter - def name(self, value: Optional[Hashable]) -> None: ... - @property - def values(self) -> ArrayLike: ... - @property - def array(self) -> ExtensionArray: ... - def ravel(self, order: _str = ...) -> np.ndarray: ... - def __len__(self) -> int: ... - def view(self, dtype=...) -> Series[S1]: ... - def __array_ufunc__(self, ufunc: Callable, method: _str, *inputs, **kwargs): ... - def __array__(self, dtype=...) -> np.ndarray: ... - @property - def axes(self) -> List: ... - def take( - self, - indices: Sequence, - axis: SeriesAxisType = ..., - is_copy: Optional[_bool] = ..., - **kwargs, - ) -> Series[S1]: ... - @overload - def __getitem__( - self, - idx: Union[ - List[_str], Index, Series[S1], slice, MaskType, Tuple[Union[S1, slice], ...] - ], - ) -> Series: ... 
- @overload - def __getitem__(self, idx: Union[int, _str]) -> S1: ... - def __setitem__(self, key, value) -> None: ... - def repeat( - self, repeats: Union[int, List[int]], axis: Optional[SeriesAxisType] = ... - ) -> Series[S1]: ... - @property - def index(self) -> Union[Index, MultiIndex]: ... - @index.setter - def index(self, idx: Index) -> None: ... - @overload - def reset_index( - self, - level: Optional[Sequence[Level]], - drop: Literal[True], - *, - name: Optional[object] = ..., - inplace: _bool = ..., - ) -> Series[S1]: ... - @overload - def reset_index( - self, - level: Optional[Level], - drop: Literal[True], - *, - name: Optional[object] = ..., - inplace: _bool = ..., - ) -> Series[S1]: ... - @overload - def reset_index( - self, - /, - drop: Literal[True], - level: Optional[Sequence[Level]] = ..., - name: Optional[object] = ..., - inplace: _bool = ..., - ) -> Series[S1]: ... - @overload - def reset_index( - self, - /, - drop: Literal[True], - level: Optional[Level] = ..., - name: Optional[object] = ..., - inplace: _bool = ..., - ) -> Series[S1]: ... - @overload - def reset_index( - self, - level: Optional[Sequence[Level]] = ..., - drop: Literal[False] = ..., - name: Optional[object] = ..., - inplace: _bool = ..., - ) -> DataFrame: ... - @overload - def reset_index( - self, - level: Optional[Level] = ..., - drop: Literal[False] = ..., - name: Optional[object] = ..., - inplace: _bool = ..., - ) -> DataFrame: ... - @overload - def to_string( - self, - buf: Optional[FilePathOrBuffer], - na_rep: _str = ..., - formatters=..., - float_format=..., - sparsify: Optional[_bool] = ..., - index_names: _bool = ..., - justify: Optional[_str] = ..., - max_rows: Optional[int] = ..., - min_rows: Optional[int] = ..., - max_cols: Optional[int] = ..., - show_dimensions: _bool = ..., - decimal: _str = ..., - line_width: Optional[int] = ..., - max_colwidth: Optional[int] = ..., - encoding: Optional[_str] = ..., - ) -> None: ... - @overload - def to_string( - self, - na_rep: _str = ..., - formatters=..., - float_format=..., - sparsify: Optional[_bool] = ..., - index_names: _bool = ..., - justify: Optional[_str] = ..., - max_rows: Optional[int] = ..., - min_rows: Optional[int] = ..., - max_cols: Optional[int] = ..., - show_dimensions: _bool = ..., - decimal: _str = ..., - line_width: Optional[int] = ..., - max_colwidth: Optional[int] = ..., - encoding: Optional[_str] = ..., - ) -> _str: ... - @overload - def to_markdown( - self, - buf: Optional[FilePathOrBuffer], - mode: Optional[_str] = ..., - index: _bool = ..., - storage_options: Optional[dict] = ..., - **kwargs, - ) -> None: ... - @overload - def to_markdown( - self, - mode: Optional[_str] = ..., - index: _bool = ..., - storage_options: Optional[dict] = ..., - ) -> _str: ... - def items(self) -> Iterable[Tuple[Hashable, S1]]: ... - def iteritems(self) -> Iterable[Tuple[Label, S1]]: ... - def keys(self) -> List: ... - def to_dict(self, into: Hashable = ...) -> Dict[Any, Any]: ... - def to_frame(self, name: Optional[object] = ...) -> DataFrame: ... - def groupby( - self, - by=..., - axis: SeriesAxisType = ..., - level: Optional[Level] = ..., - as_index: _bool = ..., - sort: _bool = ..., - group_keys: _bool = ..., - squeeze: _bool = ..., - observed: _bool = ..., - dropna: _bool = ..., - ) -> SeriesGroupBy: ... - @overload - def count(self, level: None = ...) -> int: ... - @overload - def count(self, level: Hashable) -> Series[S1]: ... - def mode(self, dropna) -> Series[S1]: ... - def unique(self) -> np.ndarray: ... 
- @overload - def drop_duplicates( - self, keep: Literal["first", "last", False] = ..., inplace: Literal[False] = ... - ) -> Series[S1]: ... - @overload - def drop_duplicates( - self, keep: Literal["first", "last", False], inplace: Literal[True] - ) -> None: ... - @overload - def drop_duplicates(self, *, inplace: Literal[True]) -> None: ... - @overload - def drop_duplicates( - self, keep: Literal["first", "last", False] = ..., inplace: bool = ... - ) -> Optional[Series[S1]]: ... - def duplicated( - self, keep: Literal["first", "last", False] = ... - ) -> Series[_bool]: ... - def idxmax( - self, axis: SeriesAxisType = ..., skipna: _bool = ..., *args, **kwargs - ) -> Union[int, _str]: ... - def idxmin( - self, axis: SeriesAxisType = ..., skipna: _bool = ..., *args, **kwargs - ) -> Union[int, _str]: ... - def round(self, decimals: int = ..., *args, **kwargs) -> Series[S1]: ... - @overload - def quantile( - self, - q: float = ..., - interpolation: Union[ - _str, Literal["linear", "lower", "higher", "midpoint", "nearest"] - ] = ..., - ) -> float: ... - @overload - def quantile( - self, - q: _ListLike, - interpolation: Union[ - _str, Literal["linear", "lower", "higher", "midpoint", "nearest"] - ] = ..., - ) -> Series[S1]: ... - def corr( - self, - other: Series[S1], - method: Literal["pearson", "kendall", "spearman"] = ..., - min_periods: int = ..., - ) -> float: ... - def cov( - self, other: Series[S1], min_periods: Optional[int] = ..., ddof: int = ... - ) -> float: ... - def diff(self, periods: int = ...) -> Series[S1]: ... - def autocorr(self, lag: int = ...) -> float: ... - @overload - def dot(self, other: Series[S1]) -> Scalar: ... - @overload - def dot(self, other: DataFrame) -> Series[S1]: ... - @overload - def dot(self, other: _ListLike) -> np.ndarray: ... - def __matmul__(self, other): ... - def __rmatmul__(self, other): ... - @overload - def searchsorted( - self, - value: _ListLike, - side: Union[_str, Literal["left", "right"]] = ..., - sorter: Optional[_ListLike] = ..., - ) -> List[int]: ... - @overload - def searchsorted( - self, - value: Scalar, - side: Union[_str, Literal["left", "right"]] = ..., - sorter: Optional[_ListLike] = ..., - ) -> int: ... - def append( - self, - to_append: Union[Series, Sequence[Series]], - ignore_index: _bool = ..., - verify_integrity: _bool = ..., - ) -> Series[S1]: ... - @overload - def compare( - self, - other: Series, - align_axis: SeriesAxisType, - keep_shape: bool = ..., - keep_equal: bool = ..., - ) -> Series: ... - @overload - def compare( - self, - other: Series, - align_axis: Literal["columns", 1] = ..., - keep_shape: bool = ..., - keep_equal: bool = ..., - ) -> DataFrame: ... - def combine( - self, other: Series[S1], func: Callable, fill_value: Optional[Scalar] = ... - ) -> Series[S1]: ... - def combine_first(self, other: Series[S1]) -> Series[S1]: ... - def update( - self, other: Union[Series[S1], Sequence[S1], Mapping[int, S1]] - ) -> None: ... - @overload - def sort_values( - self, - axis: AxisType = ..., - ascending: Union[_bool, Sequence[_bool]] = ..., - kind: Union[_str, Literal["quicksort", "mergesort", "heapsort"]] = ..., - na_position: Union[_str, Literal["first", "last"]] = ..., - ignore_index: _bool = ..., - *, - inplace: Literal[True], - key: Optional[Callable] = ..., - ) -> None: ... 
- @overload - def sort_values( - self, - axis: AxisType = ..., - ascending: Union[_bool, Sequence[_bool]] = ..., - kind: Union[_str, Literal["quicksort", "mergesort", "heapsort"]] = ..., - na_position: Union[_str, Literal["first", "last"]] = ..., - ignore_index: _bool = ..., - *, - inplace: Literal[False], - key: Optional[Callable] = ..., - ) -> Series[S1]: ... - @overload - def sort_values( - self, - axis: AxisType = ..., - ascending: Union[_bool, Sequence[_bool]] = ..., - *, - kind: Union[_str, Literal["quicksort", "mergesort", "heapsort"]] = ..., - na_position: Union[_str, Literal["first", "last"]] = ..., - ignore_index: _bool = ..., - key: Optional[Callable] = ..., - ) -> Series[S1]: ... - @overload - def sort_values( - self, - axis: AxisType = ..., - ascending: Union[_bool, Sequence[_bool]] = ..., - inplace: Optional[_bool] = ..., - kind: Union[_str, Literal["quicksort", "mergesort", "heapsort"]] = ..., - na_position: Union[_str, Literal["first", "last"]] = ..., - ignore_index: _bool = ..., - key: Optional[Callable] = ..., - ) -> Union[None, Series[S1]]: ... - @overload - def sort_index( - self, - axis: AxisType = ..., - level: Optional[Level] = ..., - ascending: Union[_bool, Sequence[_bool]] = ..., - kind: Union[_str, Literal["quicksort", "mergesort", "heapsort"]] = ..., - na_position: Union[_str, Literal["first", "last"]] = ..., - sort_remaining: _bool = ..., - ignore_index: _bool = ..., - *, - inplace: Literal[True], - key: Optional[Callable] = ..., - ) -> None: ... - @overload - def sort_index( - self, - axis: AxisType = ..., - level: Optional[Union[Level, List[int], List[_str]]] = ..., - ascending: Union[_bool, Sequence[_bool]] = ..., - kind: Union[_str, Literal["quicksort", "mergesort", "heapsort"]] = ..., - na_position: Union[_str, Literal["first", "last"]] = ..., - sort_remaining: _bool = ..., - ignore_index: _bool = ..., - *, - inplace: Literal[False], - key: Optional[Callable] = ..., - ) -> Series: ... - @overload - def sort_index( - self, - axis: AxisType = ..., - level: Optional[Union[Level, List[int], List[_str]]] = ..., - ascending: Union[_bool, Sequence[_bool]] = ..., - *, - kind: Union[_str, Literal["quicksort", "mergesort", "heapsort"]] = ..., - na_position: Union[_str, Literal["first", "last"]] = ..., - sort_remaining: _bool = ..., - ignore_index: _bool = ..., - key: Optional[Callable] = ..., - ) -> Series: ... - @overload - def sort_index( - self, - axis: AxisType = ..., - level: Optional[Union[Level, List[int], List[_str]]] = ..., - ascending: Union[_bool, Sequence[_bool]] = ..., - inplace: Optional[_bool] = ..., - kind: Union[_str, Literal["quicksort", "mergesort", "heapsort"]] = ..., - na_position: Union[_str, Literal["first", "last"]] = ..., - sort_remaining: _bool = ..., - ignore_index: _bool = ..., - key: Optional[Callable] = ..., - ) -> Union[None, Series]: ... - def argsort( - self, - axis: SeriesAxisType = ..., - kind: Union[_str, Literal["mergesort", "quicksort", "heapsort"]] = ..., - order: None = ..., - ) -> Series[int]: ... - def nlargest( - self, n: int = ..., keep: Union[_str, Literal["first", "last", "all"]] = ... - ) -> Series[S1]: ... - def nsmallest( - self, n: int = ..., keep: Union[_str, Literal["first", "last", "all"]] = ... - ) -> Series[S1]: ... - def swaplevel( - self, i: Level = ..., j: Level = ..., copy: _bool = ... - ) -> Series[S1]: ... - def reorder_levels(self, order: List) -> Series[S1]: ... - def explode(self) -> Series[S1]: ... 
- def unstack( - self, - level: Level = ..., - fill_value: Optional[Union[int, _str, Dict]] = ..., - ) -> DataFrame: ... - def map( - self, arg, na_action: Optional[Union[_str, Literal["ignore"]]] = ... - ) -> Series[S1]: ... - def aggregate( - self, - func: Union[ - Callable, - _str, - List[Union[Callable, _str]], - Dict[SeriesAxisType, Union[Callable, _str]], - ], - axis: SeriesAxisType = ..., - *args, - **kwargs, - ) -> None: ... - def agg( - self, - func: Union[ - Callable, - _str, - List[Union[Callable, _str]], - Dict[SeriesAxisType, Union[Callable, _str]], - ] = ..., - axis: SeriesAxisType = ..., - *args, - **kwargs, - ) -> None: ... - def transform( - self, - func: Union[List[Callable], Dict[_str, Callable]], - axis: SeriesAxisType = ..., - *args, - **kwargs, - ) -> Series[S1]: ... - def apply( - self, func: Callable, convertDType: _bool = ..., args: Tuple = ..., **kwds - ) -> Union[Series, DataFrame]: ... - def align( - self, - other: Union[DataFrame, Series], - join: Union[_str, Literal["inner", "outer", "left", "right"]] = ..., - axis: Optional[AxisType] = ..., - level: Optional[Level] = ..., - copy: _bool = ..., - fill_value=..., - method: Optional[ - Union[_str, Literal["backfill", "bfill", "pad", "ffill"]] - ] = ..., - limit: Optional[int] = ..., - fill_axis: SeriesAxisType = ..., - broadcast_axis: Optional[SeriesAxisType] = ..., - ) -> Tuple[Series, Series]: ... - @overload - def rename( - self, - index: Optional[Union[Renamer, Hashable]] = ..., - *, - axis: Optional[Axis] = ..., - copy: bool = ..., - inplace: Literal[True], - level: Optional[Level] = ..., - errors: IgnoreRaise = ..., - ) -> None: ... - @overload - def rename( - self, - index: Optional[Renamer] = ..., - *, - axis: Optional[Axis] = ..., - copy: bool = ..., - inplace: Literal[False] = ..., - level: Optional[Level] = ..., - errors: IgnoreRaise = ..., - ) -> Series: ... - @overload - def rename( - self, - index: Optional[Hashable] = ..., - *, - axis: Optional[Axis] = ..., - copy: bool = ..., - inplace: Literal[False] = ..., - level: Optional[Level] = ..., - errors: IgnoreRaise = ..., - ) -> Series: ... - @overload - def rename( - self, - index: Optional[Union[Renamer, Hashable]] = ..., - *, - axis: Optional[Axis] = ..., - copy: bool = ..., - inplace: bool = ..., - level: Optional[Level] = ..., - errors: IgnoreRaise = ..., - ) -> Optional[Series]: ... - def reindex_like( - self, - other: Series[S1], - method: Optional[ - Union[_str, Literal["backfill", "bfill", "pad", "ffill", "nearest"]] - ] = ..., - copy: _bool = ..., - limit: Optional[int] = ..., - tolerance: Optional[float] = ..., - ) -> Series: ... - @overload - def drop( - self, - labels: Union[Hashable, List[Hashable]] = ..., - *, - axis: Axis = ..., - index: Union[Hashable, List[Hashable]] = ..., - columns: Union[Hashable, List[Hashable]] = ..., - level: Optional[Level] = ..., - inplace: Literal[True], - errors: IgnoreRaise = ..., - ) -> None: ... - @overload - def drop( - self, - labels: Union[Hashable, List[Hashable]] = ..., - *, - axis: Axis = ..., - index: Union[Hashable, List[Hashable]] = ..., - columns: Union[Hashable, List[Hashable]] = ..., - level: Optional[Level] = ..., - inplace: Literal[False] = ..., - errors: IgnoreRaise = ..., - ) -> Series: ... 
- @overload - def drop( - self, - labels: Union[Hashable, List[Hashable]] = ..., - *, - axis: Axis = ..., - index: Union[Hashable, List[Hashable]] = ..., - columns: Union[Hashable, List[Hashable]] = ..., - level: Optional[Level] = ..., - inplace: bool = ..., - errors: IgnoreRaise = ..., - ) -> Optional[Series]: ... - @overload - def fillna( - self, - value: Optional[Union[Scalar, Dict, Series[S1], DataFrame]] = ..., - method: Optional[ - Union[_str, Literal["backfill", "bfill", "pad", "ffill"]] - ] = ..., - axis: SeriesAxisType = ..., - limit: Optional[int] = ..., - downcast: Optional[Dict] = ..., - *, - inplace: Literal[True], - ) -> None: ... - @overload - def fillna( - self, - value: Optional[Union[Scalar, Dict, Series[S1], DataFrame]] = ..., - method: Optional[ - Union[_str, Literal["backfill", "bfill", "pad", "ffill"]] - ] = ..., - axis: SeriesAxisType = ..., - *, - limit: Optional[int] = ..., - downcast: Optional[Dict] = ..., - ) -> Series[S1]: ... - @overload - def fillna( - self, - value: Optional[Union[Scalar, Dict, Series[S1], DataFrame]] = ..., - method: Optional[ - Union[_str, Literal["backfill", "bfill", "pad", "ffill"]] - ] = ..., - axis: SeriesAxisType = ..., - inplace: _bool = ..., - limit: Optional[int] = ..., - downcast: Optional[Dict] = ..., - ) -> Union[Series[S1], None]: ... - def replace( - self, - to_replace: Optional[Union[_str, List, Dict, Series[S1], int, float]] = ..., - value: Optional[Union[Scalar, Dict, List, _str]] = ..., - inplace: _bool = ..., - limit: Optional[int] = ..., - regex=..., - method: Optional[Union[_str, Literal["pad", "ffill", "bfill"]]] = ..., - ) -> Series[S1]: ... - def shift( - self, - periods: int = ..., - freq=..., - axis: SeriesAxisType = ..., - fill_value: Optional[object] = ..., - ) -> Series[S1]: ... - def memory_usage(self, index: _bool = ..., deep: _bool = ...) -> int: ... - def isin(self, values: Union[Iterable, Series[S1], Dict]) -> Series[_bool]: ... - def between( - self, - left: Union[Scalar, Sequence], - right: Union[Scalar, Sequence], - inclusive: Literal["both", "neither", "left", "right"] = "both", - ) -> Series[_bool]: ... - def isna(self) -> Series[_bool]: ... - def isnull(self) -> Series[_bool]: ... - def notna(self) -> Series[_bool]: ... - def notnull(self) -> Series[_bool]: ... - @overload - def dropna( - self, - axis: SeriesAxisType = ..., - how: Optional[_str] = ..., - *, - inplace: Literal[True], - ) -> None: ... - @overload - def dropna( - self, - axis: SeriesAxisType = ..., - inplace: _bool = ..., - how: Optional[_str] = ..., - ) -> Series[S1]: ... - def to_timestamp( - self, - freq=..., - how: Union[_str, Literal["start", "end", "s", "e"]] = ..., - copy: _bool = ..., - ) -> Series[S1]: ... - def to_period(self, freq: Optional[_str] = ..., copy: _bool = ...) -> DataFrame: ... - @property - def str(self) -> StringMethods[Series[Any]]: ... - @property - def dt(self) -> Series: ... - cat = ... - def plot(self, **kwargs) -> Union[PlotAxes, np.ndarray]: ... - sparse = ... - def hist( - self, - by: Optional[object] = ..., - ax: Optional[PlotAxes] = ..., - grid: _bool = ..., - xlabelsize: Optional[int] = ..., - xrot: Optional[float] = ..., - ylabelsize: Optional[int] = ..., - yrot: Optional[float] = ..., - figsize: Optional[Tuple[float, float]] = ..., - bins: Union[int, Sequence] = ..., - backend: Optional[_str] = ..., - **kwargs, - ) -> SubplotBase: ... - def swapaxes( - self, axis1: SeriesAxisType, axis2: SeriesAxisType, copy: _bool = ... - ) -> Series[S1]: ... 
- def droplevel( - self, level: Union[Level, List[Level]], axis: SeriesAxisType = ... - ) -> DataFrame: ... - def pop(self, item: _str) -> Series[S1]: ... - def squeeze(self, axis: Optional[SeriesAxisType] = ...) -> Scalar: ... - def __abs__(self) -> Series[S1]: ... - def add_prefix(self, prefix: _str) -> Series[S1]: ... - def add_suffix(self, suffix: _str) -> Series[S1]: ... - def reindex(self, index: Optional[_ListLike] = ..., **kwargs) -> Series[S1]: ... - def filter( - self, - items: Optional[_ListLike] = ..., - like: Optional[_str] = ..., - regex: Optional[_str] = ..., - axis: Optional[SeriesAxisType] = ..., - ) -> Series[S1]: ... - def head(self, n: int = ...) -> Series[S1]: ... - def tail(self, n: int = ...) -> Series[S1]: ... - def sample( - self, - n: Optional[int] = ..., - frac: Optional[float] = ..., - replace: _bool = ..., - weights: Optional[Union[_str, _ListLike, np.ndarray]] = ..., - random_state: Optional[int] = ..., - axis: Optional[SeriesAxisType] = ..., - ignore_index: _bool = ..., - ) -> Series[S1]: ... - def astype( - self, - dtype: Union[S1, _str, Type[Scalar]], - copy: _bool = ..., - errors: Union[_str, Literal["raise", "ignore"]] = ..., - ) -> Series: ... - def copy(self, deep: _bool = ...) -> Series[S1]: ... - def infer_objects(self) -> Series[S1]: ... - def convert_dtypes( - self, - infer_objects: _bool = ..., - convert_string: _bool = ..., - convert_integer: _bool = ..., - convert_boolean: _bool = ..., - ) -> Series[S1]: ... - @overload - def ffill( - self, - axis: Optional[SeriesAxisType] = ..., - *, - inplace: Literal[True], - limit: Optional[int] = ..., - downcast: Optional[Dict] = ..., - ) -> None: ... - @overload - def ffill( - self, - axis: Optional[SeriesAxisType] = ..., - *, - inplace: Literal[False], - limit: Optional[int] = ..., - downcast: Optional[Dict] = ..., - ) -> Series[S1]: ... - @overload - def bfill( - self, - axis: Optional[SeriesAxisType] = ..., - *, - inplace: Literal[True], - limit: Optional[int] = ..., - downcast: Optional[Dict] = ..., - ) -> None: ... - @overload - def bfill( - self, - axis: Optional[SeriesAxisType] = ..., - *, - inplace: Literal[False], - limit: Optional[int] = ..., - downcast: Optional[Dict] = ..., - ) -> Series[S1]: ... - @overload - def bfill( - self, - value: Union[S1, Dict, Series[S1], DataFrame], - axis: SeriesAxisType = ..., - inplace: _bool = ..., - limit: Optional[int] = ..., - downcast: Optional[Dict] = ..., - ) -> Union[Series[S1], None]: ... - def interpolate( - self, - method: Union[ - _str, - Literal[ - "linear", - "time", - "index", - "values", - "pad", - "nearest", - "slinear", - "quadratic", - "cubic", - "spline", - "barycentric", - "polynomial", - "krogh", - "pecewise_polynomial", - "spline", - "pchip", - "akima", - "from_derivatives", - ], - ] = ..., - axis: Optional[SeriesAxisType] = ..., - limit: Optional[int] = ..., - inplace: _bool = ..., - limit_direction: Optional[ - Union[_str, Literal["forward", "backward", "both"]] - ] = ..., - limit_area: Optional[Union[_str, Literal["inside", "outside"]]] = ..., - downcast: Optional[Union[_str, Literal["infer"]]] = ..., - **kwargs, - ) -> Series[S1]: ... - def asof( - self, - where: Union[Scalar, Sequence[Scalar]], - subset: Optional[Union[_str, Sequence[_str]]] = ..., - ) -> Union[Scalar, Series[S1]]: ... - def clip( - self, - lower: Optional[float] = ..., - upper: Optional[float] = ..., - axis: Optional[SeriesAxisType] = ..., - inplace: _bool = ..., - *args, - **kwargs, - ) -> Series[S1]: ... 
- def asfreq( - self, - freq, - method: Optional[ - Union[_str, Literal["backfill", "bfill", "pad", "ffill"]] - ] = ..., - how: Optional[Union[_str, Literal["start", "end"]]] = ..., - normalize: _bool = ..., - fill_value: Optional[Scalar] = ..., - ) -> Series[S1]: ... - def at_time( - self, - time: Union[_str, time], - asof: _bool = ..., - axis: Optional[SeriesAxisType] = ..., - ) -> Series[S1]: ... - def between_time( - self, - start_time: Union[_str, time], - end_time: Union[_str, time], - include_start: _bool = ..., - include_end: _bool = ..., - axis: Optional[SeriesAxisType] = ..., - ) -> Series[S1]: ... - def resample( - self, - rule, - axis: SeriesAxisType = ..., - closed: Optional[_str] = ..., - label: Optional[_str] = ..., - convention: Union[_str, Literal["start", "end", "s", "e"]] = ..., - kind: Optional[Union[_str, Literal["timestamp", "period"]]] = ..., - loffset=..., - base: int = ..., - on: Optional[_str] = ..., - level: Optional[Level] = ..., - origin: Union[ - Timestamp, Literal["epoch", "start", "start_day", "end", "end_day"] - ] = ..., - offset: Optional[Union[Timedelta, _str]] = None, - ) -> Resampler: ... - def first(self, offset) -> Series[S1]: ... - def last(self, offset) -> Series[S1]: ... - def rank( - self, - axis: SeriesAxisType = ..., - method: Union[_str, Literal["average", "min", "max", "first", "dense"]] = ..., - numeric_only: Optional[_bool] = ..., - na_option: Union[_str, Literal["keep", "top", "bottom"]] = ..., - ascending: _bool = ..., - pct: _bool = ..., - ) -> Series: ... - def where( - self, - cond: Union[Series[S1], Series[_bool], np.ndarray], - other=..., - inplace: _bool = ..., - axis: Optional[SeriesAxisType] = ..., - level: Optional[Level] = ..., - errors: _str = ..., - try_cast: _bool = ..., - ) -> Series[S1]: ... - def mask( - self, - cond: MaskType, - other: Union[Scalar, Series[S1], DataFrame, Callable] = ..., - inplace: _bool = ..., - axis: Optional[SeriesAxisType] = ..., - level: Optional[Level] = ..., - errors: Union[_str, Literal["raise", "ignore"]] = ..., - try_cast: _bool = ..., - ) -> Series[S1]: ... - def slice_shift( - self, periods: int = ..., axis: SeriesAxisType = ... - ) -> Series[S1]: ... - def tshift( - self, periods: int = ..., freq=..., axis: SeriesAxisType = ... - ) -> Series[S1]: ... - def truncate( - self, - before: Optional[Union[date, _str, int]] = ..., - after: Optional[Union[date, _str, int]] = ..., - axis: Optional[SeriesAxisType] = ..., - copy: _bool = ..., - ) -> Series[S1]: ... - def tz_convert( - self, - tz, - axis: SeriesAxisType = ..., - level: Optional[Level] = ..., - copy: _bool = ..., - ) -> Series[S1]: ... - def tz_localize( - self, - tz, - axis: SeriesAxisType = ..., - level: Optional[Level] = ..., - copy: _bool = ..., - ambiguous=..., - nonexistent: _str = ..., - ) -> Series[S1]: ... - def abs(self) -> Series[S1]: ... - def describe( - self, - percentiles: Optional[List[float]] = ..., - include: Optional[Union[_str, Literal["all"], List[S1]]] = ..., - exclude: Optional[Union[S1, List[S1]]] = ..., - datetime_is_numeric: Optional[_bool] = ..., - ) -> Series[S1]: ... - def pct_change( - self, - periods: int = ..., - fill_method: _str = ..., - limit: Optional[int] = ..., - freq=..., - **kwargs, - ) -> Series[S1]: ... - def first_valid_index(self) -> Scalar: ... - def last_valid_index(self) -> Scalar: ... - def value_counts( - self, - normalize: _bool = ..., - sort: _bool = ..., - ascending: _bool = ..., - bins: Optional[int] = ..., - dropna: _bool = ..., - ) -> Series[S1]: ... 
- def transpose(self, *args, **kwargs) -> Series[S1]: ... - @property - def T(self) -> Series[S1]: ... - # The rest of these were left over from the old - # stubs we shipped in preview. They may belong in - # the base classes in some cases; I expect stubgen - # just failed to generate these so I couldn't match - # them up. - @overload - def __add__( - self: Series[Timedelta], other: Series[Timestamp] - ) -> Series[Timestamp]: ... - @overload - def __add__(self: Series[Timedelta], other: DatetimeIndex) -> Series[Timestamp]: ... - @overload - def __add__(self: Series[Timedelta], other: Timestamp) -> Series[Timestamp]: ... - @overload - def __add__( - self, other: Union[num, _str, Timedelta, _ListLike, Series[S1]] - ) -> Series: ... - def __and__(self, other: Union[_ListLike, Series[S1]]) -> Series[_bool]: ... - # def __array__(self, dtype: Optional[_bool] = ...) -> _np_ndarray - def __div__(self, other: Union[num, _ListLike, Series[S1]]) -> Series[S1]: ... - def __eq__(self, other: object) -> Series[_bool]: ... # type: ignore - def __floordiv__(self, other: Union[num, _ListLike, Series[S1]]) -> Series[int]: ... - def __ge__(self, other: Union[num, _ListLike, Series[S1]]) -> Series[_bool]: ... - def __gt__(self, other: Union[num, _ListLike, Series[S1]]) -> Series[_bool]: ... - # def __iadd__(self, other: S1) -> Series[S1]: ... - # def __iand__(self, other: S1) -> Series[_bool]: ... - # def __idiv__(self, other: S1) -> Series[S1]: ... - # def __ifloordiv__(self, other: S1) -> Series[S1]: ... - # def __imod__(self, other: S1) -> Series[S1]: ... - # def __imul__(self, other: S1) -> Series[S1]: ... - # def __ior__(self, other: S1) -> Series[_bool]: ... - # def __ipow__(self, other: S1) -> Series[S1]: ... - # def __isub__(self, other: S1) -> Series[S1]: ... - # def __itruediv__(self, other: S1) -> Series[S1]: ... - # def __itruediv__(self, other) -> None: ... - # def __ixor__(self, other: S1) -> Series[_bool]: ... - def __le__(self, other: Union[num, _ListLike, Series[S1]]) -> Series[_bool]: ... - def __lt__(self, other: Union[num, _ListLike, Series[S1]]) -> Series[_bool]: ... - @overload - def __mul__(self, other: Union[num, _ListLike, Series[S1]]) -> Series: ... - @overload - def __mul__( - self, other: Union[Timedelta, Series[Timedelta]] - ) -> Series[Timedelta]: ... - @overload - def __mul__( - self: Series[Timedelta], - other: Union[num, Series[bool], Series[int], Series[float]], - ) -> Series[Timedelta]: ... - def __mod__(self, other: Union[num, _ListLike, Series[S1]]) -> Series[S1]: ... - def __ne__(self, other: object) -> Series[_bool]: ... # type: ignore - def __pow__(self, other: Union[num, _ListLike, Series[S1]]) -> Series[S1]: ... - def __or__(self, other: Union[_ListLike, Series[S1]]) -> Series[_bool]: ... - def __radd__( - self, other: Union[num, _str, _ListLike, Series[S1]] - ) -> Series[S1]: ... - def __rand__(self, other: Union[num, _ListLike, Series[S1]]) -> Series[_bool]: ... - def __rdiv__(self, other: Union[num, _ListLike, Series[S1]]) -> Series[S1]: ... - def __rdivmod__(self, other: Union[num, _ListLike, Series[S1]]) -> Series[S1]: ... - def __rfloordiv__(self, other: Union[num, _ListLike, Series[S1]]) -> Series[S1]: ... - def __rmod__(self, other: Union[num, _ListLike, Series[S1]]) -> Series[S1]: ... - @overload - def __rmul__(self, other: Union[num, _ListLike, Series[S1]]) -> Series: ... - @overload - def __rmul__( - self, other: Union[Timedelta, Series[Timedelta]] - ) -> Series[Timedelta]: ... 
- @overload - def __rmul__( - self: Series[Timedelta], - other: Union[num, Series[bool], Series[int], Series[float]], - ) -> Series[Timedelta]: ... - def __rnatmul__(self, other: Union[num, _ListLike, Series[S1]]) -> Series[S1]: ... - def __rpow__(self, other: Union[num, _ListLike, Series[S1]]) -> Series[S1]: ... - def __ror__(self, other: Union[num, _ListLike, Series[S1]]) -> Series[_bool]: ... - @overload - def __rsub__( - self: Series[Timestamp], other: Union[Timestamp, Series[Timestamp]] - ) -> Series[Timedelta]: ... - @overload - def __rsub__( - self: Series[Timestamp], other: Union[Timedelta, Series[Timedelta]] - ) -> Series[Timestamp]: ... - @overload - def __rsub__( - self, other: Union[num, Timedelta, _ListLike, Series[S1]] - ) -> Series: ... - @overload - def __rtruediv__( - self: Series[Timedelta], other: Union[Timedelta, Series[Timedelta]] - ) -> Series[float]: ... - @overload - def __rtruediv__(self, other: Union[num, _ListLike, Series[S1]]) -> Series: ... - def __rxor__(self, other: Union[num, _ListLike, Series[S1]]) -> Series[_bool]: ... - @overload - def __sub__( - self: Series[Timestamp], other: Union[Timestamp, Series[Timestamp]] - ) -> Series[Timedelta]: ... - @overload - def __sub__( - self: Series[Timestamp], other: Union[Timedelta, Series[Timedelta]] - ) -> Series[Timestamp]: ... - @overload - def __sub__( - self, other: Union[num, Timedelta, _ListLike, Series[S1]] - ) -> Series: ... - @overload - def __truediv__( - self: Series[Timedelta], other: Union[Timedelta, Series[Timedelta]] - ) -> Series[float]: ... - @overload - def __truediv__(self, other: Union[num, _ListLike, Series[S1]]) -> Series: ... - def __xor__(self, other: Union[_ListLike, Series[S1]]) -> Series: ... - def __invert__(self) -> Series[bool]: ... - # properties - # @property - # def array(self) -> _npndarray - @property - def at(self) -> _LocIndexerSeries[S1]: ... - # @property - # def cat(self) -> ? - @property - def iat(self) -> _iLocIndexerSeries[S1]: ... - @property - def iloc(self) -> _iLocIndexerSeries[S1]: ... - @property - def loc(self) -> _LocIndexerSeries[S1]: ... - # Methods - def add( - self, - other: Union[Series[S1], Scalar], - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - axis: int = ..., - ) -> Series[S1]: ... - def all( - self, - axis: SeriesAxisType = ..., - bool_only: Optional[_bool] = ..., - skipna: _bool = ..., - level: Optional[Level] = ..., - **kwargs, - ) -> _bool: ... - def any( - self, - axis: SeriesAxisType = ..., - bool_only: Optional[_bool] = ..., - skipna: _bool = ..., - level: Optional[Level] = ..., - **kwargs, - ) -> _bool: ... - def cummax( - self, axis: Optional[SeriesAxisType] = ..., skipna: _bool = ..., *args, **kwargs - ) -> Series[S1]: ... - def cummin( - self, axis: Optional[SeriesAxisType] = ..., skipna: _bool = ..., *args, **kwargs - ) -> Series[S1]: ... - def cumprod( - self, axis: Optional[SeriesAxisType] = ..., skipna: _bool = ..., *args, **kwargs - ) -> Series[S1]: ... - def cumsum( - self, axis: Optional[SeriesAxisType] = ..., skipna: _bool = ..., *args, **kwargs - ) -> Series[S1]: ... - def divide( - self, - other: Union[num, _ListLike, Series[S1]], - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - axis: SeriesAxisType = ..., - ) -> Series[float]: ... - def divmod( - self, - other: Union[num, _ListLike, Series[S1]], - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - axis: SeriesAxisType = ..., - ) -> Series[S1]: ... 
- def eq( - self, - other: Union[Scalar, Series[S1]], - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - axis: SeriesAxisType = ..., - ) -> Series[_bool]: ... - def ewm( - self, - com: Optional[float] = ..., - span: Optional[float] = ..., - halflife: Optional[float] = ..., - alpha: Optional[float] = ..., - min_periods: int = ..., - adjust: _bool = ..., - ignore_na: _bool = ..., - axis: SeriesAxisType = ..., - ) -> ExponentialMovingWindow: ... - def expanding( - self, min_periods: int = ..., center: _bool = ..., axis: SeriesAxisType = ... - ) -> DataFrame: ... - def floordiv( - self, - other: Union[num, _ListLike, Series[S1]], - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - axis: Optional[SeriesAxisType] = ..., - ) -> Series[int]: ... - def ge( - self, - other: Union[Scalar, Series[S1]], - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - axis: SeriesAxisType = ..., - ) -> Series[_bool]: ... - def gt( - self, - other: Union[Scalar, Series[S1]], - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - axis: SeriesAxisType = ..., - ) -> Series[_bool]: ... - def item(self) -> S1: ... - @overload - def kurt( - self, - axis: Optional[SeriesAxisType] = ..., - skipna: _bool = ..., - numeric_only: Optional[_bool] = ..., - *, - level: Level, - **kwargs, - ) -> Series[S1]: ... - @overload - def kurt( - self, - axis: Optional[SeriesAxisType] = ..., - skipna: _bool = ..., - level: None = ..., - numeric_only: Optional[_bool] = ..., - **kwargs, - ) -> Scalar: ... - @overload - def kurtosis( - self, - axis: Optional[SeriesAxisType] = ..., - skipna: _bool = ..., - numeric_only: Optional[_bool] = ..., - *, - level: Optional[Level], - **kwargs, - ) -> Series[S1]: ... - @overload - def kurtosis( - self, - axis: Optional[SeriesAxisType] = ..., - skipna: _bool = ..., - level: None = ..., - numeric_only: Optional[_bool] = ..., - **kwargs, - ) -> Scalar: ... - def le( - self, - other: Union[Scalar, Series[S1]], - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - axis: SeriesAxisType = ..., - ) -> Series[_bool]: ... - def lt( - self, - other: Union[Scalar, Series[S1]], - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - axis: SeriesAxisType = ..., - ) -> Series[_bool]: ... - @overload - def mad( - self, - axis: Optional[SeriesAxisType] = ..., - skipna: _bool = ..., - *, - level: Level, - **kwargs, - ) -> Series[S1]: ... - @overload - def mad( - self, - axis: Optional[SeriesAxisType] = ..., - skipna: _bool = ..., - level: None = ..., - **kwargs, - ) -> Scalar: ... - @overload - def max( - self, - axis: Optional[SeriesAxisType] = ..., - skipna: _bool = ..., - *, - level: Level, - numeric_only: Optional[_bool] = ..., - **kwargs, - ) -> Series[S1]: ... - @overload - def max( - self, - axis: Optional[SeriesAxisType] = ..., - skipna: _bool = ..., - *, - level: None = ..., - numeric_only: Optional[_bool] = ..., - **kwargs, - ) -> S1: ... - @overload - def mean( - self, - axis: Optional[SeriesAxisType] = ..., - skipna: _bool = ..., - numeric_only: Optional[_bool] = ..., - *, - level: Level, - **kwargs, - ) -> Series[S1]: ... - @overload - def mean( - self, - axis: Optional[SeriesAxisType] = ..., - skipna: _bool = ..., - level: None = ..., - numeric_only: Optional[_bool] = ..., - **kwargs, - ) -> float: ... - @overload - def median( - self, - axis: Optional[SeriesAxisType] = ..., - skipna: _bool = ..., - numeric_only: Optional[_bool] = ..., - *, - level: Level, - **kwargs, - ) -> Series[S1]: ... 
- @overload - def median( - self, - axis: Optional[SeriesAxisType] = ..., - skipna: _bool = ..., - level: None = ..., - numeric_only: Optional[_bool] = ..., - **kwargs, - ) -> float: ... - @overload - def min( - self, - axis: Optional[SeriesAxisType] = ..., - skipna: _bool = ..., - numeric_only: Optional[_bool] = ..., - *, - level: Level, - **kwargs, - ) -> Series[S1]: ... - @overload - def min( - self, - axis: Optional[SeriesAxisType] = ..., - skipna: _bool = ..., - level: None = ..., - numeric_only: Optional[_bool] = ..., - **kwargs, - ) -> S1: ... - def mod( - self, - other: Union[num, _ListLike, Series[S1]], - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - axis: Optional[SeriesAxisType] = ..., - ) -> Series[S1]: ... - def mul( - self, - other: Union[num, _ListLike, Series[S1]], - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - axis: Optional[SeriesAxisType] = ..., - ) -> Series[S1]: ... - def multiply( - self, - other: Union[num, _ListLike, Series[S1]], - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - axis: Optional[SeriesAxisType] = ..., - ) -> Series[S1]: ... - def ne( - self, - other: Union[Scalar, Series[S1]], - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - axis: SeriesAxisType = ..., - ) -> Series[_bool]: ... - def nunique(self, dropna: _bool = ...) -> int: ... - def pow( - self, - other: Union[num, _ListLike, Series[S1]], - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - axis: Optional[SeriesAxisType] = ..., - ) -> Series[S1]: ... - @overload - def prod( - self, - axis: Optional[SeriesAxisType] = ..., - skipna: Optional[_bool] = ..., - numeric_only: Optional[_bool] = ..., - min_count: int = ..., - *, - level: Level, - **kwargs, - ) -> Series[S1]: ... - @overload - def prod( - self, - axis: Optional[SeriesAxisType] = ..., - skipna: Optional[_bool] = ..., - level: None = ..., - numeric_only: Optional[_bool] = ..., - min_count: int = ..., - **kwargs, - ) -> Scalar: ... - @overload - def product( - self, - axis: Optional[SeriesAxisType] = ..., - skipna: Optional[_bool] = ..., - numeric_only: Optional[_bool] = ..., - min_count: int = ..., - *, - level: Level, - **kwargs, - ) -> Series[S1]: ... - @overload - def product( - self, - axis: Optional[SeriesAxisType] = ..., - skipna: Optional[_bool] = ..., - level: None = ..., - numeric_only: Optional[_bool] = ..., - min_count: int = ..., - **kwargs, - ) -> Scalar: ... - def radd( - self, - other: Union[Series[S1], Scalar], - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - axis: SeriesAxisType = ..., - ) -> Series[S1]: ... - def rdivmod( - self, - other: Union[Series[S1], Scalar], - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - axis: SeriesAxisType = ..., - ) -> Series[S1]: ... - def rfloordiv( - self, - other, - level: Optional[Level] = ..., - fill_value: Optional[Union[float, None]] = ..., - axis: SeriesAxisType = ..., - ) -> Series[S1]: ... - def rmod( - self, - other: Union[Series[S1], Scalar], - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - axis: SeriesAxisType = ..., - ) -> Series[S1]: ... - def rmul( - self, - other: Union[Series[S1], Scalar], - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - axis: SeriesAxisType = ..., - ) -> Series[S1]: ... 
- @overload - def rolling( - self, - window, - min_periods: Optional[int] = ..., - center: _bool = ..., - *, - win_type: _str, - on: Optional[_str] = ..., - axis: SeriesAxisType = ..., - closed: Optional[_str] = ..., - ) -> Window: ... - @overload - def rolling( - self, - window, - min_periods: Optional[int] = ..., - center: _bool = ..., - *, - on: Optional[_str] = ..., - axis: SeriesAxisType = ..., - closed: Optional[_str] = ..., - ) -> Rolling: ... - def rpow( - self, - other: Union[Series[S1], Scalar], - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - axis: SeriesAxisType = ..., - ) -> Series[S1]: ... - def rsub( - self, - other: Union[Series[S1], Scalar], - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - axis: SeriesAxisType = ..., - ) -> Series[S1]: ... - def rtruediv( - self, - other, - level: Optional[Level] = ..., - fill_value: Optional[Union[float, None]] = ..., - axis: SeriesAxisType = ..., - ) -> Series[S1]: ... - @overload - def sem( - self, - axis: Optional[SeriesAxisType] = ..., - skipna: Optional[_bool] = ..., - ddof: int = ..., - numeric_only: Optional[_bool] = ..., - *, - level: Level, - **kwargs, - ) -> Series[S1]: ... - @overload - def sem( - self, - axis: Optional[SeriesAxisType] = ..., - skipna: Optional[_bool] = ..., - level: None = ..., - ddof: int = ..., - numeric_only: Optional[_bool] = ..., - **kwargs, - ) -> Scalar: ... - @overload - def skew( - self, - axis: Optional[SeriesAxisType] = ..., - skipna: Optional[_bool] = ..., - numeric_only: Optional[_bool] = ..., - *, - level: Level, - **kwargs, - ) -> Series[S1]: ... - @overload - def skew( - self, - axis: Optional[SeriesAxisType] = ..., - skipna: Optional[_bool] = ..., - level: None = ..., - numeric_only: Optional[_bool] = ..., - **kwargs, - ) -> Scalar: ... - @overload - def std( - self, - axis: Optional[SeriesAxisType] = ..., - skipna: Optional[_bool] = ..., - ddof: int = ..., - numeric_only: Optional[_bool] = ..., - *, - level: Level, - **kwargs, - ) -> Series[float]: ... - @overload - def std( - self, - axis: Optional[SeriesAxisType] = ..., - skipna: Optional[_bool] = ..., - level: None = ..., - ddof: int = ..., - numeric_only: Optional[_bool] = ..., - **kwargs, - ) -> float: ... - def sub( - self, - other: Union[num, _ListLike, Series[S1]], - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - axis: Optional[SeriesAxisType] = ..., - ) -> Series[S1]: ... - def subtract( - self, - other: Union[num, _ListLike, Series[S1]], - level: Optional[Level] = ..., - fill_value: Optional[float] = ..., - axis: Optional[SeriesAxisType] = ..., - ) -> Series[S1]: ... - def sum( - self: Series[S1], - axis: Optional[SeriesAxisType] = ..., - skipna: Optional[_bool] = ..., - level: Optional[Level] = ..., - numeric_only: Optional[_bool] = ..., - min_count: int = ..., - **kwargs, - ) -> S1: ... - def to_list(self) -> List: ... - def to_numpy( - self, - dtype: Optional[Type[DtypeNp]] = ..., - copy: _bool = ..., - na_value=..., - **kwargs, - ) -> np.ndarray: ... - def to_records( - self, - index: _bool = ..., - columnDTypes: Optional[Union[_str, Dict]] = ..., - indexDTypes: Optional[Union[_str, Dict]] = ..., - ): ... - def tolist(self) -> List: ... - def truediv( - self, - other, - level: Optional[Level] = ..., - fill_value: Optional[Union[float, None]] = ..., - axis: SeriesAxisType = ..., - ) -> Series[float]: ... 
- @overload - def var( - self, - axis: Optional[SeriesAxisType] = ..., - skipna: Optional[_bool] = ..., - ddof: int = ..., - numeric_only: Optional[_bool] = ..., - *, - level: Level, - **kwargs, - ) -> Series[S1]: ... - @overload - def var( - self, - axis: Optional[SeriesAxisType] = ..., - skipna: Optional[_bool] = ..., - level: None = ..., - ddof: int = ..., - numeric_only: Optional[_bool] = ..., - **kwargs, - ) -> Scalar: ... - @overload - def rename_axis( - self, - mapper: Union[Scalar, ListLike] = ..., - index: Optional[Union[Scalar, ListLike, Callable, Dict]] = ..., - columns: Optional[Union[Scalar, ListLike, Callable, Dict]] = ..., - axis: Optional[SeriesAxisType] = ..., - copy: _bool = ..., - *, - inplace: Literal[True], - ) -> None: ... - @overload - def rename_axis( - self, - mapper: Union[Scalar, ListLike] = ..., - index: Optional[Union[Scalar, ListLike, Callable, Dict]] = ..., - columns: Optional[Union[Scalar, ListLike, Callable, Dict]] = ..., - axis: Optional[SeriesAxisType] = ..., - copy: _bool = ..., - inplace: Literal[False] = ..., - ) -> Series: ... - @overload - def set_axis( - self, labels, axis: Axis = ..., inplace: Literal[False] = ... - ) -> Series[S1]: ... - @overload - def set_axis(self, labels, axis: Axis, inplace: Literal[True]) -> None: ... - @overload - def set_axis(self, labels, *, inplace: Literal[True]) -> None: ... - @overload - def set_axis( - self, labels, axis: Axis = ..., inplace: bool = ... - ) -> Optional[Series[S1]]: ... diff --git a/typings/pandas/core/sorting.pyi b/typings/pandas/core/sorting.pyi deleted file mode 100644 index 76c1590..0000000 --- a/typings/pandas/core/sorting.pyi +++ /dev/null @@ -1,30 +0,0 @@ -from typing import Any - -def get_group_index(labels: Any, shape: Any, sort: bool, xnull: bool) -> Any: ... -def get_compressed_ids(labels: Any, sizes: Any): ... -def is_int64_overflow_possible(shape: Any) -> bool: ... -def decons_group_index(comp_labels: Any, shape: Any): ... -def decons_obs_group_ids( - comp_ids: Any, obs_ids: Any, shape: Any, labels: Any, xnull: bool -) -> Any: ... -def indexer_from_factorized(labels: Any, shape: Any, compress: bool = ...) -> Any: ... -def lexsort_indexer(keys: Any, orders: Any = ..., na_position: str = ...) -> Any: ... -def nargsort( - items: Any, kind: str = ..., ascending: bool = ..., na_position: str = ... -) -> Any: ... - -class _KeyMapper: - levels: Any = ... - labels: Any = ... - comp_ids: Any = ... - k: Any = ... - tables: Any = ... - def __init__( - self, comp_ids: Any, ngroups: int, levels: Any, labels: Any - ) -> None: ... - def get_key(self, comp_id: Any): ... - -def get_flattened_iterator(comp_ids: Any, ngroups: Any, levels: Any, labels: Any): ... -def get_indexer_dict(label_list: Any, keys: Any): ... -def get_group_index_sorter(group_index: Any, ngroups: int) -> Any: ... -def compress_group_index(group_index: Any, sort: bool = ...) -> Any: ... diff --git a/typings/pandas/core/strings.pyi b/typings/pandas/core/strings.pyi deleted file mode 100644 index 32718ac..0000000 --- a/typings/pandas/core/strings.pyi +++ /dev/null @@ -1,101 +0,0 @@ -from pandas.core.base import NoNewAttributesMixin as NoNewAttributesMixin -from pandas.core.series import Series -from typing import List, Generic, TypeVar - -def cat_core(list_of_columns: List, sep: str): ... -def cat_safe(list_of_columns: List, sep: str): ... -def str_count(arr, pat, flags: int = ...): ... -def str_contains( - arr, pat, case: bool = ..., flags: int = ..., na=..., regex: bool = ... -): ... -def str_startswith(arr, pat, na=...): ... 
-def str_endswith(arr, pat, na=...): ... -def str_replace( - arr, pat, repl, n: int = ..., case=..., flags: int = ..., regex: bool = ... -): ... -def str_repeat(arr, repeats): ... -def str_match(arr, pat, case: bool = ..., flags: int = ..., na=...): ... -def str_extract(arr, pat, flags: int = ..., expand: bool = ...): ... -def str_extractall(arr, pat, flags: int = ...): ... -def str_get_dummies(arr, sep: str = ...): ... -def str_join(arr, sep): ... -def str_findall(arr, pat, flags: int = ...): ... -def str_find(arr, sub, start: int = ..., end=..., side: str = ...): ... -def str_index(arr, sub, start: int = ..., end=..., side: str = ...): ... -def str_pad(arr, width, side: str = ..., fillchar: str = ...): ... -def str_split(arr, pat=..., n=...): ... -def str_rsplit(arr, pat=..., n=...): ... -def str_slice(arr, start=..., stop=..., step=...): ... -def str_slice_replace(arr, start=..., stop=..., repl=...): ... -def str_strip(arr, to_strip=..., side: str = ...): ... -def str_wrap(arr, width, **kwargs): ... -def str_translate(arr, table): ... -def str_get(arr, i): ... -def str_decode(arr, encoding, errors: str = ...): ... -def str_encode(arr, encoding, errors: str = ...): ... -def forbid_nonstring_types(forbidden, name=...): ... -def copy(source): ... - -RT = TypeVar("RT") - -class StringMethods(NoNewAttributesMixin, Generic[RT]): - def __init__(self, data) -> None: ... - def __getitem__(self, key) -> RT: ... - def __iter__(self): ... - def cat(self, others=..., sep=..., na_rep=..., join: str = ...) -> RT: ... - def split(self, pat=..., n: int = ..., expand: bool = ...) -> RT: ... - def rsplit(self, pat=..., n: int = ..., expand: bool = ...) -> RT: ... - def partition(self, sep: str = ..., expand: bool = ...) -> RT: ... - def rpartition(self, sep: str = ..., expand: bool = ...) -> RT: ... - def get(self, i) -> RT: ... - def join(self, sep) -> RT: ... - def contains( - self, pat, case: bool = ..., flags: int = ..., na=..., regex: bool = ... - ) -> Series[bool]: ... - def match(self, pat, case: bool = ..., flags: int = ..., na=...) -> RT: ... - def replace( - self, pat, repl, n: int = ..., case=..., flags: int = ..., regex: bool = ... - ) -> RT: ... - def repeat(self, repeats) -> RT: ... - def pad(self, width, side: str = ..., fillchar: str = ...) -> RT: ... - def center(self, width, fillchar: str = ...) -> RT: ... - def ljust(self, width, fillchar: str = ...) -> RT: ... - def rjust(self, width, fillchar: str = ...) -> RT: ... - def zfill(self, width) -> RT: ... - def slice(self, start=..., stop=..., step=...) -> RT: ... - def slice_replace(self, start=..., stop=..., repl=...) -> RT: ... - def decode(self, encoding, errors: str = ...) -> RT: ... - def encode(self, encoding, errors: str = ...) -> RT: ... - def strip(self, to_strip=...) -> RT: ... - def lstrip(self, to_strip=...) -> RT: ... - def rstrip(self, to_strip=...) -> RT: ... - def wrap(self, width, **kwargs) -> RT: ... - def get_dummies(self, sep: str = ...) -> RT: ... - def translate(self, table) -> RT: ... - count = ... - startswith = ... - endswith = ... - findall = ... - def extract(self, pat, flags: int = ..., expand: bool = ...) -> RT: ... - def extractall(self, pat, flags: int = ...) -> RT: ... - def find(self, sub, start: int = ..., end=...) -> RT: ... - def rfind(self, sub, start: int = ..., end=...) -> RT: ... - def normalize(self, form) -> RT: ... - def index(self, sub, start: int = ..., end=...) -> RT: ... - def rindex(self, sub, start: int = ..., end=...) -> RT: ... - len = ... - lower = ... - upper = ... - title = ... 
- capitalize = ... - swapcase = ... - casefold = ... - isalnum = ... - isalpha = ... - isdigit = ... - isspace = ... - islower = ... - isupper = ... - istitle = ... - isnumeric = ... - isdecimal = ... diff --git a/typings/pandas/core/tools/__init__.pyi b/typings/pandas/core/tools/__init__.pyi deleted file mode 100644 index 000671b..0000000 --- a/typings/pandas/core/tools/__init__.pyi +++ /dev/null @@ -1,2 +0,0 @@ -from .datetimes import to_datetime as to_datetime -from .timedeltas import to_timedelta as to_timedelta diff --git a/typings/pandas/core/tools/datetimes.pyi b/typings/pandas/core/tools/datetimes.pyi deleted file mode 100644 index 876bcf4..0000000 --- a/typings/pandas/core/tools/datetimes.pyi +++ /dev/null @@ -1,91 +0,0 @@ -from datetime import datetime as datetime -from numpy import datetime64 as datetime64 -import numpy as np - -from pandas._typing import ( - ArrayLike as ArrayLike, - Index as Index, - AnyArrayLike as AnyArrayLike, - DateTimeErrorChoices as DateTimeErrorChoices, - ExtensionArray as ExtensionArray, - Timestamp as Timestamp, -) -from pandas.core.dtypes.generic import ABCSeries as ABCSeries -from pandas.core.generic import NDFrame as NDFrame -from pandas.core.indexes.datetimes import DatetimeIndex as DatetimeIndex -from pandas.core.frame import DataFrame as DataFrame -from pandas.core.series import Series as Series -from typing import List, Optional, Tuple, TypedDict, Union, overload - -ArrayConvertible = Union[List, Tuple, AnyArrayLike] -Scalar = Union[int, float, str] -DatetimeScalar = Union[Scalar, datetime] - -DatetimeScalarOrArrayConvertible = Union[DatetimeScalar, ArrayConvertible] - -DatetimeDictArg = Union[List[Scalar], Tuple[Scalar, ...], AnyArrayLike] - -class YearMonthDayDict(TypedDict, total=True): - year: DatetimeDictArg - month: DatetimeDictArg - day: DatetimeDictArg - -class FulldatetimeDict(YearMonthDayDict, total=False): - hour: DatetimeDictArg - hours: DatetimeDictArg - minute: DatetimeDictArg - minutes: DatetimeDictArg - second: DatetimeDictArg - seconds: DatetimeDictArg - ms: DatetimeDictArg - us: DatetimeDictArg - ns: DatetimeDictArg - -DictConvertible = Union[FulldatetimeDict, "DataFrame"] - -def should_cache( - arg: ArrayConvertible, unique_share: float = ..., check_count: Optional[int] = ... -) -> bool: ... -@overload -def to_datetime( - arg: DatetimeScalar, - errors: DateTimeErrorChoices = ..., - dayfirst: bool = ..., - yearfirst: bool = ..., - utc: bool | None = ..., - format: str | None = ..., - exact: bool = ..., - unit: str | None = ..., - infer_datetime_format: bool = ..., - origin=..., - cache: bool = ..., -) -> Timestamp: ... -@overload -def to_datetime( - arg: Series | DictConvertible, - errors: DateTimeErrorChoices = ..., - dayfirst: bool = ..., - yearfirst: bool = ..., - utc: bool | None = ..., - format: str | None = ..., - exact: bool = ..., - unit: str | None = ..., - infer_datetime_format: bool = ..., - origin=..., - cache: bool = ..., -) -> Series[Timestamp]: ... -@overload -def to_datetime( - arg: list | tuple | np.ndarray | Index | ExtensionArray, - errors: DateTimeErrorChoices = ..., - dayfirst: bool = ..., - yearfirst: bool = ..., - utc: bool | None = ..., - format: str | None = ..., - exact: bool = ..., - unit: str | None = ..., - infer_datetime_format: bool = ..., - origin=..., - cache: bool = ..., -) -> DatetimeIndex: ... -def to_time(arg, format=..., infer_time_format: bool = ..., errors: str = ...): ... 
diff --git a/typings/pandas/core/tools/numeric.pyi b/typings/pandas/core/tools/numeric.pyi deleted file mode 100644 index e7303c1..0000000 --- a/typings/pandas/core/tools/numeric.pyi +++ /dev/null @@ -1 +0,0 @@ -def to_numeric(arg, errors: str = ..., downcast=...): ... diff --git a/typings/pandas/core/tools/timedeltas.pyi b/typings/pandas/core/tools/timedeltas.pyi deleted file mode 100644 index 4054ada..0000000 --- a/typings/pandas/core/tools/timedeltas.pyi +++ /dev/null @@ -1,29 +0,0 @@ -# def to_timedelta(arg, unit: str = ..., errors: str = ...): ... -from datetime import timedelta -from typing import Literal, Optional, Union, overload -from pandas._libs.tslibs import Timedelta -from pandas._libs.tslibs.timedeltas import UnitChoices -from pandas._typing import DateTimeErrorChoices, ArrayLike, Index as Index -from pandas.core.frame import Series as Series -from pandas.core.indexes.timedeltas import TimedeltaIndex - -# Copied from pandas/_libs/tslibs/timedeltas.pyx - -@overload -def to_timedelta( - arg: Union[str, int, float, timedelta], - unit: Optional[UnitChoices] = ..., - errors: DateTimeErrorChoices = ..., -) -> Timedelta: ... -@overload -def to_timedelta( - arg: Series, - unit: Optional[UnitChoices] = ..., - errors: DateTimeErrorChoices = ..., -) -> Series[Timedelta]: ... -@overload -def to_timedelta( - arg: Union[list, tuple, range, ArrayLike, Index], - unit: Optional[UnitChoices] = ..., - errors: DateTimeErrorChoices = ..., -) -> TimedeltaIndex: ... diff --git a/typings/pandas/core/window/__init__.pyi b/typings/pandas/core/window/__init__.pyi deleted file mode 100644 index 7cd2847..0000000 --- a/typings/pandas/core/window/__init__.pyi +++ /dev/null @@ -1,7 +0,0 @@ -from .ewm import ExponentialMovingWindow as ExponentialMovingWindow -from .expanding import Expanding as Expanding, ExpandingGroupby as ExpandingGroupby -from .rolling import ( - Rolling as Rolling, - RollingGroupby as RollingGroupby, - Window as Window, -) diff --git a/typings/pandas/core/window/ewm.pyi b/typings/pandas/core/window/ewm.pyi deleted file mode 100644 index f21dfe7..0000000 --- a/typings/pandas/core/window/ewm.pyi +++ /dev/null @@ -1,35 +0,0 @@ -from typing import Union, Optional, Callable -from pandas.core.window.rolling import _Rolling - -from pandas._typing import Scalar, Series, DataFrame - -class ExponentialMovingWindow(_Rolling): - obj = ... - com = ... - min_periods: int = ... - adjust = ... - ignore_na = ... - axis = ... - on = ... - def __init__( - self, - obj, - com=..., - span=..., - halflife=..., - alpha=..., - min_periods: int = ..., - adjust: bool = ..., - ignore_na: bool = ..., - axis: int = ..., - ) -> None: ... - def aggregate( - self, func: Optional[Callable] = ..., *args, **kwargs - ) -> Union[Scalar, Series, DataFrame]: ... - agg = aggregate - def mean(self, *args, **kwargs): ... - def std(self, bias: bool = ..., *args, **kwargs): ... - vol = ... - def var(self, bias: bool = ..., *args, **kwargs): ... - def cov(self, other=..., pairwise=..., bias: bool = ..., **kwargs): ... - def corr(self, other=..., pairwise=..., **kwargs): ... 
diff --git a/typings/pandas/core/window/expanding.pyi b/typings/pandas/core/window/expanding.pyi deleted file mode 100644 index c823470..0000000 --- a/typings/pandas/core/window/expanding.pyi +++ /dev/null @@ -1,21 +0,0 @@ -from typing import Callable, Any, Dict, Tuple, Optional -from pandas._typing import FrameOrSeriesUnion as FrameOrSeries -from pandas.core.window.common import WindowGroupByMixin as WindowGroupByMixin -from pandas.core.window.rolling import _Rolling_and_Expanding - -class Expanding(_Rolling_and_Expanding): - def __init__( - self, obj, min_periods: int = ..., center: bool = ..., axis: int = ..., **kwargs - ) -> None: ... - def count(self, **kwargs) -> FrameOrSeries: ... - def apply( - self, - func: Callable[..., Any], - raw: bool = ..., - engine: Optional[str] = ..., - engine_kwargs: Optional[Dict[str, bool]] = ..., - args: Optional[Tuple[Any, ...]] = ..., - kwargs: Optional[Dict[str, Any]] = ..., - ): ... - -class ExpandingGroupby(WindowGroupByMixin, Expanding): ... diff --git a/typings/pandas/core/window/numba_.pyi b/typings/pandas/core/window/numba_.pyi deleted file mode 100644 index c4e8c17..0000000 --- a/typings/pandas/core/window/numba_.pyi +++ /dev/null @@ -1,16 +0,0 @@ -from pandas._typing import Scalar as Scalar -from typing import Any, Callable, Dict, Optional, Tuple - -def make_rolling_apply( - func: Callable[..., Scalar], - args: Tuple, - nogil: bool, - parallel: bool, - nopython: bool, -): ... -def generate_numba_apply_func( - args: Tuple, - kwargs: Dict[str, Any], - func: Callable[..., Scalar], - engine_kwargs: Optional[Dict[str, bool]], -): ... diff --git a/typings/pandas/core/window/rolling.pyi b/typings/pandas/core/window/rolling.pyi deleted file mode 100644 index fdf26ba..0000000 --- a/typings/pandas/core/window/rolling.pyi +++ /dev/null @@ -1,115 +0,0 @@ -import numpy as np -from pandas._typing import ( - Axis as Axis, - FrameOrSeriesUnion as FrameOrSeries, - Scalar as Scalar, -) -from pandas.core.base import ( - PandasObject as PandasObject, - SelectionMixin as SelectionMixin, - ShallowMixin as ShallowMixin, -) -from pandas.core.indexes.api import Index as Index -from pandas.core.window.common import WindowGroupByMixin as WindowGroupByMixin -from typing import Callable, Dict, Mapping, Optional, Sequence, Set, Tuple, Union - -class _Window(PandasObject, ShallowMixin, SelectionMixin): - exclusions: Set[str] = ... - obj = ... - on = ... - closed = ... - window = ... - min_periods: int = ... - center = ... - win_type: str = ... - win_freq = ... - axis = ... - def __init__( - self, - obj, - window=..., - min_periods: Optional[int] = ..., - center: Optional[bool] = ..., - win_type: Optional[str] = ..., - axis: Axis = ..., - on: Optional[Union[str, Index]] = ..., - closed: Optional[str] = ..., - **kwargs, - ) -> None: ... - @property - def is_datetimelike(self) -> Optional[bool]: ... - @property - def is_freq_type(self) -> bool: ... - def validate(self) -> None: ... - def __getattr__(self, attr: str): ... - def __iter__(self): ... - def aggregate( - self, func: Optional[Callable] = ..., *args, **kwargs - ) -> Union[Scalar, FrameOrSeries]: ... - agg = aggregate - -class Window(_Window): - def validate(self) -> None: ... - def sum(self, *args, **kwargs): ... - def mean(self, *args, **kwargs): ... - def var(self, ddof: int = ..., *args, **kwargs): ... - def std(self, ddof: int = ..., *args, **kwargs): ... - -class _Rolling(_Window): ... - -class _Rolling_and_Expanding(_Rolling): - def count(self) -> FrameOrSeries: ... 
- def apply( - self, - func, - raw: bool = ..., - engine: str = ..., - engine_kwargs: Optional[Dict] = ..., - args: Optional[Tuple] = ..., - kwargs: Optional[Dict] = ..., - ): ... - def sum(self, *args, **kwargs) -> FrameOrSeries: ... - def max(self, *args, **kwargs) -> FrameOrSeries: ... - def min(self, *args, **kwargs) -> FrameOrSeries: ... - def mean(self, *args, **kwargs) -> FrameOrSeries: ... - def median(self, **kwargs) -> FrameOrSeries: ... - def std(self, ddof: int = ..., *args, **kwargs) -> FrameOrSeries: ... - def var(self, ddof: int = ..., *args, **kwargs) -> FrameOrSeries: ... - def skew(self, **kwargs) -> FrameOrSeries: ... - def kurt(self, **kwargs) -> FrameOrSeries: ... - def quantile( - self, quantile: float, interpolation: str = ..., **kwargs - ) -> FrameOrSeries: ... - def cov( - self, - other: Optional[Union[FrameOrSeries, np.ndarray]] = ..., - pairwise: Optional[bool] = ..., - ddof: int = ..., - **kwargs, - ) -> FrameOrSeries: ... - def corr( - self, - other: Optional[Union[FrameOrSeries, np.ndarray]] = ..., - pairwise: Optional[bool] = ..., - **kwargs, - ) -> FrameOrSeries: ... - -class Rolling(_Rolling_and_Expanding): - def is_datetimelike(self) -> bool: ... - win_freq = ... - window = ... - win_type: str = ... - min_periods: int = ... - def validate(self) -> None: ... - def count(self) -> FrameOrSeries: ... - def apply( - self, - func, - raw: bool = ..., - engine: str = ..., - engine_kwargs=..., - args=..., - kwargs=..., - ): ... - -class RollingGroupby(WindowGroupByMixin, Rolling): ... diff --git a/typings/pandas/io/__init__.pyi b/typings/pandas/io/__init__.pyi deleted file mode 100644 index e69de29..0000000 diff --git a/typings/pandas/io/api.pyi b/typings/pandas/io/api.pyi deleted file mode 100644 index 06cd4f3..0000000 --- a/typings/pandas/io/api.pyi +++ /dev/null @@ -1,27 +0,0 @@ -from pandas.io.clipboards import read_clipboard as read_clipboard -from pandas.io.excel import ( - ExcelFile as ExcelFile, - ExcelWriter as ExcelWriter, - read_excel as read_excel, -) -from pandas.io.feather_format import read_feather as read_feather -from pandas.io.gbq import read_gbq as read_gbq -from pandas.io.html import read_html as read_html -from pandas.io.json import read_json as read_json -from pandas.io.orc import read_orc as read_orc -from pandas.io.parquet import read_parquet as read_parquet -from pandas.io.parsers import ( - read_csv as read_csv, - read_fwf as read_fwf, - read_table as read_table, -) -from pandas.io.pickle import read_pickle as read_pickle, to_pickle as to_pickle -from pandas.io.pytables import HDFStore as HDFStore, read_hdf as read_hdf -from pandas.io.sas import read_sas as read_sas -from pandas.io.spss import read_spss as read_spss -from pandas.io.sql import ( - read_sql as read_sql, - read_sql_query as read_sql_query, - read_sql_table as read_sql_table, -) -from pandas.io.stata import read_stata as read_stata diff --git a/typings/pandas/io/excel/_base.pyi b/typings/pandas/io/excel/_base.pyi deleted file mode 100644 index 6d2dce5..0000000 --- a/typings/pandas/io/excel/_base.pyi +++ /dev/null @@ -1,240 +0,0 @@ -import abc -from pandas._typing import Dtype, Scalar -from pandas.core.frame import DataFrame as DataFrame -from typing import ( - Any, - Callable, - Dict, - List, - Literal, - Optional, - Sequence, - Union, - overload, -) - -@overload -def read_excel( - filepath: str, - sheet_name: Optional[List[str]], - header: Optional[Union[int, Sequence[int]]] = ..., - names: Optional[Sequence[str]] = ..., - index_col: Optional[Union[int, Sequence[int]]] = ..., 
- usecols: Optional[Union[int, str, Sequence[Union[int, str, Callable]]]] = ..., - squeeze: bool = ..., - dtype: Union[str, Dict[str, Any], Dtype] = ..., - engine: Optional[str] = ..., - converters: Optional[Dict[Union[int, str], Callable]] = ..., - true_values: Optional[Sequence[Scalar]] = ..., - false_values: Optional[Sequence[Scalar]] = ..., - skiprows: Optional[Union[Sequence[int], int, Callable]] = ..., - nrows: Optional[int] = ..., - na_values=..., - keep_default_na: bool = ..., - verbose: bool = ..., - parse_dates: Union[bool, Sequence, Dict[str, Sequence]] = ..., - date_parser: Optional[Callable] = ..., - thousands: Optional[str] = ..., - comment: Optional[str] = ..., - skipfooter: int = ..., - convert_float: bool = ..., - mangle_dupe_cols: bool = ..., -) -> Dict[str, DataFrame]: ... -@overload -def read_excel( - filepath: str, - sheet_name: List[int], - header: Optional[Union[int, Sequence[int]]] = ..., - names: Optional[Sequence[str]] = ..., - index_col: Optional[Union[int, Sequence[int]]] = ..., - usecols: Optional[Union[int, str, Sequence[Union[int, str, Callable]]]] = ..., - squeeze: bool = ..., - dtype: Union[str, Dict[str, Any], Dtype] = ..., - engine: Optional[str] = ..., - converters: Optional[Dict[Union[int, str], Callable]] = ..., - true_values: Optional[Sequence[Scalar]] = ..., - false_values: Optional[Sequence[Scalar]] = ..., - skiprows: Optional[Union[Sequence[int], int, Callable]] = ..., - nrows: Optional[int] = ..., - na_values=..., - keep_default_na: bool = ..., - verbose: bool = ..., - parse_dates: Union[bool, Sequence, Dict[str, Sequence]] = ..., - date_parser: Optional[Callable] = ..., - thousands: Optional[str] = ..., - comment: Optional[str] = ..., - skipfooter: int = ..., - convert_float: bool = ..., - mangle_dupe_cols: bool = ..., -) -> Dict[int, DataFrame]: ... -@overload -def read_excel( - filepath: str, - sheet_name: List[Union[int, str]], - header: Optional[Union[int, Sequence[int]]] = ..., - names: Optional[Sequence[str]] = ..., - index_col: Optional[Union[int, Sequence[int]]] = ..., - usecols: Optional[Union[int, str, Sequence[Union[int, str, Callable]]]] = ..., - squeeze: bool = ..., - dtype: Union[str, Dict[str, Any], Dtype] = ..., - engine: Optional[str] = ..., - converters: Optional[Dict[Union[int, str], Callable]] = ..., - true_values: Optional[Sequence[Scalar]] = ..., - false_values: Optional[Sequence[Scalar]] = ..., - skiprows: Optional[Union[Sequence[int], int, Callable]] = ..., - nrows: Optional[int] = ..., - na_values=..., - keep_default_na: bool = ..., - verbose: bool = ..., - parse_dates: Union[bool, Sequence, Dict[str, Sequence]] = ..., - date_parser: Optional[Callable] = ..., - thousands: Optional[str] = ..., - comment: Optional[str] = ..., - skipfooter: int = ..., - convert_float: bool = ..., - mangle_dupe_cols: bool = ..., -) -> Dict[Union[int, str], DataFrame]: ... 
-@overload -def read_excel( - filepath: str, - sheet_name: Union[int, str] = ..., - header: Optional[Union[int, Sequence[int]]] = ..., - names: Optional[Sequence[str]] = ..., - index_col: Optional[Union[int, Sequence[int]]] = ..., - usecols: Optional[Union[int, str, Sequence[Union[int, str, Callable]]]] = ..., - squeeze: bool = ..., - dtype: Union[str, Dict[str, Any], Dtype] = ..., - engine: Optional[str] = ..., - converters: Optional[Dict[Union[int, str], Callable]] = ..., - true_values: Optional[Sequence[Scalar]] = ..., - false_values: Optional[Sequence[Scalar]] = ..., - skiprows: Optional[Union[Sequence[int], int, Callable]] = ..., - nrows: Optional[int] = ..., - na_values=..., - keep_default_na: bool = ..., - verbose: bool = ..., - parse_dates: Union[bool, Sequence, Dict[str, Sequence]] = ..., - date_parser: Optional[Callable] = ..., - thousands: Optional[str] = ..., - comment: Optional[str] = ..., - skipfooter: int = ..., - convert_float: bool = ..., - mangle_dupe_cols: bool = ..., - **kwargs -) -> DataFrame: ... - -class BaseExcelReader(metaclass=abc.ABCMeta): - book = ... - def __init__(self, filepath_or_buffer) -> None: ... - @abc.abstractmethod - def load_workbook(self, filepath_or_buffer): ... - def close(self) -> None: ... - @property - @abc.abstractmethod - def sheet_names(self): ... - @abc.abstractmethod - def get_sheet_by_name(self, name): ... - @abc.abstractmethod - def get_sheet_by_index(self, index): ... - @abc.abstractmethod - def get_sheet_data(self, sheet, convert_float): ... - def parse( - self, - sheet_name: int = ..., - header: int = ..., - names=..., - index_col=..., - usecols=..., - squeeze: bool = ..., - dtype=..., - true_values=..., - false_values=..., - skiprows=..., - nrows=..., - na_values=..., - verbose: bool = ..., - parse_dates: bool = ..., - date_parser=..., - thousands=..., - comment=..., - skipfooter: int = ..., - convert_float: bool = ..., - mangle_dupe_cols: bool = ..., - **kwds - ): ... - -class ExcelWriter(metaclass=abc.ABCMeta): - def __new__(cls, path, engine=..., **kwargs): ... - book = ... - curr_sheet = ... - path = ... - @property - def supported_extensions(self): ... - @property - def engine(self): ... - def write_cells( - self, - cells, - sheet_name=..., - startrow: int = ..., - startcol: int = ..., - freeze_panes=..., - ): ... - def save(self): ... - sheets = ... - cur_sheet = ... - date_format: str = ... - datetime_format: str = ... - mode = ... - def __init__( - self, - path, - engine=..., - date_format=..., - datetime_format=..., - mode: str = ..., - **engine_kwargs - ) -> None: ... - def __fspath__(self): ... - @classmethod - def check_extension(cls, ext): ... - def __enter__(self): ... - def __exit__(self, exc_type, exc_value, traceback) -> None: ... - def close(self): ... - -class ExcelFile: - engine = ... - io = ... - def __init__(self, io, engine=...) -> None: ... - def __fspath__(self): ... - def parse( - self, - sheet_name: int = ..., - header: int = ..., - names=..., - index_col=..., - usecols=..., - squeeze: bool = ..., - converters=..., - true_values=..., - false_values=..., - skiprows=..., - nrows=..., - na_values=..., - parse_dates: bool = ..., - date_parser=..., - thousands=..., - comment=..., - skipfooter: int = ..., - convert_float: bool = ..., - mangle_dupe_cols: bool = ..., - **kwds - ): ... - @property - def book(self): ... - @property - def sheet_names(self): ... - def close(self) -> None: ... - def __enter__(self): ... - def __exit__(self, exc_type, exc_value, traceback) -> None: ... - def __del__(self) -> None: ... 
diff --git a/typings/pandas/io/gcs.pyi b/typings/pandas/io/gcs.pyi deleted file mode 100644 index 5a512e9..0000000 --- a/typings/pandas/io/gcs.pyi +++ /dev/null @@ -1,7 +0,0 @@ -from pandas._typing import FilePathOrBuffer as FilePathOrBuffer - -gcsfs = ... - -def get_filepath_or_buffer( - filepath_or_buffer: FilePathOrBuffer, encoding=..., compression=..., mode=... -): ... diff --git a/typings/pandas/io/json/__init__.pyi b/typings/pandas/io/json/__init__.pyi deleted file mode 100644 index 7e17273..0000000 --- a/typings/pandas/io/json/__init__.pyi +++ /dev/null @@ -1,8 +0,0 @@ -from ._json import ( - dumps as dumps, - loads as loads, - read_json as read_json, - to_json as to_json, -) -from ._normalize import json_normalize as json_normalize -from ._table_schema import build_table_schema as build_table_schema diff --git a/typings/pandas/io/json/_json.pyi b/typings/pandas/io/json/_json.pyi deleted file mode 100644 index ffd4871..0000000 --- a/typings/pandas/io/json/_json.pyi +++ /dev/null @@ -1,206 +0,0 @@ -from collections import abc -import sys -from pandas.core.series import Series as Series -from pandas.core.frame import DataFrame -from pandas._typing import JSONSerializable as JSONSerializable, FilePathOrBuffer -from typing import Any, Callable, Optional, Union, overload - -if sys.version_info >= (3, 8): - from typing import Literal -else: - from typing_extensions import Literal - -loads = ... -dumps = ... -TABLE_SCHEMA_VERSION: str = ... - -def to_json( - path_or_buf, - obj, - orient: Optional[str] = ..., - date_format: str = ..., - double_precision: int = ..., - force_ascii: bool = ..., - date_unit: str = ..., - default_handler: Optional[Callable[[Any], JSONSerializable]] = ..., - lines: bool = ..., - compression: Optional[str] = ..., - index: bool = ..., - indent: int = ..., -): ... - -class Writer: - obj = ... - orient = ... - date_format = ... - double_precision = ... - ensure_ascii = ... - date_unit = ... - default_handler = ... - index = ... - indent = ... - is_copy = ... - def __init__( - self, - obj, - orient: Optional[str], - date_format: str, - double_precision: int, - ensure_ascii: bool, - date_unit: str, - index: bool, - default_handler: Optional[Callable[[Any], JSONSerializable]] = ..., - indent: int = ..., - ) -> None: ... - def write(self): ... - -class SeriesWriter(Writer): ... -class FrameWriter(Writer): ... - -class JSONTableWriter(FrameWriter): - schema = ... - obj = ... - date_format = ... - orient = ... - index = ... - def __init__( - self, - obj, - orient: Optional[str], - date_format: str, - double_precision: int, - ensure_ascii: bool, - date_unit: str, - index: bool, - default_handler: Optional[Callable[[Any], JSONSerializable]] = ..., - indent: int = ..., - ): ... - -@overload -def read_json( - path: FilePathOrBuffer, - orient: Optional[str] = ..., - dtype=..., - convert_axes=..., - convert_dates: bool = ..., - keep_default_dates: bool = ..., - numpy: bool = ..., - precise_float: bool = ..., - date_unit: Optional[str] = ..., - encoding: Optional[str] = ..., - lines: bool = ..., - chunksize: Optional[int] = ..., - compression: Optional[ - Union[str, Literal["infer", "gzip", "bz2", "zip", "xz"]] - ] = ..., - *, - typ: Literal["series"], -) -> Series: ... 
-@overload -def read_json( - path: FilePathOrBuffer, - orient: Optional[str] = ..., - dtype=..., - convert_axes=..., - convert_dates: bool = ..., - keep_default_dates: bool = ..., - numpy: bool = ..., - precise_float: bool = ..., - date_unit: Optional[str] = ..., - encoding: Optional[str] = ..., - lines: bool = ..., - chunksize: Optional[int] = ..., - compression: Optional[ - Union[str, Literal["infer", "gzip", "bz2", "zip", "xz"]] - ] = ..., - *, - typ: Literal["frame"], -) -> DataFrame: ... -@overload -def read_json( - path: FilePathOrBuffer, - orient: Optional[str] = ..., - typ: Optional[str] = ..., - dtype=..., - convert_axes=..., - convert_dates: bool = ..., - keep_default_dates: bool = ..., - numpy: bool = ..., - precise_float: bool = ..., - date_unit: Optional[str] = ..., - encoding: Optional[str] = ..., - lines: bool = ..., - chunksize: Optional[int] = ..., - compression: Optional[ - Union[str, Literal["infer", "gzip", "bz2", "zip", "xz"]] - ] = ..., -) -> Union[Series, DataFrame]: ... - -class JsonReader(abc.Iterator): - path_or_buf = ... - orient = ... - typ = ... - dtype = ... - convert_axes = ... - convert_dates = ... - keep_default_dates = ... - numpy = ... - precise_float = ... - date_unit = ... - encoding = ... - compression = ... - lines = ... - chunksize = ... - nrows_seen: int = ... - should_close: bool = ... - data = ... - def __init__( - self, - filepath_or_buffer, - orient, - typ, - dtype, - convert_axes, - convert_dates, - keep_default_dates, - numpy, - precise_float, - date_unit, - encoding, - lines, - chunksize, - compression, - ) -> None: ... - def read(self): ... - def close(self) -> None: ... - def __next__(self): ... - -class Parser: - json = ... - orient = ... - dtype = ... - min_stamp = ... - numpy = ... - precise_float = ... - convert_axes = ... - convert_dates = ... - date_unit = ... - keep_default_dates = ... - obj = ... - def __init__( - self, - json, - orient, - dtype=..., - convert_axes: bool = ..., - convert_dates: bool = ..., - keep_default_dates: bool = ..., - numpy: bool = ..., - precise_float: bool = ..., - date_unit=..., - ) -> None: ... - def check_keys_split(self, decoded) -> None: ... - def parse(self): ... - -class SeriesParser(Parser): ... -class FrameParser(Parser): ... diff --git a/typings/pandas/io/json/_normalize.pyi b/typings/pandas/io/json/_normalize.pyi deleted file mode 100644 index 0989773..0000000 --- a/typings/pandas/io/json/_normalize.pyi +++ /dev/null @@ -1,22 +0,0 @@ -from pandas.core.frame import DataFrame as DataFrame -from pandas._typing import Scalar as Scalar -from typing import Any, Dict, List, Optional, Union - -def convert_to_line_delimits(s: Any): ... -def nested_to_record( - ds: Any, - prefix: str = ..., - sep: str = ..., - level: int = ..., - max_level: Optional[int] = ..., -) -> Any: ... -def json_normalize( - data: Union[Dict, List[Dict]], - record_path: Optional[Union[str, List]] = None, - meta: Optional[Union[str, List[Union[str, List[str]]]]] = None, - meta_prefix: Optional[str] = None, - record_prefix: Optional[str] = None, - errors: str = "raise", - sep: str = ".", - max_level: Optional[int] = None, -) -> DataFrame: ... diff --git a/typings/pandas/io/json/_table_schema.pyi b/typings/pandas/io/json/_table_schema.pyi deleted file mode 100644 index 753ed39..0000000 --- a/typings/pandas/io/json/_table_schema.pyi +++ /dev/null @@ -1,12 +0,0 @@ -from typing import Any, Optional - -loads: Any - -def as_json_table_type(x: Any): ... -def set_default_names(data: Any): ... 
-def convert_pandas_type_to_json_field(arr: Any, dtype: Optional[Any] = ...): ... -def convert_json_field_to_pandas_type(field: Any): ... -def build_table_schema( - data: Any, index: bool = ..., primary_key: Optional[Any] = ..., version: bool = ... -): ... -def parse_table_schema(json: Any, precise_float: Any): ... diff --git a/typings/pandas/io/parsers.pyi b/typings/pandas/io/parsers.pyi deleted file mode 100644 index 7de39aa..0000000 --- a/typings/pandas/io/parsers.pyi +++ /dev/null @@ -1,616 +0,0 @@ -from __future__ import annotations -from collections import abc -import sys -from pandas._typing import ( - FilePath as FilePath, - FilePathOrBuffer as FilePathOrBuffer, - Scalar as Scalar, - ReadBuffer as ReadBuffer, - AnyStr_cov as AnyStr_cov, - DtypeArg as DtypeArg, - CompressionOptions as CompressionOptions, - StorageOptions as StorageOptions, -) -from pandas.core.frame import DataFrame as DataFrame -from typing import ( - Any, - Callable, - Dict, - List, - Mapping, - Optional, - Sequence, - Union, - overload, - Protocol, -) - -if sys.version_info >= (3, 8): - from typing import Literal -else: - from typing_extensions import Literal - -class ReadCsvBuffer(ReadBuffer[AnyStr_cov], Protocol): ... - -# read_csv engines -CSVEngine = Literal["c", "python", "pyarrow", "python-fwf"] - -# iterator=True -> TextFileReader -@overload -def read_csv( - filepath_or_buffer: Union[FilePath, ReadCsvBuffer[bytes], ReadCsvBuffer[str]], - *, - sep: Optional[str] = ..., - delimiter: Optional[str] = ..., - header: Optional[Union[int, Sequence[int], Literal["infer"]]] = ..., - names=..., - index_col=..., - usecols=..., - squeeze: Optional[bool] = ..., - prefix: Optional[str] = ..., - mangle_dupe_cols: bool = ..., - dtype: Optional[DtypeArg] = ..., - engine: Optional[CSVEngine] = ..., - converters=..., - true_values=..., - false_values=..., - skipinitialspace: bool = ..., - skiprows=..., - skipfooter: int = ..., - nrows: Optional[int] = ..., - na_values=..., - keep_default_na: bool = ..., - na_filter: bool = ..., - verbose: bool = ..., - skip_blank_lines: bool = ..., - parse_dates=..., - infer_datetime_format: bool = ..., - keep_date_col: bool = ..., - date_parser=..., - dayfirst: bool = ..., - cache_dates: bool = ..., - iterator: Literal[True], - chunksize: Optional[int] = ..., - compression: CompressionOptions = ..., - thousands: Optional[str] = ..., - decimal: str = ..., - lineterminator: Optional[str] = ..., - quotechar: str = ..., - quoting: int = ..., - doublequote: bool = ..., - escapechar: Optional[str] = ..., - comment: Optional[str] = ..., - encoding: Optional[str] = ..., - encoding_errors: Optional[str] = ..., - dialect=..., - error_bad_lines: Optional[bool] = ..., - warn_bad_lines: Optional[bool] = ..., - on_bad_lines=..., - delim_whitespace: bool = ..., - low_memory=..., - memory_map: bool = ..., - float_precision: Optional[Literal["high", "legacy"]] = ..., - storage_options: Optional[StorageOptions] = ..., -) -> TextFileReader: ... 
- -# chunksize=int -> TextFileReader -@overload -def read_csv( - filepath_or_buffer: Union[FilePath, ReadCsvBuffer[bytes], ReadCsvBuffer[str]], - *, - sep: Optional[str] = ..., - delimiter: Optional[str] = ..., - header: Optional[Union[int, Sequence[int], Literal["infer"]]] = ..., - names=..., - index_col=..., - usecols=..., - squeeze: Optional[bool] = ..., - prefix: Optional[str] = ..., - mangle_dupe_cols: bool = ..., - dtype: Optional[DtypeArg] = ..., - engine: Optional[CSVEngine] = ..., - converters=..., - true_values=..., - false_values=..., - skipinitialspace: bool = ..., - skiprows=..., - skipfooter: int = ..., - nrows: Optional[int] = ..., - na_values=..., - keep_default_na: bool = ..., - na_filter: bool = ..., - verbose: bool = ..., - skip_blank_lines: bool = ..., - parse_dates=..., - infer_datetime_format: bool = ..., - keep_date_col: bool = ..., - date_parser=..., - dayfirst: bool = ..., - cache_dates: bool = ..., - iterator: bool = ..., - chunksize: int, - compression: CompressionOptions = ..., - thousands: Optional[str] = ..., - decimal: str = ..., - lineterminator: Optional[str] = ..., - quotechar: str = ..., - quoting: int = ..., - doublequote: bool = ..., - escapechar: Optional[str] = ..., - comment: Optional[str] = ..., - encoding: Optional[str] = ..., - encoding_errors: Optional[str] = ..., - dialect=..., - error_bad_lines: Optional[bool] = ..., - warn_bad_lines: Optional[bool] = ..., - on_bad_lines=..., - delim_whitespace: bool = ..., - low_memory=..., - memory_map: bool = ..., - float_precision: Optional[Literal["high", "legacy"]] = ..., - storage_options: Optional[StorageOptions] = ..., -) -> TextFileReader: ... - -# default case -> DataFrame -@overload -def read_csv( - filepath_or_buffer: Union[FilePath, ReadCsvBuffer[bytes], ReadCsvBuffer[str]], - *, - sep: Optional[str] = ..., - delimiter: Optional[str] = ..., - header: Optional[Union[int, Sequence[int], Literal["infer"]]] = ..., - names=..., - index_col=..., - usecols=..., - squeeze: Optional[bool] = ..., - prefix: Optional[str] = ..., - mangle_dupe_cols: bool = ..., - dtype: Optional[DtypeArg] = ..., - engine: Optional[CSVEngine] = ..., - converters=..., - true_values=..., - false_values=..., - skipinitialspace: bool = ..., - skiprows=..., - skipfooter: int = ..., - nrows: Optional[int] = ..., - na_values=..., - keep_default_na: bool = ..., - na_filter: bool = ..., - verbose: bool = ..., - skip_blank_lines: bool = ..., - parse_dates=..., - infer_datetime_format: bool = ..., - keep_date_col: bool = ..., - date_parser=..., - dayfirst: bool = ..., - cache_dates: bool = ..., - iterator: Literal[False] = ..., - chunksize: None = ..., - compression: CompressionOptions = ..., - thousands: Optional[str] = ..., - decimal: str = ..., - lineterminator: Optional[str] = ..., - quotechar: str = ..., - quoting: int = ..., - doublequote: bool = ..., - escapechar: Optional[str] = ..., - comment: Optional[str] = ..., - encoding: Optional[str] = ..., - encoding_errors: Optional[str] = ..., - dialect=..., - error_bad_lines: Optional[bool] = ..., - warn_bad_lines: Optional[bool] = ..., - on_bad_lines=..., - delim_whitespace: bool = ..., - low_memory=..., - memory_map: bool = ..., - float_precision: Optional[Literal["high", "legacy"]] = ..., - storage_options: Optional[StorageOptions] = ..., -) -> DataFrame: ... 
- -# Unions -> DataFrame | TextFileReader -@overload -def read_csv( - filepath_or_buffer: Union[FilePath, ReadCsvBuffer[bytes], ReadCsvBuffer[str]], - *, - sep: Optional[str] = ..., - delimiter: Optional[str] = ..., - header: Optional[Union[int, Sequence[int], Literal["infer"]]] = ..., - names=..., - index_col=..., - usecols=..., - squeeze: Optional[bool] = ..., - prefix: Optional[str] = ..., - mangle_dupe_cols: bool = ..., - dtype: Optional[DtypeArg] = ..., - engine: Optional[CSVEngine] = ..., - converters=..., - true_values=..., - false_values=..., - skipinitialspace: bool = ..., - skiprows=..., - skipfooter: int = ..., - nrows: Optional[int] = ..., - na_values=..., - keep_default_na: bool = ..., - na_filter: bool = ..., - verbose: bool = ..., - skip_blank_lines: bool = ..., - parse_dates=..., - infer_datetime_format: bool = ..., - keep_date_col: bool = ..., - date_parser=..., - dayfirst: bool = ..., - cache_dates: bool = ..., - iterator: bool = ..., - chunksize: Optional[int] = ..., - compression: CompressionOptions = ..., - thousands: Optional[str] = ..., - decimal: str = ..., - lineterminator: Optional[str] = ..., - quotechar: str = ..., - quoting: int = ..., - doublequote: bool = ..., - escapechar: Optional[str] = ..., - comment: Optional[str] = ..., - encoding: Optional[str] = ..., - encoding_errors: Optional[str] = ..., - dialect=..., - error_bad_lines: Optional[bool] = ..., - warn_bad_lines: Optional[bool] = ..., - on_bad_lines=..., - delim_whitespace: bool = ..., - low_memory=..., - memory_map: bool = ..., - float_precision: Optional[Literal["high", "legacy"]] = ..., - storage_options: Optional[StorageOptions] = ..., -) -> DataFrame | TextFileReader: ... - -# iterator=True -> TextFileReader -@overload -def read_table( - filepath_or_buffer: Union[FilePath, ReadCsvBuffer[bytes], ReadCsvBuffer[str]], - *, - sep: Optional[str] = ..., - delimiter: Optional[str] = ..., - header: Optional[Union[int, Sequence[int], Literal["infer"]]] = ..., - names=..., - index_col=..., - usecols=..., - squeeze: Optional[bool] = ..., - prefix: Optional[str] = ..., - mangle_dupe_cols: bool = ..., - dtype: Optional[DtypeArg] = ..., - engine: Optional[CSVEngine] = ..., - converters=..., - true_values=..., - false_values=..., - skipinitialspace: bool = ..., - skiprows=..., - skipfooter: int = ..., - nrows: Optional[int] = ..., - na_values=..., - keep_default_na: bool = ..., - na_filter: bool = ..., - verbose: bool = ..., - skip_blank_lines: bool = ..., - parse_dates=..., - infer_datetime_format: bool = ..., - keep_date_col: bool = ..., - date_parser=..., - dayfirst: bool = ..., - cache_dates: bool = ..., - iterator: Literal[True], - chunksize: Optional[int] = ..., - compression: CompressionOptions = ..., - thousands: Optional[str] = ..., - decimal: str = ..., - lineterminator: Optional[str] = ..., - quotechar: str = ..., - quoting: int = ..., - doublequote: bool = ..., - escapechar: Optional[str] = ..., - comment: Optional[str] = ..., - encoding: Optional[str] = ..., - encoding_errors: Optional[str] = ..., - dialect=..., - error_bad_lines: Optional[bool] = ..., - warn_bad_lines: Optional[bool] = ..., - on_bad_lines=..., - delim_whitespace: bool = ..., - low_memory=..., - memory_map: bool = ..., - float_precision: Optional[Literal["high", "legacy"]] = ..., - storage_options: Optional[StorageOptions] = ..., -) -> TextFileReader: ... 
- -# chunksize=int -> TextFileReader -@overload -def read_table( - filepath_or_buffer: Union[FilePath, ReadCsvBuffer[bytes], ReadCsvBuffer[str]], - *, - sep: Optional[str] = ..., - delimiter: Optional[str] = ..., - header: Optional[Union[int, Sequence[int], Literal["infer"]]] = ..., - names=..., - index_col=..., - usecols=..., - squeeze: Optional[bool] = ..., - prefix: Optional[str] = ..., - mangle_dupe_cols: bool = ..., - dtype: Optional[DtypeArg] = ..., - engine: Optional[CSVEngine] = ..., - converters=..., - true_values=..., - false_values=..., - skipinitialspace: bool = ..., - skiprows=..., - skipfooter: int = ..., - nrows: Optional[int] = ..., - na_values=..., - keep_default_na: bool = ..., - na_filter: bool = ..., - verbose: bool = ..., - skip_blank_lines: bool = ..., - parse_dates=..., - infer_datetime_format: bool = ..., - keep_date_col: bool = ..., - date_parser=..., - dayfirst: bool = ..., - cache_dates: bool = ..., - iterator: bool = ..., - chunksize: int, - compression: CompressionOptions = ..., - thousands: Optional[str] = ..., - decimal: str = ..., - lineterminator: Optional[str] = ..., - quotechar: str = ..., - quoting: int = ..., - doublequote: bool = ..., - escapechar: Optional[str] = ..., - comment: Optional[str] = ..., - encoding: Optional[str] = ..., - encoding_errors: Optional[str] = ..., - dialect=..., - error_bad_lines: Optional[bool] = ..., - warn_bad_lines: Optional[bool] = ..., - on_bad_lines=..., - delim_whitespace: bool = ..., - low_memory=..., - memory_map: bool = ..., - float_precision: Optional[Literal["high", "legacy"]] = ..., - storage_options: Optional[StorageOptions] = ..., -) -> TextFileReader: ... - -# default case -> DataFrame -@overload -def read_table( - filepath_or_buffer: Union[FilePath, ReadCsvBuffer[bytes], ReadCsvBuffer[str]], - *, - sep: Optional[str] = ..., - delimiter: Optional[str] = ..., - header: Optional[Union[int, Sequence[int], Literal["infer"]]] = ..., - names=..., - index_col=..., - usecols=..., - squeeze: Optional[bool] = ..., - prefix: Optional[str] = ..., - mangle_dupe_cols: bool = ..., - dtype: Optional[DtypeArg] = ..., - engine: Optional[CSVEngine] = ..., - converters=..., - true_values=..., - false_values=..., - skipinitialspace: bool = ..., - skiprows=..., - skipfooter: int = ..., - nrows: Optional[int] = ..., - na_values=..., - keep_default_na: bool = ..., - na_filter: bool = ..., - verbose: bool = ..., - skip_blank_lines: bool = ..., - parse_dates=..., - infer_datetime_format: bool = ..., - keep_date_col: bool = ..., - date_parser=..., - dayfirst: bool = ..., - cache_dates: bool = ..., - iterator: Literal[False] = ..., - chunksize: None = ..., - compression: CompressionOptions = ..., - thousands: Optional[str] = ..., - decimal: str = ..., - lineterminator: Optional[str] = ..., - quotechar: str = ..., - quoting: int = ..., - doublequote: bool = ..., - escapechar: Optional[str] = ..., - comment: Optional[str] = ..., - encoding: Optional[str] = ..., - encoding_errors: Optional[str] = ..., - dialect=..., - error_bad_lines: Optional[bool] = ..., - warn_bad_lines: Optional[bool] = ..., - on_bad_lines=..., - delim_whitespace: bool = ..., - low_memory=..., - memory_map: bool = ..., - float_precision: Optional[Literal["high", "legacy"]] = ..., - storage_options: Optional[StorageOptions] = ..., -) -> DataFrame: ... 
- -# Unions -> DataFrame | TextFileReader -@overload -def read_table( - filepath_or_buffer: Union[FilePath, ReadCsvBuffer[bytes], ReadCsvBuffer[str]], - *, - sep: Optional[str] = ..., - delimiter: Optional[str] = ..., - header: Optional[Union[int, Sequence[int], Literal["infer"]]] = ..., - names=..., - index_col=..., - usecols=..., - squeeze: Optional[bool] = ..., - prefix: Optional[str] = ..., - mangle_dupe_cols: bool = ..., - dtype: Optional[DtypeArg] = ..., - engine: Optional[CSVEngine] = ..., - converters=..., - true_values=..., - false_values=..., - skipinitialspace: bool = ..., - skiprows=..., - skipfooter: int = ..., - nrows: Optional[int] = ..., - na_values=..., - keep_default_na: bool = ..., - na_filter: bool = ..., - verbose: bool = ..., - skip_blank_lines: bool = ..., - parse_dates=..., - infer_datetime_format: bool = ..., - keep_date_col: bool = ..., - date_parser=..., - dayfirst: bool = ..., - cache_dates: bool = ..., - iterator: bool = ..., - chunksize: Optional[int] = ..., - compression: CompressionOptions = ..., - thousands: Optional[str] = ..., - decimal: str = ..., - lineterminator: Optional[str] = ..., - quotechar: str = ..., - quoting: int = ..., - doublequote: bool = ..., - escapechar: Optional[str] = ..., - comment: Optional[str] = ..., - encoding: Optional[str] = ..., - encoding_errors: Optional[str] = ..., - dialect=..., - error_bad_lines: Optional[bool] = ..., - warn_bad_lines: Optional[bool] = ..., - on_bad_lines=..., - delim_whitespace: bool = ..., - low_memory=..., - memory_map: bool = ..., - float_precision: Optional[Literal["high", "legacy"]] = ..., - storage_options: Optional[StorageOptions] = ..., -) -> DataFrame | TextFileReader: ... -def read_fwf( - filepath_or_buffer: FilePathOrBuffer, - colspecs=..., - widths=..., - infer_nrows=..., - **kwds, -): ... - -class TextFileReader(abc.Iterator): - f = ... - orig_options = ... - engine = ... - chunksize = ... - nrows = ... - squeeze = ... - def __init__(self, f, engine=..., **kwds) -> None: ... - def close(self) -> None: ... - def __next__(self): ... - def read(self, nrows=...): ... - def get_chunk(self, size=...): ... - -class ParserBase: - names = ... - orig_names = ... - prefix = ... - index_col = ... - unnamed_cols = ... - index_names = ... - col_names = ... - parse_dates = ... - date_parser = ... - dayfirst = ... - keep_date_col = ... - na_values = ... - na_fvalues = ... - na_filter = ... - keep_default_na = ... - true_values = ... - false_values = ... - mangle_dupe_cols = ... - infer_datetime_format = ... - cache_dates = ... - header = ... - handles = ... - def __init__(self, kwds) -> None: ... - def close(self) -> None: ... - -class CParserWrapper(ParserBase): - kwds = ... - unnamed_cols = ... - names = ... - orig_names = ... - index_names = ... - def __init__(self, src, **kwds) -> None: ... - def close(self) -> None: ... - def set_error_bad_lines(self, status) -> None: ... - def read(self, nrows=...): ... - -def TextParser(*args, **kwds): ... -def count_empty_vals(vals): ... - -class PythonParser(ParserBase): - data = ... - buf = ... - pos: int = ... - line_pos: int = ... - encoding = ... - compression = ... - memory_map = ... - skiprows = ... - skipfunc = ... - skipfooter = ... - delimiter = ... - quotechar = ... - escapechar = ... - doublequote = ... - skipinitialspace = ... - lineterminator = ... - quoting = ... - skip_blank_lines = ... - warn_bad_lines = ... - error_bad_lines = ... - names_passed = ... - has_index_names: bool = ... - verbose = ... - converters = ... - dtype = ... - thousands = ... 
- decimal = ... - comment = ... - num_original_columns = ... - columns = ... - orig_names = ... - index_names = ... - nonnum = ... - def __init__(self, f, **kwds): ... - def read(self, rows=...): ... - def get_chunk(self, size=...): ... - -class FixedWidthReader(abc.Iterator): - f = ... - buffer = ... - delimiter = ... - comment = ... - colspecs = ... - def __init__( - self, f, colspecs, delimiter, comment, skiprows=..., infer_nrows: int = ... - ) -> None: ... - def get_rows(self, infer_nrows, skiprows=...): ... - def detect_colspecs(self, infer_nrows: int = ..., skiprows=...): ... - def __next__(self): ... - -class FixedWidthFieldParser(PythonParser): - colspecs = ... - infer_nrows = ... - def __init__(self, f, **kwds) -> None: ... diff --git a/typings/pandas/io/s3.pyi b/typings/pandas/io/s3.pyi deleted file mode 100644 index dace921..0000000 --- a/typings/pandas/io/s3.pyi +++ /dev/null @@ -1,14 +0,0 @@ -from pandas._typing import FilePathOrBuffer as FilePathOrBuffer -from typing import Any, IO, Optional, Tuple - -s3fs = ... - -def get_file_and_filesystem( - filepath_or_buffer: FilePathOrBuffer, mode: Optional[str] = ... -) -> Tuple[IO, Any]: ... -def get_filepath_or_buffer( - filepath_or_buffer: FilePathOrBuffer, - encoding: Optional[str] = ..., - compression: Optional[str] = ..., - mode: Optional[str] = ..., -) -> Tuple[IO, Optional[str], Optional[str], bool]: ... diff --git a/typings/pandas/io/sas/sas_constants.pyi b/typings/pandas/io/sas/sas_constants.pyi deleted file mode 100644 index c7e4e8f..0000000 --- a/typings/pandas/io/sas/sas_constants.pyi +++ /dev/null @@ -1,108 +0,0 @@ -magic = ... -align_1_checker_value: bytes = ... -align_1_offset: int = ... -align_1_length: int = ... -align_1_value: int = ... -u64_byte_checker_value: bytes = ... -align_2_offset: int = ... -align_2_length: int = ... -align_2_value: int = ... -endianness_offset: int = ... -endianness_length: int = ... -platform_offset: int = ... -platform_length: int = ... -encoding_offset: int = ... -encoding_length: int = ... -dataset_offset: int = ... -dataset_length: int = ... -file_type_offset: int = ... -file_type_length: int = ... -date_created_offset: int = ... -date_created_length: int = ... -date_modified_offset: int = ... -date_modified_length: int = ... -header_size_offset: int = ... -header_size_length: int = ... -page_size_offset: int = ... -page_size_length: int = ... -page_count_offset: int = ... -page_count_length: int = ... -sas_release_offset: int = ... -sas_release_length: int = ... -sas_server_type_offset: int = ... -sas_server_type_length: int = ... -os_version_number_offset: int = ... -os_version_number_length: int = ... -os_maker_offset: int = ... -os_maker_length: int = ... -os_name_offset: int = ... -os_name_length: int = ... -page_bit_offset_x86: int = ... -page_bit_offset_x64: int = ... -subheader_pointer_length_x86: int = ... -subheader_pointer_length_x64: int = ... -page_type_offset: int = ... -page_type_length: int = ... -block_count_offset: int = ... -block_count_length: int = ... -subheader_count_offset: int = ... -subheader_count_length: int = ... -page_meta_type: int = ... -page_data_type: int = ... -page_amd_type: int = ... -page_metc_type: int = ... -page_comp_type: int = ... -page_mix_types = ... -subheader_pointers_offset: int = ... -truncated_subheader_id: int = ... -compressed_subheader_id: int = ... -compressed_subheader_type: int = ... -text_block_size_length: int = ... -row_length_offset_multiplier: int = ... -row_count_offset_multiplier: int = ... 
-col_count_p1_multiplier: int = ... -col_count_p2_multiplier: int = ... -row_count_on_mix_page_offset_multiplier: int = ... -column_name_pointer_length: int = ... -column_name_text_subheader_offset: int = ... -column_name_text_subheader_length: int = ... -column_name_offset_offset: int = ... -column_name_offset_length: int = ... -column_name_length_offset: int = ... -column_name_length_length: int = ... -column_data_offset_offset: int = ... -column_data_length_offset: int = ... -column_data_length_length: int = ... -column_type_offset: int = ... -column_type_length: int = ... -column_format_text_subheader_index_offset: int = ... -column_format_text_subheader_index_length: int = ... -column_format_offset_offset: int = ... -column_format_offset_length: int = ... -column_format_length_offset: int = ... -column_format_length_length: int = ... -column_label_text_subheader_index_offset: int = ... -column_label_text_subheader_index_length: int = ... -column_label_offset_offset: int = ... -column_label_offset_length: int = ... -column_label_length_offset: int = ... -column_label_length_length: int = ... -rle_compression: bytes = ... -rdc_compression: bytes = ... -compression_literals = ... -encoding_names = ... - -class SASIndex: - row_size_index: int = ... - column_size_index: int = ... - subheader_counts_index: int = ... - column_text_index: int = ... - column_name_index: int = ... - column_attributes_index: int = ... - format_and_label_index: int = ... - column_list_index: int = ... - data_subheader_index: int = ... - -subheader_signature_to_index = ... -sas_date_formats = ... -sas_datetime_formats = ... diff --git a/typings/pandas/io/stata.pyi b/typings/pandas/io/stata.pyi deleted file mode 100644 index face1c8..0000000 --- a/typings/pandas/io/stata.pyi +++ /dev/null @@ -1,159 +0,0 @@ -import datetime -from collections import abc -from pandas._typing import FilePathOrBuffer as FilePathOrBuffer -from pandas.core.frame import DataFrame as DataFrame -from typing import Dict, Hashable, Optional, Sequence - -def read_stata( - path: FilePathOrBuffer, - convert_dates: bool = ..., - convert_categoricals: bool = ..., - index_col: Optional[str] = ..., - convert_missing: bool = ..., - preserve_dtypes: bool = ..., - columns: Optional[Sequence[str]] = ..., - order_categoricals: bool = ..., - chunksize: Optional[int] = ..., - iterator: bool = ..., -) -> DataFrame: ... - -stata_epoch = ... -excessive_string_length_error: str - -class PossiblePrecisionLoss(Warning): ... - -precision_loss_doc: str - -class ValueLabelTypeMismatch(Warning): ... - -value_label_mismatch_doc: str - -class InvalidColumnName(Warning): ... - -invalid_name_doc: str - -class StataValueLabel: - labname = ... - value_labels = ... - text_len = ... - off = ... - val = ... - txt = ... - n: int = ... - len = ... - def __init__(self, catarray, encoding: str = ...): ... - def generate_value_label(self, byteorder): ... - -class StataMissingValue: - MISSING_VALUES = ... - bases = ... - float32_base: bytes = ... - increment = ... - value = ... - int_value = ... - float64_base: bytes = ... - BASE_MISSING_VALUES = ... - def __init__(self, value) -> None: ... - string = ... - def __eq__(self, other) -> bool: ... - @classmethod - def get_base_missing_value(cls, dtype): ... - -class StataParser: - DTYPE_MAP = ... - DTYPE_MAP_XML = ... - TYPE_MAP = ... - TYPE_MAP_XML = ... - VALID_RANGE = ... - OLD_TYPE_MAPPING = ... - MISSING_VALUES = ... - NUMPY_TYPE_MAP = ... - RESERVED_WORDS = ... - def __init__(self) -> None: ... 
- -class StataReader(StataParser, abc.Iterator): - col_sizes = ... - path_or_buf = ... - def __init__( - self, - path_or_buf, - convert_dates: bool = ..., - convert_categoricals: bool = ..., - index_col=..., - convert_missing: bool = ..., - preserve_dtypes: bool = ..., - columns=..., - order_categoricals: bool = ..., - chunksize=..., - ) -> None: ... - def __enter__(self): ... - def __exit__(self, exc_type, exc_value, traceback) -> None: ... - def close(self) -> None: ... - def __next__(self): ... - def get_chunk(self, size=...): ... - def read( - self, - nrows=..., - convert_dates=..., - convert_categoricals=..., - index_col=..., - convert_missing=..., - preserve_dtypes=..., - columns=..., - order_categoricals=..., - ): ... - @property - def data_label(self): ... - def variable_labels(self): ... - def value_labels(self): ... - -class StataWriter(StataParser): - type_converters = ... - def __init__( - self, - fname, - data, - convert_dates=..., - write_index: bool = ..., - byteorder=..., - time_stamp=..., - data_label=..., - variable_labels=..., - ) -> None: ... - def write_file(self) -> None: ... - -class StataStrLWriter: - df = ... - columns = ... - def __init__(self, df, columns, version: int = ..., byteorder=...) -> None: ... - def generate_table(self): ... - def generate_blob(self, gso_table): ... - -class StataWriter117(StataWriter): - def __init__( - self, - fname, - data, - convert_dates=..., - write_index: bool = ..., - byteorder=..., - time_stamp=..., - data_label=..., - variable_labels=..., - convert_strl=..., - ) -> None: ... - -class StataWriterUTF8(StataWriter117): - def __init__( - self, - fname: FilePathOrBuffer, - data: DataFrame, - convert_dates: Optional[Dict[Hashable, str]] = ..., - write_index: bool = ..., - byteorder: Optional[str] = ..., - time_stamp: Optional[datetime.datetime] = ..., - data_label: Optional[str] = ..., - variable_labels: Optional[Dict[Hashable, str]] = ..., - convert_strl: Optional[Sequence[Hashable]] = ..., - version: Optional[int] = ..., - ) -> None: ... diff --git a/typings/pandas/plotting/__init__.pyi b/typings/pandas/plotting/__init__.pyi deleted file mode 100644 index a044df2..0000000 --- a/typings/pandas/plotting/__init__.pyi +++ /dev/null @@ -1,21 +0,0 @@ -from ._core import ( - PlotAccessor as PlotAccessor, - boxplot as boxplot, - boxplot_frame as boxplot_frame, - boxplot_frame_groupby as boxplot_frame_groupby, - hist_frame as hist_frame, - hist_series as hist_series, -) -from ._misc import ( - andrews_curves as andrews_curves, - autocorrelation_plot as autocorrelation_plot, - bootstrap_plot as bootstrap_plot, - deregister as deregister_matplotlib_converters, - lag_plot as lag_plot, - parallel_coordinates as parallel_coordinates, - plot_params as plot_params, - radviz as radviz, - register as register_matplotlib_converters, - scatter_matrix as scatter_matrix, - table as table, -) diff --git a/typings/pandas/plotting/_core.pyi b/typings/pandas/plotting/_core.pyi deleted file mode 100644 index 2271668..0000000 --- a/typings/pandas/plotting/_core.pyi +++ /dev/null @@ -1,95 +0,0 @@ -from matplotlib.axes import Axes as PlotAxes -from pandas.core.base import PandasObject as PandasObject -from pandas.core.frame import DataFrame -from typing import Optional, Sequence, Tuple, Union - -def hist_series( - self, - by=..., - ax=..., - grid: bool = ..., - xlabelsize=..., - xrot=..., - ylabelsize=..., - yrot=..., - figsize=..., - bins: int = ..., - backend=..., - **kwargs -): ... 
-def hist_frame( - data, - column=..., - by=..., - grid: bool = ..., - xlabelsize=..., - xrot=..., - ylabelsize=..., - yrot=..., - ax=..., - sharex: bool = ..., - sharey: bool = ..., - figsize=..., - layout=..., - bins: int = ..., - backend=..., - **kwargs -): ... -def boxplot( - data: DataFrame, - column: Optional[Union[str, Sequence[str]]] = ..., - by: Optional[Union[str, Sequence[str]]] = ..., - ax: Optional[PlotAxes] = ..., - fontsize: Optional[Union[float, str]] = ..., - rot: float = ..., - grid: bool = ..., - figsize: Optional[Tuple[float, float]] = ..., - layout: Optional[Tuple[int, int]] = ..., - return_type: Optional[str] = ..., -): ... -def boxplot_frame( - self, - column=..., - by=..., - ax=..., - fontsize=..., - rot: int = ..., - grid: bool = ..., - figsize=..., - layout=..., - return_type=..., - backend=..., - **kwargs -): ... -def boxplot_frame_groupby( - grouped, - subplots: bool = ..., - column=..., - fontsize=..., - rot: int = ..., - grid: bool = ..., - ax=..., - figsize=..., - layout=..., - sharex: bool = ..., - sharey: bool = ..., - backend=..., - **kwargs -): ... - -class PlotAccessor(PandasObject): - def __init__(self, data) -> None: ... - def __call__(self, *args, **kwargs): ... - def line(self, x=..., y=..., **kwargs) -> PlotAccessor: ... - def bar(self, x=..., y=..., **kwargs) -> PlotAccessor: ... - def barh(self, x=..., y=..., **kwargs) -> PlotAccessor: ... - def box(self, by=..., **kwargs) -> PlotAccessor: ... - def hist(self, by=..., bins: int = ..., **kwargs) -> PlotAccessor: ... - def kde(self, bw_method=..., ind=..., **kwargs) -> PlotAccessor: ... - density = ... - def area(self, x=..., y=..., **kwargs) -> PlotAccessor: ... - def pie(self, **kwargs) -> PlotAccessor: ... - def scatter(self, x, y, s=..., c=..., **kwargs) -> PlotAccessor: ... - def hexbin( - self, x, y, C=..., reduce_C_function=..., gridsize=..., **kwargs - ) -> PlotAccessor: ... diff --git a/typings/pandas/plotting/_matplotlib/__init__.pyi b/typings/pandas/plotting/_matplotlib/__init__.pyi deleted file mode 100644 index de69c6c..0000000 --- a/typings/pandas/plotting/_matplotlib/__init__.pyi +++ /dev/null @@ -1,19 +0,0 @@ -from .boxplot import ( - boxplot as boxplot, - boxplot_frame as boxplot_frame, - boxplot_frame_groupby as boxplot_frame_groupby, -) -from .converter import deregister as deregister, register as register -from .hist import hist_frame as hist_frame, hist_series as hist_series -from .misc import ( - andrews_curves as andrews_curves, - autocorrelation_plot as autocorrelation_plot, - bootstrap_plot as bootstrap_plot, - lag_plot as lag_plot, - parallel_coordinates as parallel_coordinates, - radviz as radviz, - scatter_matrix as scatter_matrix, -) -from .tools import table as table - -def plot(data, kind, **kwargs): ... diff --git a/typings/pandas/plotting/_matplotlib/boxplot.pyi b/typings/pandas/plotting/_matplotlib/boxplot.pyi deleted file mode 100644 index 10a296d..0000000 --- a/typings/pandas/plotting/_matplotlib/boxplot.pyi +++ /dev/null @@ -1,58 +0,0 @@ -from matplotlib.axes import Axes as PlotAxes -from pandas.plotting._matplotlib.core import LinePlot as LinePlot -from typing import NamedTuple - -class BoxPlot(LinePlot): - class BoxPlot(NamedTuple): - ax: PlotAxes = ... - lines: dict = ... - BP = BoxPlot - - return_type = ... - def __init__(self, data, return_type: str = ..., **kwargs) -> None: ... - def maybe_color_bp(self, bp) -> None: ... - @property - def orientation(self): ... - @property - def result(self): ... 
- -def boxplot( - data, - column=..., - by=..., - ax=..., - fontsize=..., - rot: int = ..., - grid: bool = ..., - figsize=..., - layout=..., - return_type=..., - **kwds, -): ... -def boxplot_frame( - self, - column=..., - by=..., - ax=..., - fontsize=..., - rot: int = ..., - grid: bool = ..., - figsize=..., - layout=..., - return_type=..., - **kwds, -): ... -def boxplot_frame_groupby( - grouped, - subplots: bool = ..., - column=..., - fontsize=..., - rot: int = ..., - grid: bool = ..., - ax=..., - figsize=..., - layout=..., - sharex: bool = ..., - sharey: bool = ..., - **kwds, -): ... diff --git a/typings/pandas/plotting/_matplotlib/compat.pyi b/typings/pandas/plotting/_matplotlib/compat.pyi deleted file mode 100644 index e69de29..0000000 diff --git a/typings/pandas/plotting/_matplotlib/converter.pyi b/typings/pandas/plotting/_matplotlib/converter.pyi deleted file mode 100644 index 212200a..0000000 --- a/typings/pandas/plotting/_matplotlib/converter.pyi +++ /dev/null @@ -1,103 +0,0 @@ -import matplotlib.dates as dates -import matplotlib.units as units -from matplotlib.ticker import Formatter, Locator - -HOURS_PER_DAY: float = ... -MIN_PER_HOUR: float = ... -SEC_PER_MIN: float = ... -SEC_PER_HOUR: float = ... -SEC_PER_DAY: float = ... -MUSEC_PER_DAY: float = ... - -def get_pairs(): ... -def register_pandas_matplotlib_converters(func): ... -def pandas_converters() -> None: ... -def register() -> None: ... -def deregister() -> None: ... -def time2num(d): ... - -class TimeConverter(units.ConversionInterface): - @staticmethod - def convert(value, unit, axis): ... - @staticmethod - def axisinfo(unit, axis): ... - @staticmethod - def default_units(x, axis): ... - -class TimeFormatter(Formatter): - locs = ... - def __init__(self, locs) -> None: ... - def __call__(self, x, pos: int = ...): ... - -class PeriodConverter(dates.DateConverter): - @staticmethod - def convert(values, units, axis): ... - -def get_datevalue(date, freq): ... - -class DatetimeConverter(dates.DateConverter): - @staticmethod - def convert(values, unit, axis): ... - @staticmethod - def axisinfo(unit, axis): ... - -class PandasAutoDateFormatter(dates.AutoDateFormatter): - def __init__(self, locator, tz=..., defaultfmt: str = ...) -> None: ... - -class PandasAutoDateLocator(dates.AutoDateLocator): - def get_locator(self, dmin, dmax): ... - -class MilliSecondLocator(dates.DateLocator): - UNIT = ... - def __init__(self, tz) -> None: ... - @staticmethod - def get_unit_generic(freq): ... - def __call__(self): ... - def autoscale(self): ... - -def period_break(dates, period): ... -def has_level_label(label_flags, vmin): ... -def get_finder(freq): ... - -class TimeSeries_DateLocator(Locator): - freq = ... - base = ... - isminor = ... - isdynamic = ... - offset: int = ... - plot_obj = ... - finder = ... - def __init__( - self, - freq, - minor_locator: bool = ..., - dynamic_mode: bool = ..., - base: int = ..., - quarter: int = ..., - month: int = ..., - day: int = ..., - plot_obj=..., - ) -> None: ... - def __call__(self): ... - def autoscale(self): ... - -class TimeSeries_DateFormatter(Formatter): - format = ... - freq = ... - locs = ... - formatdict = ... - isminor = ... - isdynamic = ... - offset: int = ... - plot_obj = ... - finder = ... - def __init__( - self, freq, minor_locator: bool = ..., dynamic_mode: bool = ..., plot_obj=... - ) -> None: ... - def set_locs(self, locs) -> None: ... - def __call__(self, x, pos: int = ...): ... 
- -class TimeSeries_TimedeltaFormatter(Formatter): - @staticmethod - def format_timedelta_ticks(x, pos, n_decimals): ... - def __call__(self, x, pos: int = ...): ... diff --git a/typings/pandas/plotting/_matplotlib/core.pyi b/typings/pandas/plotting/_matplotlib/core.pyi deleted file mode 100644 index da620fe..0000000 --- a/typings/pandas/plotting/_matplotlib/core.pyi +++ /dev/null @@ -1,117 +0,0 @@ -from typing import Optional - -class MPLPlot: - orientation: Optional[str] = ... - data = ... - by = ... - kind = ... - sort_columns = ... - subplots = ... - sharex: bool = ... - sharey = ... - figsize = ... - layout = ... - xticks = ... - yticks = ... - xlim = ... - ylim = ... - title = ... - use_index = ... - fontsize = ... - rot = ... - grid = ... - legend = ... - legend_handles = ... - legend_labels = ... - ax = ... - fig = ... - axes = ... - errors = ... - secondary_y = ... - colormap = ... - table = ... - include_bool = ... - kwds = ... - def __init__( - self, - data, - kind=..., - by=..., - subplots: bool = ..., - sharex=..., - sharey: bool = ..., - use_index: bool = ..., - figsize=..., - grid=..., - legend: bool = ..., - rot=..., - ax=..., - fig=..., - title=..., - xlim=..., - ylim=..., - xticks=..., - yticks=..., - sort_columns: bool = ..., - fontsize=..., - secondary_y: bool = ..., - colormap=..., - table: bool = ..., - layout=..., - include_bool: bool = ..., - **kwds, - ) -> None: ... - @property - def nseries(self): ... - def draw(self) -> None: ... - def generate(self) -> None: ... - @property - def result(self): ... - @property - def legend_title(self): ... - def plt(self): ... - @classmethod - def get_default_ax(cls, ax) -> None: ... - def on_right(self, i): ... - -class PlanePlot(MPLPlot): - x = ... - y = ... - def __init__(self, data, x, y, **kwargs) -> None: ... - @property - def nseries(self): ... - -class ScatterPlot(PlanePlot): - c = ... - def __init__(self, data, x, y, s=..., c=..., **kwargs) -> None: ... - -class HexBinPlot(PlanePlot): - C = ... - def __init__(self, data, x, y, C=..., **kwargs) -> None: ... - -class LinePlot(MPLPlot): - orientation: str = ... - data = ... - x_compat = ... - def __init__(self, data, **kwargs) -> None: ... - -class AreaPlot(LinePlot): - def __init__(self, data, **kwargs) -> None: ... - -class BarPlot(MPLPlot): - orientation: str = ... - bar_width = ... - tick_pos = ... - bottom = ... - left = ... - log = ... - tickoffset = ... - lim_offset = ... - ax_pos = ... - def __init__(self, data, **kwargs) -> None: ... - -class BarhPlot(BarPlot): - orientation: str = ... - -class PiePlot(MPLPlot): - def __init__(self, data, kind=..., **kwargs) -> None: ... diff --git a/typings/pandas/plotting/_matplotlib/hist.pyi b/typings/pandas/plotting/_matplotlib/hist.pyi deleted file mode 100644 index 560b604..0000000 --- a/typings/pandas/plotting/_matplotlib/hist.pyi +++ /dev/null @@ -1,45 +0,0 @@ -from pandas.plotting._matplotlib.core import LinePlot as LinePlot - -class HistPlot(LinePlot): - bins = ... - bottom = ... - def __init__(self, data, bins: int = ..., bottom: int = ..., **kwargs) -> None: ... - @property - def orientation(self): ... - -class KdePlot(HistPlot): - orientation: str = ... - bw_method = ... - ind = ... - def __init__(self, data, bw_method=..., ind=..., **kwargs) -> None: ... - -def hist_series( - self, - by=..., - ax=..., - grid: bool = ..., - xlabelsize=..., - xrot=..., - ylabelsize=..., - yrot=..., - figsize=..., - bins: int = ..., - **kwds, -): ... 
-def hist_frame(
-    data,
-    column=...,
-    by=...,
-    grid: bool = ...,
-    xlabelsize=...,
-    xrot=...,
-    ylabelsize=...,
-    yrot=...,
-    ax=...,
-    sharex: bool = ...,
-    sharey: bool = ...,
-    figsize=...,
-    layout=...,
-    bins: int = ...,
-    **kwds,
-): ...
diff --git a/typings/pandas/plotting/_matplotlib/misc.pyi b/typings/pandas/plotting/_matplotlib/misc.pyi
deleted file mode 100644
index d1732fd..0000000
--- a/typings/pandas/plotting/_matplotlib/misc.pyi
+++ /dev/null
@@ -1,34 +0,0 @@
-def scatter_matrix(
-    frame,
-    alpha: float = ...,
-    figsize=...,
-    ax=...,
-    grid: bool = ...,
-    diagonal: str = ...,
-    marker: str = ...,
-    density_kwds=...,
-    hist_kwds=...,
-    range_padding: float = ...,
-    **kwds,
-): ...
-def radviz(frame, class_column, ax=..., color=..., colormap=..., **kwds): ...
-def andrews_curves(
-    frame, class_column, ax=..., samples: int = ..., color=..., colormap=..., **kwds
-): ...
-def bootstrap_plot(series, fig=..., size: int = ..., samples: int = ..., **kwds): ...
-def parallel_coordinates(
-    frame,
-    class_column,
-    cols=...,
-    ax=...,
-    color=...,
-    use_columns: bool = ...,
-    xticks=...,
-    colormap=...,
-    axvlines: bool = ...,
-    axvlines_kwds=...,
-    sort_labels: bool = ...,
-    **kwds,
-): ...
-def lag_plot(series, lag: int = ..., ax=..., **kwds): ...
-def autocorrelation_plot(series, ax=..., **kwds): ...
diff --git a/typings/pandas/plotting/_matplotlib/style.pyi b/typings/pandas/plotting/_matplotlib/style.pyi
deleted file mode 100644
index e69de29..0000000
diff --git a/typings/pandas/plotting/_matplotlib/timeseries.pyi b/typings/pandas/plotting/_matplotlib/timeseries.pyi
deleted file mode 100644
index b82fd0c..0000000
--- a/typings/pandas/plotting/_matplotlib/timeseries.pyi
+++ /dev/null
@@ -1 +0,0 @@
-def format_dateaxis(subplot, freq, index) -> None: ...
diff --git a/typings/pandas/plotting/_matplotlib/tools.pyi b/typings/pandas/plotting/_matplotlib/tools.pyi
deleted file mode 100644
index 06fa696..0000000
--- a/typings/pandas/plotting/_matplotlib/tools.pyi
+++ /dev/null
@@ -1,2 +0,0 @@
-def format_date_labels(ax, rot) -> None: ...
-def table(ax, data, rowLabels=..., colLabels=..., **kwargs): ...
diff --git a/typings/pandas/plotting/_misc.pyi b/typings/pandas/plotting/_misc.pyi
deleted file mode 100644
index a45bf39..0000000
--- a/typings/pandas/plotting/_misc.pyi
+++ /dev/null
@@ -1,81 +0,0 @@
-from matplotlib.axes import Axes as PlotAxes
-from matplotlib.figure import Figure
-import numpy as np
-from pandas.core.series import Series
-from pandas.core.frame import DataFrame
-from typing import Any, Dict, Optional, Sequence, Tuple, Union
-
-def table(
-    ax,
-    data,
-    rowLabels=None,
-    colLabels=None,
-): ...
-def register() -> None: ...
-def deregister() -> None: ...
-def scatter_matrix(
-    frame: DataFrame,
-    alpha: float = ...,
-    figsize: Optional[Tuple[float, float]] = ...,
-    ax: Optional[PlotAxes] = ...,
-    grid: bool = ...,
-    diagonal: str = ...,
-    marker: str = ...,
-    density_kwds=...,
-    hist_kwds=...,
-    range_padding: float = ...,
-) -> np.ndarray: ...
-def radviz(
-    frame: DataFrame,
-    class_column: str,
-    ax: Optional[PlotAxes] = ...,
-    color: Optional[Union[Sequence[str], Tuple[str]]] = ...,
-    colormap=...,
-) -> PlotAxes: ...
-def andrews_curves(
-    frame: DataFrame,
-    class_column: str,
-    ax: Optional[PlotAxes] = ...,
-    samples: int = ...,
-    color: Optional[Union[Sequence[str], Tuple[str]]] = ...,
-    colormap=...,
-) -> PlotAxes: ...
-def bootstrap_plot(
-    series: Series,
-    fig: Optional[Figure] = ...,
-    size: int = ...,
-    samples: int = ...,
-) -> Figure: ...
-def parallel_coordinates(
-    frame: DataFrame,
-    class_column: str,
-    cols: Optional[Sequence[str]] = ...,
-    ax: Optional[PlotAxes] = ...,
-    color: Optional[Union[Sequence[str], Tuple[str]]] = ...,
-    use_columns: bool = ...,
-    xticks: Optional[Union[Sequence, Tuple]] = ...,
-    colormap=...,
-    axvlines: bool = ...,
-    axvlines_kwds=...,
-    sort_labels: bool = ...,
-) -> PlotAxes: ...
-def lag_plot(
-    series: Series,
-    lag: int = ...,
-    ax: Optional[PlotAxes] = ...,
-) -> PlotAxes: ...
-def autocorrelation_plot(
-    series: Series,
-    ax: Optional[PlotAxes] = ...,
-) -> PlotAxes: ...
-
-class _Options(dict):
-    def __init__(self, deprecated: bool = ...) -> None: ...
-    def __getitem__(self, key): ...
-    def __setitem__(self, key, value): ...
-    def __delitem__(self, key): ...
-    def __contains__(self, key) -> bool: ...
-    def reset(self) -> None: ...
-    def use(self, key, value) -> None: ...
-
-plot_params: Dict[str, Any]
diff --git a/typings/pandas/py.typed b/typings/pandas/py.typed
deleted file mode 100644
index b648ac9..0000000
--- a/typings/pandas/py.typed
+++ /dev/null
@@ -1 +0,0 @@
-partial
diff --git a/typings/pandas/tseries/__init__.pyi b/typings/pandas/tseries/__init__.pyi
deleted file mode 100644
index e69de29..0000000
diff --git a/typings/pandas/tseries/api.pyi b/typings/pandas/tseries/api.pyi
deleted file mode 100644
index 21aee7b..0000000
--- a/typings/pandas/tseries/api.pyi
+++ /dev/null
@@ -1 +0,0 @@
-from pandas.tseries.frequencies import infer_freq as infer_freq
diff --git a/typings/pandas/tseries/frequencies.pyi b/typings/pandas/tseries/frequencies.pyi
deleted file mode 100644
index 5d4d875..0000000
--- a/typings/pandas/tseries/frequencies.pyi
+++ /dev/null
@@ -1,7 +0,0 @@
-from pandas.tseries.offsets import DateOffset as DateOffset
-from typing import Optional
-
-def get_period_alias(offset_str: str) -> Optional[str]: ...
-def to_offset(freq) -> Optional[DateOffset]: ...
-def get_offset(name: str) -> DateOffset: ...
-def infer_freq(index, warn: bool = ...) -> Optional[str]: ...
diff --git a/typings/pandas/tseries/offsets.pyi b/typings/pandas/tseries/offsets.pyi
deleted file mode 100644
index f65a652..0000000
--- a/typings/pandas/tseries/offsets.pyi
+++ /dev/null
@@ -1,44 +0,0 @@
-from pandas._libs.tslibs.offsets import (
-    FY5253 as FY5253,
-    BaseOffset as BaseOffset,
-    BDay as BDay,
-    BMonthBegin as BMonthBegin,
-    BMonthEnd as BMonthEnd,
-    BQuarterBegin as BQuarterBegin,
-    BQuarterEnd as BQuarterEnd,
-    BusinessDay as BusinessDay,
-    BusinessHour as BusinessHour,
-    BusinessMonthBegin as BusinessMonthBegin,
-    BusinessMonthEnd as BusinessMonthEnd,
-    BYearBegin as BYearBegin,
-    BYearEnd as BYearEnd,
-    CBMonthBegin as CBMonthBegin,
-    CBMonthEnd as CBMonthEnd,
-    CDay as CDay,
-    CustomBusinessDay as CustomBusinessDay,
-    CustomBusinessHour as CustomBusinessHour,
-    CustomBusinessMonthBegin as CustomBusinessMonthBegin,
-    CustomBusinessMonthEnd as CustomBusinessMonthEnd,
-    DateOffset as DateOffset,
-    Day as Day,
-    Easter as Easter,
-    FY5253Quarter as FY5253Quarter,
-    Hour as Hour,
-    LastWeekOfMonth as LastWeekOfMonth,
-    Micro as Micro,
-    Milli as Milli,
-    Minute as Minute,
-    MonthBegin as MonthBegin,
-    MonthEnd as MonthEnd,
-    Nano as Nano,
-    QuarterBegin as QuarterBegin,
-    QuarterEnd as QuarterEnd,
-    Second as Second,
-    SemiMonthBegin as SemiMonthBegin,
-    SemiMonthEnd as SemiMonthEnd,
-    Tick as Tick,
-    Week as Week,
-    WeekOfMonth as WeekOfMonth,
-    YearBegin as YearBegin,
-    YearEnd as YearEnd,
-)