BUG: Avoid RangeIndex conversion in read_csv if dtype is specified (#59316)

* BUG: Avoid RangeIndex conversion in read_csv if dtype is specified

* Undo change

* Typing
mroeschke authored and jorisvandenbossche committed Nov 16, 2024
1 parent 284e359 commit aa0a90a
Showing 2 changed files with 42 additions and 12 deletions.
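
For context, a minimal sketch of the user-visible behavior this commit targets. The column names and dtypes are taken from the new test added below; the pre-fix output shown in the comment is an assumption based on the commit title, not output copied from the diff.

    import numpy as np
    import pandas as pd
    from io import StringIO

    data = StringIO("345.5,519.5,0\n519.5,726.5,1")
    result = pd.read_csv(
        data,
        header=None,
        names=["start", "stop", "bin_id"],
        dtype={"start": np.float32, "stop": np.float32, "bin_id": np.uint32},
        index_col="bin_id",
    )
    # Before the fix, the index column could be converted to a RangeIndex,
    # dropping the requested uint32 dtype; with the fix it keeps it:
    # Index([0, 1], dtype='uint32', name='bin_id')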
36 changes: 25 additions & 11 deletions pandas/io/parsers/base_parser.py
@@ -464,7 +464,11 @@ def _agg_index(self, index, try_parse_dates: bool = True) -> Index:
         arrays = []
         converters = self._clean_mapping(self.converters)
 
-        for i, arr in enumerate(index):
+        if self.index_names is not None:
+            names: Iterable = self.index_names
+        else:
+            names = itertools.cycle([None])
+        for i, (arr, name) in enumerate(zip(index, names)):
             if try_parse_dates and self._should_parse_dates(i):
                 arr = self._date_conv(
                     arr,
@@ -504,12 +508,17 @@ def _agg_index(self, index, try_parse_dates: bool = True) -> Index:
             arr, _ = self._infer_types(
                 arr, col_na_values | col_na_fvalues, cast_type is None, try_num_bool
             )
-            arrays.append(arr)
-
-        names = self.index_names
-        index = ensure_index_from_sequences(arrays, names)
+            if cast_type is not None:
+                # Don't perform RangeIndex inference
+                idx = Index(arr, name=name, dtype=cast_type)
+            else:
+                idx = ensure_index_from_sequences([arr], [name])
+            arrays.append(idx)
 
-        return index
+        if len(arrays) == 1:
+            return arrays[0]
+        else:
+            return MultiIndex.from_arrays(arrays)
 
     @final
     def _convert_to_ndarrays(
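
In rough terms, the reworked loop now builds one Index per index level, skipping RangeIndex-style inference whenever a dtype was requested, and only combines levels at the end. A standalone sketch of that combining step using public pandas API; the names and values here are illustrative, not taken from the diff:

    import numpy as np
    import pandas as pd

    levels = [
        # dtype given explicitly: the level keeps uint32, no inference
        pd.Index(np.array([0, 1, 2]), name="bin_id", dtype=np.uint32),
        pd.Index(["a", "b", "c"], name="label"),
    ]
    # Mirrors the final step of _agg_index: a single level is returned as-is,
    # multiple levels are combined into a MultiIndex.
    combined = levels[0] if len(levels) == 1 else pd.MultiIndex.from_arrays(levels)
    print(combined)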
@@ -1084,12 +1093,11 @@ def _get_empty_meta(self, columns, dtype: DtypeArg | None = None):
         dtype_dict: defaultdict[Hashable, Any]
         if not is_dict_like(dtype):
             # if dtype == None, default will be object.
-            default_dtype = dtype or object
-            dtype_dict = defaultdict(lambda: default_dtype)
+            dtype_dict = defaultdict(lambda: dtype)
         else:
             dtype = cast(dict, dtype)
             dtype_dict = defaultdict(
-                lambda: object,
+                lambda: None,
                 {columns[k] if is_integer(k) else k: v for k, v in dtype.items()},
             )
 
@@ -1106,8 +1114,14 @@ def _get_empty_meta(self, columns, dtype: DtypeArg | None = None):
         if (index_col is None or index_col is False) or index_names is None:
             index = default_index(0)
         else:
-            data = [Series([], dtype=dtype_dict[name]) for name in index_names]
-            index = ensure_index_from_sequences(data, names=index_names)
+            # TODO: We could return default_index(0) if dtype_dict[name] is None
+            data = [
+                Index([], name=name, dtype=dtype_dict[name]) for name in index_names
+            ]
+            if len(data) == 1:
+                index = data[0]
+            else:
+                index = MultiIndex.from_arrays(data)
             index_col.sort()
 
         for i, n in enumerate(index_col):
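
With the defaultdict fallback above, columns without an explicit dtype now map to None (letting the empty Index pick its own default) instead of being forced to object. A small sketch of that lookup pattern for the empty-frame case, with an illustrative column name:

    from collections import defaultdict

    import numpy as np
    import pandas as pd

    # Mirrors the dict-like branch: requested dtypes win, everything else falls back to None.
    dtype_dict = defaultdict(lambda: None, {"bin_id": np.uint32})

    index_names = ["bin_id"]
    data = [pd.Index([], name=name, dtype=dtype_dict[name]) for name in index_names]
    index = data[0] if len(data) == 1 else pd.MultiIndex.from_arrays(data)
    # Index([], dtype='uint32', name='bin_id')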
18 changes: 17 additions & 1 deletion pandas/tests/io/parser/dtypes/test_dtypes_basic.py
@@ -22,6 +22,8 @@
     "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
 )
 
+xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
+
 
 @pytest.mark.parametrize("dtype", [str, object])
 @pytest.mark.parametrize("check_orig", [True, False])
@@ -594,6 +596,7 @@ def test_string_inference_object_dtype(all_parsers, dtype, using_infer_string):
     tm.assert_frame_equal(result, expected)
 
 
+@xfail_pyarrow
 def test_accurate_parsing_of_large_integers(all_parsers):
     # GH#52505
     data = """SYMBOL,MOMENT,ID,ID_DEAL
@@ -604,7 +607,7 @@ def test_accurate_parsing_of_large_integers(all_parsers):
 AMZN,20230301181139587,2023552585717889759,2023552585717263360
 MSFT,20230301181139587,2023552585717889863,2023552585717263361
 NVDA,20230301181139587,2023552585717889827,2023552585717263361"""
-    orders = pd.read_csv(StringIO(data), dtype={"ID_DEAL": pd.Int64Dtype()})
+    orders = all_parsers.read_csv(StringIO(data), dtype={"ID_DEAL": pd.Int64Dtype()})
     assert len(orders.loc[orders["ID_DEAL"] == 2023552585717263358, "ID_DEAL"]) == 1
     assert len(orders.loc[orders["ID_DEAL"] == 2023552585717263359, "ID_DEAL"]) == 1
     assert len(orders.loc[orders["ID_DEAL"] == 2023552585717263360, "ID_DEAL"]) == 2
@@ -626,3 +629,16 @@ def test_dtypes_with_usecols(all_parsers):
     values = ["1", "4"]
     expected = DataFrame({"a": pd.Series(values, dtype=object), "c": [3, 6]})
     tm.assert_frame_equal(result, expected)
+
+
+def test_index_col_with_dtype_no_rangeindex(all_parsers):
+    data = StringIO("345.5,519.5,0\n519.5,726.5,1")
+    result = all_parsers.read_csv(
+        data,
+        header=None,
+        names=["start", "stop", "bin_id"],
+        dtype={"start": np.float32, "stop": np.float32, "bin_id": np.uint32},
+        index_col="bin_id",
+    ).index
+    expected = pd.Index([0, 1], dtype=np.uint32, name="bin_id")
+    tm.assert_index_equal(result, expected)
