Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Consistent Python relative imports. #11006

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
39 changes: 19 additions & 20 deletions python-package/xgboost/spark/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,14 +59,12 @@
)
from scipy.special import expit, softmax # pylint: disable=no-name-in-module

import xgboost
from xgboost import XGBClassifier
from xgboost.compat import is_cudf_available, is_cupy_available
from xgboost.core import Booster, _check_distributed_params
from xgboost.sklearn import DEFAULT_N_ESTIMATORS, XGBModel, _can_use_qdm
from xgboost.training import train as worker_train

from .._typing import ArrayLike
from ..compat import is_cudf_available, is_cupy_available
from ..config import config_context
from ..core import Booster, _check_distributed_params, _py_version
from ..sklearn import DEFAULT_N_ESTIMATORS, XGBClassifier, XGBModel, _can_use_qdm
from ..training import train as worker_train
from .data import (
_read_csr_matrix_from_unwrapped_spark_vec,
alias,
Expand Down Expand Up @@ -765,7 +763,7 @@ def _get_xgb_train_call_args(
cls, train_params: Dict[str, Any]
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
xgb_train_default_args = _get_default_params_from_func(
xgboost.train, _unsupported_train_params
worker_train, _unsupported_train_params
)
booster_params, kwargs_params = {}, {}
for key, value in train_params.items():
Expand Down Expand Up @@ -1136,17 +1134,18 @@ def _train_booster(
_rabit_args = json.loads(messages[0])["rabit_msg"]

evals_result: Dict[str, Any] = {}
with CommunicatorContext(context, **_rabit_args):
with xgboost.config_context(verbosity=verbosity):
dtrain, dvalid = create_dmatrix_from_partitions(
iterator=pandas_df_iter,
feature_cols=feature_prop.features_cols_names,
dev_ordinal=dev_ordinal,
use_qdm=use_qdm,
kwargs=dmatrix_kwargs,
enable_sparse_data_optim=feature_prop.enable_sparse_data_optim,
has_validation_col=feature_prop.has_validation_col,
)
with config_context(verbosity=verbosity), CommunicatorContext(
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Need a different PR to fix the use_rmm handling here.

context, **_rabit_args
):
dtrain, dvalid = create_dmatrix_from_partitions(
iterator=pandas_df_iter,
feature_cols=feature_prop.features_cols_names,
dev_ordinal=dev_ordinal,
use_qdm=use_qdm,
kwargs=dmatrix_kwargs,
enable_sparse_data_optim=feature_prop.enable_sparse_data_optim,
has_validation_col=feature_prop.has_validation_col,
)
if dvalid is not None:
dval = [(dtrain, "training"), (dvalid, "validation")]
else:
Expand Down Expand Up @@ -1188,7 +1187,7 @@ def _run_job() -> Tuple[str, str]:
"\n\tbooster params: %s"
"\n\ttrain_call_kwargs_params: %s"
"\n\tdmatrix_kwargs: %s",
xgboost._py_version(),
_py_version(),
num_workers,
booster_params,
train_call_kwargs_params,
Expand Down
10 changes: 5 additions & 5 deletions python-package/xgboost/spark/data.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,17 +7,17 @@
import pandas as pd
from scipy.sparse import csr_matrix

from xgboost import DataIter, DMatrix, QuantileDMatrix, XGBModel
from xgboost.compat import concat

from .._typing import ArrayLike
from .utils import get_logger # type: ignore
from ..compat import concat
from ..core import DataIter, DMatrix, QuantileDMatrix
from ..sklearn import XGBModel
from .utils import get_logger


def stack_series(series: pd.Series) -> np.ndarray:
"""Stack a series of arrays."""
array = series.to_numpy(copy=False)
array = np.stack(array)
array = np.stack(array) # type: ignore
return array


Expand Down
3 changes: 1 addition & 2 deletions python-package/xgboost/spark/estimator.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,7 @@
from pyspark.ml.param import Param, Params
from pyspark.ml.param.shared import HasProbabilityCol, HasRawPredictionCol

from xgboost import XGBClassifier, XGBRanker, XGBRegressor

from ..sklearn import XGBClassifier, XGBRanker, XGBRegressor
from .core import ( # type: ignore
_ClassificationModel,
_SparkXGBEstimator,
Expand Down
Loading