diff --git a/python-package/xgboost/spark/core.py b/python-package/xgboost/spark/core.py
index c2f10550a41c..e6e042a3756c 100644
--- a/python-package/xgboost/spark/core.py
+++ b/python-package/xgboost/spark/core.py
@@ -1256,9 +1256,12 @@ def predict_udf(iterator: Iterator[pd.DataFrame]) -> Iterator[pd.Series]:
             dev_ordinal = -1
             if is_local:
                 if run_on_gpu and is_cupy_available():
-                    total_gpus = cupy.cuda.runtime.getDeviceCount()
+                    import cupy as cp  # pylint: disable=import-error
+
+                    total_gpus = cp.cuda.runtime.getDeviceCount()
                     if total_gpus > 0:
-                        partition_id = context.partitionId()
+                        from pyspark import TaskContext
+                        partition_id = TaskContext.get().partitionId()
                         # For transform local mode, default the gpu_id to (partition id) % gpus.
                         dev_ordinal = partition_id % total_gpus
             elif run_on_gpu:
@@ -1266,7 +1269,6 @@ def predict_udf(iterator: Iterator[pd.DataFrame]) -> Iterator[pd.Series]:
                 dev_ordinal = _get_gpu_id(TaskContext.get())
 
             if dev_ordinal >= 0:
-                print("------------------- ", TaskContext.get().partitionId())
                 device = "cuda:" + str(dev_ordinal)
                 get_logger("XGBoost-PySpark").info(
                     "Do the inference with device: %s", device
@@ -1278,7 +1280,7 @@ def predict_udf(iterator: Iterator[pd.DataFrame]) -> Iterator[pd.Series]:
                 )
 
             def to_gpu(data: ArrayLike) -> ArrayLike:
-                """Move the data to gpu"""
+                """Move the data to gpu if possible"""
                 if dev_ordinal >= 0:
                     import cudf  # pylint: disable=import-error
                     import cupy as cp  # pylint: disable=import-error