Fix GPU L1 error. (#8749)
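The "L1" in the title appears to refer to the reg:absoluteerror (mean absolute error) objective when trained with the GPU histogram updater (tree_method="gpu_hist"); the hunks below add a C++ check that the hist-based updaters report node-position support, plus Python regression tests for L1 training and leaf prediction. As a minimal sketch of the configuration the new training test exercises, with the synthetic data being an assumption for illustration (a CUDA-enabled build is required for gpu_hist):

```python
# Illustrative sketch only: reproduces the objective/updater combination the
# regression test targets; the data and shapes here are made up.
import numpy as np
import xgboost as xgb

rng = np.random.default_rng(0)
X = rng.normal(size=(1024, 8))
y = rng.normal(size=1024)
Xy = xgb.DMatrix(X, y)

evals_result = {}
xgb.train(
    {
        "tree_method": "gpu_hist",          # GPU histogram updater
        "objective": "reg:absoluteerror",   # the L1 objective the fix concerns
        "subsample": 0.8,
    },
    Xy,
    num_boost_round=10,
    evals=[(Xy, "Train")],
    evals_result=evals_result,
)
print(evals_result["Train"]["mae"])  # per-round training MAE
```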
tests/cpp/tree/test_node_partition.cc (new file, 24 lines added)
@@ -0,0 +1,24 @@
/**
 * Copyright 2023 by XGBoost contributors
 */
#include <gtest/gtest.h>
#include <xgboost/task.h>
#include <xgboost/tree_updater.h>

namespace xgboost {
TEST(Updater, HasNodePosition) {
  Context ctx;
  ObjInfo task{ObjInfo::kRegression, true, true};
  std::unique_ptr<TreeUpdater> up{TreeUpdater::Create("grow_histmaker", &ctx, task)};
  ASSERT_TRUE(up->HasNodePosition());

  up.reset(TreeUpdater::Create("grow_quantile_histmaker", &ctx, task));
  ASSERT_TRUE(up->HasNodePosition());

#if defined(XGBOOST_USE_CUDA)
  ctx.gpu_id = 0;
  up.reset(TreeUpdater::Create("grow_gpu_hist", &ctx, task));
  ASSERT_TRUE(up->HasNodePosition());
#endif // defined(XGBOOST_USE_CUDA)
}
} // namespace xgboost
@@ -337,13 +337,21 @@ class TestGPUPredict:
    @given(predict_parameter_strategy, tm.dataset_strategy)
    @settings(deadline=None, max_examples=20, print_blob=True)
    def test_predict_leaf_gbtree(self, param, dataset):
        # Unsupported for random forest
        if param.get("num_parallel_tree", 1) > 1 and dataset.name.endswith("-l1"):
            return

        param['booster'] = 'gbtree'
        param['tree_method'] = 'gpu_hist'
        self.run_predict_leaf_booster(param, 10, dataset)

    @given(predict_parameter_strategy, tm.dataset_strategy)
    @settings(deadline=None, max_examples=20, print_blob=True)
-   def test_predict_leaf_dart(self, param, dataset):
+   def test_predict_leaf_dart(self, param: dict, dataset: tm.TestDataset) -> None:
        # Unsupported for random forest
        if param.get("num_parallel_tree", 1) > 1 and dataset.name.endswith("-l1"):
            return

        param['booster'] = 'dart'
        param['tree_method'] = 'gpu_hist'
        self.run_predict_leaf_booster(param, 10, dataset)
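run_predict_leaf_booster is a helper defined elsewhere in this test class; the hunk above only adjusts the tests that call it, skipping random-forest configurations on the "-l1" datasets and adding type annotations to the dart test. As a rough, assumed sketch of what a leaf-prediction check looks like through the public API (not the actual helper; pred_leaf=True, CUDA build required for gpu_hist):

```python
# Assumed illustrative check, not the run_predict_leaf_booster helper itself.
import numpy as np
import xgboost as xgb

rng = np.random.default_rng(1)
X = rng.normal(size=(256, 4))
y = X.sum(axis=1)
Xy = xgb.DMatrix(X, y)

booster = xgb.train({"booster": "gbtree", "tree_method": "gpu_hist"}, Xy, num_boost_round=10)
leaves = booster.predict(Xy, pred_leaf=True)
assert leaves.shape == (256, 10)  # one leaf index per row per boosting round
```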
@@ -442,6 +442,22 @@ class TestTreeMethod:
        config_0 = json.loads(booster_0.save_config())
        np.testing.assert_allclose(get_score(config_0), get_score(config_1) + 1)

        evals_result: Dict[str, Dict[str, list]] = {}
        xgb.train(
            {
                "tree_method": tree_method,
                "objective": "reg:absoluteerror",
                "subsample": 0.8
            },
            Xy,
            num_boost_round=10,
            evals=[(Xy, "Train")],
            evals_result=evals_result,
        )
        mae = evals_result["Train"]["mae"]
        assert mae[-1] < 20.0
        assert tm.non_increasing(mae)

    @pytest.mark.skipif(**tm.no_sklearn())
    @pytest.mark.parametrize(
        "tree_method,weighted", [
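The new reg:absoluteerror block above ends by asserting that the final training MAE is below 20 and that the per-round MAE never increases. tm is XGBoost's testing utility module; a plausible stand-in for its non_increasing helper (the real implementation may differ, e.g. in its tolerance handling) is:

```python
from typing import Sequence

def non_increasing(values: Sequence[float], tolerance: float = 1e-4) -> bool:
    # True if each value is no larger than the previous one, up to a small
    # tolerance for numerical noise between boosting rounds.
    return all(later <= prior + tolerance for prior, later in zip(values, values[1:]))
```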
@@ -215,7 +215,6 @@ MultiClfData = namedtuple("MultiClfData", ("multi_clf_df_train", "multi_clf_df_t
@pytest.fixture
def multi_clf_data(spark: SparkSession) -> Generator[MultiClfData, None, None]:
    X = np.array([[1.0, 2.0, 3.0], [1.0, 2.0, 4.0], [0.0, 1.0, 5.5], [-1.0, -2.0, 1.0]])
    y = np.array([0, 0, 1, 2])
    cls1 = xgb.XGBClassifier()