Optional normalization for learning to rank. (#10094)

This commit is contained in:
Jiaming Yuan
2024-03-08 12:41:21 +08:00
committed by GitHub
parent bc516198dc
commit e14c3b9325
8 changed files with 44 additions and 5 deletions

View File

@@ -6,6 +6,7 @@ import pytest
import xgboost
from xgboost import testing as tm
from xgboost.testing.ranking import run_normalization
pytestmark = tm.timeout(30)
@@ -126,3 +127,7 @@ def test_with_mq2008(objective, metric) -> None:
dtest = xgboost.DMatrix(x_test, y_test, qid=qid_test)
comp_training_with_rank_objective(dtrain, dtest, objective, metric)
def test_normalization() -> None:
    """Run the shared learning-to-rank normalization checks on the ``cuda`` device."""
    device = "cuda"
    run_normalization(device)

View File

@@ -13,6 +13,7 @@ import xgboost
from xgboost import testing as tm
from xgboost.testing.data import RelDataCV, simulate_clicks, sort_ltr_samples
from xgboost.testing.params import lambdarank_parameter_strategy
from xgboost.testing.ranking import run_normalization
def test_ndcg_custom_gain():
@@ -188,6 +189,10 @@ def test_unbiased() -> None:
assert df["ti+"].iloc[-1] < df["ti+"].iloc[0]
def test_normalization() -> None:
    """Run the shared learning-to-rank normalization checks on the ``cpu`` device."""
    device = "cpu"
    run_normalization(device)
class TestRanking:
@classmethod
def setup_class(cls):