[backport] CI fixes (#6933)
* Relax shotgun test. (#6900) — it is a non-deterministic algorithm, so the test is flaky.
* Disable pylint error. (#6911)
* [CI] Skip external memory gtest on osx. (#6901)
* [CI] Fix custom metric test with empty dataset. (#6917)
* Reduce Travis environment setup time. (#6912)
* Remove unused R from travis.
* Don't update homebrew.
* Don't install indirect/unused dependencies like libgit2, wget, openssl.
* Move graphviz installation to conda.
* Relax shotgun test. (#6918)
* Relax test for decision stump in distributed environment. (#6919)
* Backport cupy fix.
This commit is contained in:
@@ -57,15 +57,13 @@ class TestLinear:
         param['updater'] = 'shotgun'
         param = dataset.set_params(param)
         result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
-        # shotgun is non-deterministic, so we relax the test by sampling
-        # result.
+        # shotgun is non-deterministic, so we relax the test by only using first and last
+        # iteration.
         if len(result) > 2:
-            sampled_result = [score for i, score in enumerate(result)
-                              if i % 2 == 0]
-            sampled_result[-1] = result[-1]  # make sure the last one is used
+            sampled_result = (result[0], result[-1])
         else:
             sampled_result = result
-        assert tm.non_increasing(sampled_result, 1e-3)
+        assert tm.non_increasing(sampled_result)

     @given(parameter_strategy, strategies.integers(10, 50),
            tm.dataset_strategy, strategies.floats(1e-5, 2.0),
@@ -1023,7 +1023,17 @@ class TestWithDask:
                 evals=[(m, 'train')])['history']
             note(history)
             history = history['train'][dataset.metric]
-            assert tm.non_increasing(history)
+
+            def is_stump():
+                return params["max_depth"] == 1 or params["max_leaves"] == 1
+
+            def minimum_bin():
+                return "max_bin" in params and params["max_bin"] == 2
+
+            if minimum_bin() and is_stump():
+                assert tm.non_increasing(history, tolerance=1e-3)
+            else:
+                assert tm.non_increasing(history)
             # Make sure that it's decreasing
             assert history[-1] < history[0]

@@ -272,6 +272,8 @@ def eval_error_metric(predt, dtrain: xgb.DMatrix):
     label = dtrain.get_label()
     r = np.zeros(predt.shape)
     gt = predt > 0.5
+    if predt.size == 0:
+        return "CustomErr", 0
     r[gt] = 1 - label[gt]
     le = predt <= 0.5
     r[le] = label[le]
Reference in New Issue
Block a user