Monotone constraints for gpu_hist (#2904)

commit c51adb49b6 (parent 5867c1b96d)
@@ -46,6 +46,8 @@ Specify the 'tree_method' parameter as one of the following algorithms.
 +-----------------------+------------+-----------+
 | grow_policy           | |cross|    | |tick|    |
 +-----------------------+------------+-----------+
+| monotone_constraints  | |cross|    | |tick|    |
++-----------------------+------------+-----------+
 ```
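For reference, the support advertised in the table above can be exercised end to end roughly as in the sketch below. It mirrors the new test added at the bottom of this commit; the generated dataset, the single-feature `(1)` constraint string, and the tolerance are illustrative only.

```python
import xgboost as xgb
from sklearn.datasets import make_regression

# Toy single-feature regression problem (illustrative only).
X, y = make_regression(n_samples=1000, n_features=1, n_informative=1,
                       random_state=0)
dtrain = xgb.DMatrix(X, y)

# Ask the GPU histogram updater for a non-decreasing relationship
# between feature 0 and the prediction.
param = {'tree_method': 'gpu_hist', 'monotone_constraints': '(1)'}
bst = xgb.train(param, dtrain)

# Predictions over sorted feature values should be (approximately)
# non-decreasing.
pred = bst.predict(xgb.DMatrix(X[X[:, 0].argsort()]))
assert all(b - a > -1e-3 for a, b in zip(pred, pred[1:]))
```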
@@ -311,6 +311,10 @@ struct XGBOOST_ALIGNAS(16) GradStats {
   static const int kSimpleStats = 1;
   /*! \brief constructor, the object must be cleared during construction */
   explicit GradStats(const TrainParam& param) { this->Clear(); }
+
+  template <typename gpair_t>
+  XGBOOST_DEVICE explicit GradStats(const gpair_t &sum)
+      : sum_grad(sum.GetGrad()), sum_hess(sum.GetHess()) {}
   /*! \brief clear the statistics */
   inline void Clear() { sum_grad = sum_hess = 0.0f; }
   /*! \brief check if necessary information is ready */
@@ -332,11 +336,13 @@ struct XGBOOST_ALIGNAS(16) GradStats {
     this->Add(b.GetGrad(), b.GetHess());
   }
   /*! \brief calculate leaf weight */
-  inline double CalcWeight(const TrainParam& param) const {
+  template <typename param_t>
+  inline double CalcWeight(const param_t& param) const {
     return xgboost::tree::CalcWeight(param, sum_grad, sum_hess);
   }
   /*! \brief calculate gain of the solution */
-  inline double CalcGain(const TrainParam& param) const {
+  template <typename param_t>
+  inline double CalcGain(const param_t& param) const {
     return xgboost::tree::CalcGain(param, sum_grad, sum_hess);
   }
   /*! \brief add statistics to the data */
@@ -367,7 +373,9 @@ struct XGBOOST_ALIGNAS(16) GradStats {
 };

 struct NoConstraint {
-  inline static void Init(TrainParam *param, unsigned num_feature) {}
+  inline static void Init(TrainParam *param, unsigned num_feature) {
+    param->monotone_constraints.resize(num_feature, 0);
+  }
   inline double CalcSplitGain(const TrainParam &param, bst_uint split_index,
                               GradStats left, GradStats right) const {
     return left.CalcGain(param) + right.CalcGain(param);
@@ -386,13 +394,14 @@ struct NoConstraint {
 struct ValueConstraint {
   double lower_bound;
   double upper_bound;
-  ValueConstraint()
+  XGBOOST_DEVICE ValueConstraint()
       : lower_bound(-std::numeric_limits<double>::max()),
         upper_bound(std::numeric_limits<double>::max()) {}
   inline static void Init(TrainParam *param, unsigned num_feature) {
-    param->monotone_constraints.resize(num_feature, 1);
+    param->monotone_constraints.resize(num_feature, 0);
   }
-  inline double CalcWeight(const TrainParam &param, GradStats stats) const {
+  template <typename param_t>
+  XGBOOST_DEVICE inline double CalcWeight(const param_t &param, GradStats stats) const {
     double w = stats.CalcWeight(param);
     if (w < lower_bound) {
       return lower_bound;
@@ -403,22 +412,23 @@ struct ValueConstraint {
     return w;
   }

-  inline double CalcGain(const TrainParam &param, GradStats stats) const {
+  template <typename param_t>
+  XGBOOST_DEVICE inline double CalcGain(const param_t &param, GradStats stats) const {
     return CalcGainGivenWeight(param, stats.sum_grad, stats.sum_hess,
                                CalcWeight(param, stats));
   }

-  inline double CalcSplitGain(const TrainParam &param, bst_uint split_index,
+  template <typename param_t>
+  XGBOOST_DEVICE inline double CalcSplitGain(const param_t &param, int constraint,
                               GradStats left, GradStats right) const {
     double wleft = CalcWeight(param, left);
     double wright = CalcWeight(param, right);
-    int c = param.monotone_constraints[split_index];
     double gain =
         CalcGainGivenWeight(param, left.sum_grad, left.sum_hess, wleft) +
         CalcGainGivenWeight(param, right.sum_grad, right.sum_hess, wright);
-    if (c == 0) {
+    if (constraint == 0) {
       return gain;
-    } else if (c > 0) {
+    } else if (constraint > 0) {
       return wleft < wright ? gain : 0.0;
     } else {
       return wleft > wright ? gain : 0.0;
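Summarising the constrained split logic in the hunk above (notation is mine, read directly from the code): each child weight is clamped to the node's value bounds, and a split on a feature with constraint c is kept only when the clamped child weights respect the requested direction,

\[
w^{*} = \min\bigl(\max(w,\ \mathrm{lower\_bound}),\ \mathrm{upper\_bound}\bigr), \qquad
\mathrm{SplitGain} =
\begin{cases}
G_L + G_R & \text{if } c = 0,\\
G_L + G_R & \text{if } c > 0 \text{ and } w_L < w_R,\\
G_L + G_R & \text{if } c < 0 \text{ and } w_L > w_R,\\
0 & \text{otherwise,}
\end{cases}
\]

where \(w_L, w_R\) are the clamped left/right weights, \(G_L, G_R\) the corresponding CalcGainGivenWeight terms, and \(c\) the per-feature entry of monotone_constraints.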
@@ -319,7 +319,9 @@ class ColMaker: public TreeUpdater {
         if (c.sum_hess >= param.min_child_weight &&
             e.stats.sum_hess >= param.min_child_weight) {
           bst_float loss_chg = static_cast<bst_float>(
-              constraints_[nid].CalcSplitGain(param, fid, e.stats, c) - snode[nid].root_gain);
+              constraints_[nid].CalcSplitGain(
+                  param, param.monotone_constraints[fid], e.stats, c) -
+              snode[nid].root_gain);
           e.best.Update(loss_chg, fid, fsplit, false);
         }
       }
@@ -329,7 +331,9 @@ class ColMaker: public TreeUpdater {
         if (c.sum_hess >= param.min_child_weight &&
             tmp.sum_hess >= param.min_child_weight) {
           bst_float loss_chg = static_cast<bst_float>(
-              constraints_[nid].CalcSplitGain(param, fid, tmp, c) - snode[nid].root_gain);
+              constraints_[nid].CalcSplitGain(
+                  param, param.monotone_constraints[fid], tmp, c) -
+              snode[nid].root_gain);
           e.best.Update(loss_chg, fid, fsplit, true);
         }
       }
@@ -341,7 +345,9 @@ class ColMaker: public TreeUpdater {
         if (c.sum_hess >= param.min_child_weight &&
             tmp.sum_hess >= param.min_child_weight) {
           bst_float loss_chg = static_cast<bst_float>(
-              constraints_[nid].CalcSplitGain(param, fid, tmp, c) - snode[nid].root_gain);
+              constraints_[nid].CalcSplitGain(
+                  param, param.monotone_constraints[fid], tmp, c) -
+              snode[nid].root_gain);
           e.best.Update(loss_chg, fid, e.last_fvalue + rt_eps, true);
         }
       }
@@ -372,9 +378,11 @@ class ColMaker: public TreeUpdater {
           if (c.sum_hess >= param.min_child_weight &&
               e.stats.sum_hess >= param.min_child_weight) {
             bst_float loss_chg = static_cast<bst_float>(
-                constraints_[nid].CalcSplitGain(param, fid, e.stats, c) -
+                constraints_[nid].CalcSplitGain(
+                    param, param.monotone_constraints[fid], e.stats, c) -
                 snode[nid].root_gain);
-            e.best.Update(loss_chg, fid, (fvalue + e.first_fvalue) * 0.5f, false);
+            e.best.Update(loss_chg, fid, (fvalue + e.first_fvalue) * 0.5f,
+                          false);
           }
         }
         if (need_backward) {
@@ -383,7 +391,8 @@ class ColMaker: public TreeUpdater {
           if (c.sum_hess >= param.min_child_weight &&
               cright.sum_hess >= param.min_child_weight) {
             bst_float loss_chg = static_cast<bst_float>(
-                constraints_[nid].CalcSplitGain(param, fid, c, cright) -
+                constraints_[nid].CalcSplitGain(
+                    param, param.monotone_constraints[fid], c, cright) -
                 snode[nid].root_gain);
             e.best.Update(loss_chg, fid, (fvalue + e.first_fvalue) * 0.5f, true);
           }
@@ -414,12 +423,17 @@ class ColMaker: public TreeUpdater {
           bst_float loss_chg;
           if (d_step == -1) {
             loss_chg = static_cast<bst_float>(
-                constraints_[nid].CalcSplitGain(param, fid, c, e.stats) - snode[nid].root_gain);
+                constraints_[nid].CalcSplitGain(
+                    param, param.monotone_constraints[fid], c, e.stats) -
+                snode[nid].root_gain);
           } else {
             loss_chg = static_cast<bst_float>(
-                constraints_[nid].CalcSplitGain(param, fid, e.stats, c) - snode[nid].root_gain);
+                constraints_[nid].CalcSplitGain(
+                    param, param.monotone_constraints[fid], e.stats, c) -
+                snode[nid].root_gain);
           }
-          e.best.Update(loss_chg, fid, (fvalue + e.last_fvalue) * 0.5f, d_step == -1);
+          e.best.Update(loss_chg, fid, (fvalue + e.last_fvalue) * 0.5f,
+                        d_step == -1);
         }
       }
       // update the statistics
@@ -492,10 +506,14 @@ class ColMaker: public TreeUpdater {
           bst_float loss_chg;
           if (d_step == -1) {
             loss_chg = static_cast<bst_float>(
-                constraints_[nid].CalcSplitGain(param, fid, c, e.stats) - snode[nid].root_gain);
+                constraints_[nid].CalcSplitGain(
+                    param, param.monotone_constraints[fid], c, e.stats) -
+                snode[nid].root_gain);
           } else {
             loss_chg = static_cast<bst_float>(
-                constraints_[nid].CalcSplitGain(param, fid, e.stats, c) - snode[nid].root_gain);
+                constraints_[nid].CalcSplitGain(
+                    param, param.monotone_constraints[fid], e.stats, c) -
+                snode[nid].root_gain);
           }
           const bst_float gap = std::abs(e.last_fvalue) + rt_eps;
           const bst_float delta = d_step == +1 ? gap: -gap;
@@ -545,11 +563,13 @@ class ColMaker: public TreeUpdater {
           bst_float loss_chg;
           if (d_step == -1) {
             loss_chg = static_cast<bst_float>(
-                constraints_[nid].CalcSplitGain(param, fid, c, e.stats) -
+                constraints_[nid].CalcSplitGain(
+                    param, param.monotone_constraints[fid], c, e.stats) -
                 snode[nid].root_gain);
           } else {
             loss_chg = static_cast<bst_float>(
-                constraints_[nid].CalcSplitGain(param, fid, e.stats, c) -
+                constraints_[nid].CalcSplitGain(
+                    param, param.monotone_constraints[fid], e.stats, c) -
                 snode[nid].root_gain);
           }
           e.best.Update(loss_chg, fid, (fvalue + e.last_fvalue) * 0.5f, d_step == -1);
@@ -565,14 +585,19 @@ class ColMaker: public TreeUpdater {
         const int nid = qexpand[i];
         ThreadEntry &e = temp[nid];
         c.SetSubstract(snode[nid].stats, e.stats);
-        if (e.stats.sum_hess >= param.min_child_weight && c.sum_hess >= param.min_child_weight) {
+        if (e.stats.sum_hess >= param.min_child_weight &&
+            c.sum_hess >= param.min_child_weight) {
           bst_float loss_chg;
           if (d_step == -1) {
             loss_chg = static_cast<bst_float>(
-                constraints_[nid].CalcSplitGain(param, fid, c, e.stats) - snode[nid].root_gain);
+                constraints_[nid].CalcSplitGain(
+                    param, param.monotone_constraints[fid], c, e.stats) -
+                snode[nid].root_gain);
           } else {
             loss_chg = static_cast<bst_float>(
-                constraints_[nid].CalcSplitGain(param, fid, e.stats, c) - snode[nid].root_gain);
+                constraints_[nid].CalcSplitGain(
+                    param, param.monotone_constraints[fid], e.stats, c) -
+                snode[nid].root_gain);
           }
           const bst_float gap = std::abs(e.last_fvalue) + rt_eps;
           const bst_float delta = d_step == +1 ? gap: -gap;
@@ -302,7 +302,7 @@ DEV_INLINE void argMaxWithAtomics(
     ExactSplitCandidate s;
     bst_gpair missing = parentSum - colSum;
     s.score = loss_chg_missing(gradScans[id], missing, parentSum, parentGain,
-                               param, tmp);
+                               param, 0, ValueConstraint(), tmp);
     s.index = id;
     atomicArgMax(nodeSplits + uid, s);
   }  // end if nodeId != UNUSED_NODE
@@ -580,7 +580,7 @@ class GPUMaker : public TreeUpdater {
     // get the default direction for the current node
     bst_gpair missing = n.sum_gradients - gradSum;
     loss_chg_missing(gradScan, missing, n.sum_gradients, n.root_gain,
-                     gpu_param, missingLeft);
+                     gpu_param, 0, ValueConstraint(), missingLeft);
     // get the score/weight/id/gradSum for left and right child nodes
     bst_gpair lGradSum = missingLeft ? gradScan + missing : gradScan;
     bst_gpair rGradSum = n.sum_gradients - lGradSum;
@@ -16,7 +16,8 @@

 #else
 __device__ __forceinline__ double atomicAdd(double* address, double val) {
-  unsigned long long int* address_as_ull = (unsigned long long int*)address;  // NOLINT
+  unsigned long long int* address_as_ull =
+      (unsigned long long int*)address;  // NOLINT
   unsigned long long int old = *address_as_ull, assumed;  // NOLINT

   do {
@@ -240,23 +241,23 @@ __device__ inline float device_calc_loss_chg(const GPUTrainingParam& param,
 }

 template <typename gpair_t>
-__device__ float inline loss_chg_missing(const gpair_t& scan,
-                                         const gpair_t& missing,
-                                         const gpair_t& parent_sum,
-                                         const float& parent_gain,
-                                         const GPUTrainingParam& param,
+__device__ float inline loss_chg_missing(
+    const gpair_t& scan, const gpair_t& missing, const gpair_t& parent_sum,
+    const float& parent_gain, const GPUTrainingParam& param, int constraint,
+    const ValueConstraint& value_constraint,
     bool& missing_left_out) {  // NOLINT
-  float missing_left_loss =
-      device_calc_loss_chg(param, scan + missing, parent_sum, parent_gain);
-  float missing_right_loss =
-      device_calc_loss_chg(param, scan, parent_sum, parent_gain);
+  float missing_left_gain = value_constraint.CalcSplitGain(
+      param, constraint, GradStats(scan + missing),
+      GradStats(parent_sum - (scan + missing)));
+  float missing_right_gain = value_constraint.CalcSplitGain(
+      param, constraint, GradStats(scan), GradStats(parent_sum - scan));

-  if (missing_left_loss >= missing_right_loss) {
+  if (missing_left_gain >= missing_right_gain) {
     missing_left_out = true;
-    return missing_left_loss;
+    return missing_left_gain - parent_gain;
   } else {
     missing_left_out = false;
-    return missing_right_loss;
+    return missing_right_gain - parent_gain;
   }
 }
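In short, the rewritten loss_chg_missing above scores both placements of the missing values with the constrained split gain and subtracts the parent gain (my notation):

\[
\Delta_{\text{loss}} = \max\bigl(\mathrm{SplitGain}(\text{scan} + \text{missing}),\ \mathrm{SplitGain}(\text{scan})\bigr) - \text{parent\_gain},
\]

with missing_left_out set to true exactly when sending the missing values left attains the maximum.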
@@ -56,7 +56,8 @@ __device__ void EvaluateFeature(int fidx, const gpair_sum_t* hist,
                                 DeviceSplitCandidate* best_split,
                                 const DeviceNodeStats& node,
                                 const GPUTrainingParam& param,
-                                temp_storage_t* temp_storage) {
+                                temp_storage_t* temp_storage, int constraint,
+                                const ValueConstraint& value_constraint) {
   int gidx_begin = feature_segments[fidx];
   int gidx_end = feature_segments[fidx + 1];

@@ -82,7 +83,7 @@ __device__ void EvaluateFeature(int fidx, const gpair_sum_t* hist,
     float gain = null_gain;
     if (thread_active) {
       gain = loss_chg_missing(bin, missing, parent_sum, node.root_gain, param,
-                              missing_left);
+                              constraint, value_constraint, missing_left);
     }

     __syncthreads();
@@ -120,7 +121,8 @@ __global__ void evaluate_split_kernel(
     const gpair_sum_t* d_hist, int nidx, uint64_t n_features,
     DeviceNodeStats nodes, const int* d_feature_segments,
     const float* d_fidx_min_map, const float* d_gidx_fvalue_map,
-    GPUTrainingParam gpu_param, DeviceSplitCandidate* d_split) {
+    GPUTrainingParam gpu_param, DeviceSplitCandidate* d_split,
+    ValueConstraint value_constraint, int* d_monotonic_constraints) {
   typedef cub::KeyValuePair<int, float> ArgMaxT;
   typedef cub::BlockScan<gpair_sum_t, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS>
       BlockScanT;
@@ -145,9 +147,11 @@ __global__ void evaluate_split_kernel(
   __syncthreads();

   auto fidx = blockIdx.x;
+  auto constraint = d_monotonic_constraints[fidx];
   EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT>(
       fidx, d_hist, d_feature_segments, d_fidx_min_map[fidx], d_gidx_fvalue_map,
-      &best_split, nodes, gpu_param, &temp_storage);
+      &best_split, nodes, gpu_param, &temp_storage, constraint,
+      value_constraint);

   __syncthreads();
@@ -230,6 +234,7 @@ struct DeviceShard {
   dh::dvec<int> feature_segments;
   dh::dvec<float> gidx_fvalue_map;
   dh::dvec<float> min_fvalue;
+  dh::dvec<int> monotone_constraints;
   std::vector<bst_gpair> node_sum_gradients;
   common::CompressedIterator<uint32_t> gidx;
   int row_stride;
@@ -287,10 +292,12 @@ struct DeviceShard {
     ba.allocate(device_idx, param.silent, &gidx_buffer, compressed_size_bytes,
                 &gpair, n_rows, &ridx, n_rows, &position, n_rows,
                 &feature_segments, gmat.cut->row_ptr.size(), &gidx_fvalue_map,
-                gmat.cut->cut.size(), &min_fvalue, gmat.cut->min_val.size());
+                gmat.cut->cut.size(), &min_fvalue, gmat.cut->min_val.size(),
+                &monotone_constraints, param.monotone_constraints.size());
     gidx_fvalue_map = gmat.cut->cut;
     min_fvalue = gmat.cut->min_val;
     feature_segments = gmat.cut->row_ptr;
+    monotone_constraints = param.monotone_constraints;

     node_sum_gradients.resize(max_nodes);
     ridx_segments.resize(max_nodes);
@@ -500,6 +507,7 @@ class GPUHistMaker : public TreeUpdater {
     // rescale learning rate according to size of trees
     float lr = param.learning_rate;
     param.learning_rate = lr / trees.size();
+    ValueConstraint::Init(&param, dmat->info().num_col);
     // build tree
     try {
       for (size_t i = 0; i < trees.size(); ++i) {
@@ -651,7 +659,8 @@ class GPUHistMaker : public TreeUpdater {
           shard->hist.GetHistPtr(nidx), nidx, info->num_col, node,
           shard->feature_segments.data(), shard->min_fvalue.data(),
           shard->gidx_fvalue_map.data(), GPUTrainingParam(param),
-          d_split + i * columns);
+          d_split + i * columns, node_value_constraints_[nidx],
+          shard->monotone_constraints.data());
     }

     dh::safe_cuda(
@@ -707,6 +716,9 @@ class GPUHistMaker : public TreeUpdater {
       shard->node_sum_gradients[root_nidx] = sum_gradient;
     }

+    // Initialise root constraint
+    node_value_constraints_.resize(p_tree->GetNodes().size());
+
     // Generate first split
     auto splits = this->EvaluateSplits({root_nidx}, p_tree);
     qexpand_->push(
@@ -752,14 +764,27 @@ class GPUHistMaker : public TreeUpdater {
                                candidate.split.dir == LeftDir);
     tree.stat(candidate.nid).loss_chg = candidate.split.loss_chg;

+    // Set up child constraints
+    node_value_constraints_.resize(tree.GetNodes().size());
+    GradStats left_stats(param);
+    left_stats.Add(candidate.split.left_sum);
+    GradStats right_stats(param);
+    right_stats.Add(candidate.split.right_sum);
+    node_value_constraints_[candidate.nid].SetChild(
+        param, parent.split_index(), left_stats, right_stats,
+        &node_value_constraints_[parent.cleft()],
+        &node_value_constraints_[parent.cright()]);
+
     // Configure left child
-    auto left_weight = CalcWeight(param, candidate.split.left_sum);
+    auto left_weight =
+        node_value_constraints_[parent.cleft()].CalcWeight(param, left_stats);
     tree[parent.cleft()].set_leaf(left_weight * param.learning_rate, 0);
     tree.stat(parent.cleft()).base_weight = left_weight;
     tree.stat(parent.cleft()).sum_hess = candidate.split.left_sum.GetHess();

     // Configure right child
-    auto right_weight = CalcWeight(param, candidate.split.right_sum);
+    auto right_weight =
+        node_value_constraints_[parent.cright()].CalcWeight(param, right_stats);
     tree[parent.cright()].set_leaf(right_weight * param.learning_rate, 0);
     tree.stat(parent.cright()).base_weight = right_weight;
     tree.stat(parent.cright()).sum_hess = candidate.split.right_sum.GetHess();
@@ -889,10 +914,10 @@ class GPUHistMaker : public TreeUpdater {
   std::unique_ptr<ExpandQueue> qexpand_;
   common::Monitor monitor;
   dh::AllReducer reducer;
+  std::vector<ValueConstraint> node_value_constraints_;
 };

-XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker,
-                              "grow_gpu_hist")
+XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
     .describe("Grow tree with GPU.")
     .set_body([]() { return new GPUHistMaker(); });
 }  // namespace tree
@@ -109,10 +109,14 @@ def assert_updater_accuracy(tree_method, comparison_tree_method, variable_param,
            param_tmp[name] = set[i]

        print(param_tmp, file=sys.stderr)
-        assert_accuracy(train_boston(param_tmp, comparison_tree_method), tree_method, comparison_tree_method, tolerance, param_tmp)
-        assert_accuracy(train_digits(param_tmp, comparison_tree_method), tree_method, comparison_tree_method, tolerance, param_tmp)
-        assert_accuracy(train_cancer(param_tmp, comparison_tree_method), tree_method, comparison_tree_method, tolerance, param_tmp)
-        assert_accuracy(train_sparse(param_tmp, comparison_tree_method), tree_method, comparison_tree_method, tolerance, param_tmp)
+        assert_accuracy(train_boston(param_tmp, comparison_tree_method), tree_method, comparison_tree_method, tolerance,
+                        param_tmp)
+        assert_accuracy(train_digits(param_tmp, comparison_tree_method), tree_method, comparison_tree_method, tolerance,
+                        param_tmp)
+        assert_accuracy(train_cancer(param_tmp, comparison_tree_method), tree_method, comparison_tree_method, tolerance,
+                        param_tmp)
+        assert_accuracy(train_sparse(param_tmp, comparison_tree_method), tree_method, comparison_tree_method, tolerance,
+                        param_tmp)


 @attr('gpu')
@@ -122,5 +126,6 @@ class TestGPU(unittest.TestCase):
         assert_updater_accuracy('gpu_exact', 'exact', variable_param, 0.02)

     def test_gpu_hist(self):
-        variable_param = {'n_gpus': [1, -1], 'max_depth': [2, 6], 'max_leaves': [255, 4], 'max_bin': [2, 16, 1024]}
+        variable_param = {'n_gpus': [1, -1], 'max_depth': [2, 6], 'max_leaves': [255, 4], 'max_bin': [2, 16, 1024],
+                          'grow_policy': ['depthwise', 'lossguide']}
         assert_updater_accuracy('gpu_hist', 'hist', variable_param, 0.01)
tests/python-gpu/test_monotonic_constraints.py (new file, 44 lines)
@@ -0,0 +1,44 @@
+from __future__ import print_function
+
+import numpy as np
+import unittest
+import xgboost as xgb
+from nose.plugins.attrib import attr
+from sklearn.datasets import make_regression
+
+rng = np.random.RandomState(1994)
+
+
+def non_decreasing(L):
+    return all((x - y) < 0.001 for x, y in zip(L, L[1:]))
+
+
+def non_increasing(L):
+    return all((y - x) < 0.001 for x, y in zip(L, L[1:]))
+
+
+def assert_constraint(constraint, tree_method):
+    n = 1000
+    X, y = make_regression(n, random_state=rng, n_features=1, n_informative=1)
+    dtrain = xgb.DMatrix(X, y)
+    param = {}
+    param['tree_method'] = tree_method
+    param['monotone_constraints'] = "(" + str(constraint) + ")"
+    bst = xgb.train(param, dtrain)
+    dpredict = xgb.DMatrix(X[X[:, 0].argsort()])
+    pred = bst.predict(dpredict)
+    if constraint > 0:
+        assert non_decreasing(pred)
+    elif constraint < 0:
+        assert non_increasing(pred)
+
+
+@attr('gpu')
+class TestMonotonicConstraints(unittest.TestCase):
+    def test_exact(self):
+        assert_constraint(1, 'exact')
+        assert_constraint(-1, 'exact')
+
+    def test_gpu_hist(self):
+        assert_constraint(1, 'gpu_hist')
+        assert_constraint(-1, 'gpu_hist')