Monotone constraints for gpu_hist (#2904)

Rory Mitchell 2017-11-30 10:26:19 +13:00 committed by GitHub
parent 5867c1b96d
commit c51adb49b6
8 changed files with 171 additions and 59 deletions

View File

@@ -46,6 +46,8 @@ Specify the 'tree_method' parameter as one of the following algorithms.
 +--------------------+------------+-----------+
 | grow_policy        | |cross|    | |tick|    |
 +--------------------+------------+-----------+
+| monotone_constraints | |cross|  | |tick|    |
++--------------------+------------+-----------+
 ```
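
With gpu_hist now supporting the parameter, a minimal usage sketch looks like the following (the toy dataset and hyperparameters are illustrative only, not taken from this commit; the `(1,-1)` string requests a non-decreasing effect for the first feature and a non-increasing effect for the second):

```python
import numpy as np
import xgboost as xgb

# Toy data: feature 0 pushes the target up, feature 1 pushes it down.
rng = np.random.RandomState(0)
X = rng.uniform(size=(500, 2))
y = 5.0 * X[:, 0] - 3.0 * X[:, 1] + rng.normal(scale=0.1, size=500)
dtrain = xgb.DMatrix(X, label=y)

params = {
    'tree_method': 'gpu_hist',          # monotone constraints now work here
    'monotone_constraints': '(1,-1)',   # +1 non-decreasing, -1 non-increasing, 0 unconstrained
    'max_depth': 4,
    'eta': 0.1,
}
bst = xgb.train(params, dtrain, num_boost_round=50)
```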

View File

@@ -311,6 +311,10 @@ struct XGBOOST_ALIGNAS(16) GradStats {
   static const int kSimpleStats = 1;
   /*! \brief constructor, the object must be cleared during construction */
   explicit GradStats(const TrainParam& param) { this->Clear(); }
+  template <typename gpair_t>
+  XGBOOST_DEVICE explicit GradStats(const gpair_t &sum)
+      : sum_grad(sum.GetGrad()), sum_hess(sum.GetHess()) {}
   /*! \brief clear the statistics */
   inline void Clear() { sum_grad = sum_hess = 0.0f; }
   /*! \brief check if necessary information is ready */
@@ -332,11 +336,13 @@ struct XGBOOST_ALIGNAS(16) GradStats {
     this->Add(b.GetGrad(), b.GetHess());
   }
   /*! \brief calculate leaf weight */
-  inline double CalcWeight(const TrainParam& param) const {
+  template <typename param_t>
+  inline double CalcWeight(const param_t& param) const {
     return xgboost::tree::CalcWeight(param, sum_grad, sum_hess);
   }
   /*! \brief calculate gain of the solution */
-  inline double CalcGain(const TrainParam& param) const {
+  template <typename param_t>
+  inline double CalcGain(const param_t& param) const {
     return xgboost::tree::CalcGain(param, sum_grad, sum_hess);
   }
   /*! \brief add statistics to the data */
@@ -367,7 +373,9 @@ struct XGBOOST_ALIGNAS(16) GradStats {
 };
 struct NoConstraint {
-  inline static void Init(TrainParam *param, unsigned num_feature) {}
+  inline static void Init(TrainParam *param, unsigned num_feature) {
+    param->monotone_constraints.resize(num_feature, 0);
+  }
   inline double CalcSplitGain(const TrainParam &param, bst_uint split_index,
                               GradStats left, GradStats right) const {
     return left.CalcGain(param) + right.CalcGain(param);
@@ -386,13 +394,14 @@ struct NoConstraint {
 struct ValueConstraint {
   double lower_bound;
   double upper_bound;
-  ValueConstraint()
+  XGBOOST_DEVICE ValueConstraint()
       : lower_bound(-std::numeric_limits<double>::max()),
         upper_bound(std::numeric_limits<double>::max()) {}
   inline static void Init(TrainParam *param, unsigned num_feature) {
-    param->monotone_constraints.resize(num_feature, 1);
+    param->monotone_constraints.resize(num_feature, 0);
   }
-  inline double CalcWeight(const TrainParam &param, GradStats stats) const {
+  template <typename param_t>
+  XGBOOST_DEVICE inline double CalcWeight(const param_t &param, GradStats stats) const {
     double w = stats.CalcWeight(param);
     if (w < lower_bound) {
       return lower_bound;
@@ -403,22 +412,23 @@ struct ValueConstraint {
     return w;
   }
-  inline double CalcGain(const TrainParam &param, GradStats stats) const {
+  template <typename param_t>
+  XGBOOST_DEVICE inline double CalcGain(const param_t &param, GradStats stats) const {
     return CalcGainGivenWeight(param, stats.sum_grad, stats.sum_hess,
                                CalcWeight(param, stats));
   }
-  inline double CalcSplitGain(const TrainParam &param, bst_uint split_index,
+  template <typename param_t>
+  XGBOOST_DEVICE inline double CalcSplitGain(const param_t &param, int constraint,
                               GradStats left, GradStats right) const {
     double wleft = CalcWeight(param, left);
     double wright = CalcWeight(param, right);
-    int c = param.monotone_constraints[split_index];
     double gain =
         CalcGainGivenWeight(param, left.sum_grad, left.sum_hess, wleft) +
         CalcGainGivenWeight(param, right.sum_grad, right.sum_hess, wright);
-    if (c == 0) {
+    if (constraint == 0) {
       return gain;
-    } else if (c > 0) {
+    } else if (constraint > 0) {
       return wleft < wright ? gain : 0.0;
     } else {
       return wleft > wright ? gain : 0.0;
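
This gating is the core of the feature: a candidate split on a constrained feature only keeps its gain when the child weights respect the requested ordering. In plain terms (a Python paraphrase for illustration, not code from the commit):

```python
def constrained_split_gain(gain, w_left, w_right, constraint):
    """Paraphrase of the gating in ValueConstraint::CalcSplitGain.

    gain       -- unconstrained gain of the candidate split
    w_left     -- bounded leaf weight of the left child
    w_right    -- bounded leaf weight of the right child
    constraint -- +1 increasing, -1 decreasing, 0 unconstrained
    """
    if constraint == 0:
        return gain
    if constraint > 0:
        # Increasing: the split is only worth taking if the left child
        # predicts a smaller value than the right child.
        return gain if w_left < w_right else 0.0
    # Decreasing: the opposite ordering must hold.
    return gain if w_left > w_right else 0.0
```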

View File

@@ -319,7 +319,9 @@ class ColMaker: public TreeUpdater {
           if (c.sum_hess >= param.min_child_weight &&
               e.stats.sum_hess >= param.min_child_weight) {
             bst_float loss_chg = static_cast<bst_float>(
-                constraints_[nid].CalcSplitGain(param, fid, e.stats, c) - snode[nid].root_gain);
+                constraints_[nid].CalcSplitGain(
+                    param, param.monotone_constraints[fid], e.stats, c) -
+                snode[nid].root_gain);
             e.best.Update(loss_chg, fid, fsplit, false);
           }
         }
@@ -329,7 +331,9 @@ class ColMaker: public TreeUpdater {
           if (c.sum_hess >= param.min_child_weight &&
               tmp.sum_hess >= param.min_child_weight) {
             bst_float loss_chg = static_cast<bst_float>(
-                constraints_[nid].CalcSplitGain(param, fid, tmp, c) - snode[nid].root_gain);
+                constraints_[nid].CalcSplitGain(
+                    param, param.monotone_constraints[fid], tmp, c) -
+                snode[nid].root_gain);
             e.best.Update(loss_chg, fid, fsplit, true);
           }
         }
@@ -341,7 +345,9 @@ class ColMaker: public TreeUpdater {
           if (c.sum_hess >= param.min_child_weight &&
               tmp.sum_hess >= param.min_child_weight) {
             bst_float loss_chg = static_cast<bst_float>(
-                constraints_[nid].CalcSplitGain(param, fid, tmp, c) - snode[nid].root_gain);
+                constraints_[nid].CalcSplitGain(
+                    param, param.monotone_constraints[fid], tmp, c) -
+                snode[nid].root_gain);
             e.best.Update(loss_chg, fid, e.last_fvalue + rt_eps, true);
           }
         }
@@ -372,9 +378,11 @@ class ColMaker: public TreeUpdater {
           if (c.sum_hess >= param.min_child_weight &&
               e.stats.sum_hess >= param.min_child_weight) {
             bst_float loss_chg = static_cast<bst_float>(
-                constraints_[nid].CalcSplitGain(param, fid, e.stats, c) -
+                constraints_[nid].CalcSplitGain(
+                    param, param.monotone_constraints[fid], e.stats, c) -
                 snode[nid].root_gain);
-            e.best.Update(loss_chg, fid, (fvalue + e.first_fvalue) * 0.5f, false);
+            e.best.Update(loss_chg, fid, (fvalue + e.first_fvalue) * 0.5f,
+                          false);
           }
         }
         if (need_backward) {
@@ -383,7 +391,8 @@ class ColMaker: public TreeUpdater {
           if (c.sum_hess >= param.min_child_weight &&
               cright.sum_hess >= param.min_child_weight) {
             bst_float loss_chg = static_cast<bst_float>(
-                constraints_[nid].CalcSplitGain(param, fid, c, cright) -
+                constraints_[nid].CalcSplitGain(
+                    param, param.monotone_constraints[fid], c, cright) -
                 snode[nid].root_gain);
             e.best.Update(loss_chg, fid, (fvalue + e.first_fvalue) * 0.5f, true);
           }
@@ -414,12 +423,17 @@ class ColMaker: public TreeUpdater {
             bst_float loss_chg;
             if (d_step == -1) {
               loss_chg = static_cast<bst_float>(
-                  constraints_[nid].CalcSplitGain(param, fid, c, e.stats) - snode[nid].root_gain);
+                  constraints_[nid].CalcSplitGain(
+                      param, param.monotone_constraints[fid], c, e.stats) -
+                  snode[nid].root_gain);
             } else {
               loss_chg = static_cast<bst_float>(
-                  constraints_[nid].CalcSplitGain(param, fid, e.stats, c) - snode[nid].root_gain);
+                  constraints_[nid].CalcSplitGain(
+                      param, param.monotone_constraints[fid], e.stats, c) -
+                  snode[nid].root_gain);
             }
-            e.best.Update(loss_chg, fid, (fvalue + e.last_fvalue) * 0.5f, d_step == -1);
+            e.best.Update(loss_chg, fid, (fvalue + e.last_fvalue) * 0.5f,
+                          d_step == -1);
           }
         }
         // update the statistics
@@ -492,10 +506,14 @@ class ColMaker: public TreeUpdater {
           bst_float loss_chg;
           if (d_step == -1) {
             loss_chg = static_cast<bst_float>(
-                constraints_[nid].CalcSplitGain(param, fid, c, e.stats) - snode[nid].root_gain);
+                constraints_[nid].CalcSplitGain(
+                    param, param.monotone_constraints[fid], c, e.stats) -
+                snode[nid].root_gain);
           } else {
             loss_chg = static_cast<bst_float>(
-                constraints_[nid].CalcSplitGain(param, fid, e.stats, c) - snode[nid].root_gain);
+                constraints_[nid].CalcSplitGain(
+                    param, param.monotone_constraints[fid], e.stats, c) -
+                snode[nid].root_gain);
           }
           const bst_float gap = std::abs(e.last_fvalue) + rt_eps;
           const bst_float delta = d_step == +1 ? gap: -gap;
@@ -545,11 +563,13 @@ class ColMaker: public TreeUpdater {
           bst_float loss_chg;
           if (d_step == -1) {
             loss_chg = static_cast<bst_float>(
-                constraints_[nid].CalcSplitGain(param, fid, c, e.stats) -
+                constraints_[nid].CalcSplitGain(
+                    param, param.monotone_constraints[fid], c, e.stats) -
                 snode[nid].root_gain);
           } else {
             loss_chg = static_cast<bst_float>(
-                constraints_[nid].CalcSplitGain(param, fid, e.stats, c) -
+                constraints_[nid].CalcSplitGain(
+                    param, param.monotone_constraints[fid], e.stats, c) -
                 snode[nid].root_gain);
           }
           e.best.Update(loss_chg, fid, (fvalue + e.last_fvalue) * 0.5f, d_step == -1);
@@ -565,14 +585,19 @@ class ColMaker: public TreeUpdater {
         const int nid = qexpand[i];
         ThreadEntry &e = temp[nid];
         c.SetSubstract(snode[nid].stats, e.stats);
-        if (e.stats.sum_hess >= param.min_child_weight && c.sum_hess >= param.min_child_weight) {
+        if (e.stats.sum_hess >= param.min_child_weight &&
+            c.sum_hess >= param.min_child_weight) {
           bst_float loss_chg;
           if (d_step == -1) {
             loss_chg = static_cast<bst_float>(
-                constraints_[nid].CalcSplitGain(param, fid, c, e.stats) - snode[nid].root_gain);
+                constraints_[nid].CalcSplitGain(
+                    param, param.monotone_constraints[fid], c, e.stats) -
+                snode[nid].root_gain);
           } else {
             loss_chg = static_cast<bst_float>(
-                constraints_[nid].CalcSplitGain(param, fid, e.stats, c) - snode[nid].root_gain);
+                constraints_[nid].CalcSplitGain(
+                    param, param.monotone_constraints[fid], e.stats, c) -
+                snode[nid].root_gain);
           }
           const bst_float gap = std::abs(e.last_fvalue) + rt_eps;
           const bst_float delta = d_step == +1 ? gap: -gap;

View File

@@ -302,7 +302,7 @@ DEV_INLINE void argMaxWithAtomics(
     ExactSplitCandidate s;
     bst_gpair missing = parentSum - colSum;
     s.score = loss_chg_missing(gradScans[id], missing, parentSum, parentGain,
-                               param, tmp);
+                               param, 0, ValueConstraint(), tmp);
     s.index = id;
     atomicArgMax(nodeSplits + uid, s);
   }  // end if nodeId != UNUSED_NODE
@@ -580,7 +580,7 @@ class GPUMaker : public TreeUpdater {
       // get the default direction for the current node
       bst_gpair missing = n.sum_gradients - gradSum;
       loss_chg_missing(gradScan, missing, n.sum_gradients, n.root_gain,
-                       gpu_param, missingLeft);
+                       gpu_param, 0, ValueConstraint(), missingLeft);
       // get the score/weight/id/gradSum for left and right child nodes
       bst_gpair lGradSum = missingLeft ? gradScan + missing : gradScan;
       bst_gpair rGradSum = n.sum_gradients - lGradSum;

View File

@@ -16,7 +16,8 @@
 #else
 __device__ __forceinline__ double atomicAdd(double* address, double val) {
-  unsigned long long int* address_as_ull = (unsigned long long int*)address;  // NOLINT
+  unsigned long long int* address_as_ull =
+      (unsigned long long int*)address;  // NOLINT
   unsigned long long int old = *address_as_ull, assumed;  // NOLINT
   do {
@@ -240,23 +241,23 @@ __device__ inline float device_calc_loss_chg(const GPUTrainingParam& param,
 }
 template <typename gpair_t>
-__device__ float inline loss_chg_missing(const gpair_t& scan,
-                                         const gpair_t& missing,
-                                         const gpair_t& parent_sum,
-                                         const float& parent_gain,
-                                         const GPUTrainingParam& param,
-                                         bool& missing_left_out) {  // NOLINT
-  float missing_left_loss =
-      device_calc_loss_chg(param, scan + missing, parent_sum, parent_gain);
-  float missing_right_loss =
-      device_calc_loss_chg(param, scan, parent_sum, parent_gain);
-  if (missing_left_loss >= missing_right_loss) {
+__device__ float inline loss_chg_missing(
+    const gpair_t& scan, const gpair_t& missing, const gpair_t& parent_sum,
+    const float& parent_gain, const GPUTrainingParam& param, int constraint,
+    const ValueConstraint& value_constraint,
+    bool& missing_left_out) {  // NOLINT
+  float missing_left_gain = value_constraint.CalcSplitGain(
+      param, constraint, GradStats(scan + missing),
+      GradStats(parent_sum - (scan + missing)));
+  float missing_right_gain = value_constraint.CalcSplitGain(
+      param, constraint, GradStats(scan), GradStats(parent_sum - scan));
+  if (missing_left_gain >= missing_right_gain) {
     missing_left_out = true;
-    return missing_left_loss;
+    return missing_left_gain - parent_gain;
   } else {
     missing_left_out = false;
-    return missing_right_loss;
+    return missing_right_gain - parent_gain;
   }
 }
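
Rather than reusing device_calc_loss_chg, loss_chg_missing now evaluates both placements of the missing values through the constraint-aware CalcSplitGain and subtracts the parent gain at the end. Schematically (an illustrative Python sketch, not the commit's code):

```python
def choose_missing_direction(scan, missing, parent_sum, parent_gain, split_gain):
    """split_gain(left_sum, right_sum) stands in for
    ValueConstraint::CalcSplitGain with the feature's constraint applied."""
    # Missing values go left: they join the scanned (left) partial sum.
    left_gain = split_gain(scan + missing, parent_sum - (scan + missing))
    # Missing values go right: the left child keeps only the scanned sum.
    right_gain = split_gain(scan, parent_sum - scan)
    missing_left = left_gain >= right_gain
    best_gain = left_gain if missing_left else right_gain
    # Return the improvement over the parent, and the chosen default direction.
    return best_gain - parent_gain, missing_left
```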

View File

@@ -56,7 +56,8 @@ __device__ void EvaluateFeature(int fidx, const gpair_sum_t* hist,
                                 DeviceSplitCandidate* best_split,
                                 const DeviceNodeStats& node,
                                 const GPUTrainingParam& param,
-                                temp_storage_t* temp_storage) {
+                                temp_storage_t* temp_storage, int constraint,
+                                const ValueConstraint& value_constraint) {
   int gidx_begin = feature_segments[fidx];
   int gidx_end = feature_segments[fidx + 1];
@@ -82,7 +83,7 @@ __device__ void EvaluateFeature(int fidx, const gpair_sum_t* hist,
     float gain = null_gain;
     if (thread_active) {
       gain = loss_chg_missing(bin, missing, parent_sum, node.root_gain, param,
-                              missing_left);
+                              constraint, value_constraint, missing_left);
     }
     __syncthreads();
@@ -120,7 +121,8 @@ __global__ void evaluate_split_kernel(
     const gpair_sum_t* d_hist, int nidx, uint64_t n_features,
     DeviceNodeStats nodes, const int* d_feature_segments,
     const float* d_fidx_min_map, const float* d_gidx_fvalue_map,
-    GPUTrainingParam gpu_param, DeviceSplitCandidate* d_split) {
+    GPUTrainingParam gpu_param, DeviceSplitCandidate* d_split,
+    ValueConstraint value_constraint, int* d_monotonic_constraints) {
   typedef cub::KeyValuePair<int, float> ArgMaxT;
   typedef cub::BlockScan<gpair_sum_t, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS>
       BlockScanT;
@@ -145,9 +147,11 @@ __global__ void evaluate_split_kernel(
   __syncthreads();
   auto fidx = blockIdx.x;
+  auto constraint = d_monotonic_constraints[fidx];
   EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT>(
       fidx, d_hist, d_feature_segments, d_fidx_min_map[fidx], d_gidx_fvalue_map,
-      &best_split, nodes, gpu_param, &temp_storage);
+      &best_split, nodes, gpu_param, &temp_storage, constraint,
+      value_constraint);
   __syncthreads();
@@ -230,6 +234,7 @@ struct DeviceShard {
   dh::dvec<int> feature_segments;
   dh::dvec<float> gidx_fvalue_map;
   dh::dvec<float> min_fvalue;
+  dh::dvec<int> monotone_constraints;
   std::vector<bst_gpair> node_sum_gradients;
   common::CompressedIterator<uint32_t> gidx;
   int row_stride;
@@ -287,10 +292,12 @@ struct DeviceShard {
     ba.allocate(device_idx, param.silent, &gidx_buffer, compressed_size_bytes,
                 &gpair, n_rows, &ridx, n_rows, &position, n_rows,
                 &feature_segments, gmat.cut->row_ptr.size(), &gidx_fvalue_map,
-                gmat.cut->cut.size(), &min_fvalue, gmat.cut->min_val.size());
+                gmat.cut->cut.size(), &min_fvalue, gmat.cut->min_val.size(),
+                &monotone_constraints, param.monotone_constraints.size());
     gidx_fvalue_map = gmat.cut->cut;
     min_fvalue = gmat.cut->min_val;
     feature_segments = gmat.cut->row_ptr;
+    monotone_constraints = param.monotone_constraints;
     node_sum_gradients.resize(max_nodes);
     ridx_segments.resize(max_nodes);
@@ -500,6 +507,7 @@ class GPUHistMaker : public TreeUpdater {
     // rescale learning rate according to size of trees
     float lr = param.learning_rate;
     param.learning_rate = lr / trees.size();
+    ValueConstraint::Init(&param, dmat->info().num_col);
     // build tree
     try {
       for (size_t i = 0; i < trees.size(); ++i) {
@@ -651,7 +659,8 @@ class GPUHistMaker : public TreeUpdater {
           shard->hist.GetHistPtr(nidx), nidx, info->num_col, node,
           shard->feature_segments.data(), shard->min_fvalue.data(),
           shard->gidx_fvalue_map.data(), GPUTrainingParam(param),
-          d_split + i * columns);
+          d_split + i * columns, node_value_constraints_[nidx],
+          shard->monotone_constraints.data());
     }
     dh::safe_cuda(
@@ -707,6 +716,9 @@ class GPUHistMaker : public TreeUpdater {
       shard->node_sum_gradients[root_nidx] = sum_gradient;
     }
+    // Initialise root constraint
+    node_value_constraints_.resize(p_tree->GetNodes().size());
     // Generate first split
     auto splits = this->EvaluateSplits({root_nidx}, p_tree);
     qexpand_->push(
@@ -752,14 +764,27 @@ class GPUHistMaker : public TreeUpdater {
                          candidate.split.dir == LeftDir);
     tree.stat(candidate.nid).loss_chg = candidate.split.loss_chg;
+    // Set up child constraints
+    node_value_constraints_.resize(tree.GetNodes().size());
+    GradStats left_stats(param);
+    left_stats.Add(candidate.split.left_sum);
+    GradStats right_stats(param);
+    right_stats.Add(candidate.split.right_sum);
+    node_value_constraints_[candidate.nid].SetChild(
+        param, parent.split_index(), left_stats, right_stats,
+        &node_value_constraints_[parent.cleft()],
+        &node_value_constraints_[parent.cright()]);
     // Configure left child
-    auto left_weight = CalcWeight(param, candidate.split.left_sum);
+    auto left_weight =
+        node_value_constraints_[parent.cleft()].CalcWeight(param, left_stats);
     tree[parent.cleft()].set_leaf(left_weight * param.learning_rate, 0);
     tree.stat(parent.cleft()).base_weight = left_weight;
     tree.stat(parent.cleft()).sum_hess = candidate.split.left_sum.GetHess();
     // Configure right child
-    auto right_weight = CalcWeight(param, candidate.split.right_sum);
+    auto right_weight =
+        node_value_constraints_[parent.cright()].CalcWeight(param, right_stats);
     tree[parent.cright()].set_leaf(right_weight * param.learning_rate, 0);
     tree.stat(parent.cright()).base_weight = right_weight;
     tree.stat(parent.cright()).sum_hess = candidate.split.right_sum.GetHess();
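
The SetChild call above is where the constraint actually propagates down the tree: each child inherits the parent's [lower_bound, upper_bound] interval, narrowed at the midpoint of the two child weights. Its body is not part of this diff; assuming it matches the CPU-side ValueConstraint::SetChild in param.h, the propagation is roughly:

```python
def set_child(constraint, w_left, w_right, parent_lower, parent_upper):
    """Assumed sketch of ValueConstraint::SetChild (not shown in this diff)."""
    left = [parent_lower, parent_upper]
    right = [parent_lower, parent_upper]
    if constraint != 0:
        mid = 0.5 * (w_left + w_right)
        if constraint > 0:
            left[1] = mid    # increasing: cap the left child's weights at mid
            right[0] = mid   # and floor the right child's weights at mid
        else:
            left[0] = mid    # decreasing: floor the left child's weights at mid
            right[1] = mid   # and cap the right child's weights at mid
    # Each pair is (lower_bound, upper_bound) for the corresponding child.
    return tuple(left), tuple(right)
```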
@@ -889,10 +914,10 @@ class GPUHistMaker : public TreeUpdater {
   std::unique_ptr<ExpandQueue> qexpand_;
   common::Monitor monitor;
   dh::AllReducer reducer;
+  std::vector<ValueConstraint> node_value_constraints_;
 };
-XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker,
-                              "grow_gpu_hist")
+XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
     .describe("Grow tree with GPU.")
     .set_body([]() { return new GPUHistMaker(); });
 }  // namespace tree

View File

@@ -97,7 +97,7 @@ def train_sparse(param_in, comparison_tree_method):
 # Enumerates all permutations of variable parameters
 def assert_updater_accuracy(tree_method, comparison_tree_method, variable_param, tolerance):
-    param = {'tree_method': tree_method }
+    param = {'tree_method': tree_method}
     names = sorted(variable_param)
     combinations = it.product(*(variable_param[Name] for Name in names))
@@ -109,10 +109,14 @@ def assert_updater_accuracy(tree_method, comparison_tree_method, variable_param, tolerance):
             param_tmp[name] = set[i]
         print(param_tmp, file=sys.stderr)
-        assert_accuracy(train_boston(param_tmp, comparison_tree_method), tree_method, comparison_tree_method, tolerance, param_tmp)
-        assert_accuracy(train_digits(param_tmp, comparison_tree_method), tree_method, comparison_tree_method, tolerance, param_tmp)
-        assert_accuracy(train_cancer(param_tmp, comparison_tree_method), tree_method, comparison_tree_method, tolerance, param_tmp)
-        assert_accuracy(train_sparse(param_tmp, comparison_tree_method), tree_method, comparison_tree_method, tolerance, param_tmp)
+        assert_accuracy(train_boston(param_tmp, comparison_tree_method), tree_method, comparison_tree_method, tolerance,
+                        param_tmp)
+        assert_accuracy(train_digits(param_tmp, comparison_tree_method), tree_method, comparison_tree_method, tolerance,
+                        param_tmp)
+        assert_accuracy(train_cancer(param_tmp, comparison_tree_method), tree_method, comparison_tree_method, tolerance,
+                        param_tmp)
+        assert_accuracy(train_sparse(param_tmp, comparison_tree_method), tree_method, comparison_tree_method, tolerance,
+                        param_tmp)
 @attr('gpu')
@@ -122,5 +126,6 @@ class TestGPU(unittest.TestCase):
         assert_updater_accuracy('gpu_exact', 'exact', variable_param, 0.02)

     def test_gpu_hist(self):
-        variable_param = {'n_gpus': [1, -1], 'max_depth': [2, 6], 'max_leaves': [255, 4], 'max_bin': [2, 16, 1024]}
+        variable_param = {'n_gpus': [1, -1], 'max_depth': [2, 6], 'max_leaves': [255, 4], 'max_bin': [2, 16, 1024],
+                          'grow_policy': ['depthwise', 'lossguide']}
         assert_updater_accuracy('gpu_hist', 'hist', variable_param, 0.01)

View File

@@ -0,0 +1,44 @@
+from __future__ import print_function
+
+import numpy as np
+import unittest
+import xgboost as xgb
+from nose.plugins.attrib import attr
+from sklearn.datasets import make_regression
+
+rng = np.random.RandomState(1994)
+
+
+def non_decreasing(L):
+    return all((x - y) < 0.001 for x, y in zip(L, L[1:]))
+
+
+def non_increasing(L):
+    return all((y - x) < 0.001 for x, y in zip(L, L[1:]))
+
+
+def assert_constraint(constraint, tree_method):
+    n = 1000
+    X, y = make_regression(n, random_state=rng, n_features=1, n_informative=1)
+    dtrain = xgb.DMatrix(X, y)
+    param = {}
+    param['tree_method'] = tree_method
+    param['monotone_constraints'] = "(" + str(constraint) + ")"
+    bst = xgb.train(param, dtrain)
+    dpredict = xgb.DMatrix(X[X[:, 0].argsort()])
+    pred = bst.predict(dpredict)
+    if constraint > 0:
+        assert non_decreasing(pred)
+    elif constraint < 0:
+        assert non_increasing(pred)
+
+
+@attr('gpu')
+class TestMonotonicConstraints(unittest.TestCase):
+    def test_exact(self):
+        assert_constraint(1, 'exact')
+        assert_constraint(-1, 'exact')
+
+    def test_gpu_hist(self):
+        assert_constraint(1, 'gpu_hist')
+        assert_constraint(-1, 'gpu_hist')