[UPDATE] Update rabit and threadlocal (#2114)
* [UPDATE] Update rabit and threadlocal
* Minor fix to make the build system happy
* Upgrade the compiler requirement to g++-4.8
* Upgrade dmlc-core
* Update travis

parent b0c972aa4d
commit d581a3d0e7
.travis.yml
@@ -41,12 +41,16 @@ matrix:
 # dependent apt packages
 addons:
   apt:
+    sources:
+    - ubuntu-toolchain-r-test
     packages:
     - doxygen
     - wget
     - libcurl4-openssl-dev
     - unzip
     - graphviz
+    - gcc-4.8
+    - g++-4.8
 
 before_install:
   - source dmlc-core/scripts/travis/travis_setup_env.sh
Makefile
@@ -48,7 +48,7 @@ export CXX = $(if $(shell which g++-6),g++-6,$(if $(shell which g++-mp-6),g++-mp
 endif
 
 export LDFLAGS= -pthread -lm $(ADD_LDFLAGS) $(DMLC_LDFLAGS) $(PLUGIN_LDFLAGS)
-export CFLAGS= -std=c++0x -Wall -Wno-unknown-pragmas -Iinclude $(ADD_CFLAGS) $(PLUGIN_CFLAGS)
+export CFLAGS= -std=c++11 -Wall -Wno-unknown-pragmas -Iinclude $(ADD_CFLAGS) $(PLUGIN_CFLAGS)
 CFLAGS += -I$(DMLC_CORE)/include -I$(RABIT)/include
 #java include path
 export JAVAINCFLAGS = -I${JAVA_HOME}/include -I./java
NEWS.md
@@ -8,6 +8,10 @@ This file records the changes in xgboost library in reverse chronological order.
   - Specialized some prediction routine
 * Automatically remove nan from input data when it is sparse.
   - This can solve some of user reported problem of istart != hist.size
+* Minor fixes
+  - Thread local variable is upgraded so it is automatically freed at thread exit.
+* Migrate to C++11
+  - The current master version now requires a C++11-enabled compiler (g++-4.8 or higher).
 
 ## v0.6 (2016.07.29)
 * Version 0.5 is skipped due to major improvements in the core
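The "Minor fixes" entry above notes that thread-local storage is now freed automatically at thread exit. A minimal sketch of the C++11 `thread_local` behaviour this relies on (illustrative only; `ScratchBuffer` and `tls_buffer` are made-up names, not the dmlc-core implementation):

```cpp
// build: g++-4.8 -std=c++11 -pthread tls_demo.cc   (illustrative file name)
#include <iostream>
#include <thread>
#include <vector>

// Illustrative per-thread scratch space. With C++11 `thread_local`, every
// thread gets its own ScratchBuffer, and its destructor runs automatically
// when that thread exits, so no explicit cleanup pass is needed.
struct ScratchBuffer {
  std::vector<double> data;
};

thread_local ScratchBuffer tls_buffer;

int main() {
  std::thread worker([] {
    tls_buffer.data.assign(1024, 0.0);  // touches the worker's copy only
  });
  worker.join();  // the worker's ScratchBuffer has already been destroyed here
  std::cout << "main-thread buffer size: " << tls_buffer.data.size() << std::endl;
  return 0;
}
```

The worker's buffer is released as soon as the worker thread finishes, which is the behaviour the NEWS entry describes; the actual helper lives in the updated dmlc-core/rabit submodules pulled in below.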
dmlc-core
@@ -1 +1 @@
-Subproject commit 78b78be34ac27d30f2193f3d51848c62887669c4
+Subproject commit 2b75a0ce6f191ad0fcb5319039b41e990968542a

@@ -42,7 +42,7 @@ Our goal is to build the shared library:
 
 The minimal building requirement is
 
-- A recent c++ compiler supporting C++ 11 (g++-4.6 or higher)
+- A recent c++ compiler supporting C++ 11 (g++-4.8 or higher)
 
 We can edit `make/config.mk` to change the compile options, and then build by
 `make`. If everything goes well, we can go to the specific language installation section.
@@ -222,7 +222,7 @@ first follow [Building on OSX](#building-on-osx) to get the OpenMP enabled compi
 
 ### Installing the development version
 
-Make sure you have installed git and a recent C++ compiler supporting C++11 (e.g., g++-4.6 or higher).
+Make sure you have installed git and a recent C++ compiler supporting C++11 (e.g., g++-4.8 or higher).
 On Windows, Rtools must be installed, and its bin directory has to be added to PATH during the installation.
 And see the previous subsection for an OSX tip.
 
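The documentation hunks above raise the minimum compiler from g++-4.6 to g++-4.8. A quick, stand-alone way to confirm a toolchain really builds in C++11 mode is a probe like the following (illustrative; `cxx11_probe.cc` is not part of the repository):

```cpp
// cxx11_probe.cc -- e.g. `g++-4.8 -std=c++11 cxx11_probe.cc && ./a.out`
#include <memory>

// Fails to compile if the translation unit is not built as C++11 or newer.
static_assert(__cplusplus >= 201103L, "a C++11-capable compiler is required");

int main() {
  auto p = std::make_shared<int>(42);  // shared_ptr/make_shared are C++11 library features
  return (*p == 42) ? 0 : 1;
}
```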
rabit
@@ -1 +1 @@
-Subproject commit a9a2a69dc1144180a43f7d2d1097264482be7817
+Subproject commit a764d45cfb438cc9f15cf47ce586c02ff2c65d0f
@@ -385,7 +385,7 @@ XGB_DLL int XGDMatrixSliceDMatrix(DMatrixHandle handle,
   src.CopyFrom(static_cast<std::shared_ptr<DMatrix>*>(handle)->get());
   data::SimpleCSRSource& ret = *source;
 
-  CHECK_EQ(src.info.group_ptr.size(), 0)
+  CHECK_EQ(src.info.group_ptr.size(), 0U)
       << "slice does not support group structure";
 
   ret.Clear();
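This is the first of many hunks that only add a `U` suffix to the integer literal in a `CHECK_*` call. The left-hand side is unsigned (`std::vector::size()` returns `size_t`), and the comparison happens inside a check helper where the literal is no longer a visible constant; the suffix presumably keeps the stricter C++11 `-Wall` build free of `-Wsign-compare` warnings. A self-contained illustration (the `check_eq` template below is a simplified stand-in, not the dmlc macro):

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Simplified stand-in for a CHECK_EQ-style helper: once the comparison is
// done through function parameters, the literal is an ordinary int, so
// size_t == int triggers -Wsign-compare under `g++ -Wall` in C++ mode.
template <typename X, typename Y>
bool check_eq(const X& x, const Y& y) { return x == y; }

int main() {
  std::vector<unsigned> group_ptr;                      // stands in for src.info.group_ptr
  std::cout << check_eq(group_ptr.size(), 0)  << "\n";  // warns: signed/unsigned comparison
  std::cout << check_eq(group_ptr.size(), 0U) << "\n";  // both operands unsigned, no warning
  return 0;
}
```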
@@ -128,7 +128,7 @@ void GHistIndexMatrix::Init(DMatrix* p_fmat) {
   }
   index.resize(row_ptr.back());
 
-  CHECK_GT(cut->cut.size(), 0);
+  CHECK_GT(cut->cut.size(), 0U);
   CHECK_EQ(cut->row_ptr.back(), cut->cut.size());
 
   omp_ulong bsize = static_cast<omp_ulong>(batch.size);
@@ -50,7 +50,7 @@ class RowSetCollection {
   }
   // initialize node id 0->everything
   inline void Init() {
-    CHECK_EQ(elem_of_each_node_.size(), 0);
+    CHECK_EQ(elem_of_each_node_.size(), 0U);
     const bst_uint* begin = dmlc::BeginPtr(row_indices_);
     const bst_uint* end = dmlc::BeginPtr(row_indices_) + row_indices_.size();
     elem_of_each_node_.emplace_back(Elem(begin, end));
@@ -207,14 +207,14 @@ class SparsePage::Writer {
    * writing is done by another thread inside writer.
    * \param page The page to be written
    */
-  void PushWrite(std::unique_ptr<SparsePage>&& page);
+  void PushWrite(std::shared_ptr<SparsePage>&& page);
   /*!
    * \brief Allocate a page to store results.
    * This function can block when the writer is too slow and buffer pages
    * have not yet been recycled.
    * \param out_page Used to store the allocated pages.
    */
-  void Alloc(std::unique_ptr<SparsePage>* out_page);
+  void Alloc(std::shared_ptr<SparsePage>* out_page);
 
  private:
   /*! \brief number of allocated pages */
@@ -224,9 +224,9 @@ class SparsePage::Writer {
   /*! \brief writer threads */
   std::vector<std::unique_ptr<std::thread> > workers_;
   /*! \brief recycler queue */
-  dmlc::ConcurrentBlockingQueue<std::unique_ptr<SparsePage> > qrecycle_;
+  dmlc::ConcurrentBlockingQueue<std::shared_ptr<SparsePage> > qrecycle_;
   /*! \brief worker threads */
-  std::vector<dmlc::ConcurrentBlockingQueue<std::unique_ptr<SparsePage> > > qworkers_;
+  std::vector<dmlc::ConcurrentBlockingQueue<std::shared_ptr<SparsePage> > > qworkers_;
 };
 #endif // DMLC_ENABLE_STD_THREAD
 
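The doc comments above describe the writer's recycling protocol: `Alloc()` hands out a buffer page (and may block until the background writer returns one), the caller fills it, and `PushWrite()` passes it to a worker thread for serialization. After this change the pages travel as `std::shared_ptr`. A sketch of the calling pattern, mirroring the `SparsePageSource`/`SparsePageDMatrix` hunks further down (illustrative only; it assumes the internal xgboost header declaring `SparsePage::Writer`, with namespace qualifiers omitted):

```cpp
// Sketch of the Alloc/PushWrite cycle; not a complete translation unit on
// its own -- it assumes the xgboost header that declares SparsePage::Writer.
#include <memory>
#include <string>
#include <vector>

void WriteShards(const std::vector<std::string>& name_shards,
                 const std::vector<std::string>& format_shards) {
  SparsePage::Writer writer(name_shards, format_shards, 6);

  // Pages are now passed around as shared_ptr instead of unique_ptr.
  std::shared_ptr<SparsePage> page;
  writer.Alloc(&page);   // may block until a recycled buffer page is free
  page->Clear();

  // ... append batch data to *page here ...

  writer.PushWrite(std::move(page));  // hand the filled page to a worker thread
  // A later Alloc() reuses the page once the worker has flushed it to disk.
}
```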
@@ -254,7 +254,7 @@ void SparsePageDMatrix::InitColAccess(const std::vector<bool>& enabled,
 
   {
     SparsePage::Writer writer(name_shards, format_shards, 6);
-    std::unique_ptr<SparsePage> page;
+    std::shared_ptr<SparsePage> page;
     writer.Alloc(&page); page->Clear();
 
     double tstart = dmlc::GetTime();
@@ -16,7 +16,7 @@ class SparsePageRawFormat : public SparsePage::Format {
  public:
   bool Read(SparsePage* page, dmlc::SeekStream* fi) override {
     if (!fi->Read(&(page->offset))) return false;
-    CHECK_NE(page->offset.size(), 0) << "Invalid SparsePage file";
+    CHECK_NE(page->offset.size(), 0U) << "Invalid SparsePage file";
     page->data.resize(page->offset.back());
     if (page->data.size() != 0) {
       CHECK_EQ(fi->Read(dmlc::BeginPtr(page->data),
@@ -18,7 +18,7 @@ SparsePageSource::SparsePageSource(const std::string& cache_info)
     : base_rowid_(0), page_(nullptr), clock_ptr_(0) {
   // read in the info files
   std::vector<std::string> cache_shards = common::Split(cache_info, ':');
-  CHECK_NE(cache_shards.size(), 0);
+  CHECK_NE(cache_shards.size(), 0U);
   {
     std::string name_info = cache_shards[0];
     std::unique_ptr<dmlc::Stream> finfo(dmlc::Stream::Create(name_info.c_str(), "r"));
@@ -85,7 +85,7 @@ const RowBatch& SparsePageSource::Value() const {
 
 bool SparsePageSource::CacheExist(const std::string& cache_info) {
   std::vector<std::string> cache_shards = common::Split(cache_info, ':');
-  CHECK_NE(cache_shards.size(), 0);
+  CHECK_NE(cache_shards.size(), 0U);
   {
     std::string name_info = cache_shards[0];
     std::unique_ptr<dmlc::Stream> finfo(dmlc::Stream::Create(name_info.c_str(), "r", true));
@@ -102,7 +102,7 @@ bool SparsePageSource::CacheExist(const std::string& cache_info) {
 void SparsePageSource::Create(dmlc::Parser<uint32_t>* src,
                               const std::string& cache_info) {
   std::vector<std::string> cache_shards = common::Split(cache_info, ':');
-  CHECK_NE(cache_shards.size(), 0);
+  CHECK_NE(cache_shards.size(), 0U);
   // read in the info files.
   std::string name_info = cache_shards[0];
   std::vector<std::string> name_shards, format_shards;
@@ -112,7 +112,7 @@ void SparsePageSource::Create(dmlc::Parser<uint32_t>* src,
   }
   {
     SparsePage::Writer writer(name_shards, format_shards, 6);
-    std::unique_ptr<SparsePage> page;
+    std::shared_ptr<SparsePage> page;
     writer.Alloc(&page); page->Clear();
 
     MetaInfo info;
@@ -170,7 +170,7 @@ void SparsePageSource::Create(dmlc::Parser<uint32_t>* src,
 void SparsePageSource::Create(DMatrix* src,
                               const std::string& cache_info) {
   std::vector<std::string> cache_shards = common::Split(cache_info, ':');
-  CHECK_NE(cache_shards.size(), 0);
+  CHECK_NE(cache_shards.size(), 0U);
   // read in the info files.
   std::string name_info = cache_shards[0];
   std::vector<std::string> name_shards, format_shards;
@@ -180,7 +180,7 @@ void SparsePageSource::Create(DMatrix* src,
   }
   {
     SparsePage::Writer writer(name_shards, format_shards, 6);
-    std::unique_ptr<SparsePage> page;
+    std::shared_ptr<SparsePage> page;
     writer.Alloc(&page); page->Clear();
 
     MetaInfo info = src->info();
@@ -32,7 +32,7 @@ SparsePage::Writer::Writer(
       std::unique_ptr<SparsePage::Format> fmt(
           SparsePage::Format::Create(format_shard));
       fo->Write(format_shard);
-      std::unique_ptr<SparsePage> page;
+      std::shared_ptr<SparsePage> page;
       while (wqueue->Pop(&page)) {
         if (page.get() == nullptr) break;
         fmt->Write(*page, fo.get());
@@ -47,7 +47,7 @@ SparsePage::Writer::Writer(
 SparsePage::Writer::~Writer() {
   for (auto& queue : qworkers_) {
     // use nullptr to signal termination.
-    std::unique_ptr<SparsePage> sig(nullptr);
+    std::shared_ptr<SparsePage> sig(nullptr);
     queue.Push(std::move(sig));
   }
   for (auto& thread : workers_) {
@@ -55,12 +55,12 @@ SparsePage::Writer::~Writer() {
   }
 }
 
-void SparsePage::Writer::PushWrite(std::unique_ptr<SparsePage>&& page) {
+void SparsePage::Writer::PushWrite(std::shared_ptr<SparsePage>&& page) {
   qworkers_[clock_ptr_].Push(std::move(page));
   clock_ptr_ = (clock_ptr_ + 1) % workers_.size();
 }
 
-void SparsePage::Writer::Alloc(std::unique_ptr<SparsePage>* out_page) {
+void SparsePage::Writer::Alloc(std::shared_ptr<SparsePage>* out_page) {
   CHECK(out_page->get() == nullptr);
   if (num_free_buffer_ != 0) {
     out_page->reset(new SparsePage());
@@ -176,7 +176,7 @@ class GBLinear : public GradientBooster {
     if (model.weight.size() == 0) {
       model.InitModel();
     }
-    CHECK_EQ(ntree_limit, 0)
+    CHECK_EQ(ntree_limit, 0U)
         << "GBLinear::Predict ntrees is only valid for gbtree predictor";
     std::vector<bst_float> &preds = *out_preds;
     const std::vector<bst_float>& base_margin = p_fmat->info().base_margin;
@@ -246,7 +246,7 @@ class GBTree : public GradientBooster {
       new_trees.push_back(std::move(ret));
     } else {
       const int ngroup = mparam.num_output_group;
-      CHECK_EQ(gpair.size() % ngroup, 0)
+      CHECK_EQ(gpair.size() % ngroup, 0U)
          << "must have exactly ngroup*nrow gpairs";
       std::vector<bst_gpair> tmp(gpair.size() / ngroup);
       for (int gid = 0; gid < ngroup; ++gid) {
@@ -243,7 +243,7 @@ class LearnerImpl : public Learner {
       CHECK_NE(header, "bs64")
           << "Base64 format is no longer supported in brick.";
       if (header == "binf") {
-        CHECK_EQ(fp.Read(&header[0], 4), 4);
+        CHECK_EQ(fp.Read(&header[0], 4), 4U);
       }
     }
     // use the peekable reader.
@@ -24,7 +24,7 @@ struct EvalEWiseBase : public Metric {
   bst_float Eval(const std::vector<bst_float>& preds,
                  const MetaInfo& info,
                  bool distributed) const override {
-    CHECK_NE(info.labels.size(), 0) << "label set cannot be empty";
+    CHECK_NE(info.labels.size(), 0U) << "label set cannot be empty";
     CHECK_EQ(preds.size(), info.labels.size())
         << "label and prediction size not match, "
         << "hint: use merror or mlogloss for multi-class classification";
@@ -23,11 +23,11 @@ struct EvalMClassBase : public Metric {
   bst_float Eval(const std::vector<bst_float> &preds,
                  const MetaInfo &info,
                  bool distributed) const override {
-    CHECK_NE(info.labels.size(), 0) << "label set cannot be empty";
+    CHECK_NE(info.labels.size(), 0U) << "label set cannot be empty";
     CHECK(preds.size() % info.labels.size() == 0)
        << "label and prediction size not match";
     const size_t nclass = preds.size() / info.labels.size();
-    CHECK_GE(nclass, 1)
+    CHECK_GE(nclass, 1U)
        << "mlogloss and merror are only used for multi-class classification,"
        << " use logloss for binary classification";
     const bst_omp_uint ndata = static_cast<bst_omp_uint>(info.labels.size());
@@ -84,7 +84,7 @@ struct EvalAuc : public Metric {
   bst_float Eval(const std::vector<bst_float> &preds,
                  const MetaInfo &info,
                  bool distributed) const override {
-    CHECK_NE(info.labels.size(), 0) << "label set cannot be empty";
+    CHECK_NE(info.labels.size(), 0U) << "label set cannot be empty";
     CHECK_EQ(preds.size(), info.labels.size())
         << "label size predict size not match";
     std::vector<unsigned> tgptr(2, 0);
@@ -166,7 +166,7 @@ struct EvalRankList : public Metric {
     std::vector<unsigned> tgptr(2, 0);
     tgptr[1] = static_cast<unsigned>(preds.size());
     const std::vector<unsigned> &gptr = info.group_ptr.size() == 0 ? tgptr : info.group_ptr;
-    CHECK_NE(gptr.size(), 0) << "must specify group when constructing rank file";
+    CHECK_NE(gptr.size(), 0U) << "must specify group when constructing rank file";
     CHECK_EQ(gptr.back(), preds.size())
         << "EvalRanklist: group structure must match number of prediction";
     const bst_omp_uint ngroup = static_cast<bst_omp_uint>(gptr.size() - 1);
@@ -39,7 +39,7 @@ class SoftmaxMultiClassObj : public ObjFunction {
                    const MetaInfo& info,
                    int iter,
                    std::vector<bst_gpair>* out_gpair) override {
-    CHECK_NE(info.labels.size(), 0) << "label set cannot be empty";
+    CHECK_NE(info.labels.size(), 0U) << "label set cannot be empty";
     CHECK(preds.size() == (static_cast<size_t>(param_.num_class) * info.labels.size()))
         << "SoftmaxMultiClassObj: label size and pred size does not match";
     out_gpair->resize(preds.size());
@@ -86,7 +86,7 @@ class RegLossObj : public ObjFunction {
                    const MetaInfo &info,
                    int iter,
                    std::vector<bst_gpair> *out_gpair) override {
-    CHECK_NE(info.labels.size(), 0) << "label set cannot be empty";
+    CHECK_NE(info.labels.size(), 0U) << "label set cannot be empty";
     CHECK_EQ(preds.size(), info.labels.size())
         << "labels are not correctly provided"
         << "preds.size=" << preds.size() << ", label.size=" << info.labels.size();
@@ -168,7 +168,7 @@ class PoissonRegression : public ObjFunction {
                    const MetaInfo &info,
                    int iter,
                    std::vector<bst_gpair> *out_gpair) override {
-    CHECK_NE(info.labels.size(), 0) << "label set cannot be empty";
+    CHECK_NE(info.labels.size(), 0U) << "label set cannot be empty";
     CHECK_EQ(preds.size(), info.labels.size()) << "labels are not correctly provided";
     out_gpair->resize(preds.size());
     // check if label in range
@@ -229,7 +229,7 @@ class GammaRegression : public ObjFunction {
                    const MetaInfo &info,
                    int iter,
                    std::vector<bst_gpair> *out_gpair) override {
-    CHECK_NE(info.labels.size(), 0) << "label set cannot be empty";
+    CHECK_NE(info.labels.size(), 0U) << "label set cannot be empty";
     CHECK_EQ(preds.size(), info.labels.size()) << "labels are not correctly provided";
     out_gpair->resize(preds.size());
     // check if label in range
@@ -294,7 +294,7 @@ class TweedieRegression : public ObjFunction {
                    const MetaInfo &info,
                    int iter,
                    std::vector<bst_gpair> *out_gpair) override {
-    CHECK_NE(info.labels.size(), 0) << "label set cannot be empty";
+    CHECK_NE(info.labels.size(), 0U) << "label set cannot be empty";
     CHECK_EQ(preds.size(), info.labels.size()) << "labels are not correctly provided";
     out_gpair->resize(preds.size());
     // check if label in range
@@ -204,7 +204,7 @@ struct TrainParam : public dmlc::Parameter<TrainParam> {
   /*! \brief maximum sketch size */
   inline unsigned max_sketch_size() const {
     unsigned ret = static_cast<unsigned>(sketch_ratio / sketch_eps);
-    CHECK_GT(ret, 0);
+    CHECK_GT(ret, 0U);
     return ret;
   }
 };
@@ -159,7 +159,7 @@ class ColMaker: public TreeUpdater {
       }
       unsigned n = static_cast<unsigned>(param.colsample_bytree * feat_index.size());
       std::shuffle(feat_index.begin(), feat_index.end(), common::GlobalRandom());
-      CHECK_GT(n, 0)
+      CHECK_GT(n, 0U)
          << "colsample_bytree=" << param.colsample_bytree
          << " is too small that no feature can be included";
       feat_index.resize(n);
@@ -628,7 +628,7 @@ class ColMaker: public TreeUpdater {
       if (param.colsample_bylevel != 1.0f) {
         std::shuffle(feat_set.begin(), feat_set.end(), common::GlobalRandom());
         unsigned n = static_cast<unsigned>(param.colsample_bylevel * feat_index.size());
-        CHECK_GT(n, 0)
+        CHECK_GT(n, 0U)
            << "colsample_bylevel is too small that no feature can be included";
         feat_set.resize(n);
       }
@@ -784,7 +784,7 @@ class DistColMaker : public ColMaker<TStats, TConstraint> {
               DMatrix* dmat,
               const std::vector<RegTree*> &trees) override {
     TStats::CheckInfo(dmat->info());
-    CHECK_EQ(trees.size(), 1) << "DistColMaker: only support one tree at a time";
+    CHECK_EQ(trees.size(), 1U) << "DistColMaker: only support one tree at a time";
     // build the tree
     builder.Update(gpair, dmat, trees[0]);
     //// prune the tree, note that pruner will sync the tree
@@ -283,7 +283,7 @@ class FastHistMaker: public TreeUpdater {
       }
       builder_.Init(this->nthread, nbins);
 
-      CHECK_EQ(info.root_index.size(), 0);
+      CHECK_EQ(info.root_index.size(), 0U);
       std::vector<bst_uint>& row_indices = row_set_collection_.row_indices_;
       // mark subsample and build list of member rows
       if (param.subsample < 1.0f) {
@@ -313,7 +313,7 @@ class FastHistMaker: public TreeUpdater {
       }
       unsigned n = static_cast<unsigned>(param.colsample_bytree * feat_index.size());
       std::shuffle(feat_index.begin(), feat_index.end(), common::GlobalRandom());
-      CHECK_GT(n, 0)
+      CHECK_GT(n, 0U)
          << "colsample_bytree=" << param.colsample_bytree
          << " is too small that no feature can be included";
       feat_index.resize(n);
@@ -353,7 +353,7 @@ class FastHistMaker: public TreeUpdater {
         }
       }
     }
-    CHECK_GT(min_nbins_per_feature, 0);
+    CHECK_GT(min_nbins_per_feature, 0U);
   }
   {
     snode.reserve(256);
@@ -55,7 +55,7 @@ class HistMaker: public BaseMaker {
                    const MetaInfo &info,
                    const bst_uint ridx) {
     unsigned i = std::upper_bound(cut, cut + size, fv) - cut;
-    CHECK_NE(size, 0) << "try insert into size=0";
+    CHECK_NE(size, 0U) << "try insert into size=0";
     CHECK_LT(i, size);
     data[i].Add(gpair, info, ridx);
   }
@@ -664,7 +664,7 @@ class GlobalProposalHistMaker: public CQHistMaker<TStats> {
       cached_cut_.clear();
     }
     if (cached_rptr_.size() == 0) {
-      CHECK_EQ(this->qexpand.size(), 1);
+      CHECK_EQ(this->qexpand.size(), 1U);
       CQHistMaker<TStats>::ResetPosAndPropose(gpair, p_fmat, fset, tree);
       cached_rptr_ = this->wspace.rptr;
       cached_cut_ = this->wspace.cut;
@@ -257,7 +257,7 @@ class SketchMaker: public BaseMaker {
     }
   }
   inline void SyncNodeStats(void) {
-    CHECK_NE(qexpand.size(), 0);
+    CHECK_NE(qexpand.size(), 0U);
     std::vector<SKStats> tmp(qexpand.size());
     for (size_t i = 0; i < qexpand.size(); ++i) {
       tmp[i] = node_stats[qexpand[i]];
@@ -21,6 +21,9 @@ if [ ${TRAVIS_OS_NAME} == "osx" ]; then
     echo 'USE_OPENMP=0' >> config.mk
     echo 'TMPVAR := $(XGB_PLUGINS)' >> config.mk
     echo 'XGB_PLUGINS = $(filter-out plugin/lz4/plugin.mk, $(TMPVAR))' >> config.mk
+else
+    # use g++-4.8 for linux
+    export CXX=g++-4.8
 fi
 
 if [ ${TASK} == "python_test" ]; then