[UPDATE] Update rabit and threadlocal (#2114)

* [UPDATE] Update rabit and threadlocal

* minor fix to make build system happy

* upgrade requirement to g++4.8

* upgrade dmlc-core

* update travis
This commit is contained in:
Tianqi Chen 2017-03-16 18:48:37 -07:00 committed by GitHub
parent b0c972aa4d
commit d581a3d0e7
28 changed files with 59 additions and 48 deletions

View File

@ -41,12 +41,16 @@ matrix:
# dependent apt packages # dependent apt packages
addons: addons:
apt: apt:
sources:
- ubuntu-toolchain-r-test
packages: packages:
- doxygen - doxygen
- wget - wget
- libcurl4-openssl-dev - libcurl4-openssl-dev
- unzip - unzip
- graphviz - graphviz
- gcc-4.8
- g++-4.8
before_install: before_install:
- source dmlc-core/scripts/travis/travis_setup_env.sh - source dmlc-core/scripts/travis/travis_setup_env.sh

View File

@ -48,7 +48,7 @@ export CXX = $(if $(shell which g++-6),g++-6,$(if $(shell which g++-mp-6),g++-mp
endif endif
export LDFLAGS= -pthread -lm $(ADD_LDFLAGS) $(DMLC_LDFLAGS) $(PLUGIN_LDFLAGS) export LDFLAGS= -pthread -lm $(ADD_LDFLAGS) $(DMLC_LDFLAGS) $(PLUGIN_LDFLAGS)
export CFLAGS= -std=c++0x -Wall -Wno-unknown-pragmas -Iinclude $(ADD_CFLAGS) $(PLUGIN_CFLAGS) export CFLAGS= -std=c++11 -Wall -Wno-unknown-pragmas -Iinclude $(ADD_CFLAGS) $(PLUGIN_CFLAGS)
CFLAGS += -I$(DMLC_CORE)/include -I$(RABIT)/include CFLAGS += -I$(DMLC_CORE)/include -I$(RABIT)/include
#java include path #java include path
export JAVAINCFLAGS = -I${JAVA_HOME}/include -I./java export JAVAINCFLAGS = -I${JAVA_HOME}/include -I./java

View File

@ -8,6 +8,10 @@ This file records the changes in xgboost library in reverse chronological order.
- Specialized some prediction routine - Specialized some prediction routine
* Automatically remove nan from input data when it is sparse. * Automatically remove nan from input data when it is sparse.
- This can solve some of user reported problem of istart != hist.size - This can solve some of user reported problem of istart != hist.size
* Minor fixes
- Thread local variable is upgraded so it is automatically freed at thread exit.
* Migrate to C++11
- The current master version now requires a C++11 enabled compiler (g++-4.8 or higher)
## v0.6 (2016.07.29) ## v0.6 (2016.07.29)
* Version 0.5 is skipped due to major improvements in the core * Version 0.5 is skipped due to major improvements in the core

@ -1 +1 @@
Subproject commit 78b78be34ac27d30f2193f3d51848c62887669c4 Subproject commit 2b75a0ce6f191ad0fcb5319039b41e990968542a

View File

@ -42,7 +42,7 @@ Our goal is to build the shared library:
The minimal building requirement is The minimal building requirement is
- A recent c++ compiler supporting C++ 11 (g++-4.6 or higher) - A recent c++ compiler supporting C++ 11 (g++-4.8 or higher)
We can edit `make/config.mk` to change the compile options, and then build by We can edit `make/config.mk` to change the compile options, and then build by
`make`. If everything goes well, we can go to the specific language installation section. `make`. If everything goes well, we can go to the specific language installation section.
@ -222,7 +222,7 @@ first follow [Building on OSX](#building-on-osx) to get the OpenMP enabled compi
### Installing the development version ### Installing the development version
Make sure you have installed git and a recent C++ compiler supporting C++11 (e.g., g++-4.6 or higher). Make sure you have installed git and a recent C++ compiler supporting C++11 (e.g., g++-4.8 or higher).
On Windows, Rtools must be installed, and its bin directory has to be added to PATH during the installation. On Windows, Rtools must be installed, and its bin directory has to be added to PATH during the installation.
And see the previous subsection for an OSX tip. And see the previous subsection for an OSX tip.

2
rabit

@ -1 +1 @@
Subproject commit a9a2a69dc1144180a43f7d2d1097264482be7817 Subproject commit a764d45cfb438cc9f15cf47ce586c02ff2c65d0f

View File

@ -385,7 +385,7 @@ XGB_DLL int XGDMatrixSliceDMatrix(DMatrixHandle handle,
src.CopyFrom(static_cast<std::shared_ptr<DMatrix>*>(handle)->get()); src.CopyFrom(static_cast<std::shared_ptr<DMatrix>*>(handle)->get());
data::SimpleCSRSource& ret = *source; data::SimpleCSRSource& ret = *source;
CHECK_EQ(src.info.group_ptr.size(), 0) CHECK_EQ(src.info.group_ptr.size(), 0U)
<< "slice does not support group structure"; << "slice does not support group structure";
ret.Clear(); ret.Clear();

View File

@ -128,7 +128,7 @@ void GHistIndexMatrix::Init(DMatrix* p_fmat) {
} }
index.resize(row_ptr.back()); index.resize(row_ptr.back());
CHECK_GT(cut->cut.size(), 0); CHECK_GT(cut->cut.size(), 0U);
CHECK_EQ(cut->row_ptr.back(), cut->cut.size()); CHECK_EQ(cut->row_ptr.back(), cut->cut.size());
omp_ulong bsize = static_cast<omp_ulong>(batch.size); omp_ulong bsize = static_cast<omp_ulong>(batch.size);

View File

@ -50,7 +50,7 @@ class RowSetCollection {
} }
// initialize node id 0->everything // initialize node id 0->everything
inline void Init() { inline void Init() {
CHECK_EQ(elem_of_each_node_.size(), 0); CHECK_EQ(elem_of_each_node_.size(), 0U);
const bst_uint* begin = dmlc::BeginPtr(row_indices_); const bst_uint* begin = dmlc::BeginPtr(row_indices_);
const bst_uint* end = dmlc::BeginPtr(row_indices_) + row_indices_.size(); const bst_uint* end = dmlc::BeginPtr(row_indices_) + row_indices_.size();
elem_of_each_node_.emplace_back(Elem(begin, end)); elem_of_each_node_.emplace_back(Elem(begin, end));

View File

@ -207,14 +207,14 @@ class SparsePage::Writer {
* writing is done by another thread inside writer. * writing is done by another thread inside writer.
* \param page The page to be written * \param page The page to be written
*/ */
void PushWrite(std::unique_ptr<SparsePage>&& page); void PushWrite(std::shared_ptr<SparsePage>&& page);
/*! /*!
* \brief Allocate a page to store results. * \brief Allocate a page to store results.
* This function can block when the writer is too slow and buffer pages * This function can block when the writer is too slow and buffer pages
* have not yet been recycled. * have not yet been recycled.
* \param out_page Used to store the allocated pages. * \param out_page Used to store the allocated pages.
*/ */
void Alloc(std::unique_ptr<SparsePage>* out_page); void Alloc(std::shared_ptr<SparsePage>* out_page);
private: private:
/*! \brief number of allocated pages */ /*! \brief number of allocated pages */
@ -224,9 +224,9 @@ class SparsePage::Writer {
/*! \brief writer threads */ /*! \brief writer threads */
std::vector<std::unique_ptr<std::thread> > workers_; std::vector<std::unique_ptr<std::thread> > workers_;
/*! \brief recycler queue */ /*! \brief recycler queue */
dmlc::ConcurrentBlockingQueue<std::unique_ptr<SparsePage> > qrecycle_; dmlc::ConcurrentBlockingQueue<std::shared_ptr<SparsePage> > qrecycle_;
/*! \brief worker threads */ /*! \brief worker threads */
std::vector<dmlc::ConcurrentBlockingQueue<std::unique_ptr<SparsePage> > > qworkers_; std::vector<dmlc::ConcurrentBlockingQueue<std::shared_ptr<SparsePage> > > qworkers_;
}; };
#endif // DMLC_ENABLE_STD_THREAD #endif // DMLC_ENABLE_STD_THREAD

View File

@ -254,7 +254,7 @@ void SparsePageDMatrix::InitColAccess(const std::vector<bool>& enabled,
{ {
SparsePage::Writer writer(name_shards, format_shards, 6); SparsePage::Writer writer(name_shards, format_shards, 6);
std::unique_ptr<SparsePage> page; std::shared_ptr<SparsePage> page;
writer.Alloc(&page); page->Clear(); writer.Alloc(&page); page->Clear();
double tstart = dmlc::GetTime(); double tstart = dmlc::GetTime();

View File

@ -16,7 +16,7 @@ class SparsePageRawFormat : public SparsePage::Format {
public: public:
bool Read(SparsePage* page, dmlc::SeekStream* fi) override { bool Read(SparsePage* page, dmlc::SeekStream* fi) override {
if (!fi->Read(&(page->offset))) return false; if (!fi->Read(&(page->offset))) return false;
CHECK_NE(page->offset.size(), 0) << "Invalid SparsePage file"; CHECK_NE(page->offset.size(), 0U) << "Invalid SparsePage file";
page->data.resize(page->offset.back()); page->data.resize(page->offset.back());
if (page->data.size() != 0) { if (page->data.size() != 0) {
CHECK_EQ(fi->Read(dmlc::BeginPtr(page->data), CHECK_EQ(fi->Read(dmlc::BeginPtr(page->data),

View File

@ -18,7 +18,7 @@ SparsePageSource::SparsePageSource(const std::string& cache_info)
: base_rowid_(0), page_(nullptr), clock_ptr_(0) { : base_rowid_(0), page_(nullptr), clock_ptr_(0) {
// read in the info files // read in the info files
std::vector<std::string> cache_shards = common::Split(cache_info, ':'); std::vector<std::string> cache_shards = common::Split(cache_info, ':');
CHECK_NE(cache_shards.size(), 0); CHECK_NE(cache_shards.size(), 0U);
{ {
std::string name_info = cache_shards[0]; std::string name_info = cache_shards[0];
std::unique_ptr<dmlc::Stream> finfo(dmlc::Stream::Create(name_info.c_str(), "r")); std::unique_ptr<dmlc::Stream> finfo(dmlc::Stream::Create(name_info.c_str(), "r"));
@ -85,7 +85,7 @@ const RowBatch& SparsePageSource::Value() const {
bool SparsePageSource::CacheExist(const std::string& cache_info) { bool SparsePageSource::CacheExist(const std::string& cache_info) {
std::vector<std::string> cache_shards = common::Split(cache_info, ':'); std::vector<std::string> cache_shards = common::Split(cache_info, ':');
CHECK_NE(cache_shards.size(), 0); CHECK_NE(cache_shards.size(), 0U);
{ {
std::string name_info = cache_shards[0]; std::string name_info = cache_shards[0];
std::unique_ptr<dmlc::Stream> finfo(dmlc::Stream::Create(name_info.c_str(), "r", true)); std::unique_ptr<dmlc::Stream> finfo(dmlc::Stream::Create(name_info.c_str(), "r", true));
@ -102,7 +102,7 @@ bool SparsePageSource::CacheExist(const std::string& cache_info) {
void SparsePageSource::Create(dmlc::Parser<uint32_t>* src, void SparsePageSource::Create(dmlc::Parser<uint32_t>* src,
const std::string& cache_info) { const std::string& cache_info) {
std::vector<std::string> cache_shards = common::Split(cache_info, ':'); std::vector<std::string> cache_shards = common::Split(cache_info, ':');
CHECK_NE(cache_shards.size(), 0); CHECK_NE(cache_shards.size(), 0U);
// read in the info files. // read in the info files.
std::string name_info = cache_shards[0]; std::string name_info = cache_shards[0];
std::vector<std::string> name_shards, format_shards; std::vector<std::string> name_shards, format_shards;
@ -112,7 +112,7 @@ void SparsePageSource::Create(dmlc::Parser<uint32_t>* src,
} }
{ {
SparsePage::Writer writer(name_shards, format_shards, 6); SparsePage::Writer writer(name_shards, format_shards, 6);
std::unique_ptr<SparsePage> page; std::shared_ptr<SparsePage> page;
writer.Alloc(&page); page->Clear(); writer.Alloc(&page); page->Clear();
MetaInfo info; MetaInfo info;
@ -170,7 +170,7 @@ void SparsePageSource::Create(dmlc::Parser<uint32_t>* src,
void SparsePageSource::Create(DMatrix* src, void SparsePageSource::Create(DMatrix* src,
const std::string& cache_info) { const std::string& cache_info) {
std::vector<std::string> cache_shards = common::Split(cache_info, ':'); std::vector<std::string> cache_shards = common::Split(cache_info, ':');
CHECK_NE(cache_shards.size(), 0); CHECK_NE(cache_shards.size(), 0U);
// read in the info files. // read in the info files.
std::string name_info = cache_shards[0]; std::string name_info = cache_shards[0];
std::vector<std::string> name_shards, format_shards; std::vector<std::string> name_shards, format_shards;
@ -180,7 +180,7 @@ void SparsePageSource::Create(DMatrix* src,
} }
{ {
SparsePage::Writer writer(name_shards, format_shards, 6); SparsePage::Writer writer(name_shards, format_shards, 6);
std::unique_ptr<SparsePage> page; std::shared_ptr<SparsePage> page;
writer.Alloc(&page); page->Clear(); writer.Alloc(&page); page->Clear();
MetaInfo info = src->info(); MetaInfo info = src->info();

View File

@ -32,7 +32,7 @@ SparsePage::Writer::Writer(
std::unique_ptr<SparsePage::Format> fmt( std::unique_ptr<SparsePage::Format> fmt(
SparsePage::Format::Create(format_shard)); SparsePage::Format::Create(format_shard));
fo->Write(format_shard); fo->Write(format_shard);
std::unique_ptr<SparsePage> page; std::shared_ptr<SparsePage> page;
while (wqueue->Pop(&page)) { while (wqueue->Pop(&page)) {
if (page.get() == nullptr) break; if (page.get() == nullptr) break;
fmt->Write(*page, fo.get()); fmt->Write(*page, fo.get());
@ -47,7 +47,7 @@ SparsePage::Writer::Writer(
SparsePage::Writer::~Writer() { SparsePage::Writer::~Writer() {
for (auto& queue : qworkers_) { for (auto& queue : qworkers_) {
// use nullptr to signal termination. // use nullptr to signal termination.
std::unique_ptr<SparsePage> sig(nullptr); std::shared_ptr<SparsePage> sig(nullptr);
queue.Push(std::move(sig)); queue.Push(std::move(sig));
} }
for (auto& thread : workers_) { for (auto& thread : workers_) {
@ -55,12 +55,12 @@ SparsePage::Writer::~Writer() {
} }
} }
void SparsePage::Writer::PushWrite(std::unique_ptr<SparsePage>&& page) { void SparsePage::Writer::PushWrite(std::shared_ptr<SparsePage>&& page) {
qworkers_[clock_ptr_].Push(std::move(page)); qworkers_[clock_ptr_].Push(std::move(page));
clock_ptr_ = (clock_ptr_ + 1) % workers_.size(); clock_ptr_ = (clock_ptr_ + 1) % workers_.size();
} }
void SparsePage::Writer::Alloc(std::unique_ptr<SparsePage>* out_page) { void SparsePage::Writer::Alloc(std::shared_ptr<SparsePage>* out_page) {
CHECK(out_page->get() == nullptr); CHECK(out_page->get() == nullptr);
if (num_free_buffer_ != 0) { if (num_free_buffer_ != 0) {
out_page->reset(new SparsePage()); out_page->reset(new SparsePage());

View File

@ -176,7 +176,7 @@ class GBLinear : public GradientBooster {
if (model.weight.size() == 0) { if (model.weight.size() == 0) {
model.InitModel(); model.InitModel();
} }
CHECK_EQ(ntree_limit, 0) CHECK_EQ(ntree_limit, 0U)
<< "GBLinear::Predict ntrees is only valid for gbtree predictor"; << "GBLinear::Predict ntrees is only valid for gbtree predictor";
std::vector<bst_float> &preds = *out_preds; std::vector<bst_float> &preds = *out_preds;
const std::vector<bst_float>& base_margin = p_fmat->info().base_margin; const std::vector<bst_float>& base_margin = p_fmat->info().base_margin;

View File

@ -246,7 +246,7 @@ class GBTree : public GradientBooster {
new_trees.push_back(std::move(ret)); new_trees.push_back(std::move(ret));
} else { } else {
const int ngroup = mparam.num_output_group; const int ngroup = mparam.num_output_group;
CHECK_EQ(gpair.size() % ngroup, 0) CHECK_EQ(gpair.size() % ngroup, 0U)
<< "must have exactly ngroup*nrow gpairs"; << "must have exactly ngroup*nrow gpairs";
std::vector<bst_gpair> tmp(gpair.size() / ngroup); std::vector<bst_gpair> tmp(gpair.size() / ngroup);
for (int gid = 0; gid < ngroup; ++gid) { for (int gid = 0; gid < ngroup; ++gid) {

View File

@ -243,7 +243,7 @@ class LearnerImpl : public Learner {
CHECK_NE(header, "bs64") CHECK_NE(header, "bs64")
<< "Base64 format is no longer supported in brick."; << "Base64 format is no longer supported in brick.";
if (header == "binf") { if (header == "binf") {
CHECK_EQ(fp.Read(&header[0], 4), 4); CHECK_EQ(fp.Read(&header[0], 4), 4U);
} }
} }
// use the peekable reader. // use the peekable reader.

View File

@ -24,7 +24,7 @@ struct EvalEWiseBase : public Metric {
bst_float Eval(const std::vector<bst_float>& preds, bst_float Eval(const std::vector<bst_float>& preds,
const MetaInfo& info, const MetaInfo& info,
bool distributed) const override { bool distributed) const override {
CHECK_NE(info.labels.size(), 0) << "label set cannot be empty"; CHECK_NE(info.labels.size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.size(), info.labels.size()) CHECK_EQ(preds.size(), info.labels.size())
<< "label and prediction size not match, " << "label and prediction size not match, "
<< "hint: use merror or mlogloss for multi-class classification"; << "hint: use merror or mlogloss for multi-class classification";

View File

@ -23,11 +23,11 @@ struct EvalMClassBase : public Metric {
bst_float Eval(const std::vector<bst_float> &preds, bst_float Eval(const std::vector<bst_float> &preds,
const MetaInfo &info, const MetaInfo &info,
bool distributed) const override { bool distributed) const override {
CHECK_NE(info.labels.size(), 0) << "label set cannot be empty"; CHECK_NE(info.labels.size(), 0U) << "label set cannot be empty";
CHECK(preds.size() % info.labels.size() == 0) CHECK(preds.size() % info.labels.size() == 0)
<< "label and prediction size not match"; << "label and prediction size not match";
const size_t nclass = preds.size() / info.labels.size(); const size_t nclass = preds.size() / info.labels.size();
CHECK_GE(nclass, 1) CHECK_GE(nclass, 1U)
<< "mlogloss and merror are only used for multi-class classification," << "mlogloss and merror are only used for multi-class classification,"
<< " use logloss for binary classification"; << " use logloss for binary classification";
const bst_omp_uint ndata = static_cast<bst_omp_uint>(info.labels.size()); const bst_omp_uint ndata = static_cast<bst_omp_uint>(info.labels.size());

View File

@ -84,7 +84,7 @@ struct EvalAuc : public Metric {
bst_float Eval(const std::vector<bst_float> &preds, bst_float Eval(const std::vector<bst_float> &preds,
const MetaInfo &info, const MetaInfo &info,
bool distributed) const override { bool distributed) const override {
CHECK_NE(info.labels.size(), 0) << "label set cannot be empty"; CHECK_NE(info.labels.size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.size(), info.labels.size()) CHECK_EQ(preds.size(), info.labels.size())
<< "label size predict size not match"; << "label size predict size not match";
std::vector<unsigned> tgptr(2, 0); std::vector<unsigned> tgptr(2, 0);
@ -166,7 +166,7 @@ struct EvalRankList : public Metric {
std::vector<unsigned> tgptr(2, 0); std::vector<unsigned> tgptr(2, 0);
tgptr[1] = static_cast<unsigned>(preds.size()); tgptr[1] = static_cast<unsigned>(preds.size());
const std::vector<unsigned> &gptr = info.group_ptr.size() == 0 ? tgptr : info.group_ptr; const std::vector<unsigned> &gptr = info.group_ptr.size() == 0 ? tgptr : info.group_ptr;
CHECK_NE(gptr.size(), 0) << "must specify group when constructing rank file"; CHECK_NE(gptr.size(), 0U) << "must specify group when constructing rank file";
CHECK_EQ(gptr.back(), preds.size()) CHECK_EQ(gptr.back(), preds.size())
<< "EvalRanklist: group structure must match number of prediction"; << "EvalRanklist: group structure must match number of prediction";
const bst_omp_uint ngroup = static_cast<bst_omp_uint>(gptr.size() - 1); const bst_omp_uint ngroup = static_cast<bst_omp_uint>(gptr.size() - 1);

View File

@ -39,7 +39,7 @@ class SoftmaxMultiClassObj : public ObjFunction {
const MetaInfo& info, const MetaInfo& info,
int iter, int iter,
std::vector<bst_gpair>* out_gpair) override { std::vector<bst_gpair>* out_gpair) override {
CHECK_NE(info.labels.size(), 0) << "label set cannot be empty"; CHECK_NE(info.labels.size(), 0U) << "label set cannot be empty";
CHECK(preds.size() == (static_cast<size_t>(param_.num_class) * info.labels.size())) CHECK(preds.size() == (static_cast<size_t>(param_.num_class) * info.labels.size()))
<< "SoftmaxMultiClassObj: label size and pred size does not match"; << "SoftmaxMultiClassObj: label size and pred size does not match";
out_gpair->resize(preds.size()); out_gpair->resize(preds.size());

View File

@ -86,7 +86,7 @@ class RegLossObj : public ObjFunction {
const MetaInfo &info, const MetaInfo &info,
int iter, int iter,
std::vector<bst_gpair> *out_gpair) override { std::vector<bst_gpair> *out_gpair) override {
CHECK_NE(info.labels.size(), 0) << "label set cannot be empty"; CHECK_NE(info.labels.size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.size(), info.labels.size()) CHECK_EQ(preds.size(), info.labels.size())
<< "labels are not correctly provided" << "labels are not correctly provided"
<< "preds.size=" << preds.size() << ", label.size=" << info.labels.size(); << "preds.size=" << preds.size() << ", label.size=" << info.labels.size();
@ -168,7 +168,7 @@ class PoissonRegression : public ObjFunction {
const MetaInfo &info, const MetaInfo &info,
int iter, int iter,
std::vector<bst_gpair> *out_gpair) override { std::vector<bst_gpair> *out_gpair) override {
CHECK_NE(info.labels.size(), 0) << "label set cannot be empty"; CHECK_NE(info.labels.size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.size(), info.labels.size()) << "labels are not correctly provided"; CHECK_EQ(preds.size(), info.labels.size()) << "labels are not correctly provided";
out_gpair->resize(preds.size()); out_gpair->resize(preds.size());
// check if label in range // check if label in range
@ -229,7 +229,7 @@ class GammaRegression : public ObjFunction {
const MetaInfo &info, const MetaInfo &info,
int iter, int iter,
std::vector<bst_gpair> *out_gpair) override { std::vector<bst_gpair> *out_gpair) override {
CHECK_NE(info.labels.size(), 0) << "label set cannot be empty"; CHECK_NE(info.labels.size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.size(), info.labels.size()) << "labels are not correctly provided"; CHECK_EQ(preds.size(), info.labels.size()) << "labels are not correctly provided";
out_gpair->resize(preds.size()); out_gpair->resize(preds.size());
// check if label in range // check if label in range
@ -294,7 +294,7 @@ class TweedieRegression : public ObjFunction {
const MetaInfo &info, const MetaInfo &info,
int iter, int iter,
std::vector<bst_gpair> *out_gpair) override { std::vector<bst_gpair> *out_gpair) override {
CHECK_NE(info.labels.size(), 0) << "label set cannot be empty"; CHECK_NE(info.labels.size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.size(), info.labels.size()) << "labels are not correctly provided"; CHECK_EQ(preds.size(), info.labels.size()) << "labels are not correctly provided";
out_gpair->resize(preds.size()); out_gpair->resize(preds.size());
// check if label in range // check if label in range

View File

@ -204,7 +204,7 @@ struct TrainParam : public dmlc::Parameter<TrainParam> {
/*! \brief maximum sketch size */ /*! \brief maximum sketch size */
inline unsigned max_sketch_size() const { inline unsigned max_sketch_size() const {
unsigned ret = static_cast<unsigned>(sketch_ratio / sketch_eps); unsigned ret = static_cast<unsigned>(sketch_ratio / sketch_eps);
CHECK_GT(ret, 0); CHECK_GT(ret, 0U);
return ret; return ret;
} }
}; };

View File

@ -159,7 +159,7 @@ class ColMaker: public TreeUpdater {
} }
unsigned n = static_cast<unsigned>(param.colsample_bytree * feat_index.size()); unsigned n = static_cast<unsigned>(param.colsample_bytree * feat_index.size());
std::shuffle(feat_index.begin(), feat_index.end(), common::GlobalRandom()); std::shuffle(feat_index.begin(), feat_index.end(), common::GlobalRandom());
CHECK_GT(n, 0) CHECK_GT(n, 0U)
<< "colsample_bytree=" << param.colsample_bytree << "colsample_bytree=" << param.colsample_bytree
<< " is too small that no feature can be included"; << " is too small that no feature can be included";
feat_index.resize(n); feat_index.resize(n);
@ -628,7 +628,7 @@ class ColMaker: public TreeUpdater {
if (param.colsample_bylevel != 1.0f) { if (param.colsample_bylevel != 1.0f) {
std::shuffle(feat_set.begin(), feat_set.end(), common::GlobalRandom()); std::shuffle(feat_set.begin(), feat_set.end(), common::GlobalRandom());
unsigned n = static_cast<unsigned>(param.colsample_bylevel * feat_index.size()); unsigned n = static_cast<unsigned>(param.colsample_bylevel * feat_index.size());
CHECK_GT(n, 0) CHECK_GT(n, 0U)
<< "colsample_bylevel is too small that no feature can be included"; << "colsample_bylevel is too small that no feature can be included";
feat_set.resize(n); feat_set.resize(n);
} }
@ -784,7 +784,7 @@ class DistColMaker : public ColMaker<TStats, TConstraint> {
DMatrix* dmat, DMatrix* dmat,
const std::vector<RegTree*> &trees) override { const std::vector<RegTree*> &trees) override {
TStats::CheckInfo(dmat->info()); TStats::CheckInfo(dmat->info());
CHECK_EQ(trees.size(), 1) << "DistColMaker: only support one tree at a time"; CHECK_EQ(trees.size(), 1U) << "DistColMaker: only support one tree at a time";
// build the tree // build the tree
builder.Update(gpair, dmat, trees[0]); builder.Update(gpair, dmat, trees[0]);
//// prune the tree, note that pruner will sync the tree //// prune the tree, note that pruner will sync the tree

View File

@ -283,7 +283,7 @@ class FastHistMaker: public TreeUpdater {
} }
builder_.Init(this->nthread, nbins); builder_.Init(this->nthread, nbins);
CHECK_EQ(info.root_index.size(), 0); CHECK_EQ(info.root_index.size(), 0U);
std::vector<bst_uint>& row_indices = row_set_collection_.row_indices_; std::vector<bst_uint>& row_indices = row_set_collection_.row_indices_;
// mark subsample and build list of member rows // mark subsample and build list of member rows
if (param.subsample < 1.0f) { if (param.subsample < 1.0f) {
@ -313,7 +313,7 @@ class FastHistMaker: public TreeUpdater {
} }
unsigned n = static_cast<unsigned>(param.colsample_bytree * feat_index.size()); unsigned n = static_cast<unsigned>(param.colsample_bytree * feat_index.size());
std::shuffle(feat_index.begin(), feat_index.end(), common::GlobalRandom()); std::shuffle(feat_index.begin(), feat_index.end(), common::GlobalRandom());
CHECK_GT(n, 0) CHECK_GT(n, 0U)
<< "colsample_bytree=" << param.colsample_bytree << "colsample_bytree=" << param.colsample_bytree
<< " is too small that no feature can be included"; << " is too small that no feature can be included";
feat_index.resize(n); feat_index.resize(n);
@ -353,7 +353,7 @@ class FastHistMaker: public TreeUpdater {
} }
} }
} }
CHECK_GT(min_nbins_per_feature, 0); CHECK_GT(min_nbins_per_feature, 0U);
} }
{ {
snode.reserve(256); snode.reserve(256);

View File

@ -55,7 +55,7 @@ class HistMaker: public BaseMaker {
const MetaInfo &info, const MetaInfo &info,
const bst_uint ridx) { const bst_uint ridx) {
unsigned i = std::upper_bound(cut, cut + size, fv) - cut; unsigned i = std::upper_bound(cut, cut + size, fv) - cut;
CHECK_NE(size, 0) << "try insert into size=0"; CHECK_NE(size, 0U) << "try insert into size=0";
CHECK_LT(i, size); CHECK_LT(i, size);
data[i].Add(gpair, info, ridx); data[i].Add(gpair, info, ridx);
} }
@ -664,7 +664,7 @@ class GlobalProposalHistMaker: public CQHistMaker<TStats> {
cached_cut_.clear(); cached_cut_.clear();
} }
if (cached_rptr_.size() == 0) { if (cached_rptr_.size() == 0) {
CHECK_EQ(this->qexpand.size(), 1); CHECK_EQ(this->qexpand.size(), 1U);
CQHistMaker<TStats>::ResetPosAndPropose(gpair, p_fmat, fset, tree); CQHistMaker<TStats>::ResetPosAndPropose(gpair, p_fmat, fset, tree);
cached_rptr_ = this->wspace.rptr; cached_rptr_ = this->wspace.rptr;
cached_cut_ = this->wspace.cut; cached_cut_ = this->wspace.cut;

View File

@ -257,7 +257,7 @@ class SketchMaker: public BaseMaker {
} }
} }
inline void SyncNodeStats(void) { inline void SyncNodeStats(void) {
CHECK_NE(qexpand.size(), 0); CHECK_NE(qexpand.size(), 0U);
std::vector<SKStats> tmp(qexpand.size()); std::vector<SKStats> tmp(qexpand.size());
for (size_t i = 0; i < qexpand.size(); ++i) { for (size_t i = 0; i < qexpand.size(); ++i) {
tmp[i] = node_stats[qexpand[i]]; tmp[i] = node_stats[qexpand[i]];

View File

@ -21,6 +21,9 @@ if [ ${TRAVIS_OS_NAME} == "osx" ]; then
echo 'USE_OPENMP=0' >> config.mk echo 'USE_OPENMP=0' >> config.mk
echo 'TMPVAR := $(XGB_PLUGINS)' >> config.mk echo 'TMPVAR := $(XGB_PLUGINS)' >> config.mk
echo 'XGB_PLUGINS = $(filter-out plugin/lz4/plugin.mk, $(TMPVAR))' >> config.mk echo 'XGB_PLUGINS = $(filter-out plugin/lz4/plugin.mk, $(TMPVAR))' >> config.mk
else
# use g++-4.8 for linux
export CXX=g++-4.8
fi fi
if [ ${TASK} == "python_test" ]; then if [ ${TASK} == "python_test" ]; then