From d581a3d0e7fbc8e73dbeb6f81fdc1e101606737e Mon Sep 17 00:00:00 2001 From: Tianqi Chen Date: Thu, 16 Mar 2017 18:48:37 -0700 Subject: [PATCH] [UPDATE] Update rabit and threadlocal (#2114) * [UPDATE] Update rabit and threadlocal * minor fix to make build system happy * upgrade requirement to g++4.8 * upgrade dmlc-core * update travis --- .travis.yml | 6 +++++- Makefile | 2 +- NEWS.md | 4 ++++ dmlc-core | 2 +- doc/build.md | 4 ++-- rabit | 2 +- src/c_api/c_api.cc | 2 +- src/common/hist_util.cc | 2 +- src/common/row_set.h | 2 +- src/data/sparse_batch_page.h | 8 ++++---- src/data/sparse_page_dmatrix.cc | 2 +- src/data/sparse_page_raw_format.cc | 2 +- src/data/sparse_page_source.cc | 12 ++++++------ src/data/sparse_page_writer.cc | 8 ++++---- src/gbm/gblinear.cc | 2 +- src/gbm/gbtree.cc | 2 +- src/learner.cc | 2 +- src/metric/elementwise_metric.cc | 2 +- src/metric/multiclass_metric.cc | 4 ++-- src/metric/rank_metric.cc | 4 ++-- src/objective/multiclass_obj.cc | 2 +- src/objective/regression_obj.cc | 8 ++++---- src/tree/param.h | 2 +- src/tree/updater_colmaker.cc | 6 +++--- src/tree/updater_fast_hist.cc | 6 +++--- src/tree/updater_histmaker.cc | 4 ++-- src/tree/updater_skmaker.cc | 2 +- tests/travis/run_test.sh | 3 +++ 28 files changed, 59 insertions(+), 48 deletions(-) diff --git a/.travis.yml b/.travis.yml index 85bf0c4c8..714cf4eaa 100644 --- a/.travis.yml +++ b/.travis.yml @@ -41,17 +41,21 @@ matrix: # dependent apt packages addons: apt: + sources: + - ubuntu-toolchain-r-test packages: - doxygen - wget - libcurl4-openssl-dev - unzip - graphviz + - gcc-4.8 + - g++-4.8 before_install: - source dmlc-core/scripts/travis/travis_setup_env.sh - export PYTHONPATH=${PYTHONPATH}:${PWD}/python-package - - echo "MAVEN_OPTS='-Xmx2048m -XX:MaxPermSize=1024m -XX:ReservedCodeCacheSize=512m -Dorg.slf4j.simpleLogger.defaultLogLevel=error'" > ~/.mavenrc + - echo "MAVEN_OPTS='-Xmx2048m -XX:MaxPermSize=1024m -XX:ReservedCodeCacheSize=512m 
-Dorg.slf4j.simpleLogger.defaultLogLevel=error'" > ~/.mavenrc install: - source tests/travis/setup.sh diff --git a/Makefile b/Makefile index 42065f0cd..20776577c 100644 --- a/Makefile +++ b/Makefile @@ -48,7 +48,7 @@ export CXX = $(if $(shell which g++-6),g++-6,$(if $(shell which g++-mp-6),g++-mp endif export LDFLAGS= -pthread -lm $(ADD_LDFLAGS) $(DMLC_LDFLAGS) $(PLUGIN_LDFLAGS) -export CFLAGS= -std=c++0x -Wall -Wno-unknown-pragmas -Iinclude $(ADD_CFLAGS) $(PLUGIN_CFLAGS) +export CFLAGS= -std=c++11 -Wall -Wno-unknown-pragmas -Iinclude $(ADD_CFLAGS) $(PLUGIN_CFLAGS) CFLAGS += -I$(DMLC_CORE)/include -I$(RABIT)/include #java include path export JAVAINCFLAGS = -I${JAVA_HOME}/include -I./java diff --git a/NEWS.md b/NEWS.md index da512f4b1..48c382169 100644 --- a/NEWS.md +++ b/NEWS.md @@ -8,6 +8,10 @@ This file records the changes in xgboost library in reverse chronological order. - Specialized some prediction routine * Automatically remove nan from input data when it is sparse. - This can solve some of user reported problem of istart != hist.size +* Minor fixes + - Thread local variable is upgraded so it is automatically freed at thread exit. +* Migrate to C++11 + - The current master version now requires a C++11 enabled compiler (g++-4.8 or higher) ## v0.6 (2016.07.29) * Version 0.5 is skipped due to major improvements in the core diff --git a/dmlc-core b/dmlc-core index 78b78be34..2b75a0ce6 160000 --- a/dmlc-core +++ b/dmlc-core @@ -1 +1 @@ -Subproject commit 78b78be34ac27d30f2193f3d51848c62887669c4 +Subproject commit 2b75a0ce6f191ad0fcb5319039b41e990968542a diff --git a/doc/build.md b/doc/build.md index d2eaf99cd..d0d1fec16 100644 --- a/doc/build.md +++ b/doc/build.md @@ -42,7 +42,7 @@ Our goal is to build the shared library: The minimal building requirement is -- A recent c++ compiler supporting C++ 11 (g++-4.6 or higher) +- A recent c++ compiler supporting C++ 11 (g++-4.8 or higher) We can edit `make/config.mk` to change the compile options, and then build by `make`. 
If everything goes well, we can go to the specific language installation section. @@ -222,7 +222,7 @@ first follow [Building on OSX](#building-on-osx) to get the OpenMP enabled compi ### Installing the development version -Make sure you have installed git and a recent C++ compiler supporting C++11 (e.g., g++-4.6 or higher). +Make sure you have installed git and a recent C++ compiler supporting C++11 (e.g., g++-4.8 or higher). On Windows, Rtools must be installed, and its bin directory has to be added to PATH during the installation. And see the previous subsection for an OSX tip. diff --git a/rabit b/rabit index a9a2a69dc..a764d45cf 160000 --- a/rabit +++ b/rabit @@ -1 +1 @@ -Subproject commit a9a2a69dc1144180a43f7d2d1097264482be7817 +Subproject commit a764d45cfb438cc9f15cf47ce586c02ff2c65d0f diff --git a/src/c_api/c_api.cc b/src/c_api/c_api.cc index d43f9fe00..1621c8b0f 100644 --- a/src/c_api/c_api.cc +++ b/src/c_api/c_api.cc @@ -385,7 +385,7 @@ XGB_DLL int XGDMatrixSliceDMatrix(DMatrixHandle handle, src.CopyFrom(static_cast*>(handle)->get()); data::SimpleCSRSource& ret = *source; - CHECK_EQ(src.info.group_ptr.size(), 0) + CHECK_EQ(src.info.group_ptr.size(), 0U) << "slice does not support group structure"; ret.Clear(); diff --git a/src/common/hist_util.cc b/src/common/hist_util.cc index fa61bfc79..09fe1f4de 100644 --- a/src/common/hist_util.cc +++ b/src/common/hist_util.cc @@ -128,7 +128,7 @@ void GHistIndexMatrix::Init(DMatrix* p_fmat) { } index.resize(row_ptr.back()); - CHECK_GT(cut->cut.size(), 0); + CHECK_GT(cut->cut.size(), 0U); CHECK_EQ(cut->row_ptr.back(), cut->cut.size()); omp_ulong bsize = static_cast(batch.size); diff --git a/src/common/row_set.h b/src/common/row_set.h index 58103e664..7a8cafcbb 100644 --- a/src/common/row_set.h +++ b/src/common/row_set.h @@ -50,7 +50,7 @@ class RowSetCollection { } // initialize node id 0->everything inline void Init() { - CHECK_EQ(elem_of_each_node_.size(), 0); + CHECK_EQ(elem_of_each_node_.size(), 0U); const bst_uint* 
begin = dmlc::BeginPtr(row_indices_); const bst_uint* end = dmlc::BeginPtr(row_indices_) + row_indices_.size(); elem_of_each_node_.emplace_back(Elem(begin, end)); diff --git a/src/data/sparse_batch_page.h b/src/data/sparse_batch_page.h index 5f706a93b..9951fbe1d 100644 --- a/src/data/sparse_batch_page.h +++ b/src/data/sparse_batch_page.h @@ -207,14 +207,14 @@ class SparsePage::Writer { * writing is done by another thread inside writer. * \param page The page to be written */ - void PushWrite(std::unique_ptr&& page); + void PushWrite(std::shared_ptr&& page); /*! * \brief Allocate a page to store results. * This function can block when the writer is too slow and buffer pages * have not yet been recycled. * \param out_page Used to store the allocated pages. */ - void Alloc(std::unique_ptr* out_page); + void Alloc(std::shared_ptr* out_page); private: /*! \brief number of allocated pages */ @@ -224,9 +224,9 @@ class SparsePage::Writer { /*! \brief writer threads */ std::vector > workers_; /*! \brief recycler queue */ - dmlc::ConcurrentBlockingQueue > qrecycle_; + dmlc::ConcurrentBlockingQueue > qrecycle_; /*! 
\brief worker threads */ - std::vector > > qworkers_; + std::vector > > qworkers_; }; #endif // DMLC_ENABLE_STD_THREAD diff --git a/src/data/sparse_page_dmatrix.cc b/src/data/sparse_page_dmatrix.cc index 61fde3e50..676d0dcd5 100644 --- a/src/data/sparse_page_dmatrix.cc +++ b/src/data/sparse_page_dmatrix.cc @@ -254,7 +254,7 @@ void SparsePageDMatrix::InitColAccess(const std::vector& enabled, { SparsePage::Writer writer(name_shards, format_shards, 6); - std::unique_ptr page; + std::shared_ptr page; writer.Alloc(&page); page->Clear(); double tstart = dmlc::GetTime(); diff --git a/src/data/sparse_page_raw_format.cc b/src/data/sparse_page_raw_format.cc index d0019fde6..07c390cab 100644 --- a/src/data/sparse_page_raw_format.cc +++ b/src/data/sparse_page_raw_format.cc @@ -16,7 +16,7 @@ class SparsePageRawFormat : public SparsePage::Format { public: bool Read(SparsePage* page, dmlc::SeekStream* fi) override { if (!fi->Read(&(page->offset))) return false; - CHECK_NE(page->offset.size(), 0) << "Invalid SparsePage file"; + CHECK_NE(page->offset.size(), 0U) << "Invalid SparsePage file"; page->data.resize(page->offset.back()); if (page->data.size() != 0) { CHECK_EQ(fi->Read(dmlc::BeginPtr(page->data), diff --git a/src/data/sparse_page_source.cc b/src/data/sparse_page_source.cc index a4c244291..5d87d0502 100644 --- a/src/data/sparse_page_source.cc +++ b/src/data/sparse_page_source.cc @@ -18,7 +18,7 @@ SparsePageSource::SparsePageSource(const std::string& cache_info) : base_rowid_(0), page_(nullptr), clock_ptr_(0) { // read in the info files std::vector cache_shards = common::Split(cache_info, ':'); - CHECK_NE(cache_shards.size(), 0); + CHECK_NE(cache_shards.size(), 0U); { std::string name_info = cache_shards[0]; std::unique_ptr finfo(dmlc::Stream::Create(name_info.c_str(), "r")); @@ -85,7 +85,7 @@ const RowBatch& SparsePageSource::Value() const { bool SparsePageSource::CacheExist(const std::string& cache_info) { std::vector cache_shards = common::Split(cache_info, ':'); - 
CHECK_NE(cache_shards.size(), 0); + CHECK_NE(cache_shards.size(), 0U); { std::string name_info = cache_shards[0]; std::unique_ptr finfo(dmlc::Stream::Create(name_info.c_str(), "r", true)); @@ -102,7 +102,7 @@ bool SparsePageSource::CacheExist(const std::string& cache_info) { void SparsePageSource::Create(dmlc::Parser* src, const std::string& cache_info) { std::vector cache_shards = common::Split(cache_info, ':'); - CHECK_NE(cache_shards.size(), 0); + CHECK_NE(cache_shards.size(), 0U); // read in the info files. std::string name_info = cache_shards[0]; std::vector name_shards, format_shards; @@ -112,7 +112,7 @@ void SparsePageSource::Create(dmlc::Parser* src, } { SparsePage::Writer writer(name_shards, format_shards, 6); - std::unique_ptr page; + std::shared_ptr page; writer.Alloc(&page); page->Clear(); MetaInfo info; @@ -170,7 +170,7 @@ void SparsePageSource::Create(dmlc::Parser* src, void SparsePageSource::Create(DMatrix* src, const std::string& cache_info) { std::vector cache_shards = common::Split(cache_info, ':'); - CHECK_NE(cache_shards.size(), 0); + CHECK_NE(cache_shards.size(), 0U); // read in the info files. 
std::string name_info = cache_shards[0]; std::vector name_shards, format_shards; @@ -180,7 +180,7 @@ void SparsePageSource::Create(DMatrix* src, } { SparsePage::Writer writer(name_shards, format_shards, 6); - std::unique_ptr page; + std::shared_ptr page; writer.Alloc(&page); page->Clear(); MetaInfo info = src->info(); diff --git a/src/data/sparse_page_writer.cc b/src/data/sparse_page_writer.cc index e16d1aee6..939e7a7a0 100644 --- a/src/data/sparse_page_writer.cc +++ b/src/data/sparse_page_writer.cc @@ -32,7 +32,7 @@ SparsePage::Writer::Writer( std::unique_ptr fmt( SparsePage::Format::Create(format_shard)); fo->Write(format_shard); - std::unique_ptr page; + std::shared_ptr page; while (wqueue->Pop(&page)) { if (page.get() == nullptr) break; fmt->Write(*page, fo.get()); @@ -47,7 +47,7 @@ SparsePage::Writer::Writer( SparsePage::Writer::~Writer() { for (auto& queue : qworkers_) { // use nullptr to signal termination. - std::unique_ptr sig(nullptr); + std::shared_ptr sig(nullptr); queue.Push(std::move(sig)); } for (auto& thread : workers_) { @@ -55,12 +55,12 @@ SparsePage::Writer::~Writer() { } } -void SparsePage::Writer::PushWrite(std::unique_ptr&& page) { +void SparsePage::Writer::PushWrite(std::shared_ptr&& page) { qworkers_[clock_ptr_].Push(std::move(page)); clock_ptr_ = (clock_ptr_ + 1) % workers_.size(); } -void SparsePage::Writer::Alloc(std::unique_ptr* out_page) { +void SparsePage::Writer::Alloc(std::shared_ptr* out_page) { CHECK(out_page->get() == nullptr); if (num_free_buffer_ != 0) { out_page->reset(new SparsePage()); diff --git a/src/gbm/gblinear.cc b/src/gbm/gblinear.cc index 1fee685fd..cb2252256 100644 --- a/src/gbm/gblinear.cc +++ b/src/gbm/gblinear.cc @@ -176,7 +176,7 @@ class GBLinear : public GradientBooster { if (model.weight.size() == 0) { model.InitModel(); } - CHECK_EQ(ntree_limit, 0) + CHECK_EQ(ntree_limit, 0U) << "GBLinear::Predict ntrees is only valid for gbtree predictor"; std::vector &preds = *out_preds; const std::vector& base_margin = 
p_fmat->info().base_margin; diff --git a/src/gbm/gbtree.cc b/src/gbm/gbtree.cc index af96732dc..078883568 100644 --- a/src/gbm/gbtree.cc +++ b/src/gbm/gbtree.cc @@ -246,7 +246,7 @@ class GBTree : public GradientBooster { new_trees.push_back(std::move(ret)); } else { const int ngroup = mparam.num_output_group; - CHECK_EQ(gpair.size() % ngroup, 0) + CHECK_EQ(gpair.size() % ngroup, 0U) << "must have exactly ngroup*nrow gpairs"; std::vector tmp(gpair.size() / ngroup); for (int gid = 0; gid < ngroup; ++gid) { diff --git a/src/learner.cc b/src/learner.cc index f9ebc2afc..911e00f19 100644 --- a/src/learner.cc +++ b/src/learner.cc @@ -243,7 +243,7 @@ class LearnerImpl : public Learner { CHECK_NE(header, "bs64") << "Base64 format is no longer supported in brick."; if (header == "binf") { - CHECK_EQ(fp.Read(&header[0], 4), 4); + CHECK_EQ(fp.Read(&header[0], 4), 4U); } } // use the peekable reader. diff --git a/src/metric/elementwise_metric.cc b/src/metric/elementwise_metric.cc index f022c8130..e3e2eb005 100644 --- a/src/metric/elementwise_metric.cc +++ b/src/metric/elementwise_metric.cc @@ -24,7 +24,7 @@ struct EvalEWiseBase : public Metric { bst_float Eval(const std::vector& preds, const MetaInfo& info, bool distributed) const override { - CHECK_NE(info.labels.size(), 0) << "label set cannot be empty"; + CHECK_NE(info.labels.size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.size(), info.labels.size()) << "label and prediction size not match, " << "hint: use merror or mlogloss for multi-class classification"; diff --git a/src/metric/multiclass_metric.cc b/src/metric/multiclass_metric.cc index f1e407709..191813720 100644 --- a/src/metric/multiclass_metric.cc +++ b/src/metric/multiclass_metric.cc @@ -23,11 +23,11 @@ struct EvalMClassBase : public Metric { bst_float Eval(const std::vector &preds, const MetaInfo &info, bool distributed) const override { - CHECK_NE(info.labels.size(), 0) << "label set cannot be empty"; + CHECK_NE(info.labels.size(), 0U) << "label set 
cannot be empty"; CHECK(preds.size() % info.labels.size() == 0) << "label and prediction size not match"; const size_t nclass = preds.size() / info.labels.size(); - CHECK_GE(nclass, 1) + CHECK_GE(nclass, 1U) << "mlogloss and merror are only used for multi-class classification," << " use logloss for binary classification"; const bst_omp_uint ndata = static_cast(info.labels.size()); diff --git a/src/metric/rank_metric.cc b/src/metric/rank_metric.cc index d31c30180..eb1d0fd54 100644 --- a/src/metric/rank_metric.cc +++ b/src/metric/rank_metric.cc @@ -84,7 +84,7 @@ struct EvalAuc : public Metric { bst_float Eval(const std::vector &preds, const MetaInfo &info, bool distributed) const override { - CHECK_NE(info.labels.size(), 0) << "label set cannot be empty"; + CHECK_NE(info.labels.size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.size(), info.labels.size()) << "label size predict size not match"; std::vector tgptr(2, 0); @@ -166,7 +166,7 @@ struct EvalRankList : public Metric { std::vector tgptr(2, 0); tgptr[1] = static_cast(preds.size()); const std::vector &gptr = info.group_ptr.size() == 0 ? 
tgptr : info.group_ptr; - CHECK_NE(gptr.size(), 0) << "must specify group when constructing rank file"; + CHECK_NE(gptr.size(), 0U) << "must specify group when constructing rank file"; CHECK_EQ(gptr.back(), preds.size()) << "EvalRanklist: group structure must match number of prediction"; const bst_omp_uint ngroup = static_cast(gptr.size() - 1); diff --git a/src/objective/multiclass_obj.cc b/src/objective/multiclass_obj.cc index b2376329a..51925c8d1 100644 --- a/src/objective/multiclass_obj.cc +++ b/src/objective/multiclass_obj.cc @@ -39,7 +39,7 @@ class SoftmaxMultiClassObj : public ObjFunction { const MetaInfo& info, int iter, std::vector* out_gpair) override { - CHECK_NE(info.labels.size(), 0) << "label set cannot be empty"; + CHECK_NE(info.labels.size(), 0U) << "label set cannot be empty"; CHECK(preds.size() == (static_cast(param_.num_class) * info.labels.size())) << "SoftmaxMultiClassObj: label size and pred size does not match"; out_gpair->resize(preds.size()); diff --git a/src/objective/regression_obj.cc b/src/objective/regression_obj.cc index 5e56a5301..ac7e5a4fe 100644 --- a/src/objective/regression_obj.cc +++ b/src/objective/regression_obj.cc @@ -86,7 +86,7 @@ class RegLossObj : public ObjFunction { const MetaInfo &info, int iter, std::vector *out_gpair) override { - CHECK_NE(info.labels.size(), 0) << "label set cannot be empty"; + CHECK_NE(info.labels.size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.size(), info.labels.size()) << "labels are not correctly provided" << "preds.size=" << preds.size() << ", label.size=" << info.labels.size(); @@ -168,7 +168,7 @@ class PoissonRegression : public ObjFunction { const MetaInfo &info, int iter, std::vector *out_gpair) override { - CHECK_NE(info.labels.size(), 0) << "label set cannot be empty"; + CHECK_NE(info.labels.size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.size(), info.labels.size()) << "labels are not correctly provided"; out_gpair->resize(preds.size()); // check if label in range @@ 
-229,7 +229,7 @@ class GammaRegression : public ObjFunction { const MetaInfo &info, int iter, std::vector *out_gpair) override { - CHECK_NE(info.labels.size(), 0) << "label set cannot be empty"; + CHECK_NE(info.labels.size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.size(), info.labels.size()) << "labels are not correctly provided"; out_gpair->resize(preds.size()); // check if label in range @@ -294,7 +294,7 @@ class TweedieRegression : public ObjFunction { const MetaInfo &info, int iter, std::vector *out_gpair) override { - CHECK_NE(info.labels.size(), 0) << "label set cannot be empty"; + CHECK_NE(info.labels.size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.size(), info.labels.size()) << "labels are not correctly provided"; out_gpair->resize(preds.size()); // check if label in range diff --git a/src/tree/param.h b/src/tree/param.h index a9130ba2a..0fdc82885 100644 --- a/src/tree/param.h +++ b/src/tree/param.h @@ -204,7 +204,7 @@ struct TrainParam : public dmlc::Parameter { /*! 
\brief maximum sketch size */ inline unsigned max_sketch_size() const { unsigned ret = static_cast(sketch_ratio / sketch_eps); - CHECK_GT(ret, 0); + CHECK_GT(ret, 0U); return ret; } }; diff --git a/src/tree/updater_colmaker.cc b/src/tree/updater_colmaker.cc index 60a8eb75b..49284d18e 100644 --- a/src/tree/updater_colmaker.cc +++ b/src/tree/updater_colmaker.cc @@ -159,7 +159,7 @@ class ColMaker: public TreeUpdater { } unsigned n = static_cast(param.colsample_bytree * feat_index.size()); std::shuffle(feat_index.begin(), feat_index.end(), common::GlobalRandom()); - CHECK_GT(n, 0) + CHECK_GT(n, 0U) << "colsample_bytree=" << param.colsample_bytree << " is too small that no feature can be included"; feat_index.resize(n); @@ -628,7 +628,7 @@ class ColMaker: public TreeUpdater { if (param.colsample_bylevel != 1.0f) { std::shuffle(feat_set.begin(), feat_set.end(), common::GlobalRandom()); unsigned n = static_cast(param.colsample_bylevel * feat_index.size()); - CHECK_GT(n, 0) + CHECK_GT(n, 0U) << "colsample_bylevel is too small that no feature can be included"; feat_set.resize(n); } @@ -784,7 +784,7 @@ class DistColMaker : public ColMaker { DMatrix* dmat, const std::vector &trees) override { TStats::CheckInfo(dmat->info()); - CHECK_EQ(trees.size(), 1) << "DistColMaker: only support one tree at a time"; + CHECK_EQ(trees.size(), 1U) << "DistColMaker: only support one tree at a time"; // build the tree builder.Update(gpair, dmat, trees[0]); //// prune the tree, note that pruner will sync the tree diff --git a/src/tree/updater_fast_hist.cc b/src/tree/updater_fast_hist.cc index 9d5afd1d9..237cff0e0 100644 --- a/src/tree/updater_fast_hist.cc +++ b/src/tree/updater_fast_hist.cc @@ -283,7 +283,7 @@ class FastHistMaker: public TreeUpdater { } builder_.Init(this->nthread, nbins); - CHECK_EQ(info.root_index.size(), 0); + CHECK_EQ(info.root_index.size(), 0U); std::vector& row_indices = row_set_collection_.row_indices_; // mark subsample and build list of member rows if (param.subsample 
< 1.0f) { @@ -313,7 +313,7 @@ class FastHistMaker: public TreeUpdater { } unsigned n = static_cast(param.colsample_bytree * feat_index.size()); std::shuffle(feat_index.begin(), feat_index.end(), common::GlobalRandom()); - CHECK_GT(n, 0) + CHECK_GT(n, 0U) << "colsample_bytree=" << param.colsample_bytree << " is too small that no feature can be included"; feat_index.resize(n); @@ -353,7 +353,7 @@ class FastHistMaker: public TreeUpdater { } } } - CHECK_GT(min_nbins_per_feature, 0); + CHECK_GT(min_nbins_per_feature, 0U); } { snode.reserve(256); diff --git a/src/tree/updater_histmaker.cc b/src/tree/updater_histmaker.cc index 87c73e9f8..80e8b5495 100644 --- a/src/tree/updater_histmaker.cc +++ b/src/tree/updater_histmaker.cc @@ -55,7 +55,7 @@ class HistMaker: public BaseMaker { const MetaInfo &info, const bst_uint ridx) { unsigned i = std::upper_bound(cut, cut + size, fv) - cut; - CHECK_NE(size, 0) << "try insert into size=0"; + CHECK_NE(size, 0U) << "try insert into size=0"; CHECK_LT(i, size); data[i].Add(gpair, info, ridx); } @@ -664,7 +664,7 @@ class GlobalProposalHistMaker: public CQHistMaker { cached_cut_.clear(); } if (cached_rptr_.size() == 0) { - CHECK_EQ(this->qexpand.size(), 1); + CHECK_EQ(this->qexpand.size(), 1U); CQHistMaker::ResetPosAndPropose(gpair, p_fmat, fset, tree); cached_rptr_ = this->wspace.rptr; cached_cut_ = this->wspace.cut; diff --git a/src/tree/updater_skmaker.cc b/src/tree/updater_skmaker.cc index c2320a0ef..daf4e1e83 100644 --- a/src/tree/updater_skmaker.cc +++ b/src/tree/updater_skmaker.cc @@ -257,7 +257,7 @@ class SketchMaker: public BaseMaker { } } inline void SyncNodeStats(void) { - CHECK_NE(qexpand.size(), 0); + CHECK_NE(qexpand.size(), 0U); std::vector tmp(qexpand.size()); for (size_t i = 0; i < qexpand.size(); ++i) { tmp[i] = node_stats[qexpand[i]]; diff --git a/tests/travis/run_test.sh b/tests/travis/run_test.sh index 0b04e359b..1917b4ac5 100755 --- a/tests/travis/run_test.sh +++ b/tests/travis/run_test.sh @@ -21,6 +21,9 @@ if [ 
${TRAVIS_OS_NAME} == "osx" ]; then echo 'USE_OPENMP=0' >> config.mk echo 'TMPVAR := $(XGB_PLUGINS)' >> config.mk echo 'XGB_PLUGINS = $(filter-out plugin/lz4/plugin.mk, $(TMPVAR))' >> config.mk +else + # use g++-4.8 for linux + export CXX=g++-4.8 fi if [ ${TASK} == "python_test" ]; then