Clang-tidy static analysis (#3222)

* Clang-tidy static analysis

* Modernise checks

* Google coding standard checks

* Identifier renaming according to Google style
Rory Mitchell
2018-04-19 18:57:13 +12:00
committed by GitHub
parent 3242b0a378
commit ccf80703ef
97 changed files with 3407 additions and 3354 deletions
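The renames below follow the Google C++ style that clang-tidy enforces: CamelCase for functions and methods, a trailing underscore for data members, and a k prefix for named constants (accessors like info() become Info()). A minimal sketch of the convention as applied in this diff (illustrative only; MetaInfoExample is a made-up type, not code from this commit):

struct MetaInfoExample {
  size_t num_row_;                    // data member: trailing underscore (was num_row)
  static constexpr int kUnroll = 8;   // constant: kCamelCase (was K)
  size_t NumRow() const { return num_row_; }  // method: CamelCase
};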

src/predictor/cpu_predictor.cc

@@ -24,7 +24,7 @@ class CPUPredictor : public Predictor {
     for (size_t i = tree_begin; i < tree_end; ++i) {
       if (tree_info[i] == bst_group) {
         int tid = trees[i]->GetLeafIndex(*p_feats, root_index);
-        psum += (*trees[i])[tid].leaf_value();
+        psum += (*trees[i])[tid].LeafValue();
       }
     }
     p_feats->Drop(inst);
@@ -45,35 +45,35 @@ class CPUPredictor : public Predictor {
                          std::vector<bst_float>* out_preds,
                          const gbm::GBTreeModel& model, int num_group,
                          unsigned tree_begin, unsigned tree_end) {
-    const MetaInfo& info = p_fmat->info();
+    const MetaInfo& info = p_fmat->Info();
     const int nthread = omp_get_max_threads();
     InitThreadTemp(nthread, model.param.num_feature);
     std::vector<bst_float>& preds = *out_preds;
     CHECK_EQ(model.param.size_leaf_vector, 0)
         << "size_leaf_vector is enforced to 0 so far";
-    CHECK_EQ(preds.size(), p_fmat->info().num_row * num_group);
+    CHECK_EQ(preds.size(), p_fmat->Info().num_row_ * num_group);
     // start collecting the prediction
     dmlc::DataIter<RowBatch>* iter = p_fmat->RowIterator();
     iter->BeforeFirst();
     while (iter->Next()) {
       const RowBatch& batch = iter->Value();
       // parallel over local batch
-      const int K = 8;
-      const bst_omp_uint nsize = static_cast<bst_omp_uint>(batch.size);
-      const bst_omp_uint rest = nsize % K;
+      constexpr int kUnroll = 8;
+      const auto nsize = static_cast<bst_omp_uint>(batch.size);
+      const bst_omp_uint rest = nsize % kUnroll;
 #pragma omp parallel for schedule(static)
-      for (bst_omp_uint i = 0; i < nsize - rest; i += K) {
+      for (bst_omp_uint i = 0; i < nsize - rest; i += kUnroll) {
         const int tid = omp_get_thread_num();
         RegTree::FVec& feats = thread_temp[tid];
-        int64_t ridx[K];
-        RowBatch::Inst inst[K];
-        for (int k = 0; k < K; ++k) {
+        int64_t ridx[kUnroll];
+        RowBatch::Inst inst[kUnroll];
+        for (int k = 0; k < kUnroll; ++k) {
           ridx[k] = static_cast<int64_t>(batch.base_rowid + i + k);
         }
-        for (int k = 0; k < K; ++k) {
+        for (int k = 0; k < kUnroll; ++k) {
           inst[k] = batch[i + k];
         }
-        for (int k = 0; k < K; ++k) {
+        for (int k = 0; k < kUnroll; ++k) {
           for (int gid = 0; gid < num_group; ++gid) {
             const size_t offset = ridx[k] * num_group + gid;
             preds[offset] += this->PredValue(
@@ -84,7 +84,7 @@ class CPUPredictor : public Predictor {
       }
       for (bst_omp_uint i = nsize - rest; i < nsize; ++i) {
         RegTree::FVec& feats = thread_temp[0];
-        const int64_t ridx = static_cast<int64_t>(batch.base_rowid + i);
+        const auto ridx = static_cast<int64_t>(batch.base_rowid + i);
         const RowBatch::Inst inst = batch[i];
         for (int gid = 0; gid < num_group; ++gid) {
           const size_t offset = ridx * num_group + gid;
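The two hunks above keep the existing prediction strategy: rows are processed in blocks of kUnroll so indices and instances can be gathered before the tree walk, and a scalar loop handles the last nsize % kUnroll rows. A stripped-down sketch of the same gather-then-compute pattern (self-contained; the names are mine, not XGBoost's):

#include <cstddef>
#include <vector>

constexpr int kUnroll = 8;

void Process(const std::vector<float>& in, std::vector<float>* out) {
  const size_t nsize = in.size();
  const size_t rest = nsize % kUnroll;
  out->resize(nsize);
  for (size_t i = 0; i < nsize - rest; i += kUnroll) {
    float buf[kUnroll];
    for (int k = 0; k < kUnroll; ++k) buf[k] = in[i + k];          // gather a block
    for (int k = 0; k < kUnroll; ++k) (*out)[i + k] = 2 * buf[k];  // compute on it
  }
  for (size_t i = nsize - rest; i < nsize; ++i) (*out)[i] = 2 * in[i];  // remainder
}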
@@ -113,10 +113,10 @@ class CPUPredictor : public Predictor {
     auto it = cache_.find(dmat);
     if (it != cache_.end()) {
       HostDeviceVector<bst_float>& y = it->second.predictions;
-      if (y.size() != 0) {
-        out_preds->resize(y.size());
-        std::copy(y.data_h().begin(), y.data_h().end(),
-                  out_preds->data_h().begin());
+      if (y.Size() != 0) {
+        out_preds->Resize(y.Size());
+        std::copy(y.HostVector().begin(), y.HostVector().end(),
+                  out_preds->HostVector().begin());
         return true;
       }
     }
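PredictFromCache (above) short-circuits a full traversal: predictions are cached per DMatrix, keyed by its pointer, and copied out whenever a non-empty entry exists. The shape of the mechanism, reduced to a sketch (simplified types, not the real PredictionCacheEntry):

#include <unordered_map>
#include <vector>

struct CacheEntry { std::vector<float> predictions; };

bool FromCache(const std::unordered_map<const void*, CacheEntry>& cache,
               const void* dmat, std::vector<float>* out) {
  auto it = cache.find(dmat);
  if (it == cache.end() || it->second.predictions.empty()) return false;
  *out = it->second.predictions;  // reuse the cached result, skip the trees
  return true;
}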
@@ -127,12 +127,12 @@ class CPUPredictor : public Predictor {
   void InitOutPredictions(const MetaInfo& info,
                           HostDeviceVector<bst_float>* out_preds,
                           const gbm::GBTreeModel& model) const {
-    size_t n = model.param.num_output_group * info.num_row;
-    const std::vector<bst_float>& base_margin = info.base_margin;
-    out_preds->resize(n);
-    std::vector<bst_float>& out_preds_h = out_preds->data_h();
+    size_t n = model.param.num_output_group * info.num_row_;
+    const std::vector<bst_float>& base_margin = info.base_margin_;
+    out_preds->Resize(n);
+    std::vector<bst_float>& out_preds_h = out_preds->HostVector();
     if (base_margin.size() != 0) {
-      CHECK_EQ(out_preds->size(), n);
+      CHECK_EQ(out_preds->Size(), n);
       std::copy(base_margin.begin(), base_margin.end(), out_preds_h.begin());
     } else {
       std::fill(out_preds_h.begin(), out_preds_h.end(), model.base_margin);
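InitOutPredictions seeds the output buffer before any tree is evaluated: one slot per (row, output group), filled from the per-row base_margin_ when the DMatrix carries one, otherwise from the model-wide base margin. The same logic in isolation (a sketch with assumed parameter names):

#include <algorithm>
#include <vector>

void SeedPredictions(size_t num_row, int num_group,
                     const std::vector<float>& base_margin,  // may be empty
                     float model_base_margin, std::vector<float>* out) {
  out->resize(num_row * num_group);
  if (!base_margin.empty()) {
    std::copy(base_margin.begin(), base_margin.end(), out->begin());
  } else {
    std::fill(out->begin(), out->end(), model_base_margin);
  }
}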
@@ -147,14 +147,14 @@ class CPUPredictor : public Predictor {
       return;
     }
-    this->InitOutPredictions(dmat->info(), out_preds, model);
+    this->InitOutPredictions(dmat->Info(), out_preds, model);

     ntree_limit *= model.param.num_output_group;
     if (ntree_limit == 0 || ntree_limit > model.trees.size()) {
       ntree_limit = static_cast<unsigned>(model.trees.size());
     }
-    this->PredLoopInternal(dmat, &out_preds->data_h(), model,
+    this->PredLoopInternal(dmat, &out_preds->HostVector(), model,
                            tree_begin, ntree_limit);
   }
@@ -167,9 +167,9 @@ class CPUPredictor : public Predictor {
     for (auto& kv : cache_) {
       PredictionCacheEntry& e = kv.second;
-      if (e.predictions.size() == 0) {
-        InitOutPredictions(e.data->info(), &(e.predictions), model);
-        PredLoopInternal(e.data.get(), &(e.predictions.data_h()), model, 0,
+      if (e.predictions.Size() == 0) {
+        InitOutPredictions(e.data->Info(), &(e.predictions), model);
+        PredLoopInternal(e.data.get(), &(e.predictions.HostVector()), model, 0,
                          model.trees.size());
       } else if (model.param.num_output_group == 1 && updaters->size() > 0 &&
                  num_new_trees == 1 &&
@@ -177,7 +177,7 @@ class CPUPredictor : public Predictor {
                  &(e.predictions))) {
         {}  // do nothing
       } else {
-        PredLoopInternal(e.data.get(), &(e.predictions.data_h()), model, old_ntree,
+        PredLoopInternal(e.data.get(), &(e.predictions.HostVector()), model, old_ntree,
                          model.trees.size());
       }
     }
@@ -209,25 +209,25 @@ class CPUPredictor : public Predictor {
                    const gbm::GBTreeModel& model, unsigned ntree_limit) override {
     const int nthread = omp_get_max_threads();
     InitThreadTemp(nthread, model.param.num_feature);
-    const MetaInfo& info = p_fmat->info();
+    const MetaInfo& info = p_fmat->Info();
     // number of valid trees
     ntree_limit *= model.param.num_output_group;
     if (ntree_limit == 0 || ntree_limit > model.trees.size()) {
       ntree_limit = static_cast<unsigned>(model.trees.size());
     }
     std::vector<bst_float>& preds = *out_preds;
-    preds.resize(info.num_row * ntree_limit);
+    preds.resize(info.num_row_ * ntree_limit);
     // start collecting the prediction
     dmlc::DataIter<RowBatch>* iter = p_fmat->RowIterator();
     iter->BeforeFirst();
     while (iter->Next()) {
       const RowBatch& batch = iter->Value();
       // parallel over local batch
-      const bst_omp_uint nsize = static_cast<bst_omp_uint>(batch.size);
+      const auto nsize = static_cast<bst_omp_uint>(batch.size);
 #pragma omp parallel for schedule(static)
       for (bst_omp_uint i = 0; i < nsize; ++i) {
         const int tid = omp_get_thread_num();
-        size_t ridx = static_cast<size_t>(batch.base_rowid + i);
+        auto ridx = static_cast<size_t>(batch.base_rowid + i);
         RegTree::FVec& feats = thread_temp[tid];
         feats.Fill(batch[i]);
         for (unsigned j = 0; j < ntree_limit; ++j) {
@@ -246,7 +246,7 @@ class CPUPredictor : public Predictor {
                            unsigned condition_feature) override {
     const int nthread = omp_get_max_threads();
     InitThreadTemp(nthread, model.param.num_feature);
-    const MetaInfo& info = p_fmat->info();
+    const MetaInfo& info = p_fmat->Info();
     // number of valid trees
     ntree_limit *= model.param.num_output_group;
     if (ntree_limit == 0 || ntree_limit > model.trees.size()) {
@@ -256,7 +256,7 @@ class CPUPredictor : public Predictor {
     size_t ncolumns = model.param.num_feature + 1;
     // allocate space for (number of features + bias) times the number of rows
     std::vector<bst_float>& contribs = *out_contribs;
-    contribs.resize(info.num_row * ncolumns * model.param.num_output_group);
+    contribs.resize(info.num_row_ * ncolumns * model.param.num_output_group);
     // make sure contributions is zeroed, we could be reusing a previously
     // allocated one
     std::fill(contribs.begin(), contribs.end(), 0);
@@ -267,15 +267,15 @@ class CPUPredictor : public Predictor {
     }
     // start collecting the contributions
     dmlc::DataIter<RowBatch>* iter = p_fmat->RowIterator();
-    const std::vector<bst_float>& base_margin = info.base_margin;
+    const std::vector<bst_float>& base_margin = info.base_margin_;
     iter->BeforeFirst();
     while (iter->Next()) {
       const RowBatch& batch = iter->Value();
       // parallel over local batch
-      const bst_omp_uint nsize = static_cast<bst_omp_uint>(batch.size);
+      const auto nsize = static_cast<bst_omp_uint>(batch.size);
 #pragma omp parallel for schedule(static)
       for (bst_omp_uint i = 0; i < nsize; ++i) {
-        size_t row_idx = static_cast<size_t>(batch.base_rowid + i);
+        auto row_idx = static_cast<size_t>(batch.base_rowid + i);
         unsigned root_id = info.GetRoot(row_idx);
         RegTree::FVec& feats = thread_temp[omp_get_thread_num()];
         // loop over all classes
@@ -310,7 +310,7 @@ class CPUPredictor : public Predictor {
   void PredictInteractionContributions(DMatrix* p_fmat, std::vector<bst_float>* out_contribs,
                                        const gbm::GBTreeModel& model, unsigned ntree_limit,
                                        bool approximate) override {
-    const MetaInfo& info = p_fmat->info();
+    const MetaInfo& info = p_fmat->Info();
     const int ngroup = model.param.num_output_group;
     size_t ncolumns = model.param.num_feature;
     const unsigned row_chunk = ngroup * (ncolumns + 1) * (ncolumns + 1);
@@ -319,10 +319,10 @@ class CPUPredictor : public Predictor {
     // allocate space for (number of features^2) times the number of rows and tmp off/on contribs
     std::vector<bst_float>& contribs = *out_contribs;
-    contribs.resize(info.num_row * ngroup * (ncolumns + 1) * (ncolumns + 1));
-    std::vector<bst_float> contribs_off(info.num_row * ngroup * (ncolumns + 1));
-    std::vector<bst_float> contribs_on(info.num_row * ngroup * (ncolumns + 1));
-    std::vector<bst_float> contribs_diag(info.num_row * ngroup * (ncolumns + 1));
+    contribs.resize(info.num_row_ * ngroup * (ncolumns + 1) * (ncolumns + 1));
+    std::vector<bst_float> contribs_off(info.num_row_ * ngroup * (ncolumns + 1));
+    std::vector<bst_float> contribs_on(info.num_row_ * ngroup * (ncolumns + 1));
+    std::vector<bst_float> contribs_diag(info.num_row_ * ngroup * (ncolumns + 1));
     // Compute the difference in effects when conditioning on each of the features on and off
     // see: Axiomatic characterizations of probabilistic and
@@ -332,7 +332,7 @@ class CPUPredictor : public Predictor {
       PredictContribution(p_fmat, &contribs_off, model, ntree_limit, approximate, -1, i);
       PredictContribution(p_fmat, &contribs_on, model, ntree_limit, approximate, 1, i);
-      for (size_t j = 0; j < info.num_row; ++j) {
+      for (size_t j = 0; j < info.num_row_; ++j) {
         for (int l = 0; l < ngroup; ++l) {
           const unsigned o_offset = j * row_chunk + l * mrow_chunk + i * (ncolumns + 1);
           const unsigned c_offset = j * crow_chunk + l * (ncolumns + 1);
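For context on the loop above: PredictContribution runs twice per feature i, once with i conditioned on and once conditioned off, and the standard SHAP interaction construction takes half the difference as the (i, j) interaction value, folding the remainder into the diagonal. My reading of that step as a one-liner (the literal loop body is cut off by the hunk boundary):

#include <cstddef>
#include <vector>

// off-diagonal SHAP interaction of features i and j for one row:
// half the change in j's contribution when i is toggled on vs. off
float Interaction(const std::vector<float>& contribs_on,
                  const std::vector<float>& contribs_off, size_t j) {
  return (contribs_on[j] - contribs_off[j]) / 2.0f;
}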

src/predictor/gpu_predictor.cu

@@ -36,8 +36,8 @@ struct GPUPredictionParam : public dmlc::Parameter<GPUPredictionParam> {
 };
 DMLC_REGISTER_PARAMETER(GPUPredictionParam);

-template <typename iter_t>
-void increment_offset(iter_t begin_itr, iter_t end_itr, size_t amount) {
+template <typename IterT>
+void IncrementOffset(IterT begin_itr, IterT end_itr, size_t amount) {
   thrust::transform(begin_itr, end_itr, begin_itr,
                     [=] __device__(size_t elem) { return elem + amount; });
 }
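IncrementOffset shifts a range of device-side offsets in place; the [=] __device__ lambda captures amount by value and needs nvcc's extended-lambda support (--expt-extended-lambda on CUDA toolkits of this era). A self-contained usage sketch:

#include <thrust/device_vector.h>
#include <thrust/transform.h>

template <typename IterT>
void IncrementOffset(IterT begin_itr, IterT end_itr, size_t amount) {
  thrust::transform(begin_itr, end_itr, begin_itr,
                    [=] __device__(size_t elem) { return elem + amount; });
}

void Example() {
  thrust::device_vector<size_t> row_ptr(4);
  row_ptr[0] = 0; row_ptr[1] = 2; row_ptr[2] = 5; row_ptr[3] = 9;
  // rebase batch-local offsets to global ones, as DeviceMatrix does below
  IncrementOffset(row_ptr.begin(), row_ptr.end(), 100);  // -> 100, 102, 105, 109
}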
@@ -50,16 +50,16 @@ void increment_offset(iter_t begin_itr, iter_t end_itr, size_t amount) {
 struct DeviceMatrix {
   DMatrix* p_mat;  // Pointer to the original matrix on the host
-  dh::bulk_allocator<dh::memory_type::DEVICE> ba;
-  dh::dvec<size_t> row_ptr;
-  dh::dvec<SparseBatch::Entry> data;
+  dh::BulkAllocator<dh::MemoryType::kDevice> ba;
+  dh::DVec<size_t> row_ptr;
+  dh::DVec<SparseBatch::Entry> data;
   thrust::device_vector<float> predictions;

   DeviceMatrix(DMatrix* dmat, int device_idx, bool silent) : p_mat(dmat) {
     dh::safe_cuda(cudaSetDevice(device_idx));
-    auto info = dmat->info();
-    ba.allocate(device_idx, silent, &row_ptr, info.num_row + 1, &data,
-                info.num_nonzero);
+    auto info = dmat->Info();
+    ba.Allocate(device_idx, silent, &row_ptr, info.num_row_ + 1, &data,
+                info.num_nonzero_);
     auto iter = dmat->RowIterator();
     iter->BeforeFirst();
     size_t data_offset = 0;
@@ -71,7 +71,7 @@ struct DeviceMatrix {
       if (batch.base_rowid > 0) {
         auto begin_itr = row_ptr.tbegin() + batch.base_rowid;
         auto end_itr = begin_itr + batch.size + 1;
-        increment_offset(begin_itr, end_itr, batch.base_rowid);
+        IncrementOffset(begin_itr, end_itr, batch.base_rowid);
       }
       // Copy data
       thrust::copy(batch.data_ptr, batch.data_ptr + batch.ind_ptr[batch.size],
@@ -103,17 +103,17 @@ struct DevicePredictionNode {
   NodeValue val;

   DevicePredictionNode(const RegTree::Node& n) {  // NOLINT
-    this->left_child_idx = n.cleft();
-    this->right_child_idx = n.cright();
-    this->fidx = n.split_index();
-    if (n.default_left()) {
+    this->left_child_idx = n.LeftChild();
+    this->right_child_idx = n.RightChild();
+    this->fidx = n.SplitIndex();
+    if (n.DefaultLeft()) {
       fidx |= (1U << 31);
     }
-    if (n.is_leaf()) {
-      this->val.leaf_weight = n.leaf_value();
+    if (n.IsLeaf()) {
+      this->val.leaf_weight = n.LeafValue();
     } else {
-      this->val.fvalue = n.split_cond();
+      this->val.fvalue = n.SplitCond();
     }
   }
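DevicePredictionNode packs the node's default direction into the top bit of fidx (fidx |= (1U << 31)), leaving the low 31 bits for the split feature index. Decoding would look like this (hypothetical helpers; the real accessors live outside this hunk):

bool MissingGoesLeft(unsigned fidx) { return (fidx >> 31) != 0; }
unsigned SplitFeature(unsigned fidx) { return fidx & ((1U << 31) - 1); }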
@@ -155,7 +155,7 @@ struct ElementLoader {
     if (use_shared) {
       bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
       int shared_elements = blockDim.x * num_features;
-      dh::block_fill(smem, shared_elements, nanf(""));
+      dh::BlockFill(smem, shared_elements, nanf(""));
       __syncthreads();
       if (global_idx < num_rows) {
         bst_uint elem_begin = d_row_ptr[global_idx];
@@ -309,16 +309,16 @@ class GPUPredictor : public xgboost::Predictor {
     thrust::copy(model.tree_info.begin(), model.tree_info.end(),
                  tree_group.begin());

-    device_matrix->predictions.resize(out_preds->size());
+    device_matrix->predictions.resize(out_preds->Size());
     thrust::copy(out_preds->tbegin(param.gpu_id), out_preds->tend(param.gpu_id),
                  device_matrix->predictions.begin());

     const int BLOCK_THREADS = 128;
     const int GRID_SIZE = static_cast<int>(
-        dh::div_round_up(device_matrix->row_ptr.size() - 1, BLOCK_THREADS));
+        dh::DivRoundUp(device_matrix->row_ptr.Size() - 1, BLOCK_THREADS));

     int shared_memory_bytes = static_cast<int>(
-        sizeof(float) * device_matrix->p_mat->info().num_col * BLOCK_THREADS);
+        sizeof(float) * device_matrix->p_mat->Info().num_col_ * BLOCK_THREADS);
     bool use_shared = true;
     if (shared_memory_bytes > max_shared_memory_bytes) {
       shared_memory_bytes = 0;
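The shared-memory budget above is one cached feature vector per thread: sizeof(float) * num_col_ * BLOCK_THREADS bytes. When that exceeds the per-block limit queried in Init, the kernel requests no dynamic shared memory and reads features from global memory instead. The decision in isolation (a sketch with assumed names):

#include <cstddef>

// returns bytes of dynamic shared memory to request; 0 means fall back
int SharedBytesOrFallback(size_t num_col, int block_threads,
                          size_t max_shared_bytes, bool* use_shared) {
  const size_t wanted = sizeof(float) * num_col * block_threads;
  *use_shared = wanted <= max_shared_bytes;
  return *use_shared ? static_cast<int>(wanted) : 0;
}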
@@ -327,11 +327,11 @@ class GPUPredictor : public xgboost::Predictor {
     PredictKernel<BLOCK_THREADS>
         <<<GRID_SIZE, BLOCK_THREADS, shared_memory_bytes>>>(
-            dh::raw(nodes), dh::raw(device_matrix->predictions),
-            dh::raw(tree_segments), dh::raw(tree_group),
-            device_matrix->row_ptr.data(), device_matrix->data.data(),
-            tree_begin, tree_end, device_matrix->p_mat->info().num_col,
-            device_matrix->p_mat->info().num_row, use_shared,
+            dh::Raw(nodes), dh::Raw(device_matrix->predictions),
+            dh::Raw(tree_segments), dh::Raw(tree_group),
+            device_matrix->row_ptr.Data(), device_matrix->data.Data(),
+            tree_begin, tree_end, device_matrix->p_mat->Info().num_col_,
+            device_matrix->p_mat->Info().num_row_, use_shared,
             model.param.num_output_group);

     dh::safe_cuda(cudaDeviceSynchronize());
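GRID_SIZE above rounds the row count up to whole blocks. dh::DivRoundUp is presumably the usual ceiling division; a likely definition (an assumption, since its body is not part of this diff):

#include <cstddef>

inline size_t DivRoundUp(size_t a, size_t b) { return (a + b - 1) / b; }

For example, 1000 rows with BLOCK_THREADS = 128 give DivRoundUp(1000, 128) = 8 blocks, i.e. 1024 threads covering all 1000 rows.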
@@ -349,7 +349,7 @@ class GPUPredictor : public xgboost::Predictor {
     if (this->PredictFromCache(dmat, out_preds, model, ntree_limit)) {
       return;
     }
-    this->InitOutPredictions(dmat->info(), out_preds, model);
+    this->InitOutPredictions(dmat->Info(), out_preds, model);

     int tree_end = ntree_limit * model.param.num_output_group;
@@ -364,11 +364,11 @@ class GPUPredictor : public xgboost::Predictor {
   void InitOutPredictions(const MetaInfo& info,
                           HostDeviceVector<bst_float>* out_preds,
                           const gbm::GBTreeModel& model) const {
-    size_t n = model.param.num_output_group * info.num_row;
-    const std::vector<bst_float>& base_margin = info.base_margin;
-    out_preds->resize(n, 0.0f, param.gpu_id);
+    size_t n = model.param.num_output_group * info.num_row_;
+    const std::vector<bst_float>& base_margin = info.base_margin_;
+    out_preds->Resize(n, 0.0f, param.gpu_id);
     if (base_margin.size() != 0) {
-      CHECK_EQ(out_preds->size(), n);
+      CHECK_EQ(out_preds->Size(), n);
       thrust::copy(base_margin.begin(), base_margin.end(),
                    out_preds->tbegin(param.gpu_id));
     } else {
@@ -384,12 +384,12 @@ class GPUPredictor : public xgboost::Predictor {
     auto it = cache_.find(dmat);
     if (it != cache_.end()) {
       HostDeviceVector<bst_float>& y = it->second.predictions;
-      if (y.size() != 0) {
+      if (y.Size() != 0) {
         dh::safe_cuda(cudaSetDevice(param.gpu_id));
-        out_preds->resize(y.size(), 0.0f, param.gpu_id);
+        out_preds->Resize(y.Size(), 0.0f, param.gpu_id);
         dh::safe_cuda(cudaMemcpy(
-            out_preds->ptr_d(param.gpu_id), y.ptr_d(param.gpu_id),
-            out_preds->size() * sizeof(bst_float), cudaMemcpyDefault));
+            out_preds->DevicePointer(param.gpu_id), y.DevicePointer(param.gpu_id),
+            out_preds->Size() * sizeof(bst_float), cudaMemcpyDefault));
         return true;
       }
     }
@@ -409,9 +409,9 @@ class GPUPredictor : public xgboost::Predictor {
       DMatrix* dmat = kv.first;
       HostDeviceVector<bst_float>& predictions = e.predictions;
-      if (predictions.size() == 0) {
+      if (predictions.Size() == 0) {
         // ensure that the device in predictions is correct
-        predictions.resize(0, 0.0f, param.gpu_id);
+        predictions.Resize(0, 0.0f, param.gpu_id);
         cpu_predictor->PredictBatch(dmat, &predictions, model, 0,
                                     static_cast<bst_uint>(model.trees.size()));
       } else if (model.param.num_output_group == 1 && updaters->size() > 0 &&
@@ -462,7 +462,7 @@ class GPUPredictor : public xgboost::Predictor {
     Predictor::Init(cfg, cache);
     cpu_predictor->Init(cfg, cache);
     param.InitAllowUnknown(cfg);
-    max_shared_memory_bytes = dh::max_shared_memory(param.gpu_id);
+    max_shared_memory_bytes = dh::MaxSharedMemory(param.gpu_id);
   }

  private:

src/predictor/predictor.cc

@@ -11,8 +11,9 @@ namespace xgboost {
 void Predictor::Init(
     const std::vector<std::pair<std::string, std::string>>& cfg,
     const std::vector<std::shared_ptr<DMatrix>>& cache) {
-  for (const std::shared_ptr<DMatrix>& d : cache)
+  for (const std::shared_ptr<DMatrix>& d : cache) {
     cache_[d.get()].data = d;
+  }
 }

 Predictor* Predictor::Create(std::string name) {
   auto* e = ::dmlc::Registry<PredictorReg>::Get()->Find(name);
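Predictor::Create resolves a registered name (such as "cpu_predictor") through dmlc's registry, the plugin pattern used throughout XGBoost. A minimal self-contained imitation of the idea (not the dmlc-core API):

#include <functional>
#include <map>
#include <memory>
#include <string>

struct Base { virtual ~Base() = default; };

std::map<std::string, std::function<std::unique_ptr<Base>()>>& Registry() {
  static std::map<std::string, std::function<std::unique_ptr<Base>()>> r;
  return r;
}

std::unique_ptr<Base> Create(const std::string& name) {
  auto it = Registry().find(name);
  if (it == Registry().end()) return nullptr;  // unknown name
  return it->second();                         // invoke the factory
}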