Introducing DPC++-based plugin (predictor, objective function) supporting oneAPI programming model (#5825)

* Added plugin with DPC++-based predictor and objective function

* Update CMakeLists.txt

* Update regression_obj_oneapi.cc

* Added README.md for OneAPI plugin

* Added OneAPI predictor support to gbtree

* Update README.md

* Merged kernels in gradient computation. Enabled multiple loss functions with DPC++ backend

* Aligned plugin CMake files with latest master changes. Fixed whitespace typos

* Removed debug output

* [CI] Make oneapi_plugin a CMake target

* Added tests for the OneAPI plugin's predictor and objective functions

* Temporarily switched to the default selector for device dispatching in the OneAPI plugin, to enable execution in environments without GPUs

* Updated readme file.

* Fixed USM usage in predictor

* Removed workaround with explicit templated names for DPC++ kernels

* Fixed warnings in plugin tests

* Fix CMake build of gtest

Co-authored-by: Hyunsu Cho <chohyu01@cs.washington.edu>
Vladislav Epifanov, 2020-08-09 04:40:40 +03:00 (committed by GitHub)
parent 7cf3e9be59
commit 388f975cf5
12 changed files with 1223 additions and 1 deletion

CMakeLists.txt

@@ -60,6 +60,8 @@ address, leak, undefined and thread.")
## Plugins
option(PLUGIN_LZ4 "Build lz4 plugin" OFF)
option(PLUGIN_DENSE_PARSER "Build dense parser plugin" OFF)
## TODO: 1. Add check if DPC++ compiler is used for building
option(PLUGIN_UPDATER_ONEAPI "DPC++ updater" OFF)
option(ADD_PKGCONFIG "Add xgboost.pc into system." ON)
#-- Checks for building XGBoost

plugin/CMakeLists.txt

@@ -6,3 +6,29 @@ endif (PLUGIN_LZ4)
if (PLUGIN_DENSE_PARSER)
target_sources(objxgboost PRIVATE ${xgboost_SOURCE_DIR}/plugin/dense_parser/dense_libsvm.cc)
endif (PLUGIN_DENSE_PARSER)
if (PLUGIN_UPDATER_ONEAPI)
add_library(oneapi_plugin OBJECT
${xgboost_SOURCE_DIR}/plugin/updater_oneapi/regression_obj_oneapi.cc
${xgboost_SOURCE_DIR}/plugin/updater_oneapi/predictor_oneapi.cc)
target_include_directories(oneapi_plugin
PRIVATE
${xgboost_SOURCE_DIR}/include
${xgboost_SOURCE_DIR}/dmlc-core/include
${xgboost_SOURCE_DIR}/rabit/include)
target_compile_definitions(oneapi_plugin PUBLIC -DXGBOOST_USE_ONEAPI=1)
target_link_libraries(oneapi_plugin PUBLIC -fsycl)
set_target_properties(oneapi_plugin PROPERTIES
COMPILE_FLAGS -fsycl
CXX_STANDARD 14
CXX_STANDARD_REQUIRED ON
POSITION_INDEPENDENT_CODE ON)
if (USE_OPENMP)
find_package(OpenMP REQUIRED)
target_link_libraries(oneapi_plugin PUBLIC OpenMP::OpenMP_CXX)
endif (USE_OPENMP)
# Get compilation and link flags of oneapi_plugin and propagate to objxgboost
target_link_libraries(objxgboost PUBLIC oneapi_plugin)
# Add all objects of oneapi_plugin to objxgboost
target_sources(objxgboost INTERFACE $<TARGET_OBJECTS:oneapi_plugin>)
endif (PLUGIN_UPDATER_ONEAPI)

plugin/updater_oneapi/README.md (new executable file, 42 lines)

@@ -0,0 +1,42 @@
# DPC++-based Algorithm for Tree Construction
This plugin adds support for the OneAPI programming model to XGBoost's tree construction and prediction algorithms.
## Usage
Specify the 'objective' parameter as one of the following options to offload computation of the objective function to a OneAPI device.
### Objectives
| objective | Description |
| --- | --- |
| reg:squarederror_oneapi | regression with squared loss |
| reg:squaredlogerror_oneapi | regression with root mean squared logarithmic loss |
| reg:logistic_oneapi | logistic regression for probability regression task |
| binary:logistic_oneapi | logistic regression for binary classification task |
| binary:logitraw_oneapi | logistic regression for classification, output score before logistic transformation |
Specify the 'predictor' parameter as the following option to offload the prediction stage to a OneAPI device.
### Predictors
| predictor | Description |
| --- | --- |
| oneapi_predictor | prediction using a OneAPI device |
Please note that parameter names are not finalized and can be changed during further integration of OneAPI support.
Python example:
```python
param['predictor'] = 'oneapi_predictor'
param['objective'] = 'reg:squarederror_oneapi'
```
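For a fuller picture, here is a minimal end-to-end sketch; the synthetic dataset and the number of boosting rounds are illustrative only:
```python
import numpy as np
import xgboost as xgb

# Synthetic regression data (illustrative only)
X = np.random.rand(1000, 10)
y = np.random.rand(1000)
dtrain = xgb.DMatrix(X, label=y)

params = {
    'objective': 'reg:squarederror_oneapi',  # gradient computation offloaded to a OneAPI device
    'predictor': 'oneapi_predictor',         # prediction offloaded to a OneAPI device
}
bst = xgb.train(params, dtrain, num_boost_round=10)
preds = bst.predict(dtrain)
```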
## Dependencies
Building the plugin requires the Data Parallel C++ (DPC++) compiler (https://software.intel.com/content/www/us/en/develop/tools/oneapi/components/dpc-compiler.html).
## Build
From the command line on Linux, starting in the xgboost source directory:
```bash
$ mkdir build
$ cd build
$ export CXX=dpcpp && cmake .. -DPLUGIN_UPDATER_ONEAPI=ON
$ make -j
```
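To exercise the plugin's C++ tests after such a build (assuming the test suite was also enabled, e.g. via `-DGOOGLE_TEST=ON`), the `testxgboost` binary can be filtered by the `Plugin` test prefix:
```bash
$ ./testxgboost --gtest_filter='Plugin.*'
```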

plugin/updater_oneapi/predictor_oneapi.cc (new file)

@@ -0,0 +1,448 @@
/*!
* Copyright by Contributors 2017-2020
*/
#include <algorithm>
#include <cstddef>
#include <limits>
#include <memory>
#include <mutex>
#include <sstream>
#include <unordered_map>
#include <vector>
#include "xgboost/base.h"
#include "xgboost/data.h"
#include "xgboost/predictor.h"
#include "xgboost/tree_model.h"
#include "xgboost/tree_updater.h"
#include "xgboost/logging.h"
#include "xgboost/host_device_vector.h"
#include "../../src/data/adapter.h"
#include "../../src/common/math.h"
#include "../../src/gbm/gbtree_model.h"
#include "CL/sycl.hpp"
namespace xgboost {
namespace predictor {
DMLC_REGISTRY_FILE_TAG(predictor_oneapi);
/*! \brief Element from a sparse vector */
struct EntryOneAPI {
/*! \brief feature index */
bst_feature_t index;
/*! \brief feature value */
bst_float fvalue;
/*! \brief default constructor */
EntryOneAPI() = default;
/*!
* \brief constructor with index and value
* \param index The feature or row index.
* \param fvalue The feature value.
*/
EntryOneAPI(bst_feature_t index, bst_float fvalue) : index(index), fvalue(fvalue) {}
EntryOneAPI(const Entry& entry) : index(entry.index), fvalue(entry.fvalue) {}
/*! \brief reversely compare feature values */
inline static bool CmpValue(const EntryOneAPI& a, const EntryOneAPI& b) {
return a.fvalue < b.fvalue;
}
inline bool operator==(const EntryOneAPI& other) const {
return (this->index == other.index && this->fvalue == other.fvalue);
}
};
struct DeviceMatrixOneAPI {
DMatrix* p_mat; // Pointer to the original matrix on the host
cl::sycl::queue qu_;
size_t* row_ptr;
size_t row_ptr_size;
EntryOneAPI* data;
DeviceMatrixOneAPI(DMatrix* dmat, cl::sycl::queue qu) : p_mat(dmat), qu_(qu) {
size_t num_row = 0;
size_t num_nonzero = 0;
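// First pass: count rows and non-zero entries across all batches.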
for (auto &batch : dmat->GetBatches<SparsePage>()) {
const auto& data_vec = batch.data.HostVector();
const auto& offset_vec = batch.offset.HostVector();
num_nonzero += data_vec.size();
num_row += batch.Size();
}
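// Allocate USM shared memory, accessible from both host code and device kernels.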
row_ptr = cl::sycl::malloc_shared<size_t>(num_row + 1, qu_);
data = cl::sycl::malloc_shared<EntryOneAPI>(num_nonzero, qu_);
size_t data_offset = 0;
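// Second pass: copy the CSR offsets and entries of each batch into the shared buffers.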
for (auto &batch : dmat->GetBatches<SparsePage>()) {
const auto& data_vec = batch.data.HostVector();
const auto& offset_vec = batch.offset.HostVector();
size_t batch_size = batch.Size();
if (batch_size > 0) {
std::copy(offset_vec.data(), offset_vec.data() + batch_size,
row_ptr + batch.base_rowid);
if (batch.base_rowid > 0) {
for (size_t i = 0; i < batch_size; i++)
row_ptr[i + batch.base_rowid] += batch.base_rowid;
}
std::copy(data_vec.data(), data_vec.data() + offset_vec[batch_size],
data + data_offset);
data_offset += offset_vec[batch_size];
}
}
row_ptr[num_row] = data_offset;
row_ptr_size = num_row + 1;
}
~DeviceMatrixOneAPI() {
if (row_ptr) {
cl::sycl::free(row_ptr, qu_);
}
if (data) {
cl::sycl::free(data, qu_);
}
}
};
struct DeviceNodeOneAPI {
DeviceNodeOneAPI()
: fidx(-1), left_child_idx(-1), right_child_idx(-1) {}
union NodeValue {
float leaf_weight;
float fvalue;
};
int fidx;
int left_child_idx;
int right_child_idx;
NodeValue val;
DeviceNodeOneAPI(const RegTree::Node& n) { // NOLINT
this->left_child_idx = n.LeftChild();
this->right_child_idx = n.RightChild();
this->fidx = n.SplitIndex();
if (n.DefaultLeft()) {
fidx |= (1U << 31);
}
if (n.IsLeaf()) {
this->val.leaf_weight = n.LeafValue();
} else {
this->val.fvalue = n.SplitCond();
}
}
bool IsLeaf() const { return left_child_idx == -1; }
int GetFidx() const { return fidx & ((1U << 31) - 1U); }
bool MissingLeft() const { return (fidx >> 31) != 0; }
int MissingIdx() const {
if (MissingLeft()) {
return this->left_child_idx;
} else {
return this->right_child_idx;
}
}
float GetFvalue() const { return val.fvalue; }
float GetWeight() const { return val.leaf_weight; }
};
class DeviceModelOneAPI {
public:
cl::sycl::queue qu_;
DeviceNodeOneAPI* nodes;
size_t* tree_segments;
int* tree_group;
size_t tree_beg_;
size_t tree_end_;
int num_group;
DeviceModelOneAPI() : nodes(nullptr), tree_segments(nullptr), tree_group(nullptr) {}
~DeviceModelOneAPI() {
Reset();
}
void Reset() {
if (nodes)
cl::sycl::free(nodes, qu_);
if (tree_segments)
cl::sycl::free(tree_segments, qu_);
if (tree_group)
cl::sycl::free(tree_group, qu_);
}
void Init(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end, cl::sycl::queue qu) {
qu_ = qu;
CHECK_EQ(model.param.size_leaf_vector, 0);
Reset();
tree_segments = cl::sycl::malloc_shared<size_t>((tree_end - tree_begin) + 1, qu_);
int sum = 0;
tree_segments[0] = sum;
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
sum += model.trees[tree_idx]->GetNodes().size();
tree_segments[tree_idx - tree_begin + 1] = sum;
}
nodes = cl::sycl::malloc_shared<DeviceNodeOneAPI>(sum, qu_);
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
auto& src_nodes = model.trees[tree_idx]->GetNodes();
for (size_t node_idx = 0; node_idx < src_nodes.size(); node_idx++)
nodes[node_idx + tree_segments[tree_idx - tree_begin]] = src_nodes[node_idx];
}
tree_group = cl::sycl::malloc_shared<int>(model.tree_info.size(), qu_);
for (size_t tree_idx = 0; tree_idx < model.tree_info.size(); tree_idx++)
tree_group[tree_idx] = model.tree_info[tree_idx];
tree_beg_ = tree_begin;
tree_end_ = tree_end;
num_group = model.learner_model_param->num_output_group;
}
};
float GetFvalue(int ridx, int fidx, EntryOneAPI* data, size_t* row_ptr, bool& is_missing) {
// Binary search
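// previous_middle detects when the range stops shrinking (begin_ptr is set to
// middle rather than middle + 1) and breaks out instead of looping forever.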
auto begin_ptr = data + row_ptr[ridx];
auto end_ptr = data + row_ptr[ridx + 1];
EntryOneAPI* previous_middle = nullptr;
while (end_ptr != begin_ptr) {
auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
if (middle == previous_middle) {
break;
} else {
previous_middle = middle;
}
if (middle->index == fidx) {
is_missing = false;
return middle->fvalue;
} else if (middle->index < fidx) {
begin_ptr = middle;
} else {
end_ptr = middle;
}
}
is_missing = true;
return 0.0;
}
float GetLeafWeight(int ridx, const DeviceNodeOneAPI* tree, EntryOneAPI* data, size_t* row_ptr) {
DeviceNodeOneAPI n = tree[0];
int node_id = 0;
bool is_missing;
while (!n.IsLeaf()) {
float fvalue = GetFvalue(ridx, n.GetFidx(), data, row_ptr, is_missing);
// Missing value
if (is_missing) {
n = tree[n.MissingIdx()];
} else {
if (fvalue < n.GetFvalue()) {
node_id = n.left_child_idx;
n = tree[n.left_child_idx];
} else {
node_id = n.right_child_idx;
n = tree[n.right_child_idx];
}
}
}
return n.GetWeight();
}
class PredictorOneAPI : public Predictor {
protected:
void InitOutPredictions(const MetaInfo& info,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model) const {
CHECK_NE(model.learner_model_param->num_output_group, 0);
size_t n = model.learner_model_param->num_output_group * info.num_row_;
const auto& base_margin = info.base_margin_.HostVector();
out_preds->Resize(n);
std::vector<bst_float>& out_preds_h = out_preds->HostVector();
if (base_margin.size() == n) {
CHECK_EQ(out_preds->Size(), n);
std::copy(base_margin.begin(), base_margin.end(), out_preds_h.begin());
} else {
if (!base_margin.empty()) {
std::ostringstream oss;
oss << "Ignoring the base margin, since it has incorrect length. "
<< "The base margin must be an array of length ";
if (model.learner_model_param->num_output_group > 1) {
oss << "[num_class] * [number of data points], i.e. "
<< model.learner_model_param->num_output_group << " * " << info.num_row_
<< " = " << n << ". ";
} else {
oss << "[number of data points], i.e. " << info.num_row_ << ". ";
}
oss << "Instead, all data points will use "
<< "base_score = " << model.learner_model_param->base_score;
LOG(WARNING) << oss.str();
}
std::fill(out_preds_h.begin(), out_preds_h.end(),
model.learner_model_param->base_score);
}
}
void DevicePredictInternal(DeviceMatrixOneAPI* dmat, HostDeviceVector<float>* out_preds,
const gbm::GBTreeModel& model, size_t tree_begin,
size_t tree_end) {
if (tree_end - tree_begin == 0) {
return;
}
model_.Init(model, tree_begin, tree_end, qu_);
auto& out_preds_vec = out_preds->HostVector();
DeviceNodeOneAPI* nodes = model_.nodes;
cl::sycl::buffer<float, 1> out_preds_buf(out_preds_vec.data(), out_preds_vec.size());
size_t* tree_segments = model_.tree_segments;
int* tree_group = model_.tree_group;
size_t* row_ptr = dmat->row_ptr;
EntryOneAPI* data = dmat->data;
int num_features = dmat->p_mat->Info().num_col_;
int num_rows = dmat->row_ptr_size - 1;
int num_group = model.learner_model_param->num_output_group;
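// Launch one work-item per row; each work-item walks every tree in
// [tree_begin, tree_end) for its row and accumulates leaf weights into the output buffer.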
qu_.submit([&](cl::sycl::handler& cgh) {
auto out_predictions = out_preds_buf.get_access<cl::sycl::access::mode::read_write>(cgh);
cgh.parallel_for<class PredictInternal>(cl::sycl::range<1>(num_rows), [=](cl::sycl::id<1> pid) {
int global_idx = pid[0];
if (global_idx >= num_rows) return;
if (num_group == 1) {
float sum = 0.0;
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
const DeviceNodeOneAPI* tree = nodes + tree_segments[tree_idx - tree_begin];
sum += GetLeafWeight(global_idx, tree, data, row_ptr);
}
out_predictions[global_idx] += sum;
} else {
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
const DeviceNodeOneAPI* tree = nodes + tree_segments[tree_idx - tree_begin];
int out_prediction_idx = global_idx * num_group + tree_group[tree_idx];
out_predictions[out_prediction_idx] += GetLeafWeight(global_idx, tree, data, row_ptr);
}
}
});
}).wait();
}
public:
explicit PredictorOneAPI(GenericParameter const* generic_param) :
Predictor::Predictor{generic_param}, cpu_predictor(Predictor::Create("cpu_predictor", generic_param)) {
cl::sycl::default_selector selector;
qu_ = cl::sycl::queue(selector);
}
// ntree_limit is a very problematic parameter, as it's ambiguous in the context of
// multi-output and forest. Same problem exists for tree_begin
void PredictBatch(DMatrix* dmat, PredictionCacheEntry* predts,
const gbm::GBTreeModel& model, int tree_begin,
uint32_t const ntree_limit = 0) override {
if (this->device_matrix_cache_.find(dmat) ==
this->device_matrix_cache_.end()) {
this->device_matrix_cache_.emplace(
dmat, std::unique_ptr<DeviceMatrixOneAPI>(
new DeviceMatrixOneAPI(dmat, qu_)));
}
DeviceMatrixOneAPI* device_matrix = device_matrix_cache_.find(dmat)->second.get();
// tree_begin is not used, right now we just enforce it to be 0.
CHECK_EQ(tree_begin, 0);
auto* out_preds = &predts->predictions;
CHECK_GE(predts->version, tree_begin);
if (out_preds->Size() == 0 && dmat->Info().num_row_ != 0) {
CHECK_EQ(predts->version, 0);
}
if (predts->version == 0) {
// out_preds->Size() can be non-zero, as it's initialized here before any tree is
// built at the 0^th iteration.
this->InitOutPredictions(dmat->Info(), out_preds, model);
}
uint32_t const output_groups = model.learner_model_param->num_output_group;
CHECK_NE(output_groups, 0);
// Right now we just assume ntree_limit provided by users means number of tree layers
// in the context of multi-output model
uint32_t real_ntree_limit = ntree_limit * output_groups;
if (real_ntree_limit == 0 || real_ntree_limit > model.trees.size()) {
real_ntree_limit = static_cast<uint32_t>(model.trees.size());
}
uint32_t const end_version = (tree_begin + real_ntree_limit) / output_groups;
// When users have provided ntree_limit, end_version can be smaller than the cached version; the cache is then invalidated
if (predts->version > end_version) {
CHECK_NE(ntree_limit, 0);
this->InitOutPredictions(dmat->Info(), out_preds, model);
predts->version = 0;
}
uint32_t const beg_version = predts->version;
CHECK_LE(beg_version, end_version);
if (beg_version < end_version) {
DevicePredictInternal(device_matrix, out_preds, model,
beg_version * output_groups,
end_version * output_groups);
}
// delta means {size of forest} * {number of newly accumulated layers}
uint32_t delta = end_version - beg_version;
CHECK_LE(delta, model.trees.size());
predts->Update(delta);
CHECK(out_preds->Size() == output_groups * dmat->Info().num_row_ ||
out_preds->Size() == dmat->Info().num_row_);
}
void InplacePredict(dmlc::any const &x, const gbm::GBTreeModel &model,
float missing, PredictionCacheEntry *out_preds,
uint32_t tree_begin, unsigned tree_end) const override {
cpu_predictor->InplacePredict(x, model, missing, out_preds, tree_begin, tree_end);
}
void PredictInstance(const SparsePage::Inst& inst,
std::vector<bst_float>* out_preds,
const gbm::GBTreeModel& model, unsigned ntree_limit) override {
cpu_predictor->PredictInstance(inst, out_preds, model, ntree_limit);
}
void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds,
const gbm::GBTreeModel& model, unsigned ntree_limit) override {
cpu_predictor->PredictLeaf(p_fmat, out_preds, model, ntree_limit);
}
void PredictContribution(DMatrix* p_fmat, std::vector<bst_float>* out_contribs,
const gbm::GBTreeModel& model, uint32_t ntree_limit,
std::vector<bst_float>* tree_weights,
bool approximate, int condition,
unsigned condition_feature) override {
cpu_predictor->PredictContribution(p_fmat, out_contribs, model, ntree_limit, tree_weights, approximate, condition, condition_feature);
}
void PredictInteractionContributions(DMatrix* p_fmat, std::vector<bst_float>* out_contribs,
const gbm::GBTreeModel& model, unsigned ntree_limit,
std::vector<bst_float>* tree_weights,
bool approximate) override {
cpu_predictor->PredictInteractionContributions(p_fmat, out_contribs, model, ntree_limit, tree_weights, approximate);
}
private:
cl::sycl::queue qu_;
DeviceModelOneAPI model_;
std::mutex lock_;
std::unique_ptr<Predictor> cpu_predictor;
std::unordered_map<DMatrix*, std::unique_ptr<DeviceMatrixOneAPI>>
device_matrix_cache_;
};
XGBOOST_REGISTER_PREDICTOR(PredictorOneAPI, "oneapi_predictor")
.describe("Make predictions using DPC++.")
.set_body([](GenericParameter const* generic_param) {
return new PredictorOneAPI(generic_param);
});
} // namespace predictor
} // namespace xgboost

plugin/updater_oneapi/regression_loss_oneapi.h (new file)

@@ -0,0 +1,145 @@
/*!
* Copyright 2017-2020 XGBoost contributors
*/
#ifndef XGBOOST_OBJECTIVE_REGRESSION_LOSS_ONEAPI_H_
#define XGBOOST_OBJECTIVE_REGRESSION_LOSS_ONEAPI_H_
#include <dmlc/omp.h>
#include <xgboost/logging.h>
#include <algorithm>
#include "CL/sycl.hpp"
namespace xgboost {
namespace obj {
/*!
* \brief calculate the sigmoid of the input.
* \param x input parameter
* \return the transformed value.
*/
inline float SigmoidOneAPI(float x) {
return 1.0f / (1.0f + cl::sycl::exp(-x));
}
// common regressions
// linear regression
struct LinearSquareLossOneAPI {
static bst_float PredTransform(bst_float x) { return x; }
static bool CheckLabel(bst_float x) { return true; }
static bst_float FirstOrderGradient(bst_float predt, bst_float label) {
return predt - label;
}
static bst_float SecondOrderGradient(bst_float predt, bst_float label) {
return 1.0f;
}
static bst_float ProbToMargin(bst_float base_score) { return base_score; }
static const char* LabelErrorMsg() { return ""; }
static const char* DefaultEvalMetric() { return "rmse"; }
static const char* Name() { return "reg:squarederror_oneapi"; }
};
// TODO: DPC++ does not fully support std math inside offloaded kernels
struct SquaredLogErrorOneAPI {
static bst_float PredTransform(bst_float x) { return x; }
static bool CheckLabel(bst_float label) {
return label > -1;
}
static bst_float FirstOrderGradient(bst_float predt, bst_float label) {
predt = std::max(predt, (bst_float)(-1 + 1e-6)); // ensure correct value for log1p
return (cl::sycl::log1p(predt) - cl::sycl::log1p(label)) / (predt + 1);
}
static bst_float SecondOrderGradient(bst_float predt, bst_float label) {
predt = std::max(predt, (bst_float)(-1 + 1e-6));
float res = (-cl::sycl::log1p(predt) + cl::sycl::log1p(label) + 1) /
cl::sycl::pow(predt + 1, (bst_float)2);
res = std::max(res, (bst_float)1e-6f);
return res;
}
static bst_float ProbToMargin(bst_float base_score) { return base_score; }
static const char* LabelErrorMsg() {
return "label must be greater than -1 for rmsle so that log(label + 1) can be valid.";
}
static const char* DefaultEvalMetric() { return "rmsle"; }
static const char* Name() { return "reg:squaredlogerror_oneapi"; }
};
// logistic loss for probability regression task
struct LogisticRegressionOneAPI {
// Duplication is kept from the CUDA implementation, where the __device__
// specifier cannot be made conditional on a template parameter.
static bst_float PredTransform(bst_float x) { return SigmoidOneAPI(x); }
static bool CheckLabel(bst_float x) { return x >= 0.0f && x <= 1.0f; }
static bst_float FirstOrderGradient(bst_float predt, bst_float label) {
return predt - label;
}
static bst_float SecondOrderGradient(bst_float predt, bst_float label) {
const bst_float eps = 1e-16f;
return std::max(predt * (1.0f - predt), eps);
}
template <typename T>
static T PredTransform(T x) { return SigmoidOneAPI(x); }
template <typename T>
static T FirstOrderGradient(T predt, T label) { return predt - label; }
template <typename T>
static T SecondOrderGradient(T predt, T label) {
const T eps = T(1e-16f);
return std::max(predt * (T(1.0f) - predt), eps);
}
static bst_float ProbToMargin(bst_float base_score) {
CHECK(base_score > 0.0f && base_score < 1.0f)
<< "base_score must be in (0,1) for logistic loss, got: " << base_score;
return -logf(1.0f / base_score - 1.0f);
}
static const char* LabelErrorMsg() {
return "label must be in [0,1] for logistic regression";
}
static const char* DefaultEvalMetric() { return "rmse"; }
static const char* Name() { return "reg:logistic_oneapi"; }
};
// logistic loss for binary classification task
struct LogisticClassificationOneAPI : public LogisticRegressionOneAPI {
static const char* DefaultEvalMetric() { return "error"; }
static const char* Name() { return "binary:logistic_oneapi"; }
};
// logistic loss, but predict un-transformed margin
struct LogisticRawOneAPI : public LogisticRegressionOneAPI {
// Duplication is kept from the CUDA implementation, where the __device__
// specifier cannot be made conditional on a template parameter.
static bst_float PredTransform(bst_float x) { return x; }
static bst_float FirstOrderGradient(bst_float predt, bst_float label) {
predt = SigmoidOneAPI(predt);
return predt - label;
}
static bst_float SecondOrderGradient(bst_float predt, bst_float label) {
const bst_float eps = 1e-16f;
predt = SigmoidOneAPI(predt);
return std::max(predt * (1.0f - predt), eps);
}
template <typename T>
static T PredTransform(T x) { return x; }
template <typename T>
static T FirstOrderGradient(T predt, T label) {
predt = SigmoidOneAPI(predt);
return predt - label;
}
template <typename T>
static T SecondOrderGradient(T predt, T label) {
const T eps = T(1e-16f);
predt = SigmoidOneAPI(predt);
return std::max(predt * (T(1.0f) - predt), eps);
}
static const char* DefaultEvalMetric() { return "auc"; }
static const char* Name() { return "binary:logitraw_oneapi"; }
};
} // namespace obj
} // namespace xgboost
#endif // XGBOOST_OBJECTIVE_REGRESSION_LOSS_ONEAPI_H_

plugin/updater_oneapi/regression_obj_oneapi.cc (new file)

@@ -0,0 +1,182 @@
#include <xgboost/logging.h>
#include <xgboost/objective.h>
#include <cmath>
#include <memory>
#include <vector>
#include "xgboost/host_device_vector.h"
#include "xgboost/json.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "../../src/common/transform.h"
#include "../../src/common/common.h"
#include "./regression_loss_oneapi.h"
#include "CL/sycl.hpp"
namespace xgboost {
namespace obj {
DMLC_REGISTRY_FILE_TAG(regression_obj_oneapi);
struct RegLossParamOneAPI : public XGBoostParameter<RegLossParamOneAPI> {
float scale_pos_weight;
// declare parameters
DMLC_DECLARE_PARAMETER(RegLossParamOneAPI) {
DMLC_DECLARE_FIELD(scale_pos_weight).set_default(1.0f).set_lower_bound(0.0f)
.describe("Scale the weight of positive examples by this factor");
}
};
template<typename Loss>
class RegLossObjOneAPI : public ObjFunction {
protected:
HostDeviceVector<int> label_correct_;
public:
RegLossObjOneAPI() = default;
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.UpdateAllowUnknown(args);
cl::sycl::default_selector selector;
qu_ = cl::sycl::queue(selector);
}
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info,
int iter,
HostDeviceVector<GradientPair>* out_gpair) override {
if (info.labels_.Size() == 0U) {
LOG(WARNING) << "Label set is empty.";
}
CHECK_EQ(preds.Size(), info.labels_.Size())
<< " " << "labels are not correctly provided"
<< "preds.size=" << preds.Size() << ", label.size=" << info.labels_.Size() << ", "
<< "Loss: " << Loss::Name();
size_t const ndata = preds.Size();
out_gpair->Resize(ndata);
// TODO: add label_correct check
label_correct_.Resize(1);
label_correct_.Fill(1);
bool is_null_weight = info.weights_.Size() == 0;
cl::sycl::buffer<bst_float, 1> preds_buf(preds.HostPointer(), preds.Size());
cl::sycl::buffer<bst_float, 1> labels_buf(info.labels_.HostPointer(), info.labels_.Size());
cl::sycl::buffer<GradientPair, 1> out_gpair_buf(out_gpair->HostPointer(), out_gpair->Size());
cl::sycl::buffer<bst_float, 1> weights_buf(is_null_weight ? NULL : info.weights_.HostPointer(),
is_null_weight ? 1 : info.weights_.Size());
cl::sycl::buffer<int, 1> additional_input_buf(1);
{
auto additional_input_acc = additional_input_buf.get_access<cl::sycl::access::mode::write>();
additional_input_acc[0] = 1; // Fill the label_correct flag
}
auto scale_pos_weight = param_.scale_pos_weight;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
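// One work-item per prediction: apply PredTransform, compute the weighted
// gradient pair, and flag invalid labels through additional_input_buf.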
qu_.submit([&](cl::sycl::handler& cgh) {
auto preds_acc = preds_buf.get_access<cl::sycl::access::mode::read>(cgh);
auto labels_acc = labels_buf.get_access<cl::sycl::access::mode::read>(cgh);
auto weights_acc = weights_buf.get_access<cl::sycl::access::mode::read>(cgh);
auto out_gpair_acc = out_gpair_buf.get_access<cl::sycl::access::mode::write>(cgh);
auto additional_input_acc = additional_input_buf.get_access<cl::sycl::access::mode::write>(cgh);
cgh.parallel_for<>(cl::sycl::range<1>(ndata), [=](cl::sycl::id<1> pid) {
int idx = pid[0];
bst_float p = Loss::PredTransform(preds_acc[idx]);
bst_float w = is_null_weight ? 1.0f : weights_acc[idx];
bst_float label = labels_acc[idx];
if (label == 1.0f) {
w *= scale_pos_weight;
}
if (!Loss::CheckLabel(label)) {
// If there is an incorrect label, the host code will know.
additional_input_acc[0] = 0;
}
out_gpair_acc[idx] = GradientPair(Loss::FirstOrderGradient(p, label) * w,
Loss::SecondOrderGradient(p, label) * w);
});
}).wait();
int flag = 1;
{
auto additional_input_acc = additional_input_buf.get_access<cl::sycl::access::mode::read>();
flag = additional_input_acc[0];
}
if (flag == 0) {
LOG(FATAL) << Loss::LabelErrorMsg();
}
}
public:
const char* DefaultEvalMetric() const override {
return Loss::DefaultEvalMetric();
}
void PredTransform(HostDeviceVector<float> *io_preds) override {
size_t const ndata = io_preds->Size();
cl::sycl::buffer<bst_float, 1> io_preds_buf(io_preds->HostPointer(), io_preds->Size());
qu_.submit([&](cl::sycl::handler& cgh) {
auto io_preds_acc = io_preds_buf.get_access<cl::sycl::access::mode::read_write>(cgh);
cgh.parallel_for<>(cl::sycl::range<1>(ndata), [=](cl::sycl::id<1> pid) {
int idx = pid[0];
io_preds_acc[idx] = Loss::PredTransform(io_preds_acc[idx]);
});
}).wait();
}
float ProbToMargin(float base_score) const override {
return Loss::ProbToMargin(base_score);
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String(Loss::Name());
out["reg_loss_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
FromJson(in["reg_loss_param"], &param_);
}
protected:
RegLossParamOneAPI param_;
cl::sycl::queue qu_;
};
// register the objective functions
DMLC_REGISTER_PARAMETER(RegLossParamOneAPI);
// TODO: Find a better way to dispatch names of DPC++ kernels with various template parameters of loss function
XGBOOST_REGISTER_OBJECTIVE(SquaredLossRegressionOneAPI, LinearSquareLossOneAPI::Name())
.describe("Regression with squared error with DPC++ backend.")
.set_body([]() { return new RegLossObjOneAPI<LinearSquareLossOneAPI>(); });
XGBOOST_REGISTER_OBJECTIVE(SquareLogErrorOneAPI, SquaredLogErrorOneAPI::Name())
.describe("Regression with root mean squared logarithmic error with DPC++ backend.")
.set_body([]() { return new RegLossObjOneAPI<SquaredLogErrorOneAPI>(); });
XGBOOST_REGISTER_OBJECTIVE(LogisticRegressionOneAPI, LogisticRegressionOneAPI::Name())
.describe("Logistic regression for probability regression task with DPC++ backend.")
.set_body([]() { return new RegLossObjOneAPI<LogisticRegressionOneAPI>(); });
XGBOOST_REGISTER_OBJECTIVE(LogisticClassificationOneAPI, LogisticClassificationOneAPI::Name())
.describe("Logistic regression for binary classification task with DPC++ backend.")
.set_body([]() { return new RegLossObjOneAPI<LogisticClassificationOneAPI>(); });
XGBOOST_REGISTER_OBJECTIVE(LogisticRawOneAPI, LogisticRawOneAPI::Name())
.describe("Logistic regression for classification, output score "
"before logistic transformation with DPC++ backend.")
.set_body([]() { return new RegLossObjOneAPI<LogisticRawOneAPI>(); });
} // namespace obj
} // namespace xgboost

src/common/common.h

@@ -154,6 +154,12 @@ inline void AssertGPUSupport() {
#endif // XGBOOST_USE_CUDA
}
inline void AssertOneAPISupport() {
#ifndef XGBOOST_USE_ONEAPI
LOG(FATAL) << "XGBoost version not compiled with OneAPI support.";
#endif // XGBOOST_USE_ONEAPI
}
} // namespace common
} // namespace xgboost
#endif // XGBOOST_COMMON_COMMON_H_

src/gbm/gbtree.cc

@@ -62,6 +62,14 @@ void GBTree::Configure(const Args& cfg) {
}
#endif // defined(XGBOOST_USE_CUDA)
#if defined(XGBOOST_USE_ONEAPI)
if (!oneapi_predictor_) {
oneapi_predictor_ = std::unique_ptr<Predictor>(
Predictor::Create("oneapi_predictor", this->generic_param_));
}
oneapi_predictor_->Configure(cfg);
#endif // defined(XGBOOST_USE_ONEAPI)
monitor_.Init("GBTree");
specified_updater_ = std::any_of(cfg.cbegin(), cfg.cend(),
@@ -413,6 +421,14 @@ GBTree::GetPredictor(HostDeviceVector<float> const *out_pred,
#else
common::AssertGPUSupport();
#endif // defined(XGBOOST_USE_CUDA)
}
if (tparam_.predictor == PredictorType::kOneAPIPredictor) {
#if defined(XGBOOST_USE_ONEAPI)
CHECK(oneapi_predictor_);
return oneapi_predictor_;
#else
common::AssertOneAPISupport();
#endif // defined(XGBOOST_USE_ONEAPI)
}
CHECK(cpu_predictor_);
return cpu_predictor_;

src/gbm/gbtree.h

@@ -45,7 +45,8 @@ enum class TreeProcessType : int {
enum class PredictorType : int {
kAuto = 0,
kCPUPredictor,
kGPUPredictor
kGPUPredictor,
kOneAPIPredictor
};
} // namespace xgboost
@@ -93,6 +94,7 @@ struct GBTreeTrainParam : public XGBoostParameter<GBTreeTrainParam> {
.add_enum("auto", PredictorType::kAuto)
.add_enum("cpu_predictor", PredictorType::kCPUPredictor)
.add_enum("gpu_predictor", PredictorType::kGPUPredictor)
.add_enum("oneapi_predictor", PredictorType::kOneAPIPredictor)
.describe("Predictor algorithm type");
DMLC_DECLARE_FIELD(tree_method)
.set_default(TreeMethod::kAuto)
@@ -292,6 +294,9 @@ class GBTree : public GradientBooster {
#if defined(XGBOOST_USE_CUDA)
std::unique_ptr<Predictor> gpu_predictor_;
#endif // defined(XGBOOST_USE_CUDA)
#if defined(XGBOOST_USE_ONEAPI)
std::unique_ptr<Predictor> oneapi_predictor_;
#endif // defined(XGBOOST_USE_ONEAPI)
common::Monitor monitor_;
};

tests/cpp/CMakeLists.txt

@@ -12,6 +12,12 @@ if (USE_CUDA)
file(GLOB_RECURSE CUDA_TEST_SOURCES "*.cu")
list(APPEND TEST_SOURCES ${CUDA_TEST_SOURCES})
endif (USE_CUDA)
file(GLOB_RECURSE ONEAPI_TEST_SOURCES "plugin/*_oneapi.cc")
if (NOT PLUGIN_UPDATER_ONEAPI)
list(REMOVE_ITEM TEST_SOURCES ${ONEAPI_TEST_SOURCES})
endif (NOT PLUGIN_UPDATER_ONEAPI)
add_executable(testxgboost ${TEST_SOURCES}
${xgboost_SOURCE_DIR}/plugin/example/custom_obj.cc)
target_link_libraries(testxgboost PRIVATE objxgboost)

tests/cpp/plugin/test_predictor_oneapi.cc (new file)

@@ -0,0 +1,168 @@
/*!
* Copyright 2017-2020 XGBoost contributors
*/
#include <dmlc/filesystem.h>
#include <gtest/gtest.h>
#include <xgboost/predictor.h>
#include "../helpers.h"
#include "../predictor/test_predictor.h"
#include "../../../src/gbm/gbtree_model.h"
#include "../../../src/data/adapter.h"
namespace xgboost {
TEST(Plugin, OneAPIPredictorBasic) {
auto lparam = CreateEmptyGenericParam(0);
std::unique_ptr<Predictor> oneapi_predictor =
std::unique_ptr<Predictor>(Predictor::Create("oneapi_predictor", &lparam));
int kRows = 5;
int kCols = 5;
LearnerModelParam param;
param.num_feature = kCols;
param.base_score = 0.0;
param.num_output_group = 1;
gbm::GBTreeModel model = CreateTestModel(&param);
auto dmat = RandomDataGenerator(kRows, kCols, 0).GenerateDMatrix();
// Test predict batch
PredictionCacheEntry out_predictions;
oneapi_predictor->PredictBatch(dmat.get(), &out_predictions, model, 0);
ASSERT_EQ(model.trees.size(), out_predictions.version);
std::vector<float>& out_predictions_h = out_predictions.predictions.HostVector();
for (size_t i = 0; i < out_predictions.predictions.Size(); i++) {
ASSERT_EQ(out_predictions_h[i], 1.5);
}
// Test predict instance
auto const &batch = *dmat->GetBatches<xgboost::SparsePage>().begin();
for (size_t i = 0; i < batch.Size(); i++) {
std::vector<float> instance_out_predictions;
oneapi_predictor->PredictInstance(batch[i], &instance_out_predictions, model);
ASSERT_EQ(instance_out_predictions[0], 1.5);
}
// Test predict leaf
std::vector<float> leaf_out_predictions;
oneapi_predictor->PredictLeaf(dmat.get(), &leaf_out_predictions, model);
for (auto v : leaf_out_predictions) {
ASSERT_EQ(v, 0);
}
// Test predict contribution
std::vector<float> out_contribution;
oneapi_predictor->PredictContribution(dmat.get(), &out_contribution, model);
ASSERT_EQ(out_contribution.size(), kRows * (kCols + 1));
for (size_t i = 0; i < out_contribution.size(); ++i) {
auto const& contri = out_contribution[i];
// shift 1 for bias, as the test tree is a decision stump; only the global bias is filled with LeafValue().
if ((i+1) % (kCols+1) == 0) {
ASSERT_EQ(out_contribution.back(), 1.5f);
} else {
ASSERT_EQ(contri, 0);
}
}
// Test predict contribution (approximate method)
oneapi_predictor->PredictContribution(dmat.get(), &out_contribution, model, 0, nullptr, true);
for (size_t i = 0; i < out_contribution.size(); ++i) {
auto const& contri = out_contribution[i];
// shift 1 for bias, as the test tree is a decision stump; only the global bias is filled with LeafValue().
if ((i+1) % (kCols+1) == 0) {
ASSERT_EQ(out_contribution.back(), 1.5f);
} else {
ASSERT_EQ(contri, 0);
}
}
}
TEST(Plugin, OneAPIPredictorExternalMemory) {
dmlc::TemporaryDirectory tmpdir;
std::string filename = tmpdir.path + "/big.libsvm";
std::unique_ptr<DMatrix> dmat = CreateSparsePageDMatrix(12, 64, filename);
auto lparam = CreateEmptyGenericParam(0);
std::unique_ptr<Predictor> oneapi_predictor =
std::unique_ptr<Predictor>(Predictor::Create("oneapi_predictor", &lparam));
LearnerModelParam param;
param.base_score = 0;
param.num_feature = dmat->Info().num_col_;
param.num_output_group = 1;
gbm::GBTreeModel model = CreateTestModel(&param);
// Test predict batch
PredictionCacheEntry out_predictions;
oneapi_predictor->PredictBatch(dmat.get(), &out_predictions, model, 0);
std::vector<float> &out_predictions_h = out_predictions.predictions.HostVector();
ASSERT_EQ(out_predictions.predictions.Size(), dmat->Info().num_row_);
for (const auto& v : out_predictions_h) {
ASSERT_EQ(v, 1.5);
}
// Test predict leaf
std::vector<float> leaf_out_predictions;
oneapi_predictor->PredictLeaf(dmat.get(), &leaf_out_predictions, model);
ASSERT_EQ(leaf_out_predictions.size(), dmat->Info().num_row_);
for (const auto& v : leaf_out_predictions) {
ASSERT_EQ(v, 0);
}
// Test predict contribution
std::vector<float> out_contribution;
oneapi_predictor->PredictContribution(dmat.get(), &out_contribution, model);
ASSERT_EQ(out_contribution.size(), dmat->Info().num_row_ * (dmat->Info().num_col_ + 1));
for (size_t i = 0; i < out_contribution.size(); ++i) {
auto const& contri = out_contribution[i];
// shift 1 for bias, as the test tree is a decision stump; only the global bias is filled with LeafValue().
if ((i + 1) % (dmat->Info().num_col_ + 1) == 0) {
ASSERT_EQ(out_contribution.back(), 1.5f);
} else {
ASSERT_EQ(contri, 0);
}
}
// Test predict contribution (approximate method)
std::vector<float> out_contribution_approximate;
oneapi_predictor->PredictContribution(dmat.get(), &out_contribution_approximate, model, 0, nullptr, true);
ASSERT_EQ(out_contribution_approximate.size(),
dmat->Info().num_row_ * (dmat->Info().num_col_ + 1));
for (size_t i = 0; i < out_contribution.size(); ++i) {
auto const& contri = out_contribution[i];
// shift 1 for bias, as the test tree is a decision stump; only the global bias is filled with LeafValue().
if ((i + 1) % (dmat->Info().num_col_ + 1) == 0) {
ASSERT_EQ(out_contribution.back(), 1.5f);
} else {
ASSERT_EQ(contri, 0);
}
}
}
TEST(Plugin, OneAPIPredictorInplacePredict) {
bst_row_t constexpr kRows{128};
bst_feature_t constexpr kCols{64};
auto gen = RandomDataGenerator{kRows, kCols, 0.5}.Device(-1);
{
HostDeviceVector<float> data;
gen.GenerateDense(&data);
ASSERT_EQ(data.Size(), kRows * kCols);
std::shared_ptr<data::DenseAdapter> x{
new data::DenseAdapter(data.HostPointer(), kRows, kCols)};
TestInplacePrediction(x, "oneapi_predictor", kRows, kCols, -1);
}
{
HostDeviceVector<float> data;
HostDeviceVector<bst_row_t> rptrs;
HostDeviceVector<bst_feature_t> columns;
gen.GenerateCSR(&data, &rptrs, &columns);
std::shared_ptr<data::CSRAdapter> x{new data::CSRAdapter(
rptrs.HostPointer(), columns.HostPointer(), data.HostPointer(), kRows,
data.Size(), kCols)};
TestInplacePrediction(x, "oneapi_predictor", kRows, kCols, -1);
}
}
} // namespace xgboost

tests/cpp/plugin/test_regression_obj_oneapi.cc (new file)

@@ -0,0 +1,176 @@
/*!
* Copyright 2017-2019 XGBoost contributors
*/
#include <gtest/gtest.h>
#include <xgboost/objective.h>
#include <xgboost/generic_parameters.h>
#include <xgboost/json.h>
#include "../helpers.h"
namespace xgboost {
TEST(Plugin, LinearRegressionGPairOneAPI) {
GenericParameter tparam = CreateEmptyGenericParam(0);
std::vector<std::pair<std::string, std::string>> args;
std::unique_ptr<ObjFunction> obj {
ObjFunction::Create("reg:squarederror_oneapi", &tparam)
};
obj->Configure(args);
CheckObjFunction(obj,
{0, 0.1f, 0.9f, 1, 0, 0.1f, 0.9f, 1},
{0, 0, 0, 0, 1, 1, 1, 1},
{1, 1, 1, 1, 1, 1, 1, 1},
{0, 0.1f, 0.9f, 1.0f, -1.0f, -0.9f, -0.1f, 0},
{1, 1, 1, 1, 1, 1, 1, 1});
CheckObjFunction(obj,
{0, 0.1f, 0.9f, 1, 0, 0.1f, 0.9f, 1},
{0, 0, 0, 0, 1, 1, 1, 1},
{}, // empty weight
{0, 0.1f, 0.9f, 1.0f, -1.0f, -0.9f, -0.1f, 0},
{1, 1, 1, 1, 1, 1, 1, 1});
ASSERT_NO_THROW(obj->DefaultEvalMetric());
}
TEST(Plugin, SquaredLogOneAPI) {
GenericParameter tparam = CreateEmptyGenericParam(0);
std::vector<std::pair<std::string, std::string>> args;
std::unique_ptr<ObjFunction> obj { ObjFunction::Create("reg:squaredlogerror_oneapi", &tparam) };
obj->Configure(args);
CheckConfigReload(obj, "reg:squaredlogerror_oneapi");
CheckObjFunction(obj,
{0.1f, 0.2f, 0.4f, 0.8f, 1.6f}, // pred
{1.0f, 1.0f, 1.0f, 1.0f, 1.0f}, // labels
{1.0f, 1.0f, 1.0f, 1.0f, 1.0f}, // weights
{-0.5435f, -0.4257f, -0.25475f, -0.05855f, 0.1009f},
{ 1.3205f, 1.0492f, 0.69215f, 0.34115f, 0.1091f});
CheckObjFunction(obj,
{0.1f, 0.2f, 0.4f, 0.8f, 1.6f}, // pred
{1.0f, 1.0f, 1.0f, 1.0f, 1.0f}, // labels
{}, // empty weights
{-0.5435f, -0.4257f, -0.25475f, -0.05855f, 0.1009f},
{ 1.3205f, 1.0492f, 0.69215f, 0.34115f, 0.1091f});
ASSERT_EQ(obj->DefaultEvalMetric(), std::string{"rmsle"});
}
TEST(Plugin, LogisticRegressionGPairOneAPI) {
GenericParameter tparam = CreateEmptyGenericParam(0);
std::vector<std::pair<std::string, std::string>> args;
std::unique_ptr<ObjFunction> obj { ObjFunction::Create("reg:logistic_oneapi", &tparam) };
obj->Configure(args);
CheckConfigReload(obj, "reg:logistic_oneapi");
CheckObjFunction(obj,
{ 0, 0.1f, 0.9f, 1, 0, 0.1f, 0.9f, 1}, // preds
{ 0, 0, 0, 0, 1, 1, 1, 1}, // labels
{ 1, 1, 1, 1, 1, 1, 1, 1}, // weights
{ 0.5f, 0.52f, 0.71f, 0.73f, -0.5f, -0.47f, -0.28f, -0.26f}, // out_grad
{0.25f, 0.24f, 0.20f, 0.19f, 0.25f, 0.24f, 0.20f, 0.19f}); // out_hess
}
TEST(Plugin, LogisticRegressionBasicOneAPI) {
GenericParameter lparam = CreateEmptyGenericParam(0);
std::vector<std::pair<std::string, std::string>> args;
std::unique_ptr<ObjFunction> obj {
ObjFunction::Create("reg:logistic_oneapi", &lparam)
};
obj->Configure(args);
CheckConfigReload(obj, "reg:logistic_oneapi");
// test label validation
EXPECT_ANY_THROW(CheckObjFunction(obj, {0}, {10}, {1}, {0}, {0}))
<< "Expected error when label not in range [0,1f] for LogisticRegression";
// test ProbToMargin
EXPECT_NEAR(obj->ProbToMargin(0.1f), -2.197f, 0.01f);
EXPECT_NEAR(obj->ProbToMargin(0.5f), 0, 0.01f);
EXPECT_NEAR(obj->ProbToMargin(0.9f), 2.197f, 0.01f);
EXPECT_ANY_THROW(obj->ProbToMargin(10))
<< "Expected error when base_score not in range [0,1f] for LogisticRegression";
// test PredTransform
HostDeviceVector<bst_float> io_preds = {0, 0.1f, 0.5f, 0.9f, 1};
std::vector<bst_float> out_preds = {0.5f, 0.524f, 0.622f, 0.710f, 0.731f};
obj->PredTransform(&io_preds);
auto& preds = io_preds.HostVector();
for (int i = 0; i < static_cast<int>(io_preds.Size()); ++i) {
EXPECT_NEAR(preds[i], out_preds[i], 0.01f);
}
}
TEST(Plugin, LogisticRawGPairOneAPI) {
GenericParameter lparam = CreateEmptyGenericParam(0);
std::vector<std::pair<std::string, std::string>> args;
std::unique_ptr<ObjFunction> obj {
ObjFunction::Create("binary:logitraw_oneapi", &lparam)
};
obj->Configure(args);
CheckObjFunction(obj,
{ 0, 0.1f, 0.9f, 1, 0, 0.1f, 0.9f, 1},
{ 0, 0, 0, 0, 1, 1, 1, 1},
{ 1, 1, 1, 1, 1, 1, 1, 1},
{ 0.5f, 0.52f, 0.71f, 0.73f, -0.5f, -0.47f, -0.28f, -0.26f},
{0.25f, 0.24f, 0.20f, 0.19f, 0.25f, 0.24f, 0.20f, 0.19f});
}
TEST(Plugin, CPUvsOneAPI) {
GenericParameter lparam = CreateEmptyGenericParam(0);
ObjFunction * obj_cpu =
ObjFunction::Create("reg:squarederror", &lparam);
ObjFunction * obj_oneapi =
ObjFunction::Create("reg:squarederror_oneapi", &lparam);
HostDeviceVector<GradientPair> cpu_out_preds;
HostDeviceVector<GradientPair> oneapi_out_preds;
constexpr size_t kRows = 400;
constexpr size_t kCols = 100;
auto pdmat = RandomDataGenerator(kRows, kCols, 0).Seed(0).GenerateDMatrix();
HostDeviceVector<float> preds;
preds.Resize(kRows);
auto& h_preds = preds.HostVector();
for (size_t i = 0; i < h_preds.size(); ++i) {
h_preds[i] = static_cast<float>(i);
}
auto& info = pdmat->Info();
info.labels_.Resize(kRows);
auto& h_labels = info.labels_.HostVector();
for (size_t i = 0; i < h_labels.size(); ++i) {
h_labels[i] = 1 / static_cast<float>(i+1);
}
{
// CPU
lparam.gpu_id = -1;
obj_cpu->GetGradient(preds, info, 0, &cpu_out_preds);
}
{
// oneapi
lparam.gpu_id = 0;
obj_oneapi->GetGradient(preds, info, 0, &oneapi_out_preds);
}
auto& h_cpu_out = cpu_out_preds.HostVector();
auto& h_oneapi_out = oneapi_out_preds.HostVector();
float sgrad = 0;
float shess = 0;
for (size_t i = 0; i < kRows; ++i) {
sgrad += std::pow(h_cpu_out[i].GetGrad() - h_oneapi_out[i].GetGrad(), 2);
shess += std::pow(h_cpu_out[i].GetHess() - h_oneapi_out[i].GetHess(), 2);
}
ASSERT_NEAR(sgrad, 0.0f, kRtEps);
ASSERT_NEAR(shess, 0.0f, kRtEps);
delete obj_cpu;
delete obj_oneapi;
}
} // namespace xgboost