Implement transform to reduce CPU/GPU code duplication. (#3643)

* Implement Transform class.
* Add tests for softmax.
* Use Transform in regression, softmax and hinge objectives, except for Cox.
* Mark old gpu objective functions deprecated.
* static_assert for softmax.
* Split up multi-gpu tests.
This commit is contained in:
trivialfis
2018-10-02 15:06:21 +13:00
committed by Rory Mitchell
parent 87aca8c244
commit d594b11f35
31 changed files with 1514 additions and 997 deletions

View File

@@ -14,7 +14,7 @@ struct WriteSymbolFunction {
WriteSymbolFunction(CompressedBufferWriter cbw, unsigned char* buffer_data_d,
int* input_data_d)
: cbw(cbw), buffer_data_d(buffer_data_d), input_data_d(input_data_d) {}
__device__ void operator()(size_t i) {
cbw.AtomicWriteSymbol(buffer_data_d, input_data_d[i], i);
}
@@ -28,7 +28,7 @@ struct ReadSymbolFunction {
__device__ void operator()(size_t i) {
output_data_d[i] = ci[i];
}
}
};
TEST(CompressedIterator, TestGPU) {

View File

@@ -10,7 +10,7 @@
namespace xgboost {
namespace common {
TEST(gpu_hist_util, TestDeviceSketch) {
void TestDeviceSketch(const GPUSet& devices) {
// create the data
int nrows = 10001;
std::vector<float> test_data(nrows);
@@ -28,7 +28,7 @@ TEST(gpu_hist_util, TestDeviceSketch) {
tree::TrainParam p;
p.max_bin = 20;
p.gpu_id = 0;
p.n_gpus = GPUSet::AllVisible().Size();
p.n_gpus = devices.Size();
// ensure that the exact quantiles are found
p.gpu_batch_nrows = nrows * 10;
@@ -54,5 +54,17 @@ TEST(gpu_hist_util, TestDeviceSketch) {
delete dmat;
}
// Single-device case: run the sketch test on the first visible GPU only.
TEST(gpu_hist_util, DeviceSketch) {
TestDeviceSketch(GPUSet::Range(0, 1));
}
#if defined(XGBOOST_USE_NCCL)
// Multi-GPU case: sketch across every visible device.
TEST(gpu_hist_util, MGPU_DeviceSketch) {
  auto devices = GPUSet::AllVisible();
  // Skip with a warning instead of hard-failing (CHECK_GT aborts the test)
  // when fewer than two GPUs are visible, matching the behaviour of the
  // other MGPU_* tests in this test suite.
  if (devices.Size() < 2) {
    LOG(WARNING) << "Not testing in multi-gpu environment.";
    return;
  }
  TestDeviceSketch(devices);
}
#endif
} // namespace common
} // namespace xgboost

View File

@@ -178,18 +178,57 @@ TEST(HostDeviceVector, TestCopy) {
SetCudaSetDeviceHandler(nullptr);
}
// The test is not really useful if n_gpus < 2
// Round-trip a vector CPU -> one GPU -> back to CPU and verify that the
// logical size is preserved and the data is unchanged.
TEST(HostDeviceVector, Reshard) {
std::vector<int> h_vec (2345);
for (size_t i = 0; i < h_vec.size(); ++i) {
h_vec[i] = i;
}
HostDeviceVector<int> vec (h_vec);
auto devices = GPUSet::Range(0, 1);
vec.Reshard(devices);
// The whole vector is placed on device 0; logical size is unchanged.
ASSERT_EQ(vec.DeviceSize(0), h_vec.size());
ASSERT_EQ(vec.Size(), h_vec.size());
auto span = vec.DeviceSpan(0); // sync to device
vec.Reshard(GPUSet::Empty()); // pull back to cpu, empty devices.
ASSERT_EQ(vec.Size(), h_vec.size());
ASSERT_TRUE(vec.Devices().IsEmpty());
// Contents must survive the device round trip unmodified.
auto h_vec_1 = vec.HostVector();
ASSERT_TRUE(std::equal(h_vec_1.cbegin(), h_vec_1.cend(), h_vec.cbegin()));
}
// Check that DeviceSpan/ConstDeviceSpan expose exactly the device buffer:
// same length and same underlying pointer as reported by the vector itself.
TEST(HostDeviceVector, Span) {
  HostDeviceVector<float> vec {1.0f, 2.0f, 3.0f, 4.0f};
  vec.Reshard(GPUSet{0, 1});
  auto span = vec.DeviceSpan(0);
  ASSERT_EQ(vec.DeviceSize(0), span.size());
  ASSERT_EQ(vec.DevicePointer(0), span.data());
  auto const_span = vec.ConstDeviceSpan(0);
  // Assert on const_span itself: the original re-checked `span` here,
  // leaving ConstDeviceSpan's size and data pointer unverified.
  ASSERT_EQ(vec.DeviceSize(0), const_span.size());
  ASSERT_EQ(vec.ConstDevicePointer(0), const_span.data());
}
// Multi-GPUs' test
#if defined(XGBOOST_USE_NCCL)
TEST(HostDeviceVector, MGPU_Reshard) {
auto devices = GPUSet::AllVisible();
if (devices.Size() < 2) {
LOG(WARNING) << "Not testing in multi-gpu environment.";
return;
}
std::vector<int> h_vec (2345);
for (size_t i = 0; i < h_vec.size(); ++i) {
h_vec[i] = i;
}
HostDeviceVector<int> vec (h_vec);
// Data size for each device.
std::vector<size_t> devices_size (devices.Size());
// From CPU to GPUs.
// Assuming we have > 1 devices.
vec.Reshard(devices);
size_t total_size = 0;
for (size_t i = 0; i < devices.Size(); ++i) {
@@ -198,42 +237,26 @@ TEST(HostDeviceVector, Reshard) {
}
ASSERT_EQ(total_size, h_vec.size());
ASSERT_EQ(total_size, vec.Size());
auto h_vec_1 = vec.HostVector();
ASSERT_TRUE(std::equal(h_vec_1.cbegin(), h_vec_1.cend(), h_vec.cbegin()));
vec.Reshard(GPUSet::Empty()); // clear out devices memory
// Reshard from devices to devices with different distribution.
EXPECT_ANY_THROW(
vec.Reshard(GPUDistribution::Granular(devices, 12)));
// Shrink down the number of devices.
vec.Reshard(GPUSet::Range(0, 1));
// All data is drawn back to CPU
vec.Reshard(GPUSet::Empty());
ASSERT_TRUE(vec.Devices().IsEmpty());
ASSERT_EQ(vec.Size(), h_vec.size());
ASSERT_EQ(vec.DeviceSize(0), h_vec.size());
h_vec_1 = vec.HostVector();
ASSERT_TRUE(std::equal(h_vec_1.cbegin(), h_vec_1.cend(), h_vec.cbegin()));
vec.Reshard(GPUSet::Empty()); // clear out devices memory
// Grow the number of devices.
vec.Reshard(devices);
vec.Reshard(GPUDistribution::Granular(devices, 12));
total_size = 0;
for (size_t i = 0; i < devices.Size(); ++i) {
total_size += vec.DeviceSize(i);
ASSERT_EQ(devices_size[i], vec.DeviceSize(i));
devices_size[i] = vec.DeviceSize(i);
}
ASSERT_EQ(total_size, h_vec.size());
ASSERT_EQ(total_size, vec.Size());
h_vec_1 = vec.HostVector();
ASSERT_TRUE(std::equal(h_vec_1.cbegin(), h_vec_1.cend(), h_vec.cbegin()));
}
TEST(HostDeviceVector, Span) {
HostDeviceVector<float> vec {1.0f, 2.0f, 3.0f, 4.0f};
vec.Reshard(GPUSet{0, 1});
auto span = vec.DeviceSpan(0);
ASSERT_EQ(vec.Size(), span.size());
ASSERT_EQ(vec.DevicePointer(0), span.data());
auto const_span = vec.ConstDeviceSpan(0);
ASSERT_EQ(vec.Size(), span.size());
ASSERT_EQ(vec.ConstDevicePointer(0), span.data());
}
#endif
} // namespace common
} // namespace xgboost

View File

@@ -7,6 +7,14 @@
#include "../../include/xgboost/base.h"
#include "../../../src/common/span.h"
// Fill [_begin, _end) with the ascending sequence 0.0f, 1.0f, 2.0f, ...
// XGBOOST_DEVICE makes the helper callable from both host and device code.
template <typename Iter>
XGBOOST_DEVICE void InitializeRange(Iter _begin, Iter _end) {
  float value = 0.0f;
  Iter it = _begin;
  while (it != _end) {
    *it = value;
    value += 1.0f;
    ++it;
  }
}
namespace xgboost {
namespace common {
@@ -20,14 +28,6 @@ namespace common {
*(status) = -1; \
}
template <typename Iter>
XGBOOST_DEVICE void InitializeRange(Iter _begin, Iter _end) {
float j = 0;
for (Iter i = _begin; i != _end; ++i, ++j) {
*i = j;
}
}
struct TestTestStatus {
int * status_;

View File

@@ -0,0 +1,61 @@
#include <xgboost/base.h>
#include <gtest/gtest.h>
#include <vector>
#include "../../../src/common/host_device_vector.h"
#include "../../../src/common/transform.h"
#include "../../../src/common/span.h"
#include "../helpers.h"
// Device selection for the unified (CPU/GPU) tests below: when this file is
// compiled as CUDA (__CUDACC__ defined), run on the first GPU; otherwise run
// host-only with an empty device set.
#if defined(__CUDACC__)
#define TRANSFORM_GPU_RANGE GPUSet::Range(0, 1)
#define TRANSFORM_GPU_DIST GPUDistribution::Block(GPUSet::Range(0, 1))
#else
#define TRANSFORM_GPU_RANGE GPUSet::Empty()
#define TRANSFORM_GPU_DIST GPUDistribution::Block(GPUSet::Empty())
#endif
// Fill [_begin, _end) with the ascending sequence 0.0f, 1.0f, 2.0f, ...
template <typename Iter>
void InitializeRange(Iter _begin, Iter _end) {
  float value = 0.0f;
  for (Iter pos = _begin; pos != _end; ++pos) {
    *pos = value;
    value += 1.0f;
  }
}
namespace xgboost {
namespace common {
// Identity functor for Transform tests: copies _in[_idx] to _out[_idx] for
// each index handed to operator(). XGBOOST_DEVICE lets Transform invoke it
// from either host or device code.
// NOTE(review): template parameter T appears unused; the signature hard-codes
// bst_float -- confirm whether T was intended to parameterize the spans.
template <typename T>
struct TestTransformRange {
void XGBOOST_DEVICE operator()(size_t _idx,
Span<bst_float> _out, Span<const bst_float> _in) {
_out[_idx] = _in[_idx];
}
};
// Run the identity transform over a 256-element vector and check the output
// equals the input. DeclareUnifiedTest registers this once for the CPU build
// and once for the CUDA build.
TEST(Transform, DeclareUnifiedTest(Basic)) {
const size_t size {256};
std::vector<bst_float> h_in(size);
std::vector<bst_float> h_out(size);
InitializeRange(h_in.begin(), h_in.end());
// Expected result: identical ascending sequence.
std::vector<bst_float> h_sol(size);
InitializeRange(h_sol.begin(), h_sol.end());
const HostDeviceVector<bst_float> in_vec{h_in, TRANSFORM_GPU_DIST};
HostDeviceVector<bst_float> out_vec{h_out, TRANSFORM_GPU_DIST};
out_vec.Fill(0);
Transform<>::Init(TestTransformRange<bst_float>{}, Range{0, size}, TRANSFORM_GPU_RANGE)
.Eval(&out_vec, &in_vec);
std::vector<bst_float> res = out_vec.HostVector();
ASSERT_TRUE(std::equal(h_sol.begin(), h_sol.end(), res.begin()));
}
} // namespace common
} // namespace xgboost

View File

@@ -0,0 +1,43 @@
// This converts all tests from CPU to GPU.
#include "test_transform_range.cc"
#if defined(XGBOOST_USE_NCCL)
namespace xgboost {
namespace common {
// Test here is multi gpu specific
// Multi-GPU identity transform with deliberately mismatched distributions
// between input and output vectors.
TEST(Transform, MGPU_Basic) {
  auto devices = GPUSet::AllVisible();
  // Skip with a warning instead of hard-failing (CHECK_GT aborts the test)
  // when fewer than two GPUs are visible, matching MGPU_Reshard.
  if (devices.Size() < 2) {
    LOG(WARNING) << "Not testing in multi-gpu environment.";
    return;
  }
  const size_t size {256};
  std::vector<bst_float> h_in(size);
  std::vector<bst_float> h_out(size);
  InitializeRange(h_in.begin(), h_in.end());
  std::vector<bst_float> h_sol(size);
  InitializeRange(h_sol.begin(), h_sol.end());
  // Both vectors start host-only (empty device distribution).
  const HostDeviceVector<bst_float> in_vec {h_in,
      GPUDistribution::Block(GPUSet::Empty())};
  HostDeviceVector<bst_float> out_vec {h_out,
      GPUDistribution::Block(GPUSet::Empty())};
  out_vec.Fill(0);
  // Input is distributed with granularity 8, output as plain blocks.
  in_vec.Reshard(GPUDistribution::Granular(devices, 8));
  out_vec.Reshard(GPUDistribution::Block(devices));
  // Granularity is different, resharding will throw.
  EXPECT_ANY_THROW(
      Transform<>::Init(TestTransformRange<bst_float>{}, Range{0, size}, devices)
      .Eval(&out_vec, &in_vec));
  // With the trailing flag set to false the evaluation succeeds -- presumably
  // it disables automatic resharding; confirm against Transform<>::Init.
  Transform<>::Init(TestTransformRange<bst_float>{}, Range{0, size},
                    devices, false).Eval(&out_vec, &in_vec);
  std::vector<bst_float> res = out_vec.HostVector();
  ASSERT_TRUE(std::equal(h_sol.begin(), h_sol.end(), res.begin()));
}
} // namespace common
} // namespace xgboost
#endif

View File

@@ -125,3 +125,17 @@ std::shared_ptr<xgboost::DMatrix>* CreateDMatrix(int rows, int columns,
&handle);
return static_cast<std::shared_ptr<xgboost::DMatrix> *>(handle);
}
namespace xgboost {
bool IsNear(std::vector<xgboost::bst_float>::const_iterator _beg1,
std::vector<xgboost::bst_float>::const_iterator _end1,
std::vector<xgboost::bst_float>::const_iterator _beg2) {
for (auto iter1 = _beg1, iter2 = _beg2; iter1 != _end1; ++iter1, ++iter2) {
if (std::abs(*iter1 - *iter2) > xgboost::kRtEps){
return false;
}
}
return true;
}
}

View File

@@ -15,6 +15,12 @@
#include <xgboost/objective.h>
#include <xgboost/metric.h>
// DeclareUnifiedTest(name) prefixes the test name with "GPU" when the
// translation unit is compiled by nvcc (__CUDACC__ defined), so the same
// test source can be registered separately for the CPU and GPU builds.
#if defined(__CUDACC__)
#define DeclareUnifiedTest(name) GPU ## name
#else
#define DeclareUnifiedTest(name) name
#endif
std::string TempFileName();
bool FileExists(const std::string name);
@@ -46,6 +52,12 @@ xgboost::bst_float GetMetricEval(
std::vector<xgboost::bst_float> labels,
std::vector<xgboost::bst_float> weights = std::vector<xgboost::bst_float> ());
namespace xgboost {
bool IsNear(std::vector<xgboost::bst_float>::const_iterator _beg1,
std::vector<xgboost::bst_float>::const_iterator _end1,
std::vector<xgboost::bst_float>::const_iterator _beg2);
}
/**
* \fn std::shared_ptr<xgboost::DMatrix> CreateDMatrix(int rows, int columns, float sparsity, int seed);
*

View File

@@ -4,7 +4,7 @@
#include "../helpers.h"
TEST(Objective, HingeObj) {
TEST(Objective, DeclareUnifiedTest(HingeObj)) {
xgboost::ObjFunction * obj = xgboost::ObjFunction::Create("binary:hinge");
std::vector<std::pair<std::string, std::string> > args;
obj->Configure(args);
@@ -15,6 +15,12 @@ TEST(Objective, HingeObj) {
{ 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f},
{ 0.0f, 1.0f, 1.0f, 1.0f, -1.0f, -1.0f, -1.0f, 0.0f},
{ eps, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, eps });
CheckObjFunction(obj,
{-1.0f, -0.5f, 0.5f, 1.0f, -1.0f, -0.5f, 0.5f, 1.0f},
{ 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f},
{}, // Empty weight.
{ 0.0f, 1.0f, 1.0f, 1.0f, -1.0f, -1.0f, -1.0f, 0.0f},
{ eps, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, eps });
ASSERT_NO_THROW(obj->DefaultEvalMetric());

View File

@@ -0,0 +1 @@
#include "test_hinge.cc"

View File

@@ -0,0 +1,60 @@
/*!
* Copyright 2018 XGBoost contributors
*/
#include <xgboost/objective.h>
#include "../helpers.h"
// Gradient/hessian check for multi:softmax with num_class=3: the six preds
// form two rows of three class scores, so only two labels/weights are given.
TEST(Objective, DeclareUnifiedTest(SoftmaxMultiClassObjGPair)) {
xgboost::ObjFunction * obj = xgboost::ObjFunction::Create("multi:softmax");
std::vector<std::pair<std::string, std::string>> args {{"num_class", "3"}};
obj->Configure(args);
CheckObjFunction(obj,
{1, 0, 2, 2, 0, 1}, // preds
{1.0, 0.0}, // labels
{1.0, 1.0}, // weights
{0.24f, -0.91f, 0.66f, -0.33f, 0.09f, 0.24f}, // grad
{0.36, 0.16, 0.44, 0.45, 0.16, 0.37}); // hess
ASSERT_NO_THROW(obj->DefaultEvalMetric());
delete obj;
}
// PredTransform for multi:softmax collapses each row of class scores to the
// arg-max class index: rows {2,0,1} and {1,0,2} become classes 0 and 2.
TEST(Objective, DeclareUnifiedTest(SoftmaxMultiClassBasic)) {
xgboost::ObjFunction * obj = xgboost::ObjFunction::Create("multi:softmax");
std::vector<std::pair<std::string, std::string>> args
{std::pair<std::string, std::string>("num_class", "3")};
obj->Configure(args);
xgboost::HostDeviceVector<xgboost::bst_float> io_preds = {2.0f, 0.0f, 1.0f,
1.0f, 0.0f, 2.0f};
std::vector<xgboost::bst_float> out_preds = {0.0f, 2.0f};
obj->PredTransform(&io_preds);
auto& preds = io_preds.HostVector();
for (int i = 0; i < static_cast<int>(io_preds.Size()); ++i) {
EXPECT_NEAR(preds[i], out_preds[i], 0.01f);
}
delete obj;
}
// PredTransform for multi:softprob keeps one probability per class: the
// expected values are softmax({2, 0, 1}).
TEST(Objective, DeclareUnifiedTest(SoftprobMultiClassBasic)) {
xgboost::ObjFunction * obj = xgboost::ObjFunction::Create("multi:softprob");
std::vector<std::pair<std::string, std::string>> args
{std::pair<std::string, std::string>("num_class", "3")};
obj->Configure(args);
xgboost::HostDeviceVector<xgboost::bst_float> io_preds = {2.0f, 0.0f, 1.0f};
std::vector<xgboost::bst_float> out_preds = {0.66524096f, 0.09003057f, 0.24472847f};
obj->PredTransform(&io_preds);
auto& preds = io_preds.HostVector();
for (int i = 0; i < static_cast<int>(io_preds.Size()); ++i) {
EXPECT_NEAR(preds[i], out_preds[i], 0.01f);
}
delete obj;
}

View File

@@ -0,0 +1 @@
#include "test_multiclass_obj.cc"

View File

@@ -1,9 +1,11 @@
// Copyright by Contributors
/*!
* Copyright 2017-2018 XGBoost contributors
*/
#include <xgboost/objective.h>
#include "../helpers.h"
TEST(Objective, LinearRegressionGPair) {
TEST(Objective, DeclareUnifiedTest(LinearRegressionGPair)) {
xgboost::ObjFunction * obj = xgboost::ObjFunction::Create("reg:linear");
std::vector<std::pair<std::string, std::string> > args;
obj->Configure(args);
@@ -13,27 +15,32 @@ TEST(Objective, LinearRegressionGPair) {
{1, 1, 1, 1, 1, 1, 1, 1},
{0, 0.1f, 0.9f, 1.0f, -1.0f, -0.9f, -0.1f, 0},
{1, 1, 1, 1, 1, 1, 1, 1});
CheckObjFunction(obj,
{0, 0.1f, 0.9f, 1, 0, 0.1f, 0.9f, 1},
{0, 0, 0, 0, 1, 1, 1, 1},
{}, // empty weight
{0, 0.1f, 0.9f, 1.0f, -1.0f, -0.9f, -0.1f, 0},
{1, 1, 1, 1, 1, 1, 1, 1});
ASSERT_NO_THROW(obj->DefaultEvalMetric());
delete obj;
}
TEST(Objective, LogisticRegressionGPair) {
TEST(Objective, DeclareUnifiedTest(LogisticRegressionGPair)) {
xgboost::ObjFunction * obj = xgboost::ObjFunction::Create("reg:logistic");
std::vector<std::pair<std::string, std::string> > args;
obj->Configure(args);
CheckObjFunction(obj,
{ 0, 0.1f, 0.9f, 1, 0, 0.1f, 0.9f, 1},
{ 0, 0, 0, 0, 1, 1, 1, 1},
{ 1, 1, 1, 1, 1, 1, 1, 1},
{ 0.5f, 0.52f, 0.71f, 0.73f, -0.5f, -0.47f, -0.28f, -0.26f},
{0.25f, 0.24f, 0.20f, 0.19f, 0.25f, 0.24f, 0.20f, 0.19f});
{ 0, 0.1f, 0.9f, 1, 0, 0.1f, 0.9f, 1}, // preds
{ 0, 0, 0, 0, 1, 1, 1, 1}, // labels
{ 1, 1, 1, 1, 1, 1, 1, 1}, // weights
{ 0.5f, 0.52f, 0.71f, 0.73f, -0.5f, -0.47f, -0.28f, -0.26f}, // out_grad
{0.25f, 0.24f, 0.20f, 0.19f, 0.25f, 0.24f, 0.20f, 0.19f}); // out_hess
delete obj;
}
TEST(Objective, LogisticRegressionBasic) {
TEST(Objective, DeclareUnifiedTest(LogisticRegressionBasic)) {
xgboost::ObjFunction * obj = xgboost::ObjFunction::Create("reg:logistic");
std::vector<std::pair<std::string, std::string> > args;
obj->Configure(args);
@@ -61,7 +68,7 @@ TEST(Objective, LogisticRegressionBasic) {
delete obj;
}
TEST(Objective, LogisticRawGPair) {
TEST(Objective, DeclareUnifiedTest(LogisticRawGPair)) {
xgboost::ObjFunction * obj = xgboost::ObjFunction::Create("binary:logitraw");
std::vector<std::pair<std::string, std::string> > args;
obj->Configure(args);
@@ -75,7 +82,7 @@ TEST(Objective, LogisticRawGPair) {
delete obj;
}
TEST(Objective, PoissonRegressionGPair) {
TEST(Objective, DeclareUnifiedTest(PoissonRegressionGPair)) {
xgboost::ObjFunction * obj = xgboost::ObjFunction::Create("count:poisson");
std::vector<std::pair<std::string, std::string> > args;
args.push_back(std::make_pair("max_delta_step", "0.1f"));
@@ -86,11 +93,16 @@ TEST(Objective, PoissonRegressionGPair) {
{ 1, 1, 1, 1, 1, 1, 1, 1},
{ 1, 1.10f, 2.45f, 2.71f, 0, 0.10f, 1.45f, 1.71f},
{1.10f, 1.22f, 2.71f, 3.00f, 1.10f, 1.22f, 2.71f, 3.00f});
CheckObjFunction(obj,
{ 0, 0.1f, 0.9f, 1, 0, 0.1f, 0.9f, 1},
{ 0, 0, 0, 0, 1, 1, 1, 1},
{}, // Empty weight
{ 1, 1.10f, 2.45f, 2.71f, 0, 0.10f, 1.45f, 1.71f},
{1.10f, 1.22f, 2.71f, 3.00f, 1.10f, 1.22f, 2.71f, 3.00f});
delete obj;
}
TEST(Objective, PoissonRegressionBasic) {
TEST(Objective, DeclareUnifiedTest(PoissonRegressionBasic)) {
xgboost::ObjFunction * obj = xgboost::ObjFunction::Create("count:poisson");
std::vector<std::pair<std::string, std::string> > args;
obj->Configure(args);
@@ -116,7 +128,7 @@ TEST(Objective, PoissonRegressionBasic) {
delete obj;
}
TEST(Objective, GammaRegressionGPair) {
TEST(Objective, DeclareUnifiedTest(GammaRegressionGPair)) {
xgboost::ObjFunction * obj = xgboost::ObjFunction::Create("reg:gamma");
std::vector<std::pair<std::string, std::string> > args;
obj->Configure(args);
@@ -126,11 +138,16 @@ TEST(Objective, GammaRegressionGPair) {
{1, 1, 1, 1, 1, 1, 1, 1},
{1, 1, 1, 1, 0, 0.09f, 0.59f, 0.63f},
{0, 0, 0, 0, 1, 0.90f, 0.40f, 0.36f});
CheckObjFunction(obj,
{0, 0.1f, 0.9f, 1, 0, 0.1f, 0.9f, 1},
{0, 0, 0, 0, 1, 1, 1, 1},
{}, // Empty weight
{1, 1, 1, 1, 0, 0.09f, 0.59f, 0.63f},
{0, 0, 0, 0, 1, 0.90f, 0.40f, 0.36f});
delete obj;
}
TEST(Objective, GammaRegressionBasic) {
TEST(Objective, DeclareUnifiedTest(GammaRegressionBasic)) {
xgboost::ObjFunction * obj = xgboost::ObjFunction::Create("reg:gamma");
std::vector<std::pair<std::string, std::string> > args;
obj->Configure(args);
@@ -156,7 +173,7 @@ TEST(Objective, GammaRegressionBasic) {
delete obj;
}
TEST(Objective, TweedieRegressionGPair) {
TEST(Objective, DeclareUnifiedTest(TweedieRegressionGPair)) {
xgboost::ObjFunction * obj = xgboost::ObjFunction::Create("reg:tweedie");
std::vector<std::pair<std::string, std::string> > args;
args.push_back(std::make_pair("tweedie_variance_power", "1.1f"));
@@ -167,11 +184,17 @@ TEST(Objective, TweedieRegressionGPair) {
{ 1, 1, 1, 1, 1, 1, 1, 1},
{ 1, 1.09f, 2.24f, 2.45f, 0, 0.10f, 1.33f, 1.55f},
{0.89f, 0.98f, 2.02f, 2.21f, 1, 1.08f, 2.11f, 2.30f});
CheckObjFunction(obj,
{ 0, 0.1f, 0.9f, 1, 0, 0.1f, 0.9f, 1},
{ 0, 0, 0, 0, 1, 1, 1, 1},
{}, // Empty weight.
{ 1, 1.09f, 2.24f, 2.45f, 0, 0.10f, 1.33f, 1.55f},
{0.89f, 0.98f, 2.02f, 2.21f, 1, 1.08f, 2.11f, 2.30f});
delete obj;
}
TEST(Objective, TweedieRegressionBasic) {
TEST(Objective, DeclareUnifiedTest(TweedieRegressionBasic)) {
xgboost::ObjFunction * obj = xgboost::ObjFunction::Create("reg:tweedie");
std::vector<std::pair<std::string, std::string> > args;
obj->Configure(args);
@@ -197,6 +220,9 @@ TEST(Objective, TweedieRegressionBasic) {
delete obj;
}
// CoxRegression not implemented in GPU code, no need for testing.
#if !defined(__CUDACC__)
TEST(Objective, CoxRegressionGPair) {
xgboost::ObjFunction * obj = xgboost::ObjFunction::Create("survival:cox");
std::vector<std::pair<std::string, std::string> > args;
@@ -210,3 +236,4 @@ TEST(Objective, CoxRegressionGPair) {
delete obj;
}
#endif

View File

@@ -1,78 +1,6 @@
/*!
* Copyright 2017 XGBoost contributors
* Copyright 2018 XGBoost contributors
*/
#include <xgboost/objective.h>
// Dummy file to keep the CUDA tests.
#include "../helpers.h"
TEST(Objective, GPULinearRegressionGPair) {
xgboost::ObjFunction * obj = xgboost::ObjFunction::Create("gpu:reg:linear");
std::vector<std::pair<std::string, std::string> > args;
obj->Configure(args);
CheckObjFunction(obj,
{0, 0.1f, 0.9f, 1, 0, 0.1f, 0.9f, 1},
{0, 0, 0, 0, 1, 1, 1, 1},
{1, 1, 1, 1, 1, 1, 1, 1},
{0, 0.1f, 0.9f, 1.0f, -1.0f, -0.9f, -0.1f, 0},
{1, 1, 1, 1, 1, 1, 1, 1});
ASSERT_NO_THROW(obj->DefaultEvalMetric());
delete obj;
}
TEST(Objective, GPULogisticRegressionGPair) {
xgboost::ObjFunction * obj = xgboost::ObjFunction::Create("gpu:reg:logistic");
std::vector<std::pair<std::string, std::string> > args;
obj->Configure(args);
CheckObjFunction(obj,
{ 0, 0.1f, 0.9f, 1, 0, 0.1f, 0.9f, 1},
{ 0, 0, 0, 0, 1, 1, 1, 1},
{ 1, 1, 1, 1, 1, 1, 1, 1},
{ 0.5f, 0.52f, 0.71f, 0.73f, -0.5f, -0.47f, -0.28f, -0.26f},
{0.25f, 0.24f, 0.20f, 0.19f, 0.25f, 0.24f, 0.20f, 0.19f});
delete obj;
}
TEST(Objective, GPULogisticRegressionBasic) {
xgboost::ObjFunction * obj = xgboost::ObjFunction::Create("gpu:reg:logistic");
std::vector<std::pair<std::string, std::string> > args;
obj->Configure(args);
// test label validation
EXPECT_ANY_THROW(CheckObjFunction(obj, {0}, {10}, {1}, {0}, {0}))
<< "Expected error when label not in range [0,1f] for LogisticRegression";
// test ProbToMargin
EXPECT_NEAR(obj->ProbToMargin(0.1f), -2.197f, 0.01f);
EXPECT_NEAR(obj->ProbToMargin(0.5f), 0, 0.01f);
EXPECT_NEAR(obj->ProbToMargin(0.9f), 2.197f, 0.01f);
EXPECT_ANY_THROW(obj->ProbToMargin(10))
<< "Expected error when base_score not in range [0,1f] for LogisticRegression";
// test PredTransform
xgboost::HostDeviceVector<xgboost::bst_float> io_preds = {0, 0.1f, 0.5f, 0.9f, 1};
std::vector<xgboost::bst_float> out_preds = {0.5f, 0.524f, 0.622f, 0.710f, 0.731f};
obj->PredTransform(&io_preds);
auto& preds = io_preds.HostVector();
for (int i = 0; i < static_cast<int>(io_preds.Size()); ++i) {
EXPECT_NEAR(preds[i], out_preds[i], 0.01f);
}
delete obj;
}
TEST(Objective, GPULogisticRawGPair) {
xgboost::ObjFunction * obj = xgboost::ObjFunction::Create("gpu:binary:logitraw");
std::vector<std::pair<std::string, std::string> > args;
obj->Configure(args);
CheckObjFunction(obj,
{ 0, 0.1f, 0.9f, 1, 0, 0.1f, 0.9f, 1},
{ 0, 0, 0, 0, 1, 1, 1, 1},
{ 1, 1, 1, 1, 1, 1, 1, 1},
{ 0.5f, 0.52f, 0.71f, 0.73f, -0.5f, -0.47f, -0.28f, -0.26f},
{0.25f, 0.24f, 0.20f, 0.19f, 0.25f, 0.24f, 0.20f, 0.19f});
delete obj;
}
#include "test_regression_obj.cc"