Implement fit stump. (#8607)
This commit is contained in:
@@ -3,8 +3,10 @@
|
||||
*/
|
||||
#include <gtest/gtest.h>
|
||||
#include <xgboost/context.h>
|
||||
#include <xgboost/linalg.h> // Tensor,Vector
|
||||
|
||||
#include "../../../src/common/stats.h"
|
||||
#include "../../../src/common/transform_iterator.h" // common::MakeIndexTransformIter
|
||||
|
||||
namespace xgboost {
|
||||
namespace common {
|
||||
@@ -69,5 +71,35 @@ TEST(Stats, Median) {
|
||||
ASSERT_EQ(m, .5f);
|
||||
#endif // defined(XGBOOST_USE_CUDA)
|
||||
}
|
||||
namespace {
|
||||
void TestMean(Context const* ctx) {
|
||||
std::size_t n{128};
|
||||
linalg::Vector<float> data({n}, ctx->gpu_id);
|
||||
auto h_v = data.HostView().Values();
|
||||
std::iota(h_v.begin(), h_v.end(), .0f);
|
||||
|
||||
auto nf = static_cast<float>(n);
|
||||
float mean = nf * (nf - 1) / 2 / n;
|
||||
|
||||
linalg::Vector<float> res{{1}, ctx->gpu_id};
|
||||
Mean(ctx, data, &res);
|
||||
auto h_res = res.HostView();
|
||||
ASSERT_EQ(h_res.Size(), 1);
|
||||
ASSERT_EQ(mean, h_res(0));
|
||||
}
|
||||
} // anonymous namespace
|
||||
|
||||
// CPU variant: a default-constructed Context targets the host.
TEST(Stats, Mean) {
  Context ctx;
  TestMean(&ctx);
}
|
||||
|
||||
#if defined(XGBOOST_USE_CUDA)
// GPU variant: point the context at device 0 before running the shared check.
TEST(Stats, GPUMean) {
  Context ctx;
  ctx.UpdateAllowUnknown(Args{{"gpu_id", "0"}});
  TestMean(&ctx);
}
#endif  // defined(XGBOOST_USE_CUDA)
|
||||
} // namespace common
|
||||
} // namespace xgboost
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
#include <vector>
|
||||
|
||||
#include "../../../src/common/stats.cuh"
|
||||
#include "../../../src/common/stats.h"
|
||||
#include "xgboost/base.h"
|
||||
#include "xgboost/context.h"
|
||||
#include "xgboost/host_device_vector.h"
|
||||
|
||||
@@ -66,7 +66,7 @@ TEST(Learner, CheckGroup) {
|
||||
|
||||
std::shared_ptr<DMatrix> p_mat{
|
||||
RandomDataGenerator{kNumRows, kNumCols, 0.0f}.GenerateDMatrix()};
|
||||
std::vector<bst_float> weight(kNumGroups);
|
||||
std::vector<bst_float> weight(kNumGroups, 1);
|
||||
std::vector<bst_int> group(kNumGroups);
|
||||
group[0] = 2;
|
||||
group[1] = 3;
|
||||
|
||||
48
tests/cpp/tree/test_fit_stump.cc
Normal file
48
tests/cpp/tree/test_fit_stump.cc
Normal file
@@ -0,0 +1,48 @@
|
||||
/**
|
||||
* Copyright 2022 by XGBoost Contributors
|
||||
*/
|
||||
#include <gtest/gtest.h>
|
||||
#include <xgboost/linalg.h>
|
||||
|
||||
#include "../../src/common/linalg_op.h"
|
||||
#include "../../src/tree/fit_stump.h"
|
||||
|
||||
namespace xgboost {
|
||||
namespace tree {
|
||||
namespace {
|
||||
void TestFitStump(Context const *ctx) {
|
||||
std::size_t constexpr kRows = 16, kTargets = 2;
|
||||
HostDeviceVector<GradientPair> gpair;
|
||||
auto &h_gpair = gpair.HostVector();
|
||||
h_gpair.resize(kRows * kTargets);
|
||||
for (std::size_t i = 0; i < kRows; ++i) {
|
||||
for (std::size_t t = 0; t < kTargets; ++t) {
|
||||
h_gpair.at(i * kTargets + t) = GradientPair{static_cast<float>(i), 1};
|
||||
}
|
||||
}
|
||||
linalg::Vector<float> out;
|
||||
FitStump(ctx, gpair, kTargets, &out);
|
||||
auto h_out = out.HostView();
|
||||
for (auto it = linalg::cbegin(h_out); it != linalg::cend(h_out); ++it) {
|
||||
// sum_hess == kRows
|
||||
auto n = static_cast<float>(kRows);
|
||||
auto sum_grad = n * (n - 1) / 2;
|
||||
ASSERT_EQ(static_cast<float>(-sum_grad / n), *it);
|
||||
}
|
||||
}
|
||||
} // anonymous namespace
|
||||
|
||||
// CPU variant: a default-constructed Context targets the host.
TEST(InitEstimation, FitStump) {
  Context ctx;
  TestFitStump(&ctx);
}
|
||||
|
||||
#if defined(XGBOOST_USE_CUDA)
// GPU variant: point the context at device 0 before running the shared check.
TEST(InitEstimation, GPUFitStump) {
  Context ctx;
  ctx.UpdateAllowUnknown(Args{{"gpu_id", "0"}});
  TestFitStump(&ctx);
}
#endif  // defined(XGBOOST_USE_CUDA)
|
||||
} // namespace tree
|
||||
} // namespace xgboost
|
||||
Reference in New Issue
Block a user