Cmake improvements (#2487)

* Cmake improvements
* Add google test to cmake
Rory Mitchell 2017-07-06 18:05:11 +12:00 committed by GitHub
parent 8ceeb32bad
commit e939192978
13 changed files with 291 additions and 257 deletions
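For context, the build options this commit introduces can be pre-seeded through a CMake cache script passed to cmake with -C. A minimal sketch, not part of the commit: the option names, defaults, and help strings are taken from the CMakeLists.txt diff below, while the file name config.cmake is arbitrary.

# config.cmake -- hypothetical cache pre-load script (cmake -C config.cmake ..)
set(GOOGLE_TEST ON CACHE BOOL "Build google tests")
set(PLUGIN_UPDATER_GPU ON CACHE BOOL "Build GPU accelerated tree construction plugin")
set(GPU_COMPUTE_VER "35;50;52;60;61" CACHE STRING "Space separated list of compute versions to be built against")
set(JVM_BINDINGS OFF CACHE BOOL "Build JVM bindings")

Because option() and set(... CACHE ...) do not overwrite existing cache entries, pre-loading these values switches the features on without editing CMakeLists.txt.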


@ -1,59 +1,32 @@
cmake_minimum_required (VERSION 3.2)
project(xgboost)
include(cmake/Utils.cmake)
find_package(OpenMP)
set_default_configuration_release()
msvc_use_static_runtime()
# Options
option(PLUGIN_UPDATER_GPU "Build GPU accelerated tree construction plugin")
if(PLUGIN_UPDATER_GPU)
cmake_minimum_required (VERSION 3.5)
endif()
option(JVM_BINDINGS "Build JVM bindings" OFF)
option(GOOGLE_TEST "Build google tests" OFF)
set(GPU_COMPUTE_VER 35;50;52;60;61 CACHE STRING
"Space separated list of compute versions to be built against")
# Compiler flags
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
if(NOT MSVC)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -O3 -funroll-loops -msse2 -D_MWAITXINTRIN_H_INCLUDED -D_FORCE_INLINES")
endif()
# Make sure we are using C++11
# Visual Studio 12.0 and newer supports enough c++11 to make this work
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
if(MSVC)
if(MSVC_VERSION LESS 1800)
message(STATUS "The compiler ${CMAKE_CXX_COMPILER} has no C++11 support. Please use a different C++ compiler.")
endif()
# Multithreaded compilation
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP")
else()
# GCC 4.6 with c++0x supports enough to make this work
include(CheckCXXCompilerFlag)
CHECK_CXX_COMPILER_FLAG("-std=c++11" COMPILER_SUPPORTS_CXX11)
CHECK_CXX_COMPILER_FLAG("-std=c++0x" COMPILER_SUPPORTS_CXX0X)
set(STD_FLAG "")
if(COMPILER_SUPPORTS_CXX11)
set(STD_FLAG "-std=c++11")
elseif(COMPILER_SUPPORTS_CXX0X)
set(STD_FLAG "-std=c++0x")
else()
message(STATUS "The compiler ${CMAKE_CXX_COMPILER} has no C++11 support. Please use a different C++ compiler.")
endif()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${STD_FLAG}")
endif()
#Make sure we are using the static runtime
if(MSVC)
set(variables
CMAKE_C_FLAGS_DEBUG
CMAKE_C_FLAGS_MINSIZEREL
CMAKE_C_FLAGS_RELEASE
CMAKE_C_FLAGS_RELWITHDEBINFO
CMAKE_CXX_FLAGS_DEBUG
CMAKE_CXX_FLAGS_MINSIZEREL
CMAKE_CXX_FLAGS_RELEASE
CMAKE_CXX_FLAGS_RELWITHDEBINFO
)
foreach(variable ${variables})
if(${variable} MATCHES "/MD")
string(REGEX REPLACE "/MD" "/MT" ${variable} "${${variable}}")
endif()
endforeach()
# Correct error for GCC 5 and cuda
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_MWAITXINTRIN_H_INCLUDED -D_FORCE_INLINES")
# Performance
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -funroll-loops")
endif()
include_directories (
@ -62,17 +35,14 @@ include_directories (
${PROJECT_SOURCE_DIR}/rabit/include
)
file(GLOB SOURCES
src/c_api/*.cc
src/common/*.cc
src/data/*.cc
src/gbm/*.cc
src/metric/*.cc
src/objective/*.cc
src/tree/*.cc
file(GLOB_RECURSE SOURCES
src/*.cc
src/*.h
)
# Only add main function for executable target
list(REMOVE_ITEM SOURCES ${PROJECT_SOURCE_DIR}/src/cli_main.cc)
# TODO: Create rabit cmakelists.txt
set(RABIT_SOURCES
rabit/src/allreduce_base.cc
rabit/src/allreduce_robust.cc
@ -80,81 +50,59 @@ set(RABIT_SOURCES
rabit/src/c_api.cc
)
file(GLOB CUDA_SOURCES
file(GLOB_RECURSE CUDA_SOURCES
plugin/updater_gpu/src/*.cu
plugin/updater_gpu/src/exact/*.cu
plugin/updater_gpu/src/*.cuh
)
add_subdirectory(dmlc-core)
add_library(rabit STATIC ${RABIT_SOURCES})
#Set library output directories
if(MSVC)
#With MSVC shared library is considered runtime
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_RELEASE ${PROJECT_SOURCE_DIR}/lib)
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_RELEASE ${PROJECT_SOURCE_DIR}/lib)
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_DEBUG ${PROJECT_SOURCE_DIR}/lib)
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_DEBUG ${PROJECT_SOURCE_DIR}/lib)
else()
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_SOURCE_DIR}/lib)
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${PROJECT_SOURCE_DIR})
endif()
set(LINK_LIBRARIES dmlccore rabit)
# GPU Plugin
if(PLUGIN_UPDATER_GPU)
find_package(CUDA REQUIRED)
# nccl
find_package(CUDA 7.5 REQUIRED)
cmake_minimum_required(VERSION 3.5)
add_subdirectory(nccl)
set(NCCL_DIRECTORY ${PROJECT_SOURCE_DIR}/nccl)
include_directories(${NCCL_DIRECTORY}/src)
#Find cub
set(CUB_DIRECTORY ${PROJECT_SOURCE_DIR}/cub/)
include_directories(${CUB_DIRECTORY})
#Find googletest
set(GTEST_DIRECTORY "${CACHE_PREFIX}" CACHE PATH "Googletest directory")
include_directories(${GTEST_DIRECTORY}/include)
#gencode flags
set(GPU_COMPUTE_VER 35;50;52;60;61 CACHE STRING
"Space separated list of compute versions to be built against")
include_directories(
nccl/src
cub
)
set(GENCODE_FLAGS "")
foreach(ver ${GPU_COMPUTE_VER})
set(GENCODE_FLAGS "${GENCODE_FLAGS}-gencode arch=compute_${ver},code=sm_${ver};")
endforeach()
format_gencode_flags("${GPU_COMPUTE_VER}" GENCODE_FLAGS)
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS};--expt-extended-lambda;${GENCODE_FLAGS};-lineinfo;")
if(NOT MSVC)
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS};-Xcompiler -fPIC")
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS};-Xcompiler -fPIC; -std=c++11")
endif()
cuda_add_library(gpuxgboost ${CUDA_SOURCES} STATIC)
target_link_libraries(gpuxgboost nccl)
list(APPEND LINK_LIBRARIES gpuxgboost)
list(APPEND SOURCES plugin/updater_gpu/src/register_updater_gpu.cc)
else()
set(CUDA_OBJS "")
endif()
add_library(objxgboost OBJECT ${SOURCES})
set_target_properties(${objxgboost} PROPERTIES POSITION_INDEPENDENT_CODE 1)
add_library(rabit STATIC ${RABIT_SOURCES})
add_executable(runxgboost $<TARGET_OBJECTS:objxgboost>)
set_target_properties(runxgboost PROPERTIES OUTPUT_NAME xgboost)
add_library(objxgboost OBJECT ${SOURCES})
# Executable
add_executable(runxgboost $<TARGET_OBJECTS:objxgboost> src/cli_main.cc)
set_target_properties(runxgboost PROPERTIES
OUTPUT_NAME xgboost
)
set_output_directory(runxgboost ${PROJECT_SOURCE_DIR})
target_link_libraries(runxgboost ${LINK_LIBRARIES})
# Shared library
add_library(xgboost SHARED $<TARGET_OBJECTS:objxgboost>)
target_link_libraries(xgboost ${LINK_LIBRARIES})
set_output_directory(xgboost ${PROJECT_SOURCE_DIR}/lib)
#Ensure these two targets do not build simultaneously, as they produce outputs with conflicting names
add_dependencies(xgboost runxgboost)
option(JVM_BINDINGS "Build JVM bindings" OFF)
# JVM
if(JVM_BINDINGS)
find_package(JNI QUIET REQUIRED)
@ -163,7 +111,35 @@ if(JVM_BINDINGS)
add_library(xgboost4j SHARED
$<TARGET_OBJECTS:objxgboost>
jvm-packages/xgboost4j/src/native/xgboost4j.cpp)
set_output_directory(xgboost4j ${PROJECT_SOURCE_DIR}/lib)
target_link_libraries(xgboost4j
${LINK_LIBRARIES}
${JAVA_JVM_LIBRARY})
endif()
# Test
if(GOOGLE_TEST)
enable_testing()
find_package(GTest REQUIRED)
file(GLOB_RECURSE TEST_SOURCES "tests/cpp/*.cc")
auto_source_group("${TEST_SOURCES}")
include_directories(${GTEST_INCLUDE_DIRS})
if(PLUGIN_UPDATER_GPU)
file(GLOB_RECURSE CUDA_TEST_SOURCES "plugin/updater_gpu/test/cpp/*.cu")
set(CUDA_VERBOSE_BUILD ON)
cuda_compile(CUDA_TEST_OBJS ${CUDA_TEST_SOURCES})
else()
set(CUDA_TEST_OBJS "")
endif()
add_executable(testxgboost ${TEST_SOURCES} ${CUDA_TEST_OBJS} $<TARGET_OBJECTS:objxgboost>)
set_output_directory(testxgboost ${PROJECT_SOURCE_DIR})
target_link_libraries(testxgboost ${GTEST_BOTH_LIBRARIES} ${LINK_LIBRARIES})
add_test(TestXGBoost testxgboost)
endif()
# Group sources
auto_source_group("${SOURCES}")
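A side note on the structure the new CMakeLists.txt converges on: the library sources are compiled once into an object library and every target then reuses the objects. A condensed sketch of that pattern, with target names taken from the diff above:

add_library(objxgboost OBJECT ${SOURCES})
add_executable(runxgboost $<TARGET_OBJECTS:objxgboost> src/cli_main.cc)  # CLI binary
add_library(xgboost SHARED $<TARGET_OBJECTS:objxgboost>)                 # shared library
add_executable(testxgboost ${TEST_SOURCES} $<TARGET_OBJECTS:objxgboost>) # google tests

Routing everything through the object library avoids compiling the same .cc files separately for the executable, the shared library, the JVM bindings, and the test runner.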


@ -20,9 +20,9 @@ before_build:
- mkdir build2013
- mkdir build2015
- cd build2013
- cmake .. -G"Visual Studio 12 2013 Win64"
- cmake .. -G"Visual Studio 12 2013 Win64" -DCMAKE_CONFIGURATION_TYPES="Release;Debug;"
- cd ../build2015
- cmake .. -G"Visual Studio 14 2015 Win64"
- cmake .. -G"Visual Studio 14 2015 Win64" -DCMAKE_CONFIGURATION_TYPES="Release;Debug;"
build_script:
- cd %APPVEYOR_BUILD_FOLDER%
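The -DCMAKE_CONFIGURATION_TYPES override above works together with the new set_default_configuration_release() helper in cmake/Utils.cmake (added below), which only collapses the configuration list when the generator's default is untouched. A sketch of the relevant branch, assuming a multi-config Visual Studio generator:

if(CMAKE_CONFIGURATION_TYPES STREQUAL "Debug;Release;MinSizeRel;RelWithDebInfo") # untouched default
  set(CMAKE_CONFIGURATION_TYPES Release CACHE STRING "" FORCE)                   # Release only
endif()

Passing "Release;Debug;" on the command line therefore keeps both configurations available to the AppVeyor build.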

cmake/Utils.cmake (new file, 61 lines)

@ -0,0 +1,61 @@
# Automatically set source group based on folder
function(auto_source_group SOURCES)
foreach(FILE ${SOURCES})
get_filename_component(PARENT_DIR "${FILE}" PATH)
# strip the current source dir prefix and convert /'s to \\'s so source_group gets folder names
string(REPLACE "${CMAKE_CURRENT_LIST_DIR}" "" GROUP "${PARENT_DIR}")
string(REPLACE "/" "\\\\" GROUP "${GROUP}")
string(REGEX REPLACE "^\\\\" "" GROUP "${GROUP}")
source_group("${GROUP}" FILES "${FILE}")
endforeach()
endfunction(auto_source_group)
# Force static runtime for MSVC
function(msvc_use_static_runtime)
if(MSVC)
set(variables
CMAKE_CXX_FLAGS_DEBUG
CMAKE_CXX_FLAGS_MINSIZEREL
CMAKE_CXX_FLAGS_RELEASE
CMAKE_CXX_FLAGS_RELWITHDEBINFO
)
foreach(variable ${variables})
if(${variable} MATCHES "/MD")
string(REGEX REPLACE "/MD" "/MT" ${variable} "${${variable}}")
set(${variable} "${${variable}}" PARENT_SCOPE)
endif()
endforeach()
endif()
endfunction(msvc_use_static_runtime)
# Set output directory of target, ignoring debug or release
function(set_output_directory target dir)
set_target_properties(${target} PROPERTIES
RUNTIME_OUTPUT_DIRECTORY ${dir}
RUNTIME_OUTPUT_DIRECTORY_DEBUG ${dir}
RUNTIME_OUTPUT_DIRECTORY_RELEASE ${dir}
LIBRARY_OUTPUT_DIRECTORY ${dir}
LIBRARY_OUTPUT_DIRECTORY_DEBUG ${dir}
LIBRARY_OUTPUT_DIRECTORY_RELEASE ${dir}
)
endfunction(set_output_directory)
# Set a default build type to release if none was specified
function(set_default_configuration_release)
if(CMAKE_CONFIGURATION_TYPES STREQUAL "Debug;Release;MinSizeRel;RelWithDebInfo") # multiconfig generator?
set(CMAKE_CONFIGURATION_TYPES Release CACHE STRING "" FORCE)
elseif(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
message(STATUS "Setting build type to 'Release' as none was specified.")
set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build." FORCE )
endif()
endfunction(set_default_configuration_release)
# Generate nvcc gencode flags for each requested compute version
function(format_gencode_flags flags out)
foreach(ver ${flags})
# accumulate locally; set(... PARENT_SCOPE) does not update the local copy,
# so exporting inside the loop would leave only the last version in the result
set(${out} "${${out}}-gencode arch=compute_${ver},code=sm_${ver};")
endforeach()
set(${out} "${${out}}" PARENT_SCOPE)
endfunction(format_gencode_flags)
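For reference, this is how the new CMakeLists.txt above consumes these helpers; the calls are taken from the diff, and the expanded gencode string in the comment is a worked example for a two-entry GPU_COMPUTE_VER.

include(cmake/Utils.cmake)
set_default_configuration_release()
msvc_use_static_runtime()
format_gencode_flags("${GPU_COMPUTE_VER}" GENCODE_FLAGS)
# e.g. for GPU_COMPUTE_VER "35;50" GENCODE_FLAGS becomes:
#   -gencode arch=compute_35,code=sm_35;-gencode arch=compute_50,code=sm_50;
set_output_directory(xgboost ${PROJECT_SOURCE_DIR}/lib)
auto_source_group("${SOURCES}")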


@ -86,7 +86,7 @@ if __name__ == "__main__":
args = ["-D{0}:BOOL={1}".format(k, v) for k, v in CONFIG.items()]
run("cmake .. " + " ".join(args) + maybe_generator)
run("cmake --build .")
run("cmake --build . --config Release")
with cd("demo/regression"):
run(sys.executable + " mapfeat.py")


@ -13,11 +13,9 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gtest/gtest.h"
#include <gtest/gtest.h>
#include "../../src/exact/argmax_by_key.cuh"
#include "../../src/exact/gradients.cuh"
#include "../../src/exact/node.cuh"
#include "../../src/exact/loss_functions.cuh"
#include "utils.cuh"
@ -56,26 +54,26 @@ void argMaxTest(ArgMaxByKeyAlgo algo) {
const int nVals = 1024;
const int level = 0;
const int nKeys = 1 << level;
gpu_gpair* scans = new gpu_gpair[nVals];
bst_gpair* scans = new bst_gpair[nVals];
float* vals = new float[nVals];
int* colIds = new int[nVals];
scans[0] = gpu_gpair();
scans[0] = bst_gpair();
vals[0] = 0.f;
colIds[0] = 0;
for (int i = 1; i < nVals; ++i) {
scans[i].g = scans[i-1].g + (0.1f * 2.f);
scans[i].h = scans[i-1].h + (0.1f * 2.f);
scans[i].grad = scans[i-1].grad + (0.1f * 2.f);
scans[i].hess = scans[i-1].hess + (0.1f * 2.f);
vals[i] = static_cast<float>(i) * 0.1f;
colIds[i] = 0;
}
float* dVals;
allocateAndUpdateOnGpu<float>(dVals, vals, nVals);
gpu_gpair* dScans;
allocateAndUpdateOnGpu<gpu_gpair>(dScans, scans, nVals);
gpu_gpair* sums = new gpu_gpair[nKeys];
sums[0].g = sums[0].h = (0.1f * 2.f * nVals);
gpu_gpair* dSums;
allocateAndUpdateOnGpu<gpu_gpair>(dSums, sums, nKeys);
bst_gpair* dScans;
allocateAndUpdateOnGpu<bst_gpair>(dScans, scans, nVals);
bst_gpair* sums = new bst_gpair[nKeys];
sums[0].grad = sums[0].hess = (0.1f * 2.f * nVals);
bst_gpair* dSums;
allocateAndUpdateOnGpu<bst_gpair>(dSums, sums, nKeys);
int* dColIds;
allocateAndUpdateOnGpu<int>(dColIds, colIds, nVals);
Split* splits = new Split[nKeys];
@ -93,7 +91,7 @@ void argMaxTest(ArgMaxByKeyAlgo algo) {
param.reg_alpha = 0.f;
param.reg_lambda = 2.f;
param.max_delta_step = 0.f;
nodes[0].score = CalcGain(param, sums[0].g, sums[0].h);
nodes[0].score = CalcGain(param, sums[0].grad, sums[0].hess);
Node<node_id_t>* dNodes;
allocateAndUpdateOnGpu<Node<node_id_t> >(dNodes, nodes, nKeys);
argMaxByKey<node_id_t>(dSplits, dScans, dSums, dVals, dColIds, dNodeAssigns,


@ -31,16 +31,16 @@ class ReduceScanByKey: public Generator<node_id_t> {
hSums(nullptr), dSums(nullptr), hScans(nullptr), dScans(nullptr),
outSize(this->size), nSegments(this->nKeys*this->nCols),
hOffsets(nullptr), dOffsets(nullptr) {
hSums = new gpu_gpair[nSegments];
allocateOnGpu<gpu_gpair>(dSums, nSegments);
hScans = new gpu_gpair[outSize];
allocateOnGpu<gpu_gpair>(dScans, outSize);
gpu_gpair* buckets = new gpu_gpair[nSegments];
hSums = new bst_gpair[nSegments];
allocateOnGpu<bst_gpair>(dSums, nSegments);
hScans = new bst_gpair[outSize];
allocateOnGpu<bst_gpair>(dScans, outSize);
bst_gpair* buckets = new bst_gpair[nSegments];
for (int i = 0; i < nSegments; i++) {
buckets[i] = gpu_gpair();
buckets[i] = bst_gpair();
}
for (int i = 0; i < nSegments; i++) {
hSums[i] = gpu_gpair();
hSums[i] = bst_gpair();
}
for (size_t i = 0; i < this->size; i++) {
if (this->hKeys[i] >= 0 && this->hKeys[i] < nSegments) {
@ -77,10 +77,10 @@ class ReduceScanByKey: public Generator<node_id_t> {
}
void run() {
gpu_gpair* tmpScans;
bst_gpair* tmpScans;
int* tmpKeys;
int tmpSize = scanTempBufferSize(this->size);
allocateOnGpu<gpu_gpair>(tmpScans, tmpSize);
allocateOnGpu<bst_gpair>(tmpScans, tmpSize);
allocateOnGpu<int>(tmpKeys, tmpSize);
TIMEIT(reduceScanByKey<node_id_t>
(dSums, dScans, this->dVals, this->dInstIds, this->dKeys,
@ -94,10 +94,10 @@ class ReduceScanByKey: public Generator<node_id_t> {
}
private:
gpu_gpair* hSums;
gpu_gpair* dSums;
gpu_gpair* hScans;
gpu_gpair* dScans;
bst_gpair* hSums;
bst_gpair* dSums;
bst_gpair* hScans;
bst_gpair* dScans;
int outSize;
int nSegments;
int* hOffsets;


@ -47,20 +47,20 @@ void testSmallData() {
updateHostPtr<float>(tmpVal, builder.vals.current(), builder.nVals);
int* tmpInst = new int[builder.nVals];
updateHostPtr<int>(tmpInst, builder.instIds.current(), builder.nVals);
gpu_gpair* tmpGrad = new gpu_gpair[builder.nRows];
updateHostPtr<gpu_gpair>(tmpGrad, builder.gradsInst.data(), builder.nRows);
bst_gpair* tmpGrad = new bst_gpair[builder.nRows];
updateHostPtr<bst_gpair>(tmpGrad, builder.gradsInst.data(), builder.nRows);
EXPECT_EQ(0, tmpInst[0]);
EXPECT_FLOAT_EQ(1.f, tmpVal[0]);
EXPECT_FLOAT_EQ(1.f+(float)(tmpInst[0]%10), get(0, tmpGrad, tmpInst).g);
EXPECT_FLOAT_EQ(.5f+(float)(tmpInst[0]%10), get(0, tmpGrad, tmpInst).h);
EXPECT_FLOAT_EQ(1.f+(float)(tmpInst[0]%10), get(0, tmpGrad, tmpInst).grad);
EXPECT_FLOAT_EQ(.5f+(float)(tmpInst[0]%10), get(0, tmpGrad, tmpInst).hess);
EXPECT_EQ(2, tmpInst[1]);
EXPECT_FLOAT_EQ(1.f, tmpVal[1]);
EXPECT_FLOAT_EQ(1.f+(float)(tmpInst[1]%10), get(1, tmpGrad, tmpInst).g);
EXPECT_FLOAT_EQ(.5f+(float)(tmpInst[1]%10), get(1, tmpGrad, tmpInst).h);
EXPECT_FLOAT_EQ(1.f+(float)(tmpInst[1]%10), get(1, tmpGrad, tmpInst).grad);
EXPECT_FLOAT_EQ(.5f+(float)(tmpInst[1]%10), get(1, tmpGrad, tmpInst).hess);
EXPECT_EQ(7, tmpInst[2]);
EXPECT_FLOAT_EQ(1.f, tmpVal[2]);
EXPECT_FLOAT_EQ(1.f+(float)(tmpInst[2]%10), get(2, tmpGrad, tmpInst).g);
EXPECT_FLOAT_EQ(.5f+(float)(tmpInst[2]%10), get(2, tmpGrad, tmpInst).h);
EXPECT_FLOAT_EQ(1.f+(float)(tmpInst[2]%10), get(2, tmpGrad, tmpInst).grad);
EXPECT_FLOAT_EQ(.5f+(float)(tmpInst[2]%10), get(2, tmpGrad, tmpInst).hess);
delete [] tmpGrad;
delete [] tmpOff;
delete [] tmpInst;
@ -106,22 +106,22 @@ void testLargeData() {
updateHostPtr<float>(tmpVal, builder.vals.current(), builder.nVals);
int* tmpInst = new int[builder.nVals];
updateHostPtr<int>(tmpInst, builder.instIds.current(), builder.nVals);
gpu_gpair* tmpGrad = new gpu_gpair[builder.nRows];
updateHostPtr<gpu_gpair>(tmpGrad, builder.gradsInst.data(), builder.nRows);
bst_gpair* tmpGrad = new bst_gpair[builder.nRows];
updateHostPtr<bst_gpair>(tmpGrad, builder.gradsInst.data(), builder.nRows);
// the order of observations is messed up before the convertToCsc call!
// hence, the instance IDs have been manually checked and put here.
EXPECT_EQ(1164, tmpInst[0]);
EXPECT_FLOAT_EQ(1.f, tmpVal[0]);
EXPECT_FLOAT_EQ(1.f+(float)(tmpInst[0]%10), get(0, tmpGrad, tmpInst).g);
EXPECT_FLOAT_EQ(.5f+(float)(tmpInst[0]%10), get(0, tmpGrad, tmpInst).h);
EXPECT_FLOAT_EQ(1.f+(float)(tmpInst[0]%10), get(0, tmpGrad, tmpInst).grad);
EXPECT_FLOAT_EQ(.5f+(float)(tmpInst[0]%10), get(0, tmpGrad, tmpInst).hess);
EXPECT_EQ(1435, tmpInst[1]);
EXPECT_FLOAT_EQ(1.f, tmpVal[1]);
EXPECT_FLOAT_EQ(1.f+(float)(tmpInst[1]%10), get(1, tmpGrad, tmpInst).g);
EXPECT_FLOAT_EQ(.5f+(float)(tmpInst[1]%10), get(1, tmpGrad, tmpInst).h);
EXPECT_FLOAT_EQ(1.f+(float)(tmpInst[1]%10), get(1, tmpGrad, tmpInst).grad);
EXPECT_FLOAT_EQ(.5f+(float)(tmpInst[1]%10), get(1, tmpGrad, tmpInst).hess);
EXPECT_EQ(1421, tmpInst[2]);
EXPECT_FLOAT_EQ(1.f, tmpVal[2]);
EXPECT_FLOAT_EQ(1.f+(float)(tmpInst[2]%10), get(2, tmpGrad, tmpInst).g);
EXPECT_FLOAT_EQ(.5f+(float)(tmpInst[2]%10), get(2, tmpGrad, tmpInst).h);
EXPECT_FLOAT_EQ(1.f+(float)(tmpInst[2]%10), get(2, tmpGrad, tmpInst).grad);
EXPECT_FLOAT_EQ(.5f+(float)(tmpInst[2]%10), get(2, tmpGrad, tmpInst).hess);
delete [] tmpGrad;
delete [] tmpOff;
delete [] tmpInst;
@ -164,17 +164,17 @@ void testAllocate() {
EXPECT_FALSE(n[i].isUnused());
}
}
gpu_gpair sum;
sum.g = 0.f;
sum.h = 0.f;
bst_gpair sum;
sum.grad = 0.f;
sum.hess = 0.f;
for (int i = 0; i < builder.maxNodes; ++i) {
if (!n[i].isUnused()) {
sum += n[i].gradSum;
}
}
// law of conservation of gradients! :)
EXPECT_FLOAT_EQ(2.f*n[0].gradSum.g, sum.g);
EXPECT_FLOAT_EQ(2.f*n[0].gradSum.h, sum.h);
EXPECT_FLOAT_EQ(2.f*n[0].gradSum.grad, sum.grad);
EXPECT_FLOAT_EQ(2.f*n[0].gradSum.hess, sum.hess);
node_id_t* assigns = new node_id_t[builder.nVals];
int* offsets = new int[builder.nCols+1];
updateHostPtr<node_id_t>(assigns, builder.nodeAssigns.current(),
@ -199,8 +199,8 @@ TEST(CudaGPUBuilderTest, AllocateNodeDataInt32) {
template <typename node_id_t>
void assign(Node<node_id_t> *n, float g, float h, float sc, float wt,
DefaultDirection d, float th, int c, int i) {
n->gradSum.g = g;
n->gradSum.h = h;
n->gradSum.grad = g;
n->gradSum.hess = h;
n->score = sc;
n->weight = wt;
n->dir = d;
@ -290,7 +290,7 @@ void testDense2Sparse() {
updateDevicePtr<Node<node_id_t> >(builder.nodes.data(), hNodes, builder.maxNodes);
builder.markLeaves();
RegTree tree;
builder.dense2sparse(tree);
builder.dense2sparse(&tree);
EXPECT_EQ(9, tree.param.num_nodes);
delete [] hNodes;
}


@ -16,7 +16,6 @@
#pragma once
#include <random>
#include "../../src/exact/gradients.cuh"
#include <memory>
#include <string>
#include <xgboost/data.h>
@ -95,8 +94,8 @@ protected:
int size;
T* hKeys;
T* dKeys;
gpu_gpair* hVals;
gpu_gpair* dVals;
bst_gpair* hVals;
bst_gpair* dVals;
std::string testName;
int* dColIds;
int* hColIds;
@ -132,17 +131,17 @@ protected:
}
}
void compare(gpu_gpair* exp, gpu_gpair* dAct, size_t len) {
gpu_gpair* act = new gpu_gpair[len];
updateHostPtr<gpu_gpair>(act, dAct, len);
void compare(bst_gpair* exp, bst_gpair* dAct, size_t len) {
bst_gpair* act = new bst_gpair[len];
updateHostPtr<bst_gpair>(act, dAct, len);
for (size_t i=0;i<len;++i) {
bool isSmall;
float ratioG = diffRatio(exp[i].g, act[i].g, isSmall);
float ratioH = diffRatio(exp[i].h, act[i].h, isSmall);
float ratioG = diffRatio(exp[i].grad, act[i].grad, isSmall);
float ratioH = diffRatio(exp[i].hess, act[i].hess, isSmall);
float thresh = isSmall? SuperSmallThresh : Thresh;
if ((ratioG >= Thresh) || (ratioH >= Thresh)) {
printf("(exp) %f %f -> (act) %f %f : rG=%f rH=%f th=%f @%lu\n",
exp[i].g, exp[i].h, act[i].g, act[i].h, ratioG, ratioH,
exp[i].grad, exp[i].hess, act[i].grad, act[i].hess, ratioG, ratioH,
thresh, i);
}
ASSERT_TRUE(ratioG < thresh);
@ -168,12 +167,12 @@ protected:
}
void generateVals() {
hVals = new gpu_gpair[size];
hVals = new bst_gpair[size];
for (size_t i=0;i<size;++i) {
hVals[i].g = randVal(-1.f, 1.f);
hVals[i].h = randVal(-1.f, 1.f);
hVals[i].grad = randVal(-1.f, 1.f);
hVals[i].hess = randVal(-1.f, 1.f);
}
allocateAndUpdateOnGpu<gpu_gpair>(dVals, hVals, size);
allocateAndUpdateOnGpu<bst_gpair>(dVals, hVals, size);
}
void sortKeyValues() {
@ -186,7 +185,7 @@ protected:
dh::safe_cuda(cub::DeviceRadixSort::SortPairs(tmpStorage, tmpSize, dKeys,
dKeys, dVals, dVals, size));
dh::safe_cuda(cudaFree(storage));
updateHostPtr<gpu_gpair>(hVals, dVals, size);
updateHostPtr<bst_gpair>(hVals, dVals, size);
updateHostPtr<T>(hKeys, dKeys, size);
}


@ -8,9 +8,9 @@ TEST(Metric, RMSE) {
ASSERT_STREQ(metric->Name(), "rmse");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0, 1e-10);
EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9},
{0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}),
0.6403, 0.001);
0.6403f, 0.001f);
}
TEST(Metric, MAE) {
@ -18,9 +18,9 @@ TEST(Metric, MAE) {
ASSERT_STREQ(metric->Name(), "mae");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0, 1e-10);
EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9},
{0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}),
0.5, 0.001);
0.5f, 0.001f);
}
TEST(Metric, LogLoss) {
@ -28,9 +28,9 @@ TEST(Metric, LogLoss) {
ASSERT_STREQ(metric->Name(), "logloss");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0, 1e-10);
EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9},
{0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}),
1.2039, 0.001);
1.2039f, 0.001f);
}
TEST(Metric, Error) {
@ -38,13 +38,13 @@ TEST(Metric, Error) {
ASSERT_STREQ(metric->Name(), "error");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0, 1e-10);
EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9},
{0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}),
0.5, 0.001);
0.5f, 0.001f);
EXPECT_ANY_THROW(xgboost::Metric::Create("error@abc"));
delete metric;
metric = xgboost::Metric::Create("error@0.5");
metric = xgboost::Metric::Create("error@0.5f");
EXPECT_STREQ(metric->Name(), "error");
delete metric;
@ -53,17 +53,17 @@ TEST(Metric, Error) {
EXPECT_STREQ(metric->Name(), "error@0.1");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0, 1e-10);
EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.2, 0.1, 0.2},
{0.1f, 0.2f, 0.1f, 0.2f},
{ 0, 0, 1, 1}),
0.5, 0.001);
0.5f, 0.001f);
}
TEST(Metric, PoissionNegLogLik) {
xgboost::Metric * metric = xgboost::Metric::Create("poisson-nloglik");
ASSERT_STREQ(metric->Name(), "poisson-nloglik");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0.5, 1e-10);
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0.5f, 1e-10);
EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.2, 0.1, 0.2},
{0.1f, 0.2f, 0.1f, 0.2f},
{ 0, 0, 1, 1}),
1.1280, 0.001);
1.1280f, 0.001f);
}


@ -7,5 +7,5 @@ TEST(Metric, UnknownMetric) {
EXPECT_ANY_THROW(xgboost::Metric::Create("unknown_name"));
EXPECT_NO_THROW(xgboost::Metric::Create("rmse"));
EXPECT_ANY_THROW(xgboost::Metric::Create("unknown_name@1"));
EXPECT_NO_THROW(xgboost::Metric::Create("error@0.5"));
EXPECT_NO_THROW(xgboost::Metric::Create("error@0.5f"));
}


@ -5,18 +5,18 @@
TEST(Metric, AMS) {
EXPECT_ANY_THROW(xgboost::Metric::Create("ams"));
xgboost::Metric * metric = xgboost::Metric::Create("ams@0.5");
xgboost::Metric * metric = xgboost::Metric::Create("ams@0.5f");
ASSERT_STREQ(metric->Name(), "ams@0.5");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0.311, 0.001);
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0.311f, 0.001f);
EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9},
{0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}),
0.29710, 0.001);
0.29710f, 0.001f);
delete metric;
metric = xgboost::Metric::Create("ams@0");
ASSERT_STREQ(metric->Name(), "ams@0");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0.311, 0.001);
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0.311f, 0.001f);
}
TEST(Metric, AUC) {
@ -24,9 +24,9 @@ TEST(Metric, AUC) {
ASSERT_STREQ(metric->Name(), "auc");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 1, 1e-10);
EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9},
{0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}),
0.5, 0.001);
0.5f, 0.001f);
EXPECT_ANY_THROW(GetMetricEval(metric, {0, 1}, {}));
EXPECT_ANY_THROW(GetMetricEval(metric, {0, 0}, {0, 0}));
}
@ -39,18 +39,18 @@ TEST(Metric, Precision) {
ASSERT_STREQ(metric->Name(), "pre");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0, 1e-7);
EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9},
{0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}),
0, 1e-7);
delete metric;
metric = xgboost::Metric::Create("pre@2");
ASSERT_STREQ(metric->Name(), "pre@2");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0.5, 1e-7);
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0.5f, 1e-7);
EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9},
{0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}),
0.5, 0.001);
0.5f, 0.001f);
EXPECT_ANY_THROW(GetMetricEval(metric, {0, 1}, {}));
}
@ -62,18 +62,18 @@ TEST(Metric, NDCG) {
EXPECT_NEAR(GetMetricEval(metric, {}, {}), 1, 1e-10);
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 1, 1e-10);
EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9},
{0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}),
0.6509, 0.001);
0.6509f, 0.001f);
delete metric;
metric = xgboost::Metric::Create("ndcg@2");
ASSERT_STREQ(metric->Name(), "ndcg@2");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 1, 1e-10);
EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9},
{0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}),
0.3868, 0.001);
0.3868f, 0.001f);
delete metric;
metric = xgboost::Metric::Create("ndcg@-");
@ -81,18 +81,18 @@ TEST(Metric, NDCG) {
EXPECT_NEAR(GetMetricEval(metric, {}, {}), 0, 1e-10);
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 1, 1e-10);
EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9},
{0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}),
0.6509, 0.001);
0.6509f, 0.001f);
delete metric;
metric = xgboost::Metric::Create("ndcg@2-");
ASSERT_STREQ(metric->Name(), "ndcg@2-");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 1, 1e-10);
EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9},
{0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}),
0.3868, 0.001);
0.3868f, 0.001f);
}
TEST(Metric, MAP) {
@ -100,9 +100,9 @@ TEST(Metric, MAP) {
ASSERT_STREQ(metric->Name(), "map");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 1, 1e-10);
EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9},
{0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}),
0.5, 0.001);
0.5f, 0.001f);
EXPECT_NEAR(GetMetricEval(metric, {}, {}), 1, 1e-10);
delete metric;
@ -115,7 +115,7 @@ TEST(Metric, MAP) {
ASSERT_STREQ(metric->Name(), "map@2");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 1, 1e-10);
EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9},
{0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}),
0.25, 0.001);
0.25f, 0.001f);
}


@ -10,9 +10,9 @@ TEST(Metric, MultiClassError) {
EXPECT_NEAR(GetMetricEval(
metric, {1, 0, 0, 0, 1, 0, 0, 0, 1}, {0, 1, 2}), 0, 1e-10);
EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1},
{0.1f, 0.1f, 0.1f, 0.1f, 0.1f, 0.1f, 0.1f, 0.1f, 0.1f},
{0, 1, 2}),
0.666, 0.001);
0.666f, 0.001f);
}
TEST(Metric, MultiClassLogLoss) {
@ -22,7 +22,7 @@ TEST(Metric, MultiClassLogLoss) {
EXPECT_NEAR(GetMetricEval(
metric, {1, 0, 0, 0, 1, 0, 0, 0, 1}, {0, 1, 2}), 0, 1e-10);
EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1},
{0.1f, 0.1f, 0.1f, 0.1f, 0.1f, 0.1f, 0.1f, 0.1f, 0.1f},
{0, 1, 2}),
2.302, 0.001);
2.302f, 0.001f);
}


@ -8,10 +8,10 @@ TEST(Objective, LinearRegressionGPair) {
std::vector<std::pair<std::string, std::string> > args;
obj->Configure(args);
CheckObjFunction(obj,
{0, 0.1, 0.9, 1, 0, 0.1, 0.9, 1},
{0, 0.1f, 0.9f, 1, 0, 0.1f, 0.9f, 1},
{0, 0, 0, 0, 1, 1, 1, 1},
{1, 1, 1, 1, 1, 1, 1, 1},
{0, 0.1, 0.9, 1.0, -1.0, -0.9, -0.1, 0},
{0, 0.1f, 0.9f, 1.0f, -1.0f, -0.9f, -0.1f, 0},
{1, 1, 1, 1, 1, 1, 1, 1});
ASSERT_NO_THROW(obj->DefaultEvalMetric());
@ -22,11 +22,11 @@ TEST(Objective, LogisticRegressionGPair) {
std::vector<std::pair<std::string, std::string> > args;
obj->Configure(args);
CheckObjFunction(obj,
{ 0, 0.1, 0.9, 1, 0, 0.1, 0.9, 1},
{ 0, 0.1f, 0.9f, 1, 0, 0.1f, 0.9f, 1},
{ 0, 0, 0, 0, 1, 1, 1, 1},
{ 1, 1, 1, 1, 1, 1, 1, 1},
{ 0.5, 0.52, 0.71, 0.73, -0.5, -0.47, -0.28, -0.26},
{0.25, 0.24, 0.20, 0.19, 0.25, 0.24, 0.20, 0.19});
{ 0.5f, 0.52f, 0.71f, 0.73f, -0.5f, -0.47f, -0.28f, -0.26f},
{0.25f, 0.24f, 0.20f, 0.19f, 0.25f, 0.24f, 0.20f, 0.19f});
}
TEST(Objective, LogisticRegressionBasic) {
@ -36,21 +36,21 @@ TEST(Objective, LogisticRegressionBasic) {
// test label validation
EXPECT_ANY_THROW(CheckObjFunction(obj, {0}, {10}, {1}, {0}, {0}))
<< "Expected error when label not in range [0,1] for LogisticRegression";
<< "Expected error when label not in range [0,1f] for LogisticRegression";
// test ProbToMargin
EXPECT_NEAR(obj->ProbToMargin(0.1), -2.197, 0.01);
EXPECT_NEAR(obj->ProbToMargin(0.5), 0, 0.01);
EXPECT_NEAR(obj->ProbToMargin(0.9), 2.197, 0.01);
EXPECT_NEAR(obj->ProbToMargin(0.1f), -2.197f, 0.01f);
EXPECT_NEAR(obj->ProbToMargin(0.5f), 0, 0.01f);
EXPECT_NEAR(obj->ProbToMargin(0.9f), 2.197f, 0.01f);
EXPECT_ANY_THROW(obj->ProbToMargin(10))
<< "Expected error when base_score not in range [0,1] for LogisticRegression";
<< "Expected error when base_score not in range [0,1f] for LogisticRegression";
// test PredTransform
std::vector<xgboost::bst_float> preds = {0, 0.1, 0.5, 0.9, 1};
std::vector<xgboost::bst_float> out_preds = {0.5, 0.524, 0.622, 0.710, 0.731};
std::vector<xgboost::bst_float> preds = {0, 0.1f, 0.5f, 0.9f, 1};
std::vector<xgboost::bst_float> out_preds = {0.5f, 0.524f, 0.622f, 0.710f, 0.731f};
obj->PredTransform(&preds);
for (int i = 0; i < static_cast<int>(preds.size()); ++i) {
EXPECT_NEAR(preds[i], out_preds[i], 0.01);
EXPECT_NEAR(preds[i], out_preds[i], 0.01f);
}
}
@ -59,24 +59,24 @@ TEST(Objective, LogisticRawGPair) {
std::vector<std::pair<std::string, std::string> > args;
obj->Configure(args);
CheckObjFunction(obj,
{ 0, 0.1, 0.9, 1, 0, 0.1, 0.9, 1},
{ 0, 0.1f, 0.9f, 1, 0, 0.1f, 0.9f, 1},
{ 0, 0, 0, 0, 1, 1, 1, 1},
{ 1, 1, 1, 1, 1, 1, 1, 1},
{ 0.5, 0.52, 0.71, 0.73, -0.5, -0.47, -0.28, -0.26},
{0.25, 0.24, 0.20, 0.19, 0.25, 0.24, 0.20, 0.19});
{ 0.5f, 0.52f, 0.71f, 0.73f, -0.5f, -0.47f, -0.28f, -0.26f},
{0.25f, 0.24f, 0.20f, 0.19f, 0.25f, 0.24f, 0.20f, 0.19f});
}
TEST(Objective, PoissonRegressionGPair) {
xgboost::ObjFunction * obj = xgboost::ObjFunction::Create("count:poisson");
std::vector<std::pair<std::string, std::string> > args;
args.push_back(std::make_pair("max_delta_step", "0.1"));
args.push_back(std::make_pair("max_delta_step", "0.1f"));
obj->Configure(args);
CheckObjFunction(obj,
{ 0, 0.1, 0.9, 1, 0, 0.1, 0.9, 1},
{ 0, 0.1f, 0.9f, 1, 0, 0.1f, 0.9f, 1},
{ 0, 0, 0, 0, 1, 1, 1, 1},
{ 1, 1, 1, 1, 1, 1, 1, 1},
{ 1, 1.10, 2.45, 2.71, 0, 0.10, 1.45, 1.71},
{1.10, 1.22, 2.71, 3.00, 1.10, 1.22, 2.71, 3.00});
{ 1, 1.10f, 2.45f, 2.71f, 0, 0.10f, 1.45f, 1.71f},
{1.10f, 1.22f, 2.71f, 3.00f, 1.10f, 1.22f, 2.71f, 3.00f});
}
TEST(Objective, PoissonRegressionBasic) {
@ -89,16 +89,16 @@ TEST(Objective, PoissonRegressionBasic) {
<< "Expected error when label < 0 for PoissonRegression";
// test ProbToMargin
EXPECT_NEAR(obj->ProbToMargin(0.1), -2.30, 0.01);
EXPECT_NEAR(obj->ProbToMargin(0.5), -0.69, 0.01);
EXPECT_NEAR(obj->ProbToMargin(0.9), -0.10, 0.01);
EXPECT_NEAR(obj->ProbToMargin(0.1f), -2.30f, 0.01f);
EXPECT_NEAR(obj->ProbToMargin(0.5f), -0.69f, 0.01f);
EXPECT_NEAR(obj->ProbToMargin(0.9f), -0.10f, 0.01f);
// test PredTransform
std::vector<xgboost::bst_float> preds = {0, 0.1, 0.5, 0.9, 1};
std::vector<xgboost::bst_float> out_preds = {1, 1.10, 1.64, 2.45, 2.71};
std::vector<xgboost::bst_float> preds = {0, 0.1f, 0.5f, 0.9f, 1};
std::vector<xgboost::bst_float> out_preds = {1, 1.10f, 1.64f, 2.45f, 2.71f};
obj->PredTransform(&preds);
for (int i = 0; i < static_cast<int>(preds.size()); ++i) {
EXPECT_NEAR(preds[i], out_preds[i], 0.01);
EXPECT_NEAR(preds[i], out_preds[i], 0.01f);
}
}
@ -107,11 +107,11 @@ TEST(Objective, GammaRegressionGPair) {
std::vector<std::pair<std::string, std::string> > args;
obj->Configure(args);
CheckObjFunction(obj,
{0, 0.1, 0.9, 1, 0, 0.1, 0.9, 1},
{0, 0.1f, 0.9f, 1, 0, 0.1f, 0.9f, 1},
{0, 0, 0, 0, 1, 1, 1, 1},
{1, 1, 1, 1, 1, 1, 1, 1},
{1, 1, 1, 1, 0, 0.09, 0.59, 0.63},
{0, 0, 0, 0, 1, 0.90, 0.40, 0.36});
{1, 1, 1, 1, 0, 0.09f, 0.59f, 0.63f},
{0, 0, 0, 0, 1, 0.90f, 0.40f, 0.36f});
}
TEST(Objective, GammaRegressionBasic) {
@ -124,30 +124,30 @@ TEST(Objective, GammaRegressionBasic) {
<< "Expected error when label < 0 for GammaRegression";
// test ProbToMargin
EXPECT_NEAR(obj->ProbToMargin(0.1), -2.30, 0.01);
EXPECT_NEAR(obj->ProbToMargin(0.5), -0.69, 0.01);
EXPECT_NEAR(obj->ProbToMargin(0.9), -0.10, 0.01);
EXPECT_NEAR(obj->ProbToMargin(0.1f), -2.30f, 0.01f);
EXPECT_NEAR(obj->ProbToMargin(0.5f), -0.69f, 0.01f);
EXPECT_NEAR(obj->ProbToMargin(0.9f), -0.10f, 0.01f);
// test PredTransform
std::vector<xgboost::bst_float> preds = {0, 0.1, 0.5, 0.9, 1};
std::vector<xgboost::bst_float> out_preds = {1, 1.10, 1.64, 2.45, 2.71};
std::vector<xgboost::bst_float> preds = {0, 0.1f, 0.5f, 0.9f, 1};
std::vector<xgboost::bst_float> out_preds = {1, 1.10f, 1.64f, 2.45f, 2.71f};
obj->PredTransform(&preds);
for (int i = 0; i < static_cast<int>(preds.size()); ++i) {
EXPECT_NEAR(preds[i], out_preds[i], 0.01);
EXPECT_NEAR(preds[i], out_preds[i], 0.01f);
}
}
TEST(Objective, TweedieRegressionGPair) {
xgboost::ObjFunction * obj = xgboost::ObjFunction::Create("reg:tweedie");
std::vector<std::pair<std::string, std::string> > args;
args.push_back(std::make_pair("tweedie_variance_power", "1.1"));
args.push_back(std::make_pair("tweedie_variance_power", "1.1f"));
obj->Configure(args);
CheckObjFunction(obj,
{ 0, 0.1, 0.9, 1, 0, 0.1, 0.9, 1},
{ 0, 0.1f, 0.9f, 1, 0, 0.1f, 0.9f, 1},
{ 0, 0, 0, 0, 1, 1, 1, 1},
{ 1, 1, 1, 1, 1, 1, 1, 1},
{ 1, 1.09, 2.24, 2.45, 0, 0.10, 1.33, 1.55},
{0.89, 0.98, 2.02, 2.21, 1, 1.08, 2.11, 2.30});
{ 1, 1.09f, 2.24f, 2.45f, 0, 0.10f, 1.33f, 1.55f},
{0.89f, 0.98f, 2.02f, 2.21f, 1, 1.08f, 2.11f, 2.30f});
}
TEST(Objective, TweedieRegressionBasic) {
@ -160,15 +160,15 @@ TEST(Objective, TweedieRegressionBasic) {
<< "Expected error when label < 0 for TweedieRegression";
// test ProbToMargin
EXPECT_NEAR(obj->ProbToMargin(0.1), 0.10, 0.01);
EXPECT_NEAR(obj->ProbToMargin(0.5), 0.5, 0.01);
EXPECT_NEAR(obj->ProbToMargin(0.9), 0.89, 0.01);
EXPECT_NEAR(obj->ProbToMargin(0.1f), 0.10f, 0.01f);
EXPECT_NEAR(obj->ProbToMargin(0.5f), 0.5f, 0.01f);
EXPECT_NEAR(obj->ProbToMargin(0.9f), 0.89f, 0.01f);
// test PredTransform
std::vector<xgboost::bst_float> preds = {0, 0.1, 0.5, 0.9, 1};
std::vector<xgboost::bst_float> out_preds = {1, 1.10, 1.64, 2.45, 2.71};
std::vector<xgboost::bst_float> preds = {0, 0.1f, 0.5f, 0.9f, 1};
std::vector<xgboost::bst_float> out_preds = {1, 1.10f, 1.64f, 2.45f, 2.71f};
obj->PredTransform(&preds);
for (int i = 0; i < static_cast<int>(preds.size()); ++i) {
EXPECT_NEAR(preds[i], out_preds[i], 0.01);
EXPECT_NEAR(preds[i], out_preds[i], 0.01f);
}
}