Cmake improvements (#2487)

* Cmake improvements
* Add google test to cmake
Rory Mitchell 2017-07-06 18:05:11 +12:00 committed by GitHub
parent 8ceeb32bad
commit e939192978
13 changed files with 291 additions and 257 deletions
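
The options and cache variables that control the build after this change, condensed from the CMakeLists.txt diff below; they are selected at configure time, for example with cmake .. -DGOOGLE_TEST=ON -DPLUGIN_UPDATER_GPU=ON:

option(PLUGIN_UPDATER_GPU "Build GPU accelerated tree construction plugin")
option(JVM_BINDINGS "Build JVM bindings" OFF)
option(GOOGLE_TEST "Build google tests" OFF)
set(GPU_COMPUTE_VER 35;50;52;60;61 CACHE STRING
    "Space separated list of compute versions to be built against")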

@ -1,59 +1,32 @@
cmake_minimum_required (VERSION 3.2) cmake_minimum_required (VERSION 3.2)
project (xgboost) project(xgboost)
include(cmake/Utils.cmake)
find_package(OpenMP) find_package(OpenMP)
set_default_configuration_release()
msvc_use_static_runtime()
# Options
option(PLUGIN_UPDATER_GPU "Build GPU accelerated tree construction plugin") option(PLUGIN_UPDATER_GPU "Build GPU accelerated tree construction plugin")
if(PLUGIN_UPDATER_GPU) option(JVM_BINDINGS "Build JVM bindings" OFF)
cmake_minimum_required (VERSION 3.5) option(GOOGLE_TEST "Build google tests" OFF)
endif() set(GPU_COMPUTE_VER 35;50;52;60;61 CACHE STRING
"Space separated list of compute versions to be built against")
# Compiler flags
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
if(NOT MSVC)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -O3 -funroll-loops -msse2 -D_MWAITXINTRIN_H_INCLUDED -D_FORCE_INLINES")
endif()
# Make sure we are using C++11
# Visual Studio 12.0 and newer supports enough c++11 to make this work
if(MSVC) if(MSVC)
if(MSVC_VERSION LESS 1800) # Multithreaded compilation
message(STATUS "The compiler ${CMAKE_CXX_COMPILER} has no C++11 support. Please use a different C++ compiler.") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP")
endif()
else() else()
# GCC 4.6 with c++0x supports enough to make this work # Correct error for GCC 5 and cuda
include(CheckCXXCompilerFlag) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_MWAITXINTRIN_H_INCLUDED -D_FORCE_INLINES")
CHECK_CXX_COMPILER_FLAG("-std=c++11" COMPILER_SUPPORTS_CXX11) # Performance
CHECK_CXX_COMPILER_FLAG("-std=c++0x" COMPILER_SUPPORTS_CXX0X) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -funroll-loops")
set(STD_FLAG "")
if(COMPILER_SUPPORTS_CXX11)
set(STD_FLAG "-std=c++11")
elseif(COMPILER_SUPPORTS_CXX0X)
set(STD_FLAG "-std=c++0x")
else()
message(STATUS "The compiler ${CMAKE_CXX_COMPILER} has no C++11 support. Please use a different C++ compiler.")
endif()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${STD_FLAG}")
endif()
#Make sure we are using the static runtime
if(MSVC)
set(variables
CMAKE_C_FLAGS_DEBUG
CMAKE_C_FLAGS_MINSIZEREL
CMAKE_C_FLAGS_RELEASE
CMAKE_C_FLAGS_RELWITHDEBINFO
CMAKE_CXX_FLAGS_DEBUG
CMAKE_CXX_FLAGS_MINSIZEREL
CMAKE_CXX_FLAGS_RELEASE
CMAKE_CXX_FLAGS_RELWITHDEBINFO
)
foreach(variable ${variables})
if(${variable} MATCHES "/MD")
string(REGEX REPLACE "/MD" "/MT" ${variable} "${${variable}}")
endif()
endforeach()
endif() endif()
include_directories ( include_directories (
@ -62,17 +35,14 @@ include_directories (
${PROJECT_SOURCE_DIR}/rabit/include ${PROJECT_SOURCE_DIR}/rabit/include
) )
file(GLOB SOURCES file(GLOB_RECURSE SOURCES
src/c_api/*.cc
src/common/*.cc
src/data/*.cc
src/gbm/*.cc
src/metric/*.cc
src/objective/*.cc
src/tree/*.cc
src/*.cc src/*.cc
src/*.h
) )
# Only add main function for executable target
list(REMOVE_ITEM SOURCES ${PROJECT_SOURCE_DIR}/src/cli_main.cc)
# TODO: Create rabit cmakelists.txt
set(RABIT_SOURCES set(RABIT_SOURCES
rabit/src/allreduce_base.cc rabit/src/allreduce_base.cc
rabit/src/allreduce_robust.cc rabit/src/allreduce_robust.cc
@ -80,81 +50,59 @@ set(RABIT_SOURCES
rabit/src/c_api.cc rabit/src/c_api.cc
) )
file(GLOB CUDA_SOURCES file(GLOB_RECURSE CUDA_SOURCES
plugin/updater_gpu/src/*.cu plugin/updater_gpu/src/*.cu
plugin/updater_gpu/src/exact/*.cu plugin/updater_gpu/src/*.cuh
) )
add_subdirectory(dmlc-core) add_subdirectory(dmlc-core)
add_library(rabit STATIC ${RABIT_SOURCES})
#Set library output directories
if(MSVC)
#With MSVC shared library is considered runtime
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_RELEASE ${PROJECT_SOURCE_DIR}/lib)
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_RELEASE ${PROJECT_SOURCE_DIR}/lib)
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_DEBUG ${PROJECT_SOURCE_DIR}/lib)
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_DEBUG ${PROJECT_SOURCE_DIR}/lib)
else()
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_SOURCE_DIR}/lib)
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${PROJECT_SOURCE_DIR})
endif()
set(LINK_LIBRARIES dmlccore rabit) set(LINK_LIBRARIES dmlccore rabit)
# GPU Plugin
if(PLUGIN_UPDATER_GPU) if(PLUGIN_UPDATER_GPU)
find_package(CUDA REQUIRED) find_package(CUDA 7.5 REQUIRED)
cmake_minimum_required(VERSION 3.5)
# nccl
add_subdirectory(nccl) add_subdirectory(nccl)
set(NCCL_DIRECTORY ${PROJECT_SOURCE_DIR}/nccl)
include_directories(${NCCL_DIRECTORY}/src)
#Find cub
set(CUB_DIRECTORY ${PROJECT_SOURCE_DIR}/cub/)
include_directories(${CUB_DIRECTORY})
#Find googletest
set(GTEST_DIRECTORY "${CACHE_PREFIX}" CACHE PATH "Googletest directory")
include_directories(${GTEST_DIRECTORY}/include)
#gencode flags
set(GPU_COMPUTE_VER 35;50;52;60;61 CACHE STRING
"Space separated list of compute versions to be built against")
include_directories(
nccl/src
cub
)
set(GENCODE_FLAGS "") set(GENCODE_FLAGS "")
foreach(ver ${GPU_COMPUTE_VER}) format_gencode_flags("${GPU_COMPUTE_VER}" GENCODE_FLAGS)
set(GENCODE_FLAGS "${GENCODE_FLAGS}-gencode arch=compute_${ver},code=sm_${ver};")
endforeach()
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS};--expt-extended-lambda;${GENCODE_FLAGS};-lineinfo;") set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS};--expt-extended-lambda;${GENCODE_FLAGS};-lineinfo;")
if(NOT MSVC) if(NOT MSVC)
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS};-Xcompiler -fPIC") set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS};-Xcompiler -fPIC; -std=c++11")
endif() endif()
cuda_add_library(gpuxgboost ${CUDA_SOURCES} STATIC) cuda_add_library(gpuxgboost ${CUDA_SOURCES} STATIC)
target_link_libraries(gpuxgboost nccl) target_link_libraries(gpuxgboost nccl)
list(APPEND LINK_LIBRARIES gpuxgboost) list(APPEND LINK_LIBRARIES gpuxgboost)
list(APPEND SOURCES plugin/updater_gpu/src/register_updater_gpu.cc) list(APPEND SOURCES plugin/updater_gpu/src/register_updater_gpu.cc)
else()
set(CUDA_OBJS "")
endif() endif()
add_library(objxgboost OBJECT ${SOURCES}) add_library(rabit STATIC ${RABIT_SOURCES})
set_target_properties(${objxgboost} PROPERTIES POSITION_INDEPENDENT_CODE 1)
add_executable(runxgboost $<TARGET_OBJECTS:objxgboost>) add_library(objxgboost OBJECT ${SOURCES})
set_target_properties(runxgboost PROPERTIES OUTPUT_NAME xgboost)
# Executable
add_executable(runxgboost $<TARGET_OBJECTS:objxgboost> src/cli_main.cc)
set_target_properties(runxgboost PROPERTIES
OUTPUT_NAME xgboost
)
set_output_directory(runxgboost ${PROJECT_SOURCE_DIR})
target_link_libraries(runxgboost ${LINK_LIBRARIES}) target_link_libraries(runxgboost ${LINK_LIBRARIES})
# Shared library
add_library(xgboost SHARED $<TARGET_OBJECTS:objxgboost>) add_library(xgboost SHARED $<TARGET_OBJECTS:objxgboost>)
target_link_libraries(xgboost ${LINK_LIBRARIES}) target_link_libraries(xgboost ${LINK_LIBRARIES})
set_output_directory(xgboost ${PROJECT_SOURCE_DIR}/lib)
#Ensure these two targets do not build simultaneously, as they produce outputs with conflicting names #Ensure these two targets do not build simultaneously, as they produce outputs with conflicting names
add_dependencies(xgboost runxgboost) add_dependencies(xgboost runxgboost)
option(JVM_BINDINGS "Build JVM bindings" OFF) # JVM
if(JVM_BINDINGS) if(JVM_BINDINGS)
find_package(JNI QUIET REQUIRED) find_package(JNI QUIET REQUIRED)
@ -163,7 +111,35 @@ if(JVM_BINDINGS)
add_library(xgboost4j SHARED add_library(xgboost4j SHARED
$<TARGET_OBJECTS:objxgboost> $<TARGET_OBJECTS:objxgboost>
jvm-packages/xgboost4j/src/native/xgboost4j.cpp) jvm-packages/xgboost4j/src/native/xgboost4j.cpp)
set_output_directory(xgboost4j ${PROJECT_SOURCE_DIR}/lib)
target_link_libraries(xgboost4j target_link_libraries(xgboost4j
${LINK_LIBRARIES} ${LINK_LIBRARIES}
${JAVA_JVM_LIBRARY}) ${JAVA_JVM_LIBRARY})
endif() endif()
# Test
if(GOOGLE_TEST)
enable_testing()
find_package(GTest REQUIRED)
file(GLOB_RECURSE TEST_SOURCES "tests/cpp/*.cc")
auto_source_group("${TEST_SOURCES}")
include_directories(${GTEST_INCLUDE_DIRS})
if(PLUGIN_UPDATER_GPU)
file(GLOB_RECURSE CUDA_TEST_SOURCES "plugin/updater_gpu/test/cpp/*.cu")
set(CUDA_VERBOSE_BUILD ON)
cuda_compile(CUDA_TEST_OBJS ${CUDA_TEST_SOURCES})
else()
set(CUDA_TEST_OBJS "")
endif()
add_executable(testxgboost ${TEST_SOURCES} ${CUDA_TEST_OBJS} $<TARGET_OBJECTS:objxgboost>)
set_output_directory(testxgboost ${PROJECT_SOURCE_DIR})
target_link_libraries(testxgboost ${GTEST_BOTH_LIBRARIES} ${LINK_LIBRARIES})
add_test(TestXGBoost testxgboost)
endif()
# Group sources
auto_source_group("${SOURCES}")
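
The restructuring above relies on compiling the library sources once into an OBJECT library (objxgboost) and reusing those objects for the command-line binary, the shared library, the optional xgboost4j binding and the test runner. A minimal sketch of that CMake pattern, with hypothetical target and file names:

add_library(core_objs OBJECT a.cc b.cc)                       # compile the sources once
add_executable(cli_tool $<TARGET_OBJECTS:core_objs> main.cc)  # reuse the objects plus a main()
add_library(shared_lib SHARED $<TARGET_OBJECTS:core_objs>)    # same objects, no recompilation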

@ -20,9 +20,9 @@ before_build:
- mkdir build2013 - mkdir build2013
- mkdir build2015 - mkdir build2015
- cd build2013 - cd build2013
- cmake .. -G"Visual Studio 12 2013 Win64" - cmake .. -G"Visual Studio 12 2013 Win64" -DCMAKE_CONFIGURATION_TYPES="Release;Debug;"
- cd ../build2015 - cd ../build2015
- cmake .. -G"Visual Studio 14 2015 Win64" - cmake .. -G"Visual Studio 14 2015 Win64" -DCMAKE_CONFIGURATION_TYPES="Release;Debug;"
build_script: build_script:
- cd %APPVEYOR_BUILD_FOLDER% - cd %APPVEYOR_BUILD_FOLDER%

cmake/Utils.cmake (new file, 61 lines)

@ -0,0 +1,61 @@
# Automatically set source group based on folder
function(auto_source_group SOURCES)
  foreach(FILE ${SOURCES})
    get_filename_component(PARENT_DIR "${FILE}" PATH)

    # Strip the current list directory prefix and change /'s to \\'s
    string(REPLACE "${CMAKE_CURRENT_LIST_DIR}" "" GROUP "${PARENT_DIR}")
    string(REPLACE "/" "\\\\" GROUP "${GROUP}")
    string(REGEX REPLACE "^\\\\" "" GROUP "${GROUP}")

    source_group("${GROUP}" FILES "${FILE}")
  endforeach()
endfunction(auto_source_group)

# Force static runtime for MSVC
function(msvc_use_static_runtime)
  if(MSVC)
    set(variables
      CMAKE_CXX_FLAGS_DEBUG
      CMAKE_CXX_FLAGS_MINSIZEREL
      CMAKE_CXX_FLAGS_RELEASE
      CMAKE_CXX_FLAGS_RELWITHDEBINFO
    )
    foreach(variable ${variables})
      if(${variable} MATCHES "/MD")
        string(REGEX REPLACE "/MD" "/MT" ${variable} "${${variable}}")
        set(${variable} "${${variable}}" PARENT_SCOPE)
      endif()
    endforeach()
  endif()
endfunction(msvc_use_static_runtime)

# Set output directory of target, ignoring debug or release
function(set_output_directory target dir)
  set_target_properties(${target} PROPERTIES
    RUNTIME_OUTPUT_DIRECTORY ${dir}
    RUNTIME_OUTPUT_DIRECTORY_DEBUG ${dir}
    RUNTIME_OUTPUT_DIRECTORY_RELEASE ${dir}
    LIBRARY_OUTPUT_DIRECTORY ${dir}
    LIBRARY_OUTPUT_DIRECTORY_DEBUG ${dir}
    LIBRARY_OUTPUT_DIRECTORY_RELEASE ${dir}
  )
endfunction(set_output_directory)

# Set a default build type to Release if none was specified
function(set_default_configuration_release)
  if(CMAKE_CONFIGURATION_TYPES STREQUAL "Debug;Release;MinSizeRel;RelWithDebInfo") # multi-config generator?
    set(CMAKE_CONFIGURATION_TYPES Release CACHE STRING "" FORCE)
  elseif(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
    message(STATUS "Setting build type to 'Release' as none was specified.")
    set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build." FORCE)
  endif()
endfunction(set_default_configuration_release)

# Build nvcc -gencode flags for every compute version in the list and return
# them through the variable named by 'out' in the caller's scope
function(format_gencode_flags flags out)
  set(gencode_flags "${${out}}")
  foreach(ver ${flags})
    set(gencode_flags "${gencode_flags}-gencode arch=compute_${ver},code=sm_${ver};")
  endforeach()
  set(${out} "${gencode_flags}" PARENT_SCOPE)
endfunction(format_gencode_flags)
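
A brief usage sketch for format_gencode_flags: called with a semicolon-separated list of compute capabilities, it accumulates one -gencode clause per entry into the named output variable, which CMakeLists.txt then appends to CUDA_NVCC_FLAGS. Illustrative values:

set(GENCODE_FLAGS "")
format_gencode_flags("35;61" GENCODE_FLAGS)
# GENCODE_FLAGS is now "-gencode arch=compute_35,code=sm_35;-gencode arch=compute_61,code=sm_61;"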

@ -86,7 +86,7 @@ if __name__ == "__main__":
args = ["-D{0}:BOOL={1}".format(k, v) for k, v in CONFIG.items()] args = ["-D{0}:BOOL={1}".format(k, v) for k, v in CONFIG.items()]
run("cmake .. " + " ".join(args) + maybe_generator) run("cmake .. " + " ".join(args) + maybe_generator)
run("cmake --build .") run("cmake --build . --config Release")
with cd("demo/regression"): with cd("demo/regression"):
run(sys.executable + " mapfeat.py") run(sys.executable + " mapfeat.py")

@ -13,11 +13,9 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#include "gtest/gtest.h" #include <gtest/gtest.h>
#include "../../src/exact/argmax_by_key.cuh" #include "../../src/exact/argmax_by_key.cuh"
#include "../../src/exact/gradients.cuh"
#include "../../src/exact/node.cuh" #include "../../src/exact/node.cuh"
#include "../../src/exact/loss_functions.cuh"
#include "utils.cuh" #include "utils.cuh"
@ -56,26 +54,26 @@ void argMaxTest(ArgMaxByKeyAlgo algo) {
const int nVals = 1024; const int nVals = 1024;
const int level = 0; const int level = 0;
const int nKeys = 1 << level; const int nKeys = 1 << level;
gpu_gpair* scans = new gpu_gpair[nVals]; bst_gpair* scans = new bst_gpair[nVals];
float* vals = new float[nVals]; float* vals = new float[nVals];
int* colIds = new int[nVals]; int* colIds = new int[nVals];
scans[0] = gpu_gpair(); scans[0] = bst_gpair();
vals[0] = 0.f; vals[0] = 0.f;
colIds[0] = 0; colIds[0] = 0;
for (int i = 1; i < nVals; ++i) { for (int i = 1; i < nVals; ++i) {
scans[i].g = scans[i-1].g + (0.1f * 2.f); scans[i].grad = scans[i-1].grad + (0.1f * 2.f);
scans[i].h = scans[i-1].h + (0.1f * 2.f); scans[i].hess = scans[i-1].hess + (0.1f * 2.f);
vals[i] = static_cast<float>(i) * 0.1f; vals[i] = static_cast<float>(i) * 0.1f;
colIds[i] = 0; colIds[i] = 0;
} }
float* dVals; float* dVals;
allocateAndUpdateOnGpu<float>(dVals, vals, nVals); allocateAndUpdateOnGpu<float>(dVals, vals, nVals);
gpu_gpair* dScans; bst_gpair* dScans;
allocateAndUpdateOnGpu<gpu_gpair>(dScans, scans, nVals); allocateAndUpdateOnGpu<bst_gpair>(dScans, scans, nVals);
gpu_gpair* sums = new gpu_gpair[nKeys]; bst_gpair* sums = new bst_gpair[nKeys];
sums[0].g = sums[0].h = (0.1f * 2.f * nVals); sums[0].grad = sums[0].hess = (0.1f * 2.f * nVals);
gpu_gpair* dSums; bst_gpair* dSums;
allocateAndUpdateOnGpu<gpu_gpair>(dSums, sums, nKeys); allocateAndUpdateOnGpu<bst_gpair>(dSums, sums, nKeys);
int* dColIds; int* dColIds;
allocateAndUpdateOnGpu<int>(dColIds, colIds, nVals); allocateAndUpdateOnGpu<int>(dColIds, colIds, nVals);
Split* splits = new Split[nKeys]; Split* splits = new Split[nKeys];
@ -93,7 +91,7 @@ void argMaxTest(ArgMaxByKeyAlgo algo) {
param.reg_alpha = 0.f; param.reg_alpha = 0.f;
param.reg_lambda = 2.f; param.reg_lambda = 2.f;
param.max_delta_step = 0.f; param.max_delta_step = 0.f;
nodes[0].score = CalcGain(param, sums[0].g, sums[0].h); nodes[0].score = CalcGain(param, sums[0].grad, sums[0].hess);
Node<node_id_t>* dNodes; Node<node_id_t>* dNodes;
allocateAndUpdateOnGpu<Node<node_id_t> >(dNodes, nodes, nKeys); allocateAndUpdateOnGpu<Node<node_id_t> >(dNodes, nodes, nKeys);
argMaxByKey<node_id_t>(dSplits, dScans, dSums, dVals, dColIds, dNodeAssigns, argMaxByKey<node_id_t>(dSplits, dScans, dSums, dVals, dColIds, dNodeAssigns,

@ -31,16 +31,16 @@ class ReduceScanByKey: public Generator<node_id_t> {
hSums(nullptr), dSums(nullptr), hScans(nullptr), dScans(nullptr), hSums(nullptr), dSums(nullptr), hScans(nullptr), dScans(nullptr),
outSize(this->size), nSegments(this->nKeys*this->nCols), outSize(this->size), nSegments(this->nKeys*this->nCols),
hOffsets(nullptr), dOffsets(nullptr) { hOffsets(nullptr), dOffsets(nullptr) {
hSums = new gpu_gpair[nSegments]; hSums = new bst_gpair[nSegments];
allocateOnGpu<gpu_gpair>(dSums, nSegments); allocateOnGpu<bst_gpair>(dSums, nSegments);
hScans = new gpu_gpair[outSize]; hScans = new bst_gpair[outSize];
allocateOnGpu<gpu_gpair>(dScans, outSize); allocateOnGpu<bst_gpair>(dScans, outSize);
gpu_gpair* buckets = new gpu_gpair[nSegments]; bst_gpair* buckets = new bst_gpair[nSegments];
for (int i = 0; i < nSegments; i++) { for (int i = 0; i < nSegments; i++) {
buckets[i] = gpu_gpair(); buckets[i] = bst_gpair();
} }
for (int i = 0; i < nSegments; i++) { for (int i = 0; i < nSegments; i++) {
hSums[i] = gpu_gpair(); hSums[i] = bst_gpair();
} }
for (size_t i = 0; i < this->size; i++) { for (size_t i = 0; i < this->size; i++) {
if (this->hKeys[i] >= 0 && this->hKeys[i] < nSegments) { if (this->hKeys[i] >= 0 && this->hKeys[i] < nSegments) {
@ -77,10 +77,10 @@ class ReduceScanByKey: public Generator<node_id_t> {
} }
void run() { void run() {
gpu_gpair* tmpScans; bst_gpair* tmpScans;
int* tmpKeys; int* tmpKeys;
int tmpSize = scanTempBufferSize(this->size); int tmpSize = scanTempBufferSize(this->size);
allocateOnGpu<gpu_gpair>(tmpScans, tmpSize); allocateOnGpu<bst_gpair>(tmpScans, tmpSize);
allocateOnGpu<int>(tmpKeys, tmpSize); allocateOnGpu<int>(tmpKeys, tmpSize);
TIMEIT(reduceScanByKey<node_id_t> TIMEIT(reduceScanByKey<node_id_t>
(dSums, dScans, this->dVals, this->dInstIds, this->dKeys, (dSums, dScans, this->dVals, this->dInstIds, this->dKeys,
@ -94,10 +94,10 @@ class ReduceScanByKey: public Generator<node_id_t> {
} }
private: private:
gpu_gpair* hSums; bst_gpair* hSums;
gpu_gpair* dSums; bst_gpair* dSums;
gpu_gpair* hScans; bst_gpair* hScans;
gpu_gpair* dScans; bst_gpair* dScans;
int outSize; int outSize;
int nSegments; int nSegments;
int* hOffsets; int* hOffsets;

@ -47,20 +47,20 @@ void testSmallData() {
updateHostPtr<float>(tmpVal, builder.vals.current(), builder.nVals); updateHostPtr<float>(tmpVal, builder.vals.current(), builder.nVals);
int* tmpInst = new int[builder.nVals]; int* tmpInst = new int[builder.nVals];
updateHostPtr<int>(tmpInst, builder.instIds.current(), builder.nVals); updateHostPtr<int>(tmpInst, builder.instIds.current(), builder.nVals);
gpu_gpair* tmpGrad = new gpu_gpair[builder.nRows]; bst_gpair* tmpGrad = new bst_gpair[builder.nRows];
updateHostPtr<gpu_gpair>(tmpGrad, builder.gradsInst.data(), builder.nRows); updateHostPtr<bst_gpair>(tmpGrad, builder.gradsInst.data(), builder.nRows);
EXPECT_EQ(0, tmpInst[0]); EXPECT_EQ(0, tmpInst[0]);
EXPECT_FLOAT_EQ(1.f, tmpVal[0]); EXPECT_FLOAT_EQ(1.f, tmpVal[0]);
EXPECT_FLOAT_EQ(1.f+(float)(tmpInst[0]%10), get(0, tmpGrad, tmpInst).g); EXPECT_FLOAT_EQ(1.f+(float)(tmpInst[0]%10), get(0, tmpGrad, tmpInst).grad);
EXPECT_FLOAT_EQ(.5f+(float)(tmpInst[0]%10), get(0, tmpGrad, tmpInst).h); EXPECT_FLOAT_EQ(.5f+(float)(tmpInst[0]%10), get(0, tmpGrad, tmpInst).hess);
EXPECT_EQ(2, tmpInst[1]); EXPECT_EQ(2, tmpInst[1]);
EXPECT_FLOAT_EQ(1.f, tmpVal[1]); EXPECT_FLOAT_EQ(1.f, tmpVal[1]);
EXPECT_FLOAT_EQ(1.f+(float)(tmpInst[1]%10), get(1, tmpGrad, tmpInst).g); EXPECT_FLOAT_EQ(1.f+(float)(tmpInst[1]%10), get(1, tmpGrad, tmpInst).grad);
EXPECT_FLOAT_EQ(.5f+(float)(tmpInst[1]%10), get(1, tmpGrad, tmpInst).h); EXPECT_FLOAT_EQ(.5f+(float)(tmpInst[1]%10), get(1, tmpGrad, tmpInst).hess);
EXPECT_EQ(7, tmpInst[2]); EXPECT_EQ(7, tmpInst[2]);
EXPECT_FLOAT_EQ(1.f, tmpVal[2]); EXPECT_FLOAT_EQ(1.f, tmpVal[2]);
EXPECT_FLOAT_EQ(1.f+(float)(tmpInst[2]%10), get(2, tmpGrad, tmpInst).g); EXPECT_FLOAT_EQ(1.f+(float)(tmpInst[2]%10), get(2, tmpGrad, tmpInst).grad);
EXPECT_FLOAT_EQ(.5f+(float)(tmpInst[2]%10), get(2, tmpGrad, tmpInst).h); EXPECT_FLOAT_EQ(.5f+(float)(tmpInst[2]%10), get(2, tmpGrad, tmpInst).hess);
delete [] tmpGrad; delete [] tmpGrad;
delete [] tmpOff; delete [] tmpOff;
delete [] tmpInst; delete [] tmpInst;
@ -106,22 +106,22 @@ void testLargeData() {
updateHostPtr<float>(tmpVal, builder.vals.current(), builder.nVals); updateHostPtr<float>(tmpVal, builder.vals.current(), builder.nVals);
int* tmpInst = new int[builder.nVals]; int* tmpInst = new int[builder.nVals];
updateHostPtr<int>(tmpInst, builder.instIds.current(), builder.nVals); updateHostPtr<int>(tmpInst, builder.instIds.current(), builder.nVals);
gpu_gpair* tmpGrad = new gpu_gpair[builder.nRows]; bst_gpair* tmpGrad = new bst_gpair[builder.nRows];
updateHostPtr<gpu_gpair>(tmpGrad, builder.gradsInst.data(), builder.nRows); updateHostPtr<bst_gpair>(tmpGrad, builder.gradsInst.data(), builder.nRows);
// the order of observations is messed up before the convertToCsc call! // the order of observations is messed up before the convertToCsc call!
// hence, the instance IDs have been manually checked and put here. // hence, the instance IDs have been manually checked and put here.
EXPECT_EQ(1164, tmpInst[0]); EXPECT_EQ(1164, tmpInst[0]);
EXPECT_FLOAT_EQ(1.f, tmpVal[0]); EXPECT_FLOAT_EQ(1.f, tmpVal[0]);
EXPECT_FLOAT_EQ(1.f+(float)(tmpInst[0]%10), get(0, tmpGrad, tmpInst).g); EXPECT_FLOAT_EQ(1.f+(float)(tmpInst[0]%10), get(0, tmpGrad, tmpInst).grad);
EXPECT_FLOAT_EQ(.5f+(float)(tmpInst[0]%10), get(0, tmpGrad, tmpInst).h); EXPECT_FLOAT_EQ(.5f+(float)(tmpInst[0]%10), get(0, tmpGrad, tmpInst).hess);
EXPECT_EQ(1435, tmpInst[1]); EXPECT_EQ(1435, tmpInst[1]);
EXPECT_FLOAT_EQ(1.f, tmpVal[1]); EXPECT_FLOAT_EQ(1.f, tmpVal[1]);
EXPECT_FLOAT_EQ(1.f+(float)(tmpInst[1]%10), get(1, tmpGrad, tmpInst).g); EXPECT_FLOAT_EQ(1.f+(float)(tmpInst[1]%10), get(1, tmpGrad, tmpInst).grad);
EXPECT_FLOAT_EQ(.5f+(float)(tmpInst[1]%10), get(1, tmpGrad, tmpInst).h); EXPECT_FLOAT_EQ(.5f+(float)(tmpInst[1]%10), get(1, tmpGrad, tmpInst).hess);
EXPECT_EQ(1421, tmpInst[2]); EXPECT_EQ(1421, tmpInst[2]);
EXPECT_FLOAT_EQ(1.f, tmpVal[2]); EXPECT_FLOAT_EQ(1.f, tmpVal[2]);
EXPECT_FLOAT_EQ(1.f+(float)(tmpInst[2]%10), get(2, tmpGrad, tmpInst).g); EXPECT_FLOAT_EQ(1.f+(float)(tmpInst[2]%10), get(2, tmpGrad, tmpInst).grad);
EXPECT_FLOAT_EQ(.5f+(float)(tmpInst[2]%10), get(2, tmpGrad, tmpInst).h); EXPECT_FLOAT_EQ(.5f+(float)(tmpInst[2]%10), get(2, tmpGrad, tmpInst).hess);
delete [] tmpGrad; delete [] tmpGrad;
delete [] tmpOff; delete [] tmpOff;
delete [] tmpInst; delete [] tmpInst;
@ -164,17 +164,17 @@ void testAllocate() {
EXPECT_FALSE(n[i].isUnused()); EXPECT_FALSE(n[i].isUnused());
} }
} }
gpu_gpair sum; bst_gpair sum;
sum.g = 0.f; sum.grad = 0.f;
sum.h = 0.f; sum.hess = 0.f;
for (int i = 0; i < builder.maxNodes; ++i) { for (int i = 0; i < builder.maxNodes; ++i) {
if (!n[i].isUnused()) { if (!n[i].isUnused()) {
sum += n[i].gradSum; sum += n[i].gradSum;
} }
} }
// law of conservation of gradients! :) // law of conservation of gradients! :)
EXPECT_FLOAT_EQ(2.f*n[0].gradSum.g, sum.g); EXPECT_FLOAT_EQ(2.f*n[0].gradSum.grad, sum.grad);
EXPECT_FLOAT_EQ(2.f*n[0].gradSum.h, sum.h); EXPECT_FLOAT_EQ(2.f*n[0].gradSum.hess, sum.hess);
node_id_t* assigns = new node_id_t[builder.nVals]; node_id_t* assigns = new node_id_t[builder.nVals];
int* offsets = new int[builder.nCols+1]; int* offsets = new int[builder.nCols+1];
updateHostPtr<node_id_t>(assigns, builder.nodeAssigns.current(), updateHostPtr<node_id_t>(assigns, builder.nodeAssigns.current(),
@ -199,8 +199,8 @@ TEST(CudaGPUBuilderTest, AllocateNodeDataInt32) {
template <typename node_id_t> template <typename node_id_t>
void assign(Node<node_id_t> *n, float g, float h, float sc, float wt, void assign(Node<node_id_t> *n, float g, float h, float sc, float wt,
DefaultDirection d, float th, int c, int i) { DefaultDirection d, float th, int c, int i) {
n->gradSum.g = g; n->gradSum.grad = g;
n->gradSum.h = h; n->gradSum.hess = h;
n->score = sc; n->score = sc;
n->weight = wt; n->weight = wt;
n->dir = d; n->dir = d;
@ -290,7 +290,7 @@ void testDense2Sparse() {
updateDevicePtr<Node<node_id_t> >(builder.nodes.data(), hNodes, builder.maxNodes); updateDevicePtr<Node<node_id_t> >(builder.nodes.data(), hNodes, builder.maxNodes);
builder.markLeaves(); builder.markLeaves();
RegTree tree; RegTree tree;
builder.dense2sparse(tree); builder.dense2sparse(&tree);
EXPECT_EQ(9, tree.param.num_nodes); EXPECT_EQ(9, tree.param.num_nodes);
delete [] hNodes; delete [] hNodes;
} }

@ -16,7 +16,6 @@
#pragma once #pragma once
#include <random> #include <random>
#include "../../src/exact/gradients.cuh"
#include <memory> #include <memory>
#include <string> #include <string>
#include <xgboost/data.h> #include <xgboost/data.h>
@ -95,8 +94,8 @@ protected:
int size; int size;
T* hKeys; T* hKeys;
T* dKeys; T* dKeys;
gpu_gpair* hVals; bst_gpair* hVals;
gpu_gpair* dVals; bst_gpair* dVals;
std::string testName; std::string testName;
int* dColIds; int* dColIds;
int* hColIds; int* hColIds;
@ -132,17 +131,17 @@ protected:
} }
} }
void compare(gpu_gpair* exp, gpu_gpair* dAct, size_t len) { void compare(bst_gpair* exp, bst_gpair* dAct, size_t len) {
gpu_gpair* act = new gpu_gpair[len]; bst_gpair* act = new bst_gpair[len];
updateHostPtr<gpu_gpair>(act, dAct, len); updateHostPtr<bst_gpair>(act, dAct, len);
for (size_t i=0;i<len;++i) { for (size_t i=0;i<len;++i) {
bool isSmall; bool isSmall;
float ratioG = diffRatio(exp[i].g, act[i].g, isSmall); float ratioG = diffRatio(exp[i].grad, act[i].grad, isSmall);
float ratioH = diffRatio(exp[i].h, act[i].h, isSmall); float ratioH = diffRatio(exp[i].hess, act[i].hess, isSmall);
float thresh = isSmall? SuperSmallThresh : Thresh; float thresh = isSmall? SuperSmallThresh : Thresh;
if ((ratioG >= Thresh) || (ratioH >= Thresh)) { if ((ratioG >= Thresh) || (ratioH >= Thresh)) {
printf("(exp) %f %f -> (act) %f %f : rG=%f rH=%f th=%f @%lu\n", printf("(exp) %f %f -> (act) %f %f : rG=%f rH=%f th=%f @%lu\n",
exp[i].g, exp[i].h, act[i].g, act[i].h, ratioG, ratioH, exp[i].grad, exp[i].hess, act[i].grad, act[i].hess, ratioG, ratioH,
thresh, i); thresh, i);
} }
ASSERT_TRUE(ratioG < thresh); ASSERT_TRUE(ratioG < thresh);
@ -168,12 +167,12 @@ protected:
} }
void generateVals() { void generateVals() {
hVals = new gpu_gpair[size]; hVals = new bst_gpair[size];
for (size_t i=0;i<size;++i) { for (size_t i=0;i<size;++i) {
hVals[i].g = randVal(-1.f, 1.f); hVals[i].grad = randVal(-1.f, 1.f);
hVals[i].h = randVal(-1.f, 1.f); hVals[i].hess = randVal(-1.f, 1.f);
} }
allocateAndUpdateOnGpu<gpu_gpair>(dVals, hVals, size); allocateAndUpdateOnGpu<bst_gpair>(dVals, hVals, size);
} }
void sortKeyValues() { void sortKeyValues() {
@ -186,7 +185,7 @@ protected:
dh::safe_cuda(cub::DeviceRadixSort::SortPairs(tmpStorage, tmpSize, dKeys, dh::safe_cuda(cub::DeviceRadixSort::SortPairs(tmpStorage, tmpSize, dKeys,
dKeys, dVals, dVals, size)); dKeys, dVals, dVals, size));
dh::safe_cuda(cudaFree(storage)); dh::safe_cuda(cudaFree(storage));
updateHostPtr<gpu_gpair>(hVals, dVals, size); updateHostPtr<bst_gpair>(hVals, dVals, size);
updateHostPtr<T>(hKeys, dKeys, size); updateHostPtr<T>(hKeys, dKeys, size);
} }

@ -8,9 +8,9 @@ TEST(Metric, RMSE) {
ASSERT_STREQ(metric->Name(), "rmse"); ASSERT_STREQ(metric->Name(), "rmse");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0, 1e-10); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0, 1e-10);
EXPECT_NEAR(GetMetricEval(metric, EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9}, {0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}), { 0, 0, 1, 1}),
0.6403, 0.001); 0.6403f, 0.001f);
} }
TEST(Metric, MAE) { TEST(Metric, MAE) {
@ -18,9 +18,9 @@ TEST(Metric, MAE) {
ASSERT_STREQ(metric->Name(), "mae"); ASSERT_STREQ(metric->Name(), "mae");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0, 1e-10); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0, 1e-10);
EXPECT_NEAR(GetMetricEval(metric, EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9}, {0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}), { 0, 0, 1, 1}),
0.5, 0.001); 0.5f, 0.001f);
} }
TEST(Metric, LogLoss) { TEST(Metric, LogLoss) {
@ -28,9 +28,9 @@ TEST(Metric, LogLoss) {
ASSERT_STREQ(metric->Name(), "logloss"); ASSERT_STREQ(metric->Name(), "logloss");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0, 1e-10); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0, 1e-10);
EXPECT_NEAR(GetMetricEval(metric, EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9}, {0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}), { 0, 0, 1, 1}),
1.2039, 0.001); 1.2039f, 0.001f);
} }
TEST(Metric, Error) { TEST(Metric, Error) {
@ -38,13 +38,13 @@ TEST(Metric, Error) {
ASSERT_STREQ(metric->Name(), "error"); ASSERT_STREQ(metric->Name(), "error");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0, 1e-10); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0, 1e-10);
EXPECT_NEAR(GetMetricEval(metric, EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9}, {0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}), { 0, 0, 1, 1}),
0.5, 0.001); 0.5f, 0.001f);
EXPECT_ANY_THROW(xgboost::Metric::Create("error@abc")); EXPECT_ANY_THROW(xgboost::Metric::Create("error@abc"));
delete metric; delete metric;
metric = xgboost::Metric::Create("error@0.5"); metric = xgboost::Metric::Create("error@0.5f");
EXPECT_STREQ(metric->Name(), "error"); EXPECT_STREQ(metric->Name(), "error");
delete metric; delete metric;
@ -53,17 +53,17 @@ TEST(Metric, Error) {
EXPECT_STREQ(metric->Name(), "error@0.1"); EXPECT_STREQ(metric->Name(), "error@0.1");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0, 1e-10); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0, 1e-10);
EXPECT_NEAR(GetMetricEval(metric, EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.2, 0.1, 0.2}, {0.1f, 0.2f, 0.1f, 0.2f},
{ 0, 0, 1, 1}), { 0, 0, 1, 1}),
0.5, 0.001); 0.5f, 0.001f);
} }
TEST(Metric, PoissionNegLogLik) { TEST(Metric, PoissionNegLogLik) {
xgboost::Metric * metric = xgboost::Metric::Create("poisson-nloglik"); xgboost::Metric * metric = xgboost::Metric::Create("poisson-nloglik");
ASSERT_STREQ(metric->Name(), "poisson-nloglik"); ASSERT_STREQ(metric->Name(), "poisson-nloglik");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0.5, 1e-10); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0.5f, 1e-10);
EXPECT_NEAR(GetMetricEval(metric, EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.2, 0.1, 0.2}, {0.1f, 0.2f, 0.1f, 0.2f},
{ 0, 0, 1, 1}), { 0, 0, 1, 1}),
1.1280, 0.001); 1.1280f, 0.001f);
} }

@ -7,5 +7,5 @@ TEST(Metric, UnknownMetric) {
EXPECT_ANY_THROW(xgboost::Metric::Create("unknown_name")); EXPECT_ANY_THROW(xgboost::Metric::Create("unknown_name"));
EXPECT_NO_THROW(xgboost::Metric::Create("rmse")); EXPECT_NO_THROW(xgboost::Metric::Create("rmse"));
EXPECT_ANY_THROW(xgboost::Metric::Create("unknown_name@1")); EXPECT_ANY_THROW(xgboost::Metric::Create("unknown_name@1"));
EXPECT_NO_THROW(xgboost::Metric::Create("error@0.5")); EXPECT_NO_THROW(xgboost::Metric::Create("error@0.5f"));
} }

@ -5,18 +5,18 @@
TEST(Metric, AMS) { TEST(Metric, AMS) {
EXPECT_ANY_THROW(xgboost::Metric::Create("ams")); EXPECT_ANY_THROW(xgboost::Metric::Create("ams"));
xgboost::Metric * metric = xgboost::Metric::Create("ams@0.5"); xgboost::Metric * metric = xgboost::Metric::Create("ams@0.5f");
ASSERT_STREQ(metric->Name(), "ams@0.5"); ASSERT_STREQ(metric->Name(), "ams@0.5");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0.311, 0.001); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0.311f, 0.001f);
EXPECT_NEAR(GetMetricEval(metric, EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9}, {0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}), { 0, 0, 1, 1}),
0.29710, 0.001); 0.29710f, 0.001f);
delete metric; delete metric;
metric = xgboost::Metric::Create("ams@0"); metric = xgboost::Metric::Create("ams@0");
ASSERT_STREQ(metric->Name(), "ams@0"); ASSERT_STREQ(metric->Name(), "ams@0");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0.311, 0.001); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0.311f, 0.001f);
} }
TEST(Metric, AUC) { TEST(Metric, AUC) {
@ -24,9 +24,9 @@ TEST(Metric, AUC) {
ASSERT_STREQ(metric->Name(), "auc"); ASSERT_STREQ(metric->Name(), "auc");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 1, 1e-10); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 1, 1e-10);
EXPECT_NEAR(GetMetricEval(metric, EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9}, {0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}), { 0, 0, 1, 1}),
0.5, 0.001); 0.5f, 0.001f);
EXPECT_ANY_THROW(GetMetricEval(metric, {0, 1}, {})); EXPECT_ANY_THROW(GetMetricEval(metric, {0, 1}, {}));
EXPECT_ANY_THROW(GetMetricEval(metric, {0, 0}, {0, 0})); EXPECT_ANY_THROW(GetMetricEval(metric, {0, 0}, {0, 0}));
} }
@ -39,18 +39,18 @@ TEST(Metric, Precision) {
ASSERT_STREQ(metric->Name(), "pre"); ASSERT_STREQ(metric->Name(), "pre");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0, 1e-7); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0, 1e-7);
EXPECT_NEAR(GetMetricEval(metric, EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9}, {0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}), { 0, 0, 1, 1}),
0, 1e-7); 0, 1e-7);
delete metric; delete metric;
metric = xgboost::Metric::Create("pre@2"); metric = xgboost::Metric::Create("pre@2");
ASSERT_STREQ(metric->Name(), "pre@2"); ASSERT_STREQ(metric->Name(), "pre@2");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0.5, 1e-7); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0.5f, 1e-7);
EXPECT_NEAR(GetMetricEval(metric, EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9}, {0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}), { 0, 0, 1, 1}),
0.5, 0.001); 0.5f, 0.001f);
EXPECT_ANY_THROW(GetMetricEval(metric, {0, 1}, {})); EXPECT_ANY_THROW(GetMetricEval(metric, {0, 1}, {}));
} }
@ -62,18 +62,18 @@ TEST(Metric, NDCG) {
EXPECT_NEAR(GetMetricEval(metric, {}, {}), 1, 1e-10); EXPECT_NEAR(GetMetricEval(metric, {}, {}), 1, 1e-10);
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 1, 1e-10); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 1, 1e-10);
EXPECT_NEAR(GetMetricEval(metric, EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9}, {0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}), { 0, 0, 1, 1}),
0.6509, 0.001); 0.6509f, 0.001f);
delete metric; delete metric;
metric = xgboost::Metric::Create("ndcg@2"); metric = xgboost::Metric::Create("ndcg@2");
ASSERT_STREQ(metric->Name(), "ndcg@2"); ASSERT_STREQ(metric->Name(), "ndcg@2");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 1, 1e-10); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 1, 1e-10);
EXPECT_NEAR(GetMetricEval(metric, EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9}, {0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}), { 0, 0, 1, 1}),
0.3868, 0.001); 0.3868f, 0.001f);
delete metric; delete metric;
metric = xgboost::Metric::Create("ndcg@-"); metric = xgboost::Metric::Create("ndcg@-");
@ -81,18 +81,18 @@ TEST(Metric, NDCG) {
EXPECT_NEAR(GetMetricEval(metric, {}, {}), 0, 1e-10); EXPECT_NEAR(GetMetricEval(metric, {}, {}), 0, 1e-10);
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 1, 1e-10); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 1, 1e-10);
EXPECT_NEAR(GetMetricEval(metric, EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9}, {0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}), { 0, 0, 1, 1}),
0.6509, 0.001); 0.6509f, 0.001f);
delete metric; delete metric;
metric = xgboost::Metric::Create("ndcg@2-"); metric = xgboost::Metric::Create("ndcg@2-");
ASSERT_STREQ(metric->Name(), "ndcg@2-"); ASSERT_STREQ(metric->Name(), "ndcg@2-");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 1, 1e-10); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 1, 1e-10);
EXPECT_NEAR(GetMetricEval(metric, EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9}, {0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}), { 0, 0, 1, 1}),
0.3868, 0.001); 0.3868f, 0.001f);
} }
TEST(Metric, MAP) { TEST(Metric, MAP) {
@ -100,9 +100,9 @@ TEST(Metric, MAP) {
ASSERT_STREQ(metric->Name(), "map"); ASSERT_STREQ(metric->Name(), "map");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 1, 1e-10); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 1, 1e-10);
EXPECT_NEAR(GetMetricEval(metric, EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9}, {0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}), { 0, 0, 1, 1}),
0.5, 0.001); 0.5f, 0.001f);
EXPECT_NEAR(GetMetricEval(metric, {}, {}), 1, 1e-10); EXPECT_NEAR(GetMetricEval(metric, {}, {}), 1, 1e-10);
delete metric; delete metric;
@ -115,7 +115,7 @@ TEST(Metric, MAP) {
ASSERT_STREQ(metric->Name(), "map@2"); ASSERT_STREQ(metric->Name(), "map@2");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 1, 1e-10); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 1, 1e-10);
EXPECT_NEAR(GetMetricEval(metric, EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.9, 0.1, 0.9}, {0.1f, 0.9f, 0.1f, 0.9f},
{ 0, 0, 1, 1}), { 0, 0, 1, 1}),
0.25, 0.001); 0.25f, 0.001f);
} }

@ -10,9 +10,9 @@ TEST(Metric, MultiClassError) {
EXPECT_NEAR(GetMetricEval( EXPECT_NEAR(GetMetricEval(
metric, {1, 0, 0, 0, 1, 0, 0, 0, 1}, {0, 1, 2}), 0, 1e-10); metric, {1, 0, 0, 0, 1, 0, 0, 0, 1}, {0, 1, 2}), 0, 1e-10);
EXPECT_NEAR(GetMetricEval(metric, EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1}, {0.1f, 0.1f, 0.1f, 0.1f, 0.1f, 0.1f, 0.1f, 0.1f, 0.1f},
{0, 1, 2}), {0, 1, 2}),
0.666, 0.001); 0.666f, 0.001f);
} }
TEST(Metric, MultiClassLogLoss) { TEST(Metric, MultiClassLogLoss) {
@ -22,7 +22,7 @@ TEST(Metric, MultiClassLogLoss) {
EXPECT_NEAR(GetMetricEval( EXPECT_NEAR(GetMetricEval(
metric, {1, 0, 0, 0, 1, 0, 0, 0, 1}, {0, 1, 2}), 0, 1e-10); metric, {1, 0, 0, 0, 1, 0, 0, 0, 1}, {0, 1, 2}), 0, 1e-10);
EXPECT_NEAR(GetMetricEval(metric, EXPECT_NEAR(GetMetricEval(metric,
{0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1}, {0.1f, 0.1f, 0.1f, 0.1f, 0.1f, 0.1f, 0.1f, 0.1f, 0.1f},
{0, 1, 2}), {0, 1, 2}),
2.302, 0.001); 2.302f, 0.001f);
} }

@ -8,10 +8,10 @@ TEST(Objective, LinearRegressionGPair) {
std::vector<std::pair<std::string, std::string> > args; std::vector<std::pair<std::string, std::string> > args;
obj->Configure(args); obj->Configure(args);
CheckObjFunction(obj, CheckObjFunction(obj,
{0, 0.1, 0.9, 1, 0, 0.1, 0.9, 1}, {0, 0.1f, 0.9f, 1, 0, 0.1f, 0.9f, 1},
{0, 0, 0, 0, 1, 1, 1, 1}, {0, 0, 0, 0, 1, 1, 1, 1},
{1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1},
{0, 0.1, 0.9, 1.0, -1.0, -0.9, -0.1, 0}, {0, 0.1f, 0.9f, 1.0f, -1.0f, -0.9f, -0.1f, 0},
{1, 1, 1, 1, 1, 1, 1, 1}); {1, 1, 1, 1, 1, 1, 1, 1});
ASSERT_NO_THROW(obj->DefaultEvalMetric()); ASSERT_NO_THROW(obj->DefaultEvalMetric());
@ -22,11 +22,11 @@ TEST(Objective, LogisticRegressionGPair) {
std::vector<std::pair<std::string, std::string> > args; std::vector<std::pair<std::string, std::string> > args;
obj->Configure(args); obj->Configure(args);
CheckObjFunction(obj, CheckObjFunction(obj,
{ 0, 0.1, 0.9, 1, 0, 0.1, 0.9, 1}, { 0, 0.1f, 0.9f, 1, 0, 0.1f, 0.9f, 1},
{ 0, 0, 0, 0, 1, 1, 1, 1}, { 0, 0, 0, 0, 1, 1, 1, 1},
{ 1, 1, 1, 1, 1, 1, 1, 1}, { 1, 1, 1, 1, 1, 1, 1, 1},
{ 0.5, 0.52, 0.71, 0.73, -0.5, -0.47, -0.28, -0.26}, { 0.5f, 0.52f, 0.71f, 0.73f, -0.5f, -0.47f, -0.28f, -0.26f},
{0.25, 0.24, 0.20, 0.19, 0.25, 0.24, 0.20, 0.19}); {0.25f, 0.24f, 0.20f, 0.19f, 0.25f, 0.24f, 0.20f, 0.19f});
} }
TEST(Objective, LogisticRegressionBasic) { TEST(Objective, LogisticRegressionBasic) {
@ -36,21 +36,21 @@ TEST(Objective, LogisticRegressionBasic) {
// test label validation // test label validation
EXPECT_ANY_THROW(CheckObjFunction(obj, {0}, {10}, {1}, {0}, {0})) EXPECT_ANY_THROW(CheckObjFunction(obj, {0}, {10}, {1}, {0}, {0}))
<< "Expected error when label not in range [0,1] for LogisticRegression"; << "Expected error when label not in range [0,1f] for LogisticRegression";
// test ProbToMargin // test ProbToMargin
EXPECT_NEAR(obj->ProbToMargin(0.1), -2.197, 0.01); EXPECT_NEAR(obj->ProbToMargin(0.1f), -2.197f, 0.01f);
EXPECT_NEAR(obj->ProbToMargin(0.5), 0, 0.01); EXPECT_NEAR(obj->ProbToMargin(0.5f), 0, 0.01f);
EXPECT_NEAR(obj->ProbToMargin(0.9), 2.197, 0.01); EXPECT_NEAR(obj->ProbToMargin(0.9f), 2.197f, 0.01f);
EXPECT_ANY_THROW(obj->ProbToMargin(10)) EXPECT_ANY_THROW(obj->ProbToMargin(10))
<< "Expected error when base_score not in range [0,1] for LogisticRegression"; << "Expected error when base_score not in range [0,1f] for LogisticRegression";
// test PredTransform // test PredTransform
std::vector<xgboost::bst_float> preds = {0, 0.1, 0.5, 0.9, 1}; std::vector<xgboost::bst_float> preds = {0, 0.1f, 0.5f, 0.9f, 1};
std::vector<xgboost::bst_float> out_preds = {0.5, 0.524, 0.622, 0.710, 0.731}; std::vector<xgboost::bst_float> out_preds = {0.5f, 0.524f, 0.622f, 0.710f, 0.731f};
obj->PredTransform(&preds); obj->PredTransform(&preds);
for (int i = 0; i < static_cast<int>(preds.size()); ++i) { for (int i = 0; i < static_cast<int>(preds.size()); ++i) {
EXPECT_NEAR(preds[i], out_preds[i], 0.01); EXPECT_NEAR(preds[i], out_preds[i], 0.01f);
} }
} }
@ -59,24 +59,24 @@ TEST(Objective, LogisticRawGPair) {
std::vector<std::pair<std::string, std::string> > args; std::vector<std::pair<std::string, std::string> > args;
obj->Configure(args); obj->Configure(args);
CheckObjFunction(obj, CheckObjFunction(obj,
{ 0, 0.1, 0.9, 1, 0, 0.1, 0.9, 1}, { 0, 0.1f, 0.9f, 1, 0, 0.1f, 0.9f, 1},
{ 0, 0, 0, 0, 1, 1, 1, 1}, { 0, 0, 0, 0, 1, 1, 1, 1},
{ 1, 1, 1, 1, 1, 1, 1, 1}, { 1, 1, 1, 1, 1, 1, 1, 1},
{ 0.5, 0.52, 0.71, 0.73, -0.5, -0.47, -0.28, -0.26}, { 0.5f, 0.52f, 0.71f, 0.73f, -0.5f, -0.47f, -0.28f, -0.26f},
{0.25, 0.24, 0.20, 0.19, 0.25, 0.24, 0.20, 0.19}); {0.25f, 0.24f, 0.20f, 0.19f, 0.25f, 0.24f, 0.20f, 0.19f});
} }
TEST(Objective, PoissonRegressionGPair) { TEST(Objective, PoissonRegressionGPair) {
xgboost::ObjFunction * obj = xgboost::ObjFunction::Create("count:poisson"); xgboost::ObjFunction * obj = xgboost::ObjFunction::Create("count:poisson");
std::vector<std::pair<std::string, std::string> > args; std::vector<std::pair<std::string, std::string> > args;
args.push_back(std::make_pair("max_delta_step", "0.1")); args.push_back(std::make_pair("max_delta_step", "0.1f"));
obj->Configure(args); obj->Configure(args);
CheckObjFunction(obj, CheckObjFunction(obj,
{ 0, 0.1, 0.9, 1, 0, 0.1, 0.9, 1}, { 0, 0.1f, 0.9f, 1, 0, 0.1f, 0.9f, 1},
{ 0, 0, 0, 0, 1, 1, 1, 1}, { 0, 0, 0, 0, 1, 1, 1, 1},
{ 1, 1, 1, 1, 1, 1, 1, 1}, { 1, 1, 1, 1, 1, 1, 1, 1},
{ 1, 1.10, 2.45, 2.71, 0, 0.10, 1.45, 1.71}, { 1, 1.10f, 2.45f, 2.71f, 0, 0.10f, 1.45f, 1.71f},
{1.10, 1.22, 2.71, 3.00, 1.10, 1.22, 2.71, 3.00}); {1.10f, 1.22f, 2.71f, 3.00f, 1.10f, 1.22f, 2.71f, 3.00f});
} }
TEST(Objective, PoissonRegressionBasic) { TEST(Objective, PoissonRegressionBasic) {
@ -89,16 +89,16 @@ TEST(Objective, PoissonRegressionBasic) {
<< "Expected error when label < 0 for PoissonRegression"; << "Expected error when label < 0 for PoissonRegression";
// test ProbToMargin // test ProbToMargin
EXPECT_NEAR(obj->ProbToMargin(0.1), -2.30, 0.01); EXPECT_NEAR(obj->ProbToMargin(0.1f), -2.30f, 0.01f);
EXPECT_NEAR(obj->ProbToMargin(0.5), -0.69, 0.01); EXPECT_NEAR(obj->ProbToMargin(0.5f), -0.69f, 0.01f);
EXPECT_NEAR(obj->ProbToMargin(0.9), -0.10, 0.01); EXPECT_NEAR(obj->ProbToMargin(0.9f), -0.10f, 0.01f);
// test PredTransform // test PredTransform
std::vector<xgboost::bst_float> preds = {0, 0.1, 0.5, 0.9, 1}; std::vector<xgboost::bst_float> preds = {0, 0.1f, 0.5f, 0.9f, 1};
std::vector<xgboost::bst_float> out_preds = {1, 1.10, 1.64, 2.45, 2.71}; std::vector<xgboost::bst_float> out_preds = {1, 1.10f, 1.64f, 2.45f, 2.71f};
obj->PredTransform(&preds); obj->PredTransform(&preds);
for (int i = 0; i < static_cast<int>(preds.size()); ++i) { for (int i = 0; i < static_cast<int>(preds.size()); ++i) {
EXPECT_NEAR(preds[i], out_preds[i], 0.01); EXPECT_NEAR(preds[i], out_preds[i], 0.01f);
} }
} }
@ -107,11 +107,11 @@ TEST(Objective, GammaRegressionGPair) {
std::vector<std::pair<std::string, std::string> > args; std::vector<std::pair<std::string, std::string> > args;
obj->Configure(args); obj->Configure(args);
CheckObjFunction(obj, CheckObjFunction(obj,
{0, 0.1, 0.9, 1, 0, 0.1, 0.9, 1}, {0, 0.1f, 0.9f, 1, 0, 0.1f, 0.9f, 1},
{0, 0, 0, 0, 1, 1, 1, 1}, {0, 0, 0, 0, 1, 1, 1, 1},
{1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1},
{1, 1, 1, 1, 0, 0.09, 0.59, 0.63}, {1, 1, 1, 1, 0, 0.09f, 0.59f, 0.63f},
{0, 0, 0, 0, 1, 0.90, 0.40, 0.36}); {0, 0, 0, 0, 1, 0.90f, 0.40f, 0.36f});
} }
TEST(Objective, GammaRegressionBasic) { TEST(Objective, GammaRegressionBasic) {
@ -124,30 +124,30 @@ TEST(Objective, GammaRegressionBasic) {
<< "Expected error when label < 0 for GammaRegression"; << "Expected error when label < 0 for GammaRegression";
// test ProbToMargin // test ProbToMargin
EXPECT_NEAR(obj->ProbToMargin(0.1), -2.30, 0.01); EXPECT_NEAR(obj->ProbToMargin(0.1f), -2.30f, 0.01f);
EXPECT_NEAR(obj->ProbToMargin(0.5), -0.69, 0.01); EXPECT_NEAR(obj->ProbToMargin(0.5f), -0.69f, 0.01f);
EXPECT_NEAR(obj->ProbToMargin(0.9), -0.10, 0.01); EXPECT_NEAR(obj->ProbToMargin(0.9f), -0.10f, 0.01f);
// test PredTransform // test PredTransform
std::vector<xgboost::bst_float> preds = {0, 0.1, 0.5, 0.9, 1}; std::vector<xgboost::bst_float> preds = {0, 0.1f, 0.5f, 0.9f, 1};
std::vector<xgboost::bst_float> out_preds = {1, 1.10, 1.64, 2.45, 2.71}; std::vector<xgboost::bst_float> out_preds = {1, 1.10f, 1.64f, 2.45f, 2.71f};
obj->PredTransform(&preds); obj->PredTransform(&preds);
for (int i = 0; i < static_cast<int>(preds.size()); ++i) { for (int i = 0; i < static_cast<int>(preds.size()); ++i) {
EXPECT_NEAR(preds[i], out_preds[i], 0.01); EXPECT_NEAR(preds[i], out_preds[i], 0.01f);
} }
} }
TEST(Objective, TweedieRegressionGPair) { TEST(Objective, TweedieRegressionGPair) {
xgboost::ObjFunction * obj = xgboost::ObjFunction::Create("reg:tweedie"); xgboost::ObjFunction * obj = xgboost::ObjFunction::Create("reg:tweedie");
std::vector<std::pair<std::string, std::string> > args; std::vector<std::pair<std::string, std::string> > args;
args.push_back(std::make_pair("tweedie_variance_power", "1.1")); args.push_back(std::make_pair("tweedie_variance_power", "1.1f"));
obj->Configure(args); obj->Configure(args);
CheckObjFunction(obj, CheckObjFunction(obj,
{ 0, 0.1, 0.9, 1, 0, 0.1, 0.9, 1}, { 0, 0.1f, 0.9f, 1, 0, 0.1f, 0.9f, 1},
{ 0, 0, 0, 0, 1, 1, 1, 1}, { 0, 0, 0, 0, 1, 1, 1, 1},
{ 1, 1, 1, 1, 1, 1, 1, 1}, { 1, 1, 1, 1, 1, 1, 1, 1},
{ 1, 1.09, 2.24, 2.45, 0, 0.10, 1.33, 1.55}, { 1, 1.09f, 2.24f, 2.45f, 0, 0.10f, 1.33f, 1.55f},
{0.89, 0.98, 2.02, 2.21, 1, 1.08, 2.11, 2.30}); {0.89f, 0.98f, 2.02f, 2.21f, 1, 1.08f, 2.11f, 2.30f});
} }
TEST(Objective, TweedieRegressionBasic) { TEST(Objective, TweedieRegressionBasic) {
@ -160,15 +160,15 @@ TEST(Objective, TweedieRegressionBasic) {
<< "Expected error when label < 0 for TweedieRegression"; << "Expected error when label < 0 for TweedieRegression";
// test ProbToMargin // test ProbToMargin
EXPECT_NEAR(obj->ProbToMargin(0.1), 0.10, 0.01); EXPECT_NEAR(obj->ProbToMargin(0.1f), 0.10f, 0.01f);
EXPECT_NEAR(obj->ProbToMargin(0.5), 0.5, 0.01); EXPECT_NEAR(obj->ProbToMargin(0.5f), 0.5f, 0.01f);
EXPECT_NEAR(obj->ProbToMargin(0.9), 0.89, 0.01); EXPECT_NEAR(obj->ProbToMargin(0.9f), 0.89f, 0.01f);
// test PredTransform // test PredTransform
std::vector<xgboost::bst_float> preds = {0, 0.1, 0.5, 0.9, 1}; std::vector<xgboost::bst_float> preds = {0, 0.1f, 0.5f, 0.9f, 1};
std::vector<xgboost::bst_float> out_preds = {1, 1.10, 1.64, 2.45, 2.71}; std::vector<xgboost::bst_float> out_preds = {1, 1.10f, 1.64f, 2.45f, 2.71f};
obj->PredTransform(&preds); obj->PredTransform(&preds);
for (int i = 0; i < static_cast<int>(preds.size()); ++i) { for (int i = 0; i < static_cast<int>(preds.size()); ++i) {
EXPECT_NEAR(preds[i], out_preds[i], 0.01); EXPECT_NEAR(preds[i], out_preds[i], 0.01f);
} }
} }