enable ROCm on latest XGBoost

commit 55994b1ac7
@@ -2,15 +2,14 @@ cmake_minimum_required(VERSION 3.18 FATAL_ERROR)
 project(xgboost LANGUAGES CXX C VERSION 2.1.0)
 include(cmake/Utils.cmake)
 list(APPEND CMAKE_MODULE_PATH "${xgboost_SOURCE_DIR}/cmake/modules")
-cmake_policy(SET CMP0022 NEW)
-cmake_policy(SET CMP0079 NEW)
-cmake_policy(SET CMP0076 NEW)
-set(CMAKE_POLICY_DEFAULT_CMP0063 NEW)
-cmake_policy(SET CMP0063 NEW)

-if((${CMAKE_VERSION} VERSION_GREATER 3.13) OR (${CMAKE_VERSION} VERSION_EQUAL 3.13))
-  cmake_policy(SET CMP0077 NEW)
-endif()
+# These policies are already set from 3.18, but we still need to set the policy
+# default variables here for lower minimum versions in the submodules.
+set(CMAKE_POLICY_DEFAULT_CMP0063 NEW)
+set(CMAKE_POLICY_DEFAULT_CMP0069 NEW)
+set(CMAKE_POLICY_DEFAULT_CMP0076 NEW)
+set(CMAKE_POLICY_DEFAULT_CMP0077 NEW)
+set(CMAKE_POLICY_DEFAULT_CMP0079 NEW)

 message(STATUS "CMake version ${CMAKE_VERSION}")

@@ -41,6 +40,8 @@ write_version()
 set_default_configuration_release()

 #-- Options
+include(CMakeDependentOption)
+
 ## User options
 option(BUILD_C_DOC "Build documentation for C APIs using Doxygen." OFF)
 option(USE_OPENMP "Build with OpenMP support." ON)

@@ -69,8 +70,24 @@ option(USE_CUDA "Build with GPU acceleration" OFF)
 option(USE_PER_THREAD_DEFAULT_STREAM "Build with per-thread default stream" ON)
 option(USE_NCCL "Build with NCCL to enable distributed GPU support." OFF)
 option(BUILD_WITH_SHARED_NCCL "Build with shared NCCL library." OFF)
+if(USE_CUDA)
+  if(NOT DEFINED CMAKE_CUDA_ARCHITECTURES AND NOT DEFINED ENV{CUDAARCHS})
     set(GPU_COMPUTE_VER "" CACHE STRING
       "Semicolon separated list of compute versions to be built against, e.g. '35;61'")
+  else()
+    # Clear any cached values from previous runs
+    unset(GPU_COMPUTE_VER)
+    unset(GPU_COMPUTE_VER CACHE)
+  endif()
+endif()
+# CUDA device LTO was introduced in CMake v3.25 and requires host LTO to also be enabled, but it can
+# still be explicitly disabled, allowing for LTO on host only, host and device, or neither; device-only
+# LTO is not a supported configuration.
+cmake_dependent_option(USE_CUDA_LTO
+  "Enable link-time optimization for CUDA device code"
+  "${CMAKE_INTERPROCEDURAL_OPTIMIZATION}"
+  "CMAKE_VERSION VERSION_GREATER_EQUAL 3.25;USE_CUDA;CMAKE_INTERPROCEDURAL_OPTIMIZATION"
+  OFF)
 ## HIP
 option(USE_HIP "Build with GPU acceleration" OFF)
 option(USE_RCCL "Build with RCCL to enable distributed GPU support." OFF)

@@ -178,15 +195,24 @@ endif()
 if(USE_CUDA)
   set(USE_OPENMP ON CACHE BOOL "CUDA requires OpenMP" FORCE)
   # `export CXX=' is ignored by CMake CUDA.
-  set(CMAKE_CUDA_HOST_COMPILER ${CMAKE_CXX_COMPILER})
+  if(NOT DEFINED CMAKE_CUDA_HOST_COMPILER AND NOT DEFINED ENV{CUDAHOSTCXX})
+    set(CMAKE_CUDA_HOST_COMPILER ${CMAKE_CXX_COMPILER} CACHE FILEPATH
+      "The compiler executable to use when compiling host code for CUDA or HIP language files.")
+    mark_as_advanced(CMAKE_CUDA_HOST_COMPILER)
   message(STATUS "Configured CUDA host compiler: ${CMAKE_CUDA_HOST_COMPILER}")
+  endif()

+  if(NOT DEFINED CMAKE_CUDA_RUNTIME_LIBRARY)
+    set(CMAKE_CUDA_RUNTIME_LIBRARY Static)
+  endif()
+
   enable_language(CUDA)
   if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_LESS 11.0)
     message(FATAL_ERROR "CUDA version must be at least 11.0!")
   endif()
-  set(GEN_CODE "")
-  format_gencode_flags("${GPU_COMPUTE_VER}" GEN_CODE)
+  if(DEFINED GPU_COMPUTE_VER)
+    compute_cmake_cuda_archs("${GPU_COMPUTE_VER}")
+  endif()
   add_subdirectory(${PROJECT_SOURCE_DIR}/gputreeshap)

   find_package(CUDAToolkit REQUIRED)
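Note on the new USE_CUDA_LTO option: cmake_dependent_option only exposes the option when every condition in its list holds; otherwise the variable is silently forced to the fallback value. A minimal Python mimic of that rule, for readers unfamiliar with it (function and names here are illustrative, not part of the commit):

def cmake_dependent_option(user_value, default, conditions, fallback):
    # Option is only settable when all conditions hold; otherwise it is forced.
    if all(conditions):
        return user_value if user_value is not None else default
    return fallback

# USE_CUDA_LTO defaults to the host IPO setting, but is forced OFF unless
# CMake >= 3.25, USE_CUDA, and CMAKE_INTERPROCEDURAL_OPTIMIZATION all hold.
assert cmake_dependent_option(None, True, [True, True, False], False) is False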
@@ -102,6 +102,7 @@ OBJECTS= \
 	$(PKGROOT)/src/collective/allreduce.o \
 	$(PKGROOT)/src/collective/broadcast.o \
 	$(PKGROOT)/src/collective/comm.o \
+	$(PKGROOT)/src/collective/coll.o \
 	$(PKGROOT)/src/collective/tracker.o \
 	$(PKGROOT)/src/collective/communicator.o \
 	$(PKGROOT)/src/collective/in_memory_communicator.o \
@@ -102,6 +102,7 @@ OBJECTS= \
 	$(PKGROOT)/src/collective/allreduce.o \
 	$(PKGROOT)/src/collective/broadcast.o \
 	$(PKGROOT)/src/collective/comm.o \
+	$(PKGROOT)/src/collective/coll.o \
 	$(PKGROOT)/src/collective/tracker.o \
 	$(PKGROOT)/src/collective/communicator.o \
 	$(PKGROOT)/src/collective/in_memory_communicator.o \
@@ -82,46 +82,35 @@ function(set_default_configuration_release)
   endif()
 endfunction()

-# Generate nvcc compiler flags given a list of architectures
+# Generate CMAKE_CUDA_ARCHITECTURES from a list of architectures
 # Also generates PTX for the most recent architecture for forwards compatibility
-function(format_gencode_flags flags out)
+function(compute_cmake_cuda_archs archs)
   if(CMAKE_CUDA_COMPILER_VERSION MATCHES "^([0-9]+\\.[0-9]+)")
     set(CUDA_VERSION "${CMAKE_MATCH_1}")
   endif()
-  # Set up architecture flags
-  if(NOT flags)
+  list(SORT archs)
+  unset(CMAKE_CUDA_ARCHITECTURES CACHE)
+  set(CMAKE_CUDA_ARCHITECTURES ${archs})
+
+  # Set up defaults based on CUDA version
+  if(NOT CMAKE_CUDA_ARCHITECTURES)
     if(CUDA_VERSION VERSION_GREATER_EQUAL "11.8")
-      set(flags "50;60;70;80;90")
+      set(CMAKE_CUDA_ARCHITECTURES 50 60 70 80 90)
     elseif(CUDA_VERSION VERSION_GREATER_EQUAL "11.0")
-      set(flags "50;60;70;80")
+      set(CMAKE_CUDA_ARCHITECTURES 50 60 70 80)
     elseif(CUDA_VERSION VERSION_GREATER_EQUAL "10.0")
-      set(flags "35;50;60;70")
+      set(CMAKE_CUDA_ARCHITECTURES 35 50 60 70)
     elseif(CUDA_VERSION VERSION_GREATER_EQUAL "9.0")
-      set(flags "35;50;60;70")
+      set(CMAKE_CUDA_ARCHITECTURES 35 50 60 70)
     else()
-      set(flags "35;50;60")
+      set(CMAKE_CUDA_ARCHITECTURES 35 50 60)
     endif()
   endif()

-  if(CMAKE_VERSION VERSION_GREATER_EQUAL "3.18")
-    cmake_policy(SET CMP0104 NEW)
-    list(GET flags -1 latest_arch)
-    list(TRANSFORM flags APPEND "-real")
-    list(APPEND flags ${latest_arch})
-    set(CMAKE_CUDA_ARCHITECTURES ${flags})
+  list(TRANSFORM CMAKE_CUDA_ARCHITECTURES APPEND "-real")
+  list(TRANSFORM CMAKE_CUDA_ARCHITECTURES REPLACE "([0-9]+)-real" "\\0;\\1-virtual" AT -1)
   set(CMAKE_CUDA_ARCHITECTURES "${CMAKE_CUDA_ARCHITECTURES}" PARENT_SCOPE)
   message(STATUS "CMAKE_CUDA_ARCHITECTURES: ${CMAKE_CUDA_ARCHITECTURES}")
-  else()
-    # Generate SASS
-    foreach(ver ${flags})
-      set(${out} "${${out}}--generate-code=arch=compute_${ver},code=sm_${ver};")
-    endforeach()
-    # Generate PTX for last architecture
-    list(GET flags -1 ver)
-    set(${out} "${${out}}--generate-code=arch=compute_${ver},code=compute_${ver};")
-    set(${out} "${${out}}" PARENT_SCOPE)
-    message(STATUS "CUDA GEN_CODE: ${GEN_CODE}")
-  endif()
 endfunction()

 # Set CUDA related flags to target. Must be used after code `format_gencode_flags`.

@@ -129,7 +118,6 @@ function(xgboost_set_cuda_flags target)
   target_compile_options(${target} PRIVATE
     $<$<COMPILE_LANGUAGE:CUDA>:--expt-extended-lambda>
     $<$<COMPILE_LANGUAGE:CUDA>:--expt-relaxed-constexpr>
-    $<$<COMPILE_LANGUAGE:CUDA>:${GEN_CODE}>
     $<$<COMPILE_LANGUAGE:CUDA>:-Xcompiler=${OpenMP_CXX_FLAGS}>
     $<$<COMPILE_LANGUAGE:CUDA>:-Xfatbin=-compress-all>)

@@ -138,10 +126,6 @@ function(xgboost_set_cuda_flags target)
       $<$<COMPILE_LANGUAGE:CUDA>:--default-stream per-thread>)
   endif()

-  if(CMAKE_VERSION VERSION_GREATER_EQUAL "3.18")
-    set_property(TARGET ${target} PROPERTY CUDA_ARCHITECTURES ${CMAKE_CUDA_ARCHITECTURES})
-  endif()
-
   if(FORCE_COLORED_OUTPUT)
     if(FORCE_COLORED_OUTPUT AND (CMAKE_GENERATOR STREQUAL "Ninja") AND
         ((CMAKE_CXX_COMPILER_ID STREQUAL "GNU") OR

@@ -176,9 +160,15 @@ function(xgboost_set_cuda_flags target)

   set_target_properties(${target} PROPERTIES
     CUDA_STANDARD 17
-    CUDA_STANDARD_REQUIRED ON
-    CUDA_SEPARABLE_COMPILATION OFF
-    CUDA_RUNTIME_LIBRARY Static)
+    CUDA_STANDARD_REQUIRED ON)
+  if(USE_CUDA_LTO)
+    set_target_properties(${target} PROPERTIES
+      INTERPROCEDURAL_OPTIMIZATION ON
+      CUDA_SEPARABLE_COMPILATION ON)
+  else()
+    set_target_properties(${target} PROPERTIES
+      CUDA_SEPARABLE_COMPILATION OFF)
+  endif()
 endfunction()

 # Set HIP related flags to target.
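Note: the net effect of compute_cmake_cuda_archs is that every requested architecture gets a "-real" (SASS) entry while only the newest one also gets a "-virtual" (PTX) entry for forward compatibility. A Python sketch of the two list(TRANSFORM ...) steps (illustrative only, not part of the commit):

def cuda_archs(archs):
    # Mimic compute_cmake_cuda_archs: SASS for every arch, PTX for the newest.
    archs = sorted(archs)
    out = [f"{a}-real" for a in archs]                 # APPEND "-real"
    out[-1] = f"{archs[-1]}-real;{archs[-1]}-virtual"  # REPLACE ... AT -1
    return ";".join(out).split(";")

assert cuda_archs([90, 50, 60]) == ["50-real", "60-real", "90-real", "90-virtual"]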
@@ -25,4 +25,3 @@ target_include_directories(xgboost4j
   ${PROJECT_SOURCE_DIR}/rabit/include)

 set_output_directory(xgboost4j ${PROJECT_SOURCE_DIR}/lib)
-target_link_libraries(xgboost4j PRIVATE ${JAVA_JVM_LIBRARY})
@@ -8,6 +8,7 @@ import importlib.util
 import multiprocessing
 import os
 import platform
+import queue
 import socket
 import sys
 import threading
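Note: queue is imported for the reworked run_with_rabit below. An exception raised inside a threading.Thread is otherwise swallowed — Thread.join() does not re-raise — so a failing worker would leave the test silently green. A stripped-down sketch of the pattern, independent of XGBoost:

import queue
import threading

def run_all(workers):
    # Run callables in threads; surface the first failure after join().
    errors: queue.Queue = queue.Queue()

    def guard(fn):
        try:
            fn()
        except Exception as e:  # collect instead of swallowing
            errors.put(e)

    threads = [threading.Thread(target=guard, args=(w,)) for w in workers]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    assert errors.empty(), f"Worker failed: {errors.get()}"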
@@ -942,13 +943,20 @@ def project_root(path: str) -> str:
     return normpath(os.path.join(demo_dir(path), os.path.pardir))


-def run_with_rabit(world_size: int, test_fn: Callable) -> None:
-    tracker = RabitTracker(host_ip="127.0.0.1", n_workers=world_size)
-    tracker.start(world_size)
+def run_with_rabit(
+    world_size: int, test_fn: Callable[..., Any], *args: Any, **kwargs: Any
+) -> None:
+    exception_queue: queue.Queue = queue.Queue()
+
     def run_worker(rabit_env: Dict[str, Union[str, int]]) -> None:
+        try:
             with xgb.collective.CommunicatorContext(**rabit_env):
-            test_fn()
+                test_fn(*args, **kwargs)
+        except Exception as e:  # pylint: disable=broad-except
+            exception_queue.put(e)
+
+    tracker = RabitTracker(host_ip="127.0.0.1", n_workers=world_size)
+    tracker.start(world_size)
+
     workers = []
     for _ in range(world_size):
@@ -957,5 +965,20 @@ def run_with_rabit(world_size: int, test_fn: Callable) -> None:
         worker.start()
     for worker in workers:
         worker.join()
+    assert exception_queue.empty(), f"Worker failed: {exception_queue.get()}"
+
     tracker.join()
+
+
+def column_split_feature_names(
+    feature_names: List[Union[str, int]], world_size: int
+) -> List[str]:
+    """Get the global list of feature names from the local feature names."""
+    return [
+        f"{rank}.{feature}" for rank in range(world_size) for feature in feature_names
+    ]
+
+
+def is_windows() -> bool:
+    """Check if the current platform is Windows."""
+    return platform.system() == "Windows"
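Note: with the widened signature, extra positional and keyword arguments are forwarded to test_fn inside the communicator context, so parameterized tests no longer need a closure per case. A hypothetical call (check_allreduce is a made-up test function, not part of the commit):

from xgboost import testing as tm

def check_allreduce(op: str, expected: float) -> None:
    # runs under xgb.collective.CommunicatorContext on every worker
    ...

# invoked as check_allreduce("sum", expected=6.0) on each of the 3 workers
tm.run_with_rabit(3, check_allreduce, "sum", expected=6.0)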
@@ -3,7 +3,7 @@
  */
 #include "allgather.h"

-#include <algorithm>  // for min, copy_n
+#include <algorithm>  // for min, copy_n, fill_n
 #include <cstddef>    // for size_t
 #include <cstdint>    // for int8_t, int32_t, int64_t
 #include <memory>     // for shared_ptr

@@ -45,6 +45,7 @@ Result RingAllgather(Comm const& comm, common::Span<std::int8_t> data, std::size

 [[nodiscard]] Result RingAllgatherV(Comm const& comm, common::Span<std::int64_t const> sizes,
                                     common::Span<std::int8_t const> data,
+                                    common::Span<std::int64_t> offset,
                                     common::Span<std::int8_t> erased_result) {
   auto world = comm.World();
   auto rank = comm.Rank();

@@ -56,7 +57,8 @@ Result RingAllgather(Comm const& comm, common::Span<std::int8_t> data, std::size
   auto next_ch = comm.Chan(next);

   // get worker offset
-  std::vector<std::int64_t> offset(world + 1, 0);
+  CHECK_EQ(world + 1, offset.size());
+  std::fill_n(offset.data(), offset.size(), 0);
   std::partial_sum(sizes.cbegin(), sizes.cend(), offset.begin() + 1);
   CHECK_EQ(*offset.cbegin(), 0);
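Note: RingAllgatherV derives each worker's start offset from the per-worker sizes with an exclusive prefix sum (std::partial_sum writing from offset.begin() + 1). The same computation in numpy terms, for intuition:

import numpy as np

sizes = np.array([3, 5, 2], dtype=np.int64)  # bytes contributed per worker
offset = np.zeros(len(sizes) + 1, dtype=np.int64)
offset[1:] = np.cumsum(sizes)                # exclusive prefix sum
# worker i's data lands in [offset[i], offset[i + 1]) of the gathered buffer
assert offset.tolist() == [0, 3, 8, 10]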
@@ -26,6 +26,7 @@ namespace cpu_impl {

 [[nodiscard]] Result RingAllgatherV(Comm const& comm, common::Span<std::int64_t const> sizes,
                                     common::Span<std::int8_t const> data,
+                                    common::Span<std::int64_t> offset,
                                     common::Span<std::int8_t> erased_result);
 }  // namespace cpu_impl

@@ -66,7 +67,9 @@ template <typename T>
   auto h_result = common::Span{result.data(), result.size()};
   auto erased_result = EraseType(h_result);
   auto erased_data = EraseType(data);
+  std::vector<std::int64_t> offset(world + 1);

-  return cpu_impl::RingAllgatherV(comm, sizes, erased_data, erased_result);
+  return cpu_impl::RingAllgatherV(comm, sizes, erased_data,
+                                  common::Span{offset.data(), offset.size()}, erased_result);
 }
 }  // namespace xgboost::collective
src/collective/coll.cc (new file, 75 lines)
@@ -0,0 +1,75 @@
+/**
+ * Copyright 2023, XGBoost Contributors
+ */
+#include "coll.h"
+
+#include <algorithm>   // for min, max
+#include <cstddef>     // for size_t
+#include <cstdint>     // for int8_t, int64_t
+#include <functional>  // for bit_and, bit_or, bit_xor, plus
+
+#include "allgather.h"        // for RingAllgatherV, RingAllgather
+#include "allreduce.h"        // for Allreduce
+#include "broadcast.h"        // for Broadcast
+#include "comm.h"             // for Comm
+#include "xgboost/context.h"  // for Context
+
+namespace xgboost::collective {
+[[nodiscard]] Result Coll::Allreduce(Context const*, Comm const& comm,
+                                     common::Span<std::int8_t> data, ArrayInterfaceHandler::Type,
+                                     Op op) {
+  namespace coll = ::xgboost::collective;
+
+  auto redop_fn = [](auto lhs, auto out, auto elem_op) {
+    auto p_lhs = lhs.data();
+    auto p_out = out.data();
+    for (std::size_t i = 0; i < lhs.size(); ++i) {
+      p_out[i] = elem_op(p_lhs[i], p_out[i]);
+    }
+  };
+  auto fn = [&](auto elem_op) {
+    return coll::Allreduce(
+        comm, data, [redop_fn, elem_op](auto lhs, auto rhs) { redop_fn(lhs, rhs, elem_op); });
+  };
+
+  switch (op) {
+    case Op::kMax: {
+      return fn([](auto l, auto r) { return std::max(l, r); });
+    }
+    case Op::kMin: {
+      return fn([](auto l, auto r) { return std::min(l, r); });
+    }
+    case Op::kSum: {
+      return fn(std::plus<>{});
+    }
+    case Op::kBitwiseAND: {
+      return fn(std::bit_and<>{});
+    }
+    case Op::kBitwiseOR: {
+      return fn(std::bit_or<>{});
+    }
+    case Op::kBitwiseXOR: {
+      return fn(std::bit_xor<>{});
+    }
+  }
+  return comm.Block();
+}
+
+[[nodiscard]] Result Coll::Broadcast(Context const*, Comm const& comm,
+                                     common::Span<std::int8_t> data, std::int32_t root) {
+  return cpu_impl::Broadcast(comm, data, root);
+}
+
+[[nodiscard]] Result Coll::Allgather(Context const*, Comm const& comm,
+                                     common::Span<std::int8_t> data, std::size_t size) {
+  return RingAllgather(comm, data, size);
+}
+
+[[nodiscard]] Result Coll::AllgatherV(Context const*, Comm const& comm,
+                                      common::Span<std::int8_t const> data,
+                                      common::Span<std::int64_t const> sizes,
+                                      common::Span<std::int64_t> recv_segments,
+                                      common::Span<std::int8_t> recv) {
+  return cpu_impl::RingAllgatherV(comm, sizes, data, recv_segments, recv);
+}
+}  // namespace xgboost::collective
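Note: Coll::Allreduce composes a buffer-level reduction from an element-wise operator — redop_fn applies elem_op pairwise across two spans, and the ring allreduce feeds successive neighbor buffers through it. The semantics in Python (a sketch of the behavior, not the implementation):

from functools import reduce

def allreduce(worker_buffers, elem_op):
    # Element-wise reduction across equally sized worker buffers.
    def redop(lhs, out):
        return [elem_op(a, b) for a, b in zip(lhs, out)]
    return reduce(redop, worker_buffers)

# Op::kSum over three workers
assert allreduce([[1, 2], [3, 4], [5, 6]], lambda l, r: l + r) == [9, 12]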
src/collective/coll.h (new file, 66 lines)
@@ -0,0 +1,66 @@
+/**
+ * Copyright 2023, XGBoost Contributors
+ */
+#pragma once
+#include <cstddef>  // for size_t
+#include <cstdint>  // for int8_t, int64_t
+#include <memory>   // for enable_shared_from_this
+
+#include "../data/array_interface.h"    // for ArrayInterfaceHandler
+#include "comm.h"                       // for Comm
+#include "xgboost/collective/result.h"  // for Result
+#include "xgboost/context.h"            // for Context
+#include "xgboost/span.h"               // for Span
+
+namespace xgboost::collective {
+/**
+ * @brief Interface and base implementation for collective.
+ */
+class Coll : public std::enable_shared_from_this<Coll> {
+ public:
+  Coll() = default;
+  virtual ~Coll() noexcept(false) {}  // NOLINT
+
+  /**
+   * @brief Allreduce
+   *
+   * @param [in,out] data Data buffer for input and output.
+   * @param [in] type data type.
+   * @param [in] op Reduce operation. For custom operation, user needs to reach down to
+   *                the CPU implementation.
+   */
+  [[nodiscard]] virtual Result Allreduce(Context const* ctx, Comm const& comm,
+                                         common::Span<std::int8_t> data,
+                                         ArrayInterfaceHandler::Type type, Op op);
+  /**
+   * @brief Broadcast
+   *
+   * @param [in,out] data Data buffer for input and output.
+   * @param [in] root Root rank for broadcast.
+   */
+  [[nodiscard]] virtual Result Broadcast(Context const* ctx, Comm const& comm,
+                                         common::Span<std::int8_t> data, std::int32_t root);
+  /**
+   * @brief Allgather
+   *
+   * @param [in,out] data Data buffer for input and output.
+   * @param [in] size Size of data for each worker.
+   */
+  [[nodiscard]] virtual Result Allgather(Context const* ctx, Comm const& comm,
+                                         common::Span<std::int8_t> data, std::size_t size);
+  /**
+   * @brief Allgather with variable length.
+   *
+   * @param [in] data Input data for the current worker.
+   * @param [in] sizes Size of the input from each worker.
+   * @param [out] recv_segments Pre-allocated offset for each worker in the output; size
+   *                            should be equal to (world + 1).
+   * @param [out] recv Pre-allocated buffer for output.
+   */
+  [[nodiscard]] virtual Result AllgatherV(Context const* ctx, Comm const& comm,
+                                          common::Span<std::int8_t const> data,
+                                          common::Span<std::int64_t const> sizes,
+                                          common::Span<std::int64_t> recv_segments,
+                                          common::Span<std::int8_t> recv);
+};
+}  // namespace xgboost::collective
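Note: the interface deliberately traffics in Span<std::int8_t> plus an ArrayInterfaceHandler::Type tag — callers erase typed buffers to bytes at the boundary and the implementation reinterprets them. The numpy analogue of that round trip (illustrative only):

import numpy as np

data = np.array([1, 2, 3], dtype=np.uint32)
erased = data.view(np.int8)         # like EraseType: same memory, byte-typed
assert erased.nbytes == data.nbytes
restored = erased.view(np.uint32)   # reinterpret via the type tag (kU4)
assert restored.tolist() == [1, 2, 3]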
@@ -23,7 +23,7 @@ Comm::Comm(std::string const& host, std::int32_t port, std::chrono::seconds time
       retry_{retry},
       tracker_{host, port, -1},
       task_id_{std::move(task_id)},
-      loop_{std::make_shared<Loop>(timeout)} {}
+      loop_{std::shared_ptr<Loop>{new Loop{timeout}}} {}

 Result ConnectTrackerImpl(proto::PeerInfo info, std::chrono::seconds timeout, std::int32_t retry,
                           std::string const& task_id, TCPSocket* out, std::int32_t rank,
tests/buildkite/pipeline-mac-m1.yml (new file, 8 lines)
@@ -0,0 +1,8 @@
+steps:
+  - block: ":rocket: Run this test job"
+    if: build.pull_request.id != null || build.branch =~ /^dependabot\//
+  - label: ":macos: Build and Test XGBoost for MacOS M1 with Clang 11"
+    command: "tests/buildkite/test-macos-m1-clang11.sh"
+    key: mac-m1-appleclang11
+    agents:
+      queue: mac-mini-m1
tests/buildkite/test-macos-m1-clang11.sh (new executable file, 33 lines)
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+set -euo pipefail
+
+source tests/buildkite/conftest.sh
+
+# Display system info
+echo "--- Display system information"
+set -x
+system_profiler SPSoftwareDataType
+sysctl -n machdep.cpu.brand_string
+uname -m
+set +x
+
+# Create new Conda env
+echo "--- Set up Conda env"
+. $HOME/mambaforge/etc/profile.d/conda.sh
+. $HOME/mambaforge/etc/profile.d/mamba.sh
+conda_env=xgboost_dev_$(uuidgen | tr '[:upper:]' '[:lower:]' | tr -d '-')
+mamba create -y -n ${conda_env} python=3.8
+conda activate ${conda_env}
+mamba env update -n ${conda_env} --file tests/ci_build/conda_env/macos_cpu_test.yml
+
+# Ensure that XGBoost can be built with Clang 11
+echo "--- Build and Test XGBoost with MacOS M1, Clang 11"
+set -x
+LLVM11_PATH=$(brew --prefix llvm\@11)
+mkdir build
+pushd build
+cmake .. -GNinja -DCMAKE_C_COMPILER=${LLVM11_PATH}/bin/clang \
+  -DCMAKE_CXX_COMPILER=${LLVM11_PATH}/bin/clang++ -DGOOGLE_TEST=ON \
+  -DUSE_DMLC_GTEST=ON
+ninja -v
@@ -32,11 +32,10 @@ dependencies:
 - jsonschema
 - boto3
 - awscli
-- py-ubjson
 - cffi
 - pyarrow
 - pyspark>=3.4.0
 - cloudpickle
 - pip:
   - sphinx_rtd_theme
-  - datatable
+  - py-ubjson
@@ -19,11 +19,13 @@ class LintersPaths:
     # tests
     "tests/python/test_config.py",
     "tests/python/test_data_iterator.py",
+    "tests/python/test_dmatrix.py",
     "tests/python/test_dt.py",
     "tests/python/test_predict.py",
     "tests/python/test_quantile_dmatrix.py",
     "tests/python/test_tree_regularization.py",
     "tests/python/test_shap.py",
+    "tests/python/test_with_pandas.py",
     "tests/python-gpu/test_gpu_data_iterator.py",
     "tests/python-gpu/test_gpu_prediction.py",
     "tests/python-gpu/load_pickle.py",
@@ -19,10 +19,8 @@ cmake_policy(SET CMP0104 NEW)
 set(CMAKE_CUDA_HOST_COMPILER \${CMAKE_CXX_COMPILER})
 enable_language(CUDA)
 include(../cmake/Utils.cmake)
-set(GEN_CODE "")
-format_gencode_flags("" GEN_CODE)
+compute_cmake_cuda_archs("")
 add_library(test OBJECT test.cu)
-set_property(TARGET test PROPERTY CUDA_ARCHITECTURES \${CMAKE_CUDA_ARCHITECTURES})
 set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
 EOF

@@ -4,6 +4,7 @@
 #include <gtest/gtest.h>

 #include "../../../src/collective/allreduce.h"
+#include "../../../src/collective/coll.h"  // for Coll
 #include "../../../src/collective/tracker.h"
 #include "test_worker.h"  // for WorkerForTest, TestDistributed

@@ -47,6 +48,19 @@ class AllreduceWorker : public WorkerForTest {
       ASSERT_EQ(v, 1.5 * static_cast<double>(comm_.World())) << i;
     }
   }
+
+  void BitOr() {
+    Context ctx;
+    std::vector<std::uint32_t> data(comm_.World(), 0);
+    data[comm_.Rank()] = ~std::uint32_t{0};
+    auto pcoll = std::shared_ptr<Coll>{new Coll{}};
+    auto rc = pcoll->Allreduce(&ctx, comm_, EraseType(common::Span{data.data(), data.size()}),
+                               ArrayInterfaceHandler::kU4, Op::kBitwiseOR);
+    ASSERT_TRUE(rc.OK()) << rc.Report();
+    for (auto v : data) {
+      ASSERT_EQ(v, ~std::uint32_t{0});
+    }
+  }
 };

 class AllreduceTest : public SocketTest {};

@@ -69,4 +83,13 @@ TEST_F(AllreduceTest, Sum) {
     worker.Acc();
   });
 }
+
+TEST_F(AllreduceTest, BitOr) {
+  std::int32_t n_workers = std::min(7u, std::thread::hardware_concurrency());
+  TestDistributed(n_workers, [=](std::string host, std::int32_t port, std::chrono::seconds timeout,
+                                 std::int32_t r) {
+    AllreduceWorker worker{host, port, timeout, n_workers, r};
+    worker.BitOr();
+  });
+}
 }  // namespace xgboost::collective
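Note: the BitOr test works because each worker contributes a buffer that is zero everywhere except all-ones at its own rank, so an OR-reduction across the world must produce all-ones in every slot. Checking the invariant in Python:

import numpy as np

world = 4
buffers = np.zeros((world, world), dtype=np.uint32)
for rank in range(world):
    buffers[rank, rank] = np.uint32(0xFFFFFFFF)  # ~uint32{0}
reduced = np.bitwise_or.reduce(buffers, axis=0)  # Op::kBitwiseOR
assert (reduced == 0xFFFFFFFF).all()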
@@ -41,7 +41,7 @@ class LoopTest : public ::testing::Test {
     rc = pair_.first.NonBlocking(true);
     ASSERT_TRUE(rc.OK());

-    loop_ = std::make_shared<Loop>(timeout);
+    loop_ = std::shared_ptr<Loop>{new Loop{timeout}};
   }

   void TearDown() override {
@@ -1,3 +1,4 @@
+import csv
 import os
 import sys
 import tempfile

@@ -15,7 +16,7 @@ from xgboost.testing.data import np_dtypes

 rng = np.random.RandomState(1)

-dpath = 'demo/data/'
+dpath = "demo/data/"
 rng = np.random.RandomState(1994)


@@ -67,12 +68,13 @@ def set_base_margin_info(DType, DMatrixT, tm: str):
 class TestDMatrix:
     def test_warn_missing(self):
         from xgboost import data
+
         with pytest.warns(UserWarning):
-            data._warn_unused_missing('uri', 4)
+            data._warn_unused_missing("uri", 4)

         with pytest.warns(None) as record:
-            data._warn_unused_missing('uri', None)
-            data._warn_unused_missing('uri', np.nan)
+            data._warn_unused_missing("uri", None)
+            data._warn_unused_missing("uri", np.nan)

         assert len(record) == 0

@@ -106,7 +108,7 @@ class TestDMatrix:
         with pytest.raises(ValueError):
             xgb.DMatrix(data)
         # object dtype
-        data = np.array([['a', 'b'], ['c', 'd']])
+        data = np.array([["a", "b"], ["c", "d"]])
         with pytest.raises(ValueError):
             xgb.DMatrix(data)

@@ -148,18 +150,18 @@ class TestDMatrix:
         y = np.array([12, 34, 56], np.float32)[::2]
         from_view = xgb.DMatrix(np.array([[]]), label=y).get_label()
         from_array = xgb.DMatrix(np.array([[]]), label=y + 0).get_label()
-        assert (from_view.shape == from_array.shape)
+        assert from_view.shape == from_array.shape
         assert (from_view == from_array).all()

         # Sliced UInt array
         z = np.array([12, 34, 56], np.uint32)[::2]
         dmat = xgb.DMatrix(np.array([[]]))
-        dmat.set_uint_info('group', z)
-        from_view = dmat.get_uint_info('group_ptr')
+        dmat.set_uint_info("group", z)
+        from_view = dmat.get_uint_info("group_ptr")
         dmat = xgb.DMatrix(np.array([[]]))
-        dmat.set_uint_info('group', z + 0)
-        from_array = dmat.get_uint_info('group_ptr')
-        assert (from_view.shape == from_array.shape)
+        dmat.set_uint_info("group", z + 0)
+        from_array = dmat.get_uint_info("group_ptr")
+        assert from_view.shape == from_array.shape
         assert (from_view == from_array).all()

     def test_slice(self):

@@ -181,9 +183,11 @@ class TestDMatrix:

         # Slicing works with label and other meta info fields
         np.testing.assert_equal(sliced.get_label(), y[1:7])
-        np.testing.assert_equal(sliced.get_float_info('feature_weights'), fw)
+        np.testing.assert_equal(sliced.get_float_info("feature_weights"), fw)
         np.testing.assert_equal(sliced.get_base_margin(), base_margin[1:7, :].flatten())
-        np.testing.assert_equal(sliced.get_base_margin(), sliced.get_float_info('base_margin'))
+        np.testing.assert_equal(
+            sliced.get_base_margin(), sliced.get_float_info("base_margin")
+        )

         # Slicing a DMatrix results into a DMatrix that's equivalent to a DMatrix that's
         # constructed from the corresponding NumPy slice

@@ -191,11 +195,15 @@ class TestDMatrix:
         d2.set_base_margin(base_margin[1:7, :])
         eval_res = {}
         _ = xgb.train(
-            {'num_class': 3, 'objective': 'multi:softprob',
-             'eval_metric': 'mlogloss'},
+            {"num_class": 3, "objective": "multi:softprob", "eval_metric": "mlogloss"},
             d,
-            num_boost_round=2, evals=[(d2, 'd2'), (sliced, 'sliced')], evals_result=eval_res)
-        np.testing.assert_equal(eval_res['d2']['mlogloss'], eval_res['sliced']['mlogloss'])
+            num_boost_round=2,
+            evals=[(d2, "d2"), (sliced, "sliced")],
+            evals_result=eval_res,
+        )
+        np.testing.assert_equal(
+            eval_res["d2"]["mlogloss"], eval_res["sliced"]["mlogloss"]
+        )

         ridxs_arr = np.array(ridxs)[1:]  # handles numpy slice correctly
         sliced = d.slice(ridxs_arr)
@@ -206,17 +214,17 @@ class TestDMatrix:

         # different length
         with pytest.raises(ValueError):
-            xgb.DMatrix(data, feature_names=list('abcdef'))
+            xgb.DMatrix(data, feature_names=list("abcdef"))
         # contains duplicates
         with pytest.raises(ValueError):
-            xgb.DMatrix(data, feature_names=['a', 'b', 'c', 'd', 'd'])
+            xgb.DMatrix(data, feature_names=["a", "b", "c", "d", "d"])
         # contains symbol
         with pytest.raises(ValueError):
-            xgb.DMatrix(data, feature_names=['a', 'b', 'c', 'd', 'e<1'])
+            xgb.DMatrix(data, feature_names=["a", "b", "c", "d", "e<1"])

         dm = xgb.DMatrix(data)
-        dm.feature_names = list('abcde')
-        assert dm.feature_names == list('abcde')
+        dm.feature_names = list("abcde")
+        assert dm.feature_names == list("abcde")

         assert dm.slice([0, 1]).num_col() == dm.num_col()
         assert dm.slice([0, 1]).feature_names == dm.feature_names

@@ -224,11 +232,11 @@ class TestDMatrix:
         with pytest.raises(ValueError, match=r"Duplicates found: \['bar'\]"):
             dm.feature_names = ["bar"] * (data.shape[1] - 2) + ["a", "b"]

-        dm.feature_types = list('qiqiq')
-        assert dm.feature_types == list('qiqiq')
+        dm.feature_types = list("qiqiq")
+        assert dm.feature_types == list("qiqiq")

         with pytest.raises(ValueError):
-            dm.feature_types = list('abcde')
+            dm.feature_types = list("abcde")

         # reset
         dm.feature_names = None

@@ -240,20 +248,23 @@ class TestDMatrix:
         data = np.random.randn(100, 5)
         target = np.array([0, 1] * 50)

-        cases = [['Feature1', 'Feature2', 'Feature3', 'Feature4', 'Feature5'],
-                 [u'要因1', u'要因2', u'要因3', u'要因4', u'要因5']]
+        cases = [
+            ["Feature1", "Feature2", "Feature3", "Feature4", "Feature5"],
+            ["要因1", "要因2", "要因3", "要因4", "要因5"],
+        ]

         for features in cases:
-            dm = xgb.DMatrix(data, label=target,
-                             feature_names=features)
+            dm = xgb.DMatrix(data, label=target, feature_names=features)
             assert dm.feature_names == features
             assert dm.num_row() == 100
             assert dm.num_col() == 5

-            params = {'objective': 'multi:softprob',
-                      'eval_metric': 'mlogloss',
-                      'eta': 0.3,
-                      'num_class': 3}
+            params = {
+                "objective": "multi:softprob",
+                "eval_metric": "mlogloss",
+                "eta": 0.3,
+                "num_class": 3,
+            }

             bst = xgb.train(params, dm, num_boost_round=10)
             scores = bst.get_fscore()

@@ -264,22 +275,19 @@ class TestDMatrix:
                 bst.predict(dm)

             # different feature name must raise an error
-            dm = xgb.DMatrix(dummy, feature_names=list('abcde'))
+            dm = xgb.DMatrix(dummy, feature_names=list("abcde"))
             with pytest.raises(ValueError):
                 bst.predict(dm)

     @pytest.mark.skipif(**tm.no_pandas())
     def test_save_binary(self):
         import pandas as pd

         with tempfile.TemporaryDirectory() as tmpdir:
-            path = os.path.join(tmpdir, 'm.dmatrix')
-            data = pd.DataFrame({
-                "a": [0, 1],
-                "b": [2, 3],
-                "c": [4, 5]
-            })
+            path = os.path.join(tmpdir, "m.dmatrix")
+            data = pd.DataFrame({"a": [0, 1], "b": [2, 3], "c": [4, 5]})
             m0 = xgb.DMatrix(data.loc[:, ["a", "b"]], data["c"])
-            assert m0.feature_names == ['a', 'b']
+            assert m0.feature_names == ["a", "b"]
             m0.save_binary(path)
             m1 = xgb.DMatrix(path)
             assert m0.feature_names == m1.feature_names
@@ -287,10 +295,10 @@ class TestDMatrix:

     def test_get_info(self):
         dtrain, _ = tm.load_agaricus(__file__)
-        dtrain.get_float_info('label')
-        dtrain.get_float_info('weight')
-        dtrain.get_float_info('base_margin')
-        dtrain.get_uint_info('group_ptr')
+        dtrain.get_float_info("label")
+        dtrain.get_float_info("weight")
+        dtrain.get_float_info("base_margin")
+        dtrain.get_uint_info("group_ptr")

         group_len = np.array([2, 3, 4])
         dtrain.set_group(group_len)

@@ -305,7 +313,7 @@ class TestDMatrix:

         Xy = xgb.DMatrix(X, y)
         Xy.set_info(qid=qid)
-        group_ptr = Xy.get_uint_info('group_ptr')
+        group_ptr = Xy.get_uint_info("group_ptr")
         assert group_ptr[0] == 0
         assert group_ptr[-1] == rows

@@ -317,11 +325,11 @@ class TestDMatrix:
         X = rng.randn(kRows, kCols)
         m = xgb.DMatrix(X)
         m.set_info(feature_weights=fw)
-        np.testing.assert_allclose(fw, m.get_float_info('feature_weights'))
+        np.testing.assert_allclose(fw, m.get_float_info("feature_weights"))
         # Handle empty
         m.set_info(feature_weights=np.empty((0,)))

-        assert m.get_float_info('feature_weights').shape[0] == 0
+        assert m.get_float_info("feature_weights").shape[0] == 0

         fw -= 1

@@ -331,13 +339,13 @@ class TestDMatrix:
     def test_sparse_dmatrix_csr(self):
         nrow = 100
         ncol = 1000
-        x = rand(nrow, ncol, density=0.0005, format='csr', random_state=rng)
+        x = rand(nrow, ncol, density=0.0005, format="csr", random_state=rng)
         assert x.indices.max() < ncol
         x.data[:] = 1
         dtrain = xgb.DMatrix(x, label=rng.binomial(1, 0.3, nrow))
         assert (dtrain.num_row(), dtrain.num_col()) == (nrow, ncol)
-        watchlist = [(dtrain, 'train')]
-        param = {'max_depth': 3, 'objective': 'binary:logistic', 'verbosity': 0}
+        watchlist = [(dtrain, "train")]
+        param = {"max_depth": 3, "objective": "binary:logistic", "verbosity": 0}
         bst = xgb.train(param, dtrain, 5, watchlist)
         bst.predict(dtrain)

@@ -369,13 +377,13 @@ class TestDMatrix:
     def test_sparse_dmatrix_csc(self):
         nrow = 1000
         ncol = 100
-        x = rand(nrow, ncol, density=0.0005, format='csc', random_state=rng)
+        x = rand(nrow, ncol, density=0.0005, format="csc", random_state=rng)
         assert x.indices.max() < nrow - 1
         x.data[:] = 1
         dtrain = xgb.DMatrix(x, label=rng.binomial(1, 0.3, nrow))
         assert (dtrain.num_row(), dtrain.num_col()) == (nrow, ncol)
-        watchlist = [(dtrain, 'train')]
-        param = {'max_depth': 3, 'objective': 'binary:logistic', 'verbosity': 0}
+        watchlist = [(dtrain, "train")]
+        param = {"max_depth": 3, "objective": "binary:logistic", "verbosity": 0}
         bst = xgb.train(param, dtrain, 5, watchlist)
         bst.predict(dtrain)

@@ -389,6 +397,7 @@ class TestDMatrix:
             xgb.DMatrix(d)

         from scipy import sparse
+
         rng = np.random.RandomState(1994)
         X = rng.rand(10, 10)
         y = rng.rand(10)

@@ -402,7 +411,7 @@ class TestDMatrix:
         n_features = 10
         X, y = tm.make_categorical(10, n_features, n_categories=4, onehot=False)
         X = X.values.astype(np.float32)
-        feature_types = ['c'] * n_features
+        feature_types = ["c"] * n_features

         assert isinstance(X, np.ndarray)
         Xy = xgb.DMatrix(X, y, feature_types=feature_types)

@@ -410,10 +419,11 @@ class TestDMatrix:

     def test_scipy_categorical(self):
         from scipy import sparse
+
         n_features = 10
         X, y = tm.make_categorical(10, n_features, n_categories=4, onehot=False)
         X = X.values.astype(np.float32)
-        feature_types = ['c'] * n_features
+        feature_types = ["c"] * n_features

         X[1, 3] = np.NAN
         X[2, 4] = np.NAN

@@ -433,7 +443,7 @@ class TestDMatrix:
         np.testing.assert_equal(np.array(Xy.feature_types), np.array(feature_types))

     def test_uri_categorical(self):
-        path = os.path.join(dpath, 'agaricus.txt.train')
+        path = os.path.join(dpath, "agaricus.txt.train")
         feature_types = ["q"] * 5 + ["c"] + ["q"] * 120
         Xy = xgb.DMatrix(
             path + "?indexing_mode=1&format=libsvm", feature_types=feature_types
@@ -471,6 +481,7 @@ class TestDMatrix:
         assert tm.predictor_equal(m0, m1)


+@pytest.mark.skipif(tm.is_windows(), reason="Rabit does not run on windows")
 class TestDMatrixColumnSplit:
     def test_numpy(self):
         def verify_numpy():

@@ -487,14 +498,22 @@ class TestDMatrixColumnSplit:
         def verify_numpy_feature_names():
             world_size = xgb.collective.get_world_size()
             data = np.random.randn(5, 5)
-            feature_names = [f'feature{x}' for x in range(5)]
-            feature_types = ['float'] * 5
-            dm = xgb.DMatrix(data, feature_names=feature_names, feature_types=feature_types,
-                             data_split_mode=DataSplitMode.COL)
+            feature_names = [f"feature{x}" for x in range(5)]
+            feature_types = ["float"] * 5
+            dm = xgb.DMatrix(
+                data,
+                feature_names=feature_names,
+                feature_types=feature_types,
+                data_split_mode=DataSplitMode.COL,
+            )
             assert dm.num_row() == 5
             assert dm.num_col() == 5 * world_size
             assert len(dm.feature_names) == 5 * world_size
+            assert dm.feature_names == tm.column_split_feature_names(
+                feature_names, world_size
+            )
             assert len(dm.feature_types) == 5 * world_size
+            assert dm.feature_types == ["float"] * 5 * world_size

         tm.run_with_rabit(world_size=3, test_fn=verify_numpy_feature_names)

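Note: tm.column_split_feature_names prefixes each local feature name with the owning rank, which is exactly the global name list a column-split DMatrix reports. For example:

def column_split_feature_names(feature_names, world_size):
    # mirrors the helper added to xgboost.testing above
    return [f"{rank}.{f}" for rank in range(world_size) for f in feature_names]

assert column_split_feature_names(["a", "b"], 2) == ["0.a", "0.b", "1.a", "1.b"]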
@@ -534,6 +553,23 @@ class TestDMatrixColumnSplit:

         tm.run_with_rabit(world_size=3, test_fn=verify_coo)

+    def test_uri(self):
+        def verify_uri():
+            rank = xgb.collective.get_rank()
+            data = np.random.rand(5, 5)
+            filename = f"test_data_{rank}.csv"
+            with open(filename, mode="w", newline="") as file:
+                writer = csv.writer(file)
+                for row in data:
+                    writer.writerow(row)
+            dtrain = xgb.DMatrix(
+                f"{filename}?format=csv", data_split_mode=DataSplitMode.COL
+            )
+            assert dtrain.num_row() == 5
+            assert dtrain.num_col() == 5 * xgb.collective.get_world_size()
+
+        tm.run_with_rabit(world_size=3, test_fn=verify_uri)
+
     def test_list(self):
         def verify_list():
             data = [

@@ -541,7 +577,7 @@ class TestDMatrixColumnSplit:
                 [6, 7, 8, 9, 10],
                 [11, 12, 13, 14, 15],
                 [16, 17, 18, 19, 20],
-                [21, 22, 23, 24, 25]
+                [21, 22, 23, 24, 25],
             ]
             dm = xgb.DMatrix(data, data_split_mode=DataSplitMode.COL)
             assert dm.num_row() == 5

@@ -556,7 +592,7 @@ class TestDMatrixColumnSplit:
                 (6, 7, 8, 9, 10),
                 (11, 12, 13, 14, 15),
                 (16, 17, 18, 19, 20),
-                (21, 22, 23, 24, 25)
+                (21, 22, 23, 24, 25),
             )
             dm = xgb.DMatrix(data, data_split_mode=DataSplitMode.COL)
             assert dm.num_row() == 5
@@ -1,6 +1,5 @@
 import os
 import sys
-import unittest

 import numpy as np
 import pytest

@@ -101,6 +100,7 @@ class TestArrowTable:
         np.testing.assert_equal(y_np_low, y_lower_bound.to_pandas().values)


+@pytest.mark.skipif(tm.is_windows(), reason="Rabit does not run on windows")
 class TestArrowTableColumnSplit:
     def test_arrow_table(self):
         def verify_arrow_table():
@ -1,3 +1,4 @@
|
|||||||
|
import sys
|
||||||
from typing import Type
|
from typing import Type
|
||||||
|
|
||||||
import numpy as np
|
import numpy as np
|
||||||
@ -6,6 +7,7 @@ from test_dmatrix import set_base_margin_info
|
|||||||
|
|
||||||
import xgboost as xgb
|
import xgboost as xgb
|
||||||
from xgboost import testing as tm
|
from xgboost import testing as tm
|
||||||
|
from xgboost.core import DataSplitMode
|
||||||
from xgboost.testing.data import pd_arrow_dtypes, pd_dtypes
|
from xgboost.testing.data import pd_arrow_dtypes, pd_dtypes
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@@ -17,114 +19,194 @@ except ImportError:
 pytestmark = pytest.mark.skipif(**tm.no_pandas())


-dpath = 'demo/data/'
+dpath = "demo/data/"
 rng = np.random.RandomState(1994)


 class TestPandas:
-    def test_pandas(self):
-        df = pd.DataFrame([[1, 2., True], [2, 3., False]],
-                          columns=['a', 'b', 'c'])
-        dm = xgb.DMatrix(df, label=pd.Series([1, 2]))
-        assert dm.feature_names == ['a', 'b', 'c']
-        assert dm.feature_types == ['int', 'float', 'i']
+    def test_pandas(self, data_split_mode=DataSplitMode.ROW):
+        world_size = xgb.collective.get_world_size()
+        df = pd.DataFrame([[1, 2.0, True], [2, 3.0, False]], columns=["a", "b", "c"])
+        dm = xgb.DMatrix(df, label=pd.Series([1, 2]), data_split_mode=data_split_mode)
         assert dm.num_row() == 2
-        assert dm.num_col() == 3
+        if data_split_mode == DataSplitMode.ROW:
+            assert dm.feature_names == ["a", "b", "c"]
+            assert dm.feature_types == ["int", "float", "i"]
+            assert dm.num_col() == 3
+        else:
+            assert dm.feature_names == tm.column_split_feature_names(
+                ["a", "b", "c"], world_size
+            )
+            assert dm.feature_types == ["int", "float", "i"] * world_size
+            assert dm.num_col() == 3 * world_size
         np.testing.assert_array_equal(dm.get_label(), np.array([1, 2]))

         # overwrite feature_names and feature_types
-        dm = xgb.DMatrix(df, label=pd.Series([1, 2]),
-                         feature_names=['x', 'y', 'z'],
-                         feature_types=['q', 'q', 'q'])
-        assert dm.feature_names == ['x', 'y', 'z']
-        assert dm.feature_types == ['q', 'q', 'q']
+        dm = xgb.DMatrix(
+            df,
+            label=pd.Series([1, 2]),
+            feature_names=["x", "y", "z"],
+            feature_types=["q", "q", "q"],
+            data_split_mode=data_split_mode,
+        )
         assert dm.num_row() == 2
-        assert dm.num_col() == 3
+        if data_split_mode == DataSplitMode.ROW:
+            assert dm.feature_names == ["x", "y", "z"]
+            assert dm.feature_types == ["q", "q", "q"]
+            assert dm.num_col() == 3
+        else:
+            assert dm.feature_names == tm.column_split_feature_names(
+                ["x", "y", "z"], world_size
+            )
+            assert dm.feature_types == ["q", "q", "q"] * world_size
+            assert dm.num_col() == 3 * world_size

         # incorrect dtypes
-        df = pd.DataFrame([[1, 2., 'x'], [2, 3., 'y']],
-                          columns=['a', 'b', 'c'])
+        df = pd.DataFrame([[1, 2.0, "x"], [2, 3.0, "y"]], columns=["a", "b", "c"])
         with pytest.raises(ValueError):
-            xgb.DMatrix(df)
+            xgb.DMatrix(df, data_split_mode=data_split_mode)

         # numeric columns
-        df = pd.DataFrame([[1, 2., True], [2, 3., False]])
-        dm = xgb.DMatrix(df, label=pd.Series([1, 2]))
-        assert dm.feature_names == ['0', '1', '2']
-        assert dm.feature_types == ['int', 'float', 'i']
+        df = pd.DataFrame([[1, 2.0, True], [2, 3.0, False]])
+        dm = xgb.DMatrix(df, label=pd.Series([1, 2]), data_split_mode=data_split_mode)
         assert dm.num_row() == 2
-        assert dm.num_col() == 3
+        if data_split_mode == DataSplitMode.ROW:
+            assert dm.feature_names == ["0", "1", "2"]
+            assert dm.feature_types == ["int", "float", "i"]
+            assert dm.num_col() == 3
+        else:
+            assert dm.feature_names == tm.column_split_feature_names(
+                ["0", "1", "2"], world_size
+            )
+            assert dm.feature_types == ["int", "float", "i"] * world_size
+            assert dm.num_col() == 3 * world_size
         np.testing.assert_array_equal(dm.get_label(), np.array([1, 2]))

-        df = pd.DataFrame([[1, 2., 1], [2, 3., 1]], columns=[4, 5, 6])
-        dm = xgb.DMatrix(df, label=pd.Series([1, 2]))
-        assert dm.feature_names == ['4', '5', '6']
-        assert dm.feature_types == ['int', 'float', 'int']
+        df = pd.DataFrame([[1, 2.0, 1], [2, 3.0, 1]], columns=[4, 5, 6])
+        dm = xgb.DMatrix(df, label=pd.Series([1, 2]), data_split_mode=data_split_mode)
         assert dm.num_row() == 2
-        assert dm.num_col() == 3
+        if data_split_mode == DataSplitMode.ROW:
+            assert dm.feature_names == ["4", "5", "6"]
+            assert dm.feature_types == ["int", "float", "int"]
+            assert dm.num_col() == 3
+        else:
+            assert dm.feature_names == tm.column_split_feature_names(
+                ["4", "5", "6"], world_size
+            )
+            assert dm.feature_types == ["int", "float", "int"] * world_size
+            assert dm.num_col() == 3 * world_size

-        df = pd.DataFrame({'A': ['X', 'Y', 'Z'], 'B': [1, 2, 3]})
+        df = pd.DataFrame({"A": ["X", "Y", "Z"], "B": [1, 2, 3]})
         dummies = pd.get_dummies(df)
         #    B  A_X  A_Y  A_Z
         # 0  1    1    0    0
         # 1  2    0    1    0
         # 2  3    0    0    1
-        result, _, _ = xgb.data._transform_pandas_df(dummies,
-                                                     enable_categorical=False)
-        exp = np.array([[1., 1., 0., 0.],
-                        [2., 0., 1., 0.],
-                        [3., 0., 0., 1.]])
+        result, _, _ = xgb.data._transform_pandas_df(dummies, enable_categorical=False)
+        exp = np.array(
+            [[1.0, 1.0, 0.0, 0.0], [2.0, 0.0, 1.0, 0.0], [3.0, 0.0, 0.0, 1.0]]
+        )
         np.testing.assert_array_equal(result, exp)
-        dm = xgb.DMatrix(dummies)
-        assert dm.feature_names == ['B', 'A_X', 'A_Y', 'A_Z']
-        if int(pd.__version__[0]) >= 2:
-            assert dm.feature_types == ['int', 'i', 'i', 'i']
-        else:
-            assert dm.feature_types == ['int', 'int', 'int', 'int']
-        assert dm.num_row() == 3
-        assert dm.num_col() == 4
+        dm = xgb.DMatrix(dummies, data_split_mode=data_split_mode)
+        assert dm.num_row() == 3
+        if data_split_mode == DataSplitMode.ROW:
+            assert dm.feature_names == ["B", "A_X", "A_Y", "A_Z"]
+            if int(pd.__version__[0]) >= 2:
+                assert dm.feature_types == ["int", "i", "i", "i"]
+            else:
+                assert dm.feature_types == ["int", "int", "int", "int"]
+            assert dm.num_col() == 4
+        else:
+            assert dm.feature_names == tm.column_split_feature_names(
+                ["B", "A_X", "A_Y", "A_Z"], world_size
+            )
+            if int(pd.__version__[0]) >= 2:
+                assert dm.feature_types == ["int", "i", "i", "i"] * world_size
+            else:
+                assert dm.feature_types == ["int", "int", "int", "int"] * world_size
+            assert dm.num_col() == 4 * world_size

-        df = pd.DataFrame({'A=1': [1, 2, 3], 'A=2': [4, 5, 6]})
-        dm = xgb.DMatrix(df)
-        assert dm.feature_names == ['A=1', 'A=2']
-        assert dm.feature_types == ['int', 'int']
+        df = pd.DataFrame({"A=1": [1, 2, 3], "A=2": [4, 5, 6]})
+        dm = xgb.DMatrix(df, data_split_mode=data_split_mode)
         assert dm.num_row() == 3
-        assert dm.num_col() == 2
+        if data_split_mode == DataSplitMode.ROW:
+            assert dm.feature_names == ["A=1", "A=2"]
+            assert dm.feature_types == ["int", "int"]
+            assert dm.num_col() == 2
+        else:
+            assert dm.feature_names == tm.column_split_feature_names(
+                ["A=1", "A=2"], world_size
+            )
+            assert dm.feature_types == ["int", "int"] * world_size
+            assert dm.num_col() == 2 * world_size

         df_int = pd.DataFrame([[1, 1.1], [2, 2.2]], columns=[9, 10])
-        dm_int = xgb.DMatrix(df_int)
+        dm_int = xgb.DMatrix(df_int, data_split_mode=data_split_mode)
         df_range = pd.DataFrame([[1, 1.1], [2, 2.2]], columns=range(9, 11, 1))
-        dm_range = xgb.DMatrix(df_range)
-        assert dm_int.feature_names == ['9', '10']  # assert not "9 "
+        dm_range = xgb.DMatrix(df_range, data_split_mode=data_split_mode)
+        if data_split_mode == DataSplitMode.ROW:
+            assert dm_int.feature_names == ["9", "10"]  # assert not "9 "
+        else:
+            assert dm_int.feature_names == tm.column_split_feature_names(
+                ["9", "10"], world_size
+            )
         assert dm_int.feature_names == dm_range.feature_names

         # test MultiIndex as columns
         df = pd.DataFrame(
-            [
-                (1, 2, 3, 4, 5, 6),
-                (6, 5, 4, 3, 2, 1)
-            ],
-            columns=pd.MultiIndex.from_tuples((
-                ('a', 1), ('a', 2), ('a', 3),
-                ('b', 1), ('b', 2), ('b', 3),
-            ))
+            [(1, 2, 3, 4, 5, 6), (6, 5, 4, 3, 2, 1)],
+            columns=pd.MultiIndex.from_tuples(
+                (
+                    ("a", 1),
+                    ("a", 2),
+                    ("a", 3),
+                    ("b", 1),
+                    ("b", 2),
+                    ("b", 3),
+                )
+            ),
         )
-        dm = xgb.DMatrix(df)
-        assert dm.feature_names == ['a 1', 'a 2', 'a 3', 'b 1', 'b 2', 'b 3']
-        assert dm.feature_types == ['int', 'int', 'int', 'int', 'int', 'int']
+        dm = xgb.DMatrix(df, data_split_mode=data_split_mode)
         assert dm.num_row() == 2
-        assert dm.num_col() == 6
+        if data_split_mode == DataSplitMode.ROW:
+            assert dm.feature_names == ["a 1", "a 2", "a 3", "b 1", "b 2", "b 3"]
+            assert dm.feature_types == ["int", "int", "int", "int", "int", "int"]
+            assert dm.num_col() == 6
+        else:
+            assert dm.feature_names == tm.column_split_feature_names(
+                ["a 1", "a 2", "a 3", "b 1", "b 2", "b 3"], world_size
+            )
+            assert (
+                dm.feature_types
+                == ["int", "int", "int", "int", "int", "int"] * world_size
+            )
+            assert dm.num_col() == 6 * world_size

         # test Index as columns
         df = pd.DataFrame([[1, 1.1], [2, 2.2]], columns=pd.Index([1, 2]))
-        Xy = xgb.DMatrix(df)
-        np.testing.assert_equal(np.array(Xy.feature_names), np.array(["1", "2"]))
+        Xy = xgb.DMatrix(df, data_split_mode=data_split_mode)
+        if data_split_mode == DataSplitMode.ROW:
+            np.testing.assert_equal(np.array(Xy.feature_names), np.array(["1", "2"]))
+        else:
+            np.testing.assert_equal(
+                np.array(Xy.feature_names),
+                np.array(tm.column_split_feature_names(["1", "2"], world_size)),
+            )

+        # test pandas series
+        data_series = pd.Series([1, 2, 3, 4, 5])
+        dm = xgb.DMatrix(data_series, data_split_mode=data_split_mode)
+        assert dm.num_row() == 5
+        if data_split_mode == DataSplitMode.ROW:
+            assert dm.num_col() == 1
+        else:
+            assert dm.num_col() == 1 * world_size
+
     def test_slice(self):
         rng = np.random.RandomState(1994)
         rows = 100
         X = rng.randint(3, 7, size=rows)
-        X = pd.DataFrame({'f0': X})
+        X = pd.DataFrame({"f0": X})
         y = rng.randn(rows)
         ridxs = [1, 2, 3, 4, 5, 6]
         m = xgb.DMatrix(X, y)
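`tm.column_split_feature_names` is a new helper introduced alongside these tests; the assertions above only require that it expand the name list once per rank (hence the `* world_size` on the type lists and column counts). A plausible sketch, assuming rank-prefixed names; the exact naming scheme lives in `xgboost.testing`, not in this hunk:

    from typing import List

    def column_split_feature_names(
        feature_names: List[str], world_size: int
    ) -> List[str]:
        # Assumed behaviour: each rank contributes a copy of every column, so
        # the global frame has len(feature_names) * world_size uniquely named
        # features; the rank prefix is an illustration, not the real scheme.
        return [
            f"{rank}.{name}" for rank in range(world_size) for name in feature_names
        ]

    assert column_split_feature_names(["a", "b"], 2) == ["0.a", "0.b", "1.a", "1.b"]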
@@ -132,15 +214,16 @@ class TestPandas:

         assert m.feature_types == sliced.feature_types

-    def test_pandas_categorical(self):
+    def test_pandas_categorical(self, data_split_mode=DataSplitMode.ROW):
+        world_size = xgb.collective.get_world_size()
         rng = np.random.RandomState(1994)
         rows = 100
         X = rng.randint(3, 7, size=rows)
         X = pd.Series(X, dtype="category")
-        X = pd.DataFrame({'f0': X})
+        X = pd.DataFrame({"f0": X})
         y = rng.randn(rows)
-        m = xgb.DMatrix(X, y, enable_categorical=True)
-        assert m.feature_types[0] == 'c'
+        m = xgb.DMatrix(X, y, enable_categorical=True, data_split_mode=data_split_mode)
+        assert m.feature_types[0] == "c"

         X_0 = ["f", "o", "o"]
         X_1 = [4, 3, 2]
@@ -161,20 +244,27 @@ class TestPandas:
         X = X["f0"]
         y = y[: X.shape[0]]
         with pytest.raises(ValueError, match=r".*enable_categorical.*"):
-            xgb.DMatrix(X, y)
+            xgb.DMatrix(X, y, data_split_mode=data_split_mode)

-        Xy = xgb.DMatrix(X, y, enable_categorical=True)
+        Xy = xgb.DMatrix(X, y, enable_categorical=True, data_split_mode=data_split_mode)
         assert Xy.num_row() == 3
-        assert Xy.num_col() == 1
+        if data_split_mode == DataSplitMode.ROW:
+            assert Xy.num_col() == 1
+        else:
+            assert Xy.num_col() == 1 * world_size

     def test_pandas_sparse(self):
         import pandas as pd

         rows = 100
         X = pd.DataFrame(
-            {"A": pd.arrays.SparseArray(np.random.randint(0, 10, size=rows)),
-             "B": pd.arrays.SparseArray(np.random.randn(rows)),
-             "C": pd.arrays.SparseArray(np.random.permutation(
-                 [True, False] * (rows // 2)))}
+            {
+                "A": pd.arrays.SparseArray(np.random.randint(0, 10, size=rows)),
+                "B": pd.arrays.SparseArray(np.random.randn(rows)),
+                "C": pd.arrays.SparseArray(
+                    np.random.permutation([True, False] * (rows // 2))
+                ),
+            }
         )
         y = pd.Series(pd.arrays.SparseArray(np.random.randn(rows)))
         dtrain = xgb.DMatrix(X, y)
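`test_pandas_sparse` feeds `pd.arrays.SparseArray` columns to `DMatrix` and cross-checks predictions against the densified frame; `X.sparse.to_dense()` is the standard pandas accessor that materializes a frame whose columns are all sparse. A plain-pandas reference sketch (no XGBoost specifics assumed):

    import numpy as np
    import pandas as pd

    X = pd.DataFrame({"A": pd.arrays.SparseArray(np.array([0, 0, 3, 0]))})
    assert isinstance(X.dtypes["A"], pd.SparseDtype)

    dense = X.sparse.to_dense()  # same values, ordinary numpy-backed columns
    np.testing.assert_array_equal(dense["A"].to_numpy(), np.array([0, 0, 3, 0]))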
@@ -183,27 +273,36 @@ class TestPandas:
         predt_dense = booster.predict(xgb.DMatrix(X.sparse.to_dense()))
         np.testing.assert_allclose(predt_sparse, predt_dense)

-    def test_pandas_label(self):
+    def test_pandas_label(self, data_split_mode=DataSplitMode.ROW):
+        world_size = xgb.collective.get_world_size()
         # label must be a single column
-        df = pd.DataFrame({'A': ['X', 'Y', 'Z'], 'B': [1, 2, 3]})
+        df = pd.DataFrame({"A": ["X", "Y", "Z"], "B": [1, 2, 3]})
         with pytest.raises(ValueError):
-            xgb.data._transform_pandas_df(df, False, None, None, 'label', 'float')
+            xgb.data._transform_pandas_df(df, False, None, None, "label", "float")

         # label must be supported dtype
-        df = pd.DataFrame({'A': np.array(['a', 'b', 'c'], dtype=object)})
+        df = pd.DataFrame({"A": np.array(["a", "b", "c"], dtype=object)})
         with pytest.raises(ValueError):
-            xgb.data._transform_pandas_df(df, False, None, None, 'label', 'float')
+            xgb.data._transform_pandas_df(df, False, None, None, "label", "float")

-        df = pd.DataFrame({'A': np.array([1, 2, 3], dtype=int)})
-        result, _, _ = xgb.data._transform_pandas_df(df, False, None, None,
-                                                     'label', 'float')
-        np.testing.assert_array_equal(result, np.array([[1.], [2.], [3.]],
-                                                       dtype=float))
-        dm = xgb.DMatrix(np.random.randn(3, 2), label=df)
+        df = pd.DataFrame({"A": np.array([1, 2, 3], dtype=int)})
+        result, _, _ = xgb.data._transform_pandas_df(
+            df, False, None, None, "label", "float"
+        )
+        np.testing.assert_array_equal(
+            result, np.array([[1.0], [2.0], [3.0]], dtype=float)
+        )
+        dm = xgb.DMatrix(
+            np.random.randn(3, 2), label=df, data_split_mode=data_split_mode
+        )
         assert dm.num_row() == 3
-        assert dm.num_col() == 2
+        if data_split_mode == DataSplitMode.ROW:
+            assert dm.num_col() == 2
+        else:
+            assert dm.num_col() == 2 * world_size

-    def test_pandas_weight(self):
+    def test_pandas_weight(self, data_split_mode=DataSplitMode.ROW):
+        world_size = xgb.collective.get_world_size()
         kRows = 32
         kCols = 8

|
|||||||
y = np.random.randn(kRows)
|
y = np.random.randn(kRows)
|
||||||
w = np.random.uniform(size=kRows).astype(np.float32)
|
w = np.random.uniform(size=kRows).astype(np.float32)
|
||||||
w_pd = pd.DataFrame(w)
|
w_pd = pd.DataFrame(w)
|
||||||
data = xgb.DMatrix(X, y, weight=w_pd)
|
data = xgb.DMatrix(X, y, weight=w_pd, data_split_mode=data_split_mode)
|
||||||
|
|
||||||
assert data.num_row() == kRows
|
assert data.num_row() == kRows
|
||||||
|
if data_split_mode == DataSplitMode.ROW:
|
||||||
assert data.num_col() == kCols
|
assert data.num_col() == kCols
|
||||||
|
else:
|
||||||
|
assert data.num_col() == kCols * world_size
|
||||||
np.testing.assert_array_equal(data.get_weight(), w)
|
np.testing.assert_array_equal(data.get_weight(), w)
|
||||||
|
|
||||||
def test_base_margin(self):
|
def test_base_margin(self):
|
||||||
@@ -223,81 +324,128 @@ class TestPandas:

     def test_cv_as_pandas(self):
         dm, _ = tm.load_agaricus(__file__)
-        params = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
-                  'objective': 'binary:logistic', 'eval_metric': 'error'}
+        params = {
+            "max_depth": 2,
+            "eta": 1,
+            "verbosity": 0,
+            "objective": "binary:logistic",
+            "eval_metric": "error",
+        }

         cv = xgb.cv(params, dm, num_boost_round=10, nfold=10)
         assert isinstance(cv, pd.DataFrame)
-        exp = pd.Index([u'test-error-mean', u'test-error-std',
-                        u'train-error-mean', u'train-error-std'])
+        exp = pd.Index(
+            ["test-error-mean", "test-error-std", "train-error-mean", "train-error-std"]
+        )
         assert len(cv.columns.intersection(exp)) == 4

         # show progress log (result is the same as above)
-        cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
-                    verbose_eval=True)
+        cv = xgb.cv(params, dm, num_boost_round=10, nfold=10, verbose_eval=True)
         assert isinstance(cv, pd.DataFrame)
-        exp = pd.Index([u'test-error-mean', u'test-error-std',
-                        u'train-error-mean', u'train-error-std'])
+        exp = pd.Index(
+            ["test-error-mean", "test-error-std", "train-error-mean", "train-error-std"]
+        )
         assert len(cv.columns.intersection(exp)) == 4
-        cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
-                    verbose_eval=True, show_stdv=False)
+        cv = xgb.cv(
+            params, dm, num_boost_round=10, nfold=10, verbose_eval=True, show_stdv=False
+        )
         assert isinstance(cv, pd.DataFrame)
-        exp = pd.Index([u'test-error-mean', u'test-error-std',
-                        u'train-error-mean', u'train-error-std'])
+        exp = pd.Index(
+            ["test-error-mean", "test-error-std", "train-error-mean", "train-error-std"]
+        )
         assert len(cv.columns.intersection(exp)) == 4

-        params = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
-                  'objective': 'binary:logistic', 'eval_metric': 'auc'}
+        params = {
+            "max_depth": 2,
+            "eta": 1,
+            "verbosity": 0,
+            "objective": "binary:logistic",
+            "eval_metric": "auc",
+        }
         cv = xgb.cv(params, dm, num_boost_round=10, nfold=10, as_pandas=True)
-        assert 'eval_metric' in params
-        assert 'auc' in cv.columns[0]
+        assert "eval_metric" in params
+        assert "auc" in cv.columns[0]

-        params = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
-                  'objective': 'binary:logistic', 'eval_metric': ['auc']}
+        params = {
+            "max_depth": 2,
+            "eta": 1,
+            "verbosity": 0,
+            "objective": "binary:logistic",
+            "eval_metric": ["auc"],
+        }
         cv = xgb.cv(params, dm, num_boost_round=10, nfold=10, as_pandas=True)
-        assert 'eval_metric' in params
-        assert 'auc' in cv.columns[0]
+        assert "eval_metric" in params
+        assert "auc" in cv.columns[0]

-        params = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
-                  'objective': 'binary:logistic', 'eval_metric': ['auc']}
-        cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
-                    as_pandas=True, early_stopping_rounds=1)
-        assert 'eval_metric' in params
-        assert 'auc' in cv.columns[0]
+        params = {
+            "max_depth": 2,
+            "eta": 1,
+            "verbosity": 0,
+            "objective": "binary:logistic",
+            "eval_metric": ["auc"],
+        }
+        cv = xgb.cv(
+            params,
+            dm,
+            num_boost_round=10,
+            nfold=10,
+            as_pandas=True,
+            early_stopping_rounds=1,
+        )
+        assert "eval_metric" in params
+        assert "auc" in cv.columns[0]
         assert cv.shape[0] < 10

-        params = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
-                  'objective': 'binary:logistic'}
-        cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
-                    as_pandas=True, metrics='auc')
-        assert 'auc' in cv.columns[0]
+        params = {
+            "max_depth": 2,
+            "eta": 1,
+            "verbosity": 0,
+            "objective": "binary:logistic",
+        }
+        cv = xgb.cv(
+            params, dm, num_boost_round=10, nfold=10, as_pandas=True, metrics="auc"
+        )
+        assert "auc" in cv.columns[0]

-        params = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
-                  'objective': 'binary:logistic'}
-        cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
-                    as_pandas=True, metrics=['auc'])
-        assert 'auc' in cv.columns[0]
+        params = {
+            "max_depth": 2,
+            "eta": 1,
+            "verbosity": 0,
+            "objective": "binary:logistic",
+        }
+        cv = xgb.cv(
+            params, dm, num_boost_round=10, nfold=10, as_pandas=True, metrics=["auc"]
+        )
+        assert "auc" in cv.columns[0]

-        params = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
-                  'objective': 'binary:logistic', 'eval_metric': ['auc']}
-        cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
-                    as_pandas=True, metrics='error')
-        assert 'eval_metric' in params
-        assert 'auc' not in cv.columns[0]
-        assert 'error' in cv.columns[0]
+        params = {
+            "max_depth": 2,
+            "eta": 1,
+            "verbosity": 0,
+            "objective": "binary:logistic",
+            "eval_metric": ["auc"],
+        }
+        cv = xgb.cv(
+            params, dm, num_boost_round=10, nfold=10, as_pandas=True, metrics="error"
+        )
+        assert "eval_metric" in params
+        assert "auc" not in cv.columns[0]
+        assert "error" in cv.columns[0]

-        cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
-                    as_pandas=True, metrics=['error'])
-        assert 'eval_metric' in params
-        assert 'auc' not in cv.columns[0]
-        assert 'error' in cv.columns[0]
+        cv = xgb.cv(
+            params, dm, num_boost_round=10, nfold=10, as_pandas=True, metrics=["error"]
+        )
+        assert "eval_metric" in params
+        assert "auc" not in cv.columns[0]
+        assert "error" in cv.columns[0]

         params = list(params.items())
-        cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
-                    as_pandas=True, metrics=['error'])
+        cv = xgb.cv(
+            params, dm, num_boost_round=10, nfold=10, as_pandas=True, metrics=["error"]
+        )
         assert isinstance(params, list)
-        assert 'auc' not in cv.columns[0]
-        assert 'error' in cv.columns[0]
+        assert "auc" not in cv.columns[0]
+        assert "error" in cv.columns[0]

     @pytest.mark.parametrize("DMatrixT", [xgb.DMatrix, xgb.QuantileDMatrix])
     def test_nullable_type(self, DMatrixT) -> None:
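The tail of `test_cv_as_pandas` converts `params` with `list(params.items())` to confirm that `xgb.cv` also accepts a sequence of key/value pairs and leaves the caller's list untouched, while `metrics=` overrides any `eval_metric` carried in the pairs. A condensed equivalent, using the same `load_agaricus` helper as above (a sketch, not part of this diff):

    import xgboost as xgb
    from xgboost import testing as tm

    dm, _ = tm.load_agaricus(__file__)
    params = [("max_depth", 2), ("eta", 1), ("objective", "binary:logistic")]
    cv = xgb.cv(params, dm, num_boost_round=2, nfold=5, as_pandas=True, metrics=["error"])
    assert isinstance(params, list)  # caller's list is left untouched
    assert "error" in cv.columns[0]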
@@ -358,3 +506,60 @@ class TestPandas:
         if y is not None:
             np.testing.assert_allclose(m_orig.get_label(), m_etype.get_label())
             np.testing.assert_allclose(m_etype.get_label(), y.values)
+
+    @pytest.mark.skipif(tm.is_windows(), reason="Rabit does not run on windows")
+    def test_pandas_column_split(self):
+        tm.run_with_rabit(
+            world_size=3, test_fn=self.test_pandas, data_split_mode=DataSplitMode.COL
+        )
+
+    @pytest.mark.skipif(tm.is_windows(), reason="Rabit does not run on windows")
+    def test_pandas_categorical_column_split(self):
+        tm.run_with_rabit(
+            world_size=3,
+            test_fn=self.test_pandas_categorical,
+            data_split_mode=DataSplitMode.COL,
+        )
+
+    @pytest.mark.skipif(tm.is_windows(), reason="Rabit does not run on windows")
+    def test_pandas_sparse_column_split(self):
+        rows = 100
+        X = pd.DataFrame(
+            {
+                "A": pd.arrays.SparseArray(np.random.randint(0, 10, size=rows)),
+                "B": pd.arrays.SparseArray(np.random.randn(rows)),
+                "C": pd.arrays.SparseArray(
+                    np.random.permutation([True, False] * (rows // 2))
+                ),
+            }
+        )
+        y = pd.Series(pd.arrays.SparseArray(np.random.randn(rows)))
+
+        def verify_pandas_sparse():
+            dtrain = xgb.DMatrix(X, y, data_split_mode=DataSplitMode.COL)
+            booster = xgb.train({}, dtrain, num_boost_round=4)
+            predt_sparse = booster.predict(
+                xgb.DMatrix(X, data_split_mode=DataSplitMode.COL)
+            )
+            predt_dense = booster.predict(
+                xgb.DMatrix(X.sparse.to_dense(), data_split_mode=DataSplitMode.COL)
+            )
+            np.testing.assert_allclose(predt_sparse, predt_dense)
+
+        tm.run_with_rabit(world_size=3, test_fn=verify_pandas_sparse)
+
+    @pytest.mark.skipif(tm.is_windows(), reason="Rabit does not run on windows")
+    def test_pandas_label_column_split(self):
+        tm.run_with_rabit(
+            world_size=3,
+            test_fn=self.test_pandas_label,
+            data_split_mode=DataSplitMode.COL,
+        )
+
+    @pytest.mark.skipif(tm.is_windows(), reason="Rabit does not run on windows")
+    def test_pandas_weight_column_split(self):
+        tm.run_with_rabit(
+            world_size=3,
+            test_fn=self.test_pandas_weight,
+            data_split_mode=DataSplitMode.COL,
+        )
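`tm.run_with_rabit` is also new in this patch; the diff shows only the call sites, but the shape implied is: start a rabit tracker, spawn `world_size` worker processes, and run `test_fn(**kwargs)` inside a communicator context on each. A rough sketch along those lines, modeled on the tracker usage in XGBoost's own distributed tests rather than on code visible in this hunk (the real helper may differ):

    import multiprocessing

    import xgboost as xgb
    from xgboost.tracker import RabitTracker

    def run_with_rabit(world_size, test_fn, **kwargs):
        # Assumed structure: one tracker, world_size workers, each running the
        # test body inside a CommunicatorContext so collective calls (such as
        # get_world_size) see the full group. Uses the fork start method, which
        # is why these tests are skipped on Windows.
        tracker = RabitTracker(host_ip="127.0.0.1", n_workers=world_size)
        tracker.start(world_size)

        def worker(rabit_env):
            with xgb.collective.CommunicatorContext(**rabit_env):
                test_fn(**kwargs)

        procs = [
            multiprocessing.Process(target=worker, args=(tracker.worker_args(),))
            for _ in range(world_size)
        ]
        for p in procs:
            p.start()
        for p in procs:
            p.join()
            assert p.exitcode == 0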