Use matrix for gradient. (#9508)
- Use the `linalg::Matrix` type for storing gradients.
- New API for the custom objective.
- Custom objective for multi-class/multi-target is now required to return the correct shape.
- Custom objective in Python can accept arrays with any strides (row-major or column-major).
This commit is contained in:
@@ -384,7 +384,7 @@ inline bool ArrayInterfaceHandler::IsCudaPtr(void const *) { return false; }
|
||||
* numpy has the proper support even though it's in the __cuda_array_interface__
|
||||
* protocol defined by numba.
|
||||
*/
|
||||
template <int32_t D, bool allow_mask = (D == 1)>
|
||||
template <std::int32_t D, bool allow_mask = (D == 1)>
|
||||
class ArrayInterface {
|
||||
static_assert(D > 0, "Invalid dimension for array interface.");
|
||||
|
||||
@@ -588,7 +588,7 @@ class ArrayInterface {
|
||||
};
|
||||
|
||||
template <std::int32_t D, typename Fn>
|
||||
void DispatchDType(ArrayInterface<D> const array, std::int32_t device, Fn fn) {
|
||||
void DispatchDType(ArrayInterface<D> const array, DeviceOrd device, Fn fn) {
|
||||
// Only used for cuDF at the moment.
|
||||
CHECK_EQ(array.valid.Capacity(), 0);
|
||||
auto dispatch = [&](auto t) {
|
||||
|
||||
@@ -448,7 +448,7 @@ void CopyTensorInfoImpl(Context const& ctx, Json arr_interface, linalg::Tensor<T
|
||||
auto t_out = p_out->View(Context::kCpuId);
|
||||
CHECK(t_out.CContiguous());
|
||||
auto const shape = t_out.Shape();
|
||||
DispatchDType(array, Context::kCpuId, [&](auto&& in) {
|
||||
DispatchDType(array, DeviceOrd::CPU(), [&](auto&& in) {
|
||||
linalg::ElementWiseTransformHost(t_out, ctx.Threads(), [&](auto i, auto) {
|
||||
return std::apply(in, linalg::UnravelIndex<D>(i, shape));
|
||||
});
|
||||
|
||||
Reference in New Issue
Block a user