Fix spelling in documents (#6948)
* Update roxygen2 doc. Co-authored-by: fis <jm.yuan@outlook.com>
This commit is contained in:
@@ -1,7 +1,7 @@
|
||||
/*!
|
||||
* Copyright 2020 by XGBoost Contributors
|
||||
*
|
||||
* \brief An implemenation of Ryu algorithm:
|
||||
* \brief An implementation of Ryu algorithm:
|
||||
*
|
||||
* https://dl.acm.org/citation.cfm?id=3192369
|
||||
*
|
||||
@@ -686,7 +686,7 @@ int32_t ToCharsFloatImpl(float f, char * const result) {
|
||||
|
||||
// This is an implementation for base 10 inspired by the one in libstdc++v3. The general
|
||||
// scheme is by decomposing the value into multiple combination of base (which is 10) by
|
||||
// mod, until the value is lesser than 10, then last char is just char '0' (ascii 48) plus
|
||||
// mod, until the value is less than 10, then last char is just char '0' (ASCII 48) plus
|
||||
// that value. Other popular implementations can be found in RapidJson and libc++ (in
|
||||
// llvm-project), which use the same general workflow with the same look-up table, but
|
||||
// probably with better performance as they are more complicated.
|
||||
|
||||
@@ -55,7 +55,7 @@ namespace xgboost {
|
||||
namespace common {
|
||||
/*!
|
||||
* \brief Split a string by delimiter
|
||||
* \param s String to be splitted.
|
||||
* \param s String to be split.
|
||||
* \param delim The delimiter.
|
||||
*/
|
||||
inline std::vector<std::string> Split(const std::string& s, char delim) {
|
||||
|
||||
@@ -253,7 +253,7 @@ __global__ void LaunchNKernel(int device_idx, size_t begin, size_t end,
|
||||
* function as argument. Hence functions like `LaunchN` cannot use this wrapper.
|
||||
*
|
||||
* - With c++ initialization list `{}` syntax, you are forced to comply with the CUDA type
|
||||
* spcification.
|
||||
* specification.
|
||||
*/
|
||||
class LaunchKernel {
|
||||
size_t shmem_size_;
|
||||
@@ -930,7 +930,7 @@ class SegmentSorter {
|
||||
// Items sorted within the group
|
||||
caching_device_vector<T> ditems_;
|
||||
|
||||
// Original position of the items before they are sorted descendingly within its groups
|
||||
// Original position of the items before they are sorted descending within their groups
|
||||
caching_device_vector<uint32_t> doriginal_pos_;
|
||||
|
||||
// Segments within the original list that delineates the different groups
|
||||
|
||||
@@ -81,7 +81,7 @@ class HistogramCuts {
|
||||
}
|
||||
|
||||
// Getters. Cuts should be of no use after building histogram indices, but currently
|
||||
// it's deeply linked with quantile_hist, gpu sketcher and gpu_hist. So we preserve
|
||||
// they are deeply linked with quantile_hist, gpu sketcher and gpu_hist, so we preserve
|
||||
// these for now.
|
||||
std::vector<uint32_t> const& Ptrs() const { return cut_ptrs_.ConstHostVector(); }
|
||||
std::vector<float> const& Values() const { return cut_values_.ConstHostVector(); }
|
||||
@@ -247,7 +247,7 @@ struct GHistIndexMatrix {
|
||||
// Create a global histogram matrix, given cut
|
||||
void Init(DMatrix* p_fmat, int max_num_bins);
|
||||
|
||||
// specific method for sparse data as no posibility to reduce allocated memory
|
||||
// specific method for sparse data as no possibility to reduce allocated memory
|
||||
template <typename BinIdxType, typename GetOffset>
|
||||
void SetIndexData(common::Span<BinIdxType> index_data_span,
|
||||
size_t batch_threads, const SparsePage &batch,
|
||||
|
||||
@@ -394,7 +394,7 @@ Json JsonReader::Parse() {
|
||||
return ParseArray();
|
||||
} else if ( c == '-' || std::isdigit(c) ||
|
||||
c == 'N' || c == 'I') {
|
||||
// For now we only accept `NaN`, not `nan` as the later violiates LR(1) with `null`.
|
||||
// For now we only accept `NaN`, not `nan` as the latter violates LR(1) with `null`.
|
||||
return ParseNumber();
|
||||
} else if ( c == '\"' ) {
|
||||
return ParseString();
|
||||
|
||||
@@ -77,7 +77,7 @@ XGBOOST_DEVICE inline void Softmax(Iterator start, Iterator end) {
|
||||
|
||||
/*!
|
||||
* \brief Find the maximum iterator within the iterators
|
||||
* \param begin The begining iterator.
|
||||
* \param begin The beginning iterator.
|
||||
* \param end The end iterator.
|
||||
* \return the iterator point to the maximum value.
|
||||
* \tparam Iterator The type of the iterator.
|
||||
@@ -107,7 +107,7 @@ inline float LogSum(float x, float y) {
|
||||
|
||||
/*!
|
||||
* \brief perform numerically safe logsum
|
||||
* \param begin The begining iterator.
|
||||
* \param begin The beginning iterator.
|
||||
* \param end The end iterator.
|
||||
* \return the iterator point to the maximum value.
|
||||
* \tparam Iterator The type of the iterator.
|
||||
@@ -135,7 +135,7 @@ inline static bool CmpSecond(const std::pair<float, unsigned> &a,
|
||||
return a.second > b.second;
|
||||
}
|
||||
|
||||
// Redefined here to workaround a VC bug that doesn't support overloadng for integer
|
||||
// Redefined here to work around a VC bug that doesn't support overloading for integer
|
||||
// types.
|
||||
template <typename T>
|
||||
XGBOOST_DEVICE typename std::enable_if<
|
||||
|
||||
@@ -55,7 +55,7 @@ HostSketchContainer::CalcColumnSize(SparsePage const &batch,
|
||||
std::vector<bst_feature_t> HostSketchContainer::LoadBalance(
|
||||
SparsePage const &batch, bst_feature_t n_columns, size_t const nthreads) {
|
||||
/* Some sparse datasets have their mass concentrating on small number of features. To
|
||||
* avoid wating for a few threads running forever, we here distirbute different number
|
||||
* avoid waiting for a few threads running forever, we here distribute different number
|
||||
* of columns to different threads according to number of entries.
|
||||
*/
|
||||
auto page = batch.GetView();
|
||||
|
||||
@@ -184,9 +184,9 @@ common::Span<thrust::tuple<uint64_t, uint64_t>> MergePath(
|
||||
});
|
||||
|
||||
// Compute the index for both x and y (which of the element in a and b are used in each
|
||||
// comparison) by scaning the binary merge path. Take output [(x_0, y_0), (x_0, y_1),
|
||||
// comparison) by scanning the binary merge path. Take output [(x_0, y_0), (x_0, y_1),
|
||||
// ...] as an example, the comparison between (x_0, y_0) adds 1 step in the merge path.
|
||||
// Asumming y_0 is less than x_0 so this step is torward the end of y. After the
|
||||
// Assuming y_0 is less than x_0 so this step is toward the end of y. After the
|
||||
// comparison, index of y is incremented by 1 from y_0 to y_1, and at the same time, y_0
|
||||
// is landed into output as the first element in merge result. The scan result is the
|
||||
// subscript of x and y.
|
||||
@@ -367,7 +367,7 @@ void SketchContainer::Push(Span<Entry const> entries, Span<size_t> columns_ptr,
|
||||
size_t SketchContainer::ScanInput(Span<SketchEntry> entries, Span<OffsetT> d_columns_ptr_in) {
|
||||
/* There are 2 types of duplication. First is duplicated feature values, which comes
|
||||
* from user input data. Second is duplicated sketching entries, which is generated by
|
||||
* prunning or merging. We preserve the first type and remove the second type.
|
||||
* pruning or merging. We preserve the first type and remove the second type.
|
||||
*/
|
||||
timer_.Start(__func__);
|
||||
dh::safe_cuda(cudaSetDevice(device_));
|
||||
|
||||
@@ -44,7 +44,7 @@ constexpr double kMaxGradient = 15.0;
|
||||
constexpr double kMinHessian = 1e-16; // Ensure that no data point gets zero hessian
|
||||
constexpr double kMaxHessian = 15.0;
|
||||
|
||||
constexpr double kEps = 1e-12; // A denomitor in a fraction should not be too small
|
||||
constexpr double kEps = 1e-12; // A denominator in a fraction should not be too small
|
||||
|
||||
// Clip (limit) x to fit range [x_min, x_max].
|
||||
// If x < x_min, return x_min; if x > x_max, return x_max; if x_min <= x <= x_max, return x.
|
||||
|
||||
@@ -52,7 +52,7 @@ __global__ void LaunchCUDAKernel(Functor _func, Range _range,
|
||||
*
|
||||
* If you use it in a function that can be compiled by both nvcc and host
|
||||
* compiler, the behaviour is un-defined! Because your function is NOT
|
||||
* duplicated by `CompiledWithCuda`. At link time, cuda compiler resolution
|
||||
* duplicated by `CompiledWithCuda`. At link time, CUDA compiler resolution
|
||||
* will merge functions with same signature.
|
||||
*/
|
||||
template <bool CompiledWithCuda = WITH_CUDA()>
|
||||
@@ -155,7 +155,7 @@ class Transform {
|
||||
_func, shard_range, UnpackHDVOnDevice(_vectors)...);
|
||||
}
|
||||
#else
|
||||
/*! \brief Dummy funtion defined when compiling for CPU. */
|
||||
/*! \brief Dummy function defined when compiling for CPU. */
|
||||
template <typename std::enable_if<!CompiledWithCuda>::type* = nullptr,
|
||||
typename... HDV>
|
||||
void LaunchCUDA(Functor _func, HDV*...) const {
|
||||
|
||||
@@ -36,7 +36,7 @@ Version::TripletT Version::Load(Json const& in) {
|
||||
|
||||
Version::TripletT Version::Load(dmlc::Stream* fi) {
|
||||
XGBoostVersionT major{0}, minor{0}, patch{0};
|
||||
// This is only used in DMatrix serialization, so doesn't break model compability.
|
||||
// This is only used in DMatrix serialization, so doesn't break model compatibility.
|
||||
std::string msg { "Incorrect version format found in binary file. "
|
||||
"Binary file from XGBoost < 1.0.0 is no longer supported. "
|
||||
"Please generate it again." };
|
||||
|
||||
@@ -17,7 +17,7 @@ struct Version {
|
||||
using TripletT = std::tuple<XGBoostVersionT, XGBoostVersionT, XGBoostVersionT>;
|
||||
static const TripletT kInvalid;
|
||||
|
||||
// Save/Load version info to Json document
|
||||
// Save/Load version info to JSON document
|
||||
static TripletT Load(Json const& in);
|
||||
static void Save(Json* out);
|
||||
|
||||
|
||||
Reference in New Issue
Block a user