/**
 * Copyright 2014-2023, XGBoost Contributors
 * \file sparse_page_source.h
 */
#ifndef XGBOOST_DATA_SPARSE_PAGE_SOURCE_H_
#define XGBOOST_DATA_SPARSE_PAGE_SOURCE_H_

#include <algorithm>  // for min
#include <atomic>     // for atomic
#include <future>     // for async
#include <map>
#include <memory>
#include <mutex>  // for mutex
#include <numeric>
#include <string>
#include <utility>  // for pair, move
#include <vector>

#include "../common/common.h"
#include "../common/io.h"     // for PrivateMmapConstStream
#include "../common/timer.h"  // for Monitor, Timer
#include "adapter.h"
#include "proxy_dmatrix.h"       // for DMatrixProxy
#include "sparse_page_writer.h"  // for SparsePageFormat
#include "xgboost/base.h"
#include "xgboost/data.h"

namespace xgboost::data {
inline void TryDeleteCacheFile(const std::string& file) {
  if (std::remove(file.c_str()) != 0) {
    // Don't throw, this is called in a destructor.
    LOG(WARNING) << "Couldn't remove external memory cache file " << file
                 << "; you may want to remove it manually";
  }
}

/**
 * @brief Information about the cache including path and page offsets.
 */
struct Cache {
  // whether the write to the cache is complete
  bool written;
  std::string name;
  std::string format;
  // offset into binary cache file.
  std::vector<std::uint64_t> offset;

  Cache(bool w, std::string n, std::string fmt)
      : written{w}, name{std::move(n)}, format{std::move(fmt)} {
    offset.push_back(0);
  }

  static std::string ShardName(std::string name, std::string format) {
    CHECK_EQ(format.front(), '.');
    return name + format;
  }

  [[nodiscard]] std::string ShardName() const { return ShardName(this->name, this->format); }
  /**
   * @brief Record a page with size of n_bytes.
   */
  void Push(std::size_t n_bytes) { offset.push_back(n_bytes); }
  /**
   * @brief Returns the view start and length for the i^th page.
   */
  [[nodiscard]] auto View(std::size_t i) const {
    std::uint64_t off = offset.at(i);
    std::uint64_t len = offset.at(i + 1) - offset[i];
    return std::pair{off, len};
  }
  /**
   * @brief Call this once the write for the cache is complete.
   */
  void Commit() {
    if (!written) {
      std::partial_sum(offset.begin(), offset.end(), offset.begin());
      written = true;
    }
  }
};
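
// Illustrative sketch (not part of the header, values are made up): how the offset
// bookkeeping in `Cache` fits together. `Push` records raw page sizes, `Commit` turns
// them into a prefix sum, and `View` then yields (offset, length) pairs for the reader:
//
//   Cache cache{false, "/tmp/dtrain", ".row.page"};  // hypothetical name and format
//   cache.Push(100);                  // page 0 occupies 100 bytes
//   cache.Push(250);                  // page 1 occupies 250 bytes
//   cache.Commit();                   // offset becomes {0, 100, 350}
//   auto [off, len] = cache.View(1);  // off == 100, len == 250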

// Prevents multi-threaded calls to `GetBatches`.
class TryLockGuard {
  std::mutex& lock_;

 public:
  explicit TryLockGuard(std::mutex& lock) : lock_{lock} {  // NOLINT
    CHECK(lock_.try_lock()) << "Multiple threads attempting to use Sparse DMatrix.";
  }
  ~TryLockGuard() { lock_.unlock(); }
};

// Similar to `dmlc::OMPException`, but doesn't need the threads to be joined before rethrowing.
class ExceHandler {
  std::mutex mutex_;
  std::atomic<bool> flag_{false};
  std::exception_ptr curr_exce_{nullptr};

 public:
  template <typename Fn>
  decltype(auto) Run(Fn&& fn) noexcept(true) {
    try {
      return fn();
    } catch (dmlc::Error const& e) {
      std::lock_guard<std::mutex> guard{mutex_};
      if (!curr_exce_) {
        curr_exce_ = std::current_exception();
      }
      flag_ = true;
    } catch (std::exception const& e) {
      std::lock_guard<std::mutex> guard{mutex_};
      if (!curr_exce_) {
        curr_exce_ = std::current_exception();
      }
      flag_ = true;
    } catch (...) {
      std::lock_guard<std::mutex> guard{mutex_};
      if (!curr_exce_) {
        curr_exce_ = std::current_exception();
      }
      flag_ = true;
    }
    return std::invoke_result_t<Fn>();
  }

  void Rethrow() noexcept(false) {
    if (flag_) {
      CHECK(curr_exce_);
      std::rethrow_exception(curr_exce_);
    }
  }
};

/**
 * @brief Base class for all page sources. Handles fetching, writing, and iteration.
 */
template <typename S>
class SparsePageSourceImpl : public BatchIteratorImpl<S> {
 protected:
  // Prevents calling this iterator from multiple places (or threads).
  std::mutex single_threaded_;
  // The current page.
  std::shared_ptr<S> page_;

  bool at_end_{false};
  float missing_;
  std::int32_t nthreads_;
  bst_feature_t n_features_;
  // Index to the current page.
  std::uint32_t count_{0};
  // Total number of batches.
  std::uint32_t n_batches_{0};

  std::shared_ptr<Cache> cache_info_;

  using Ring = std::vector<std::future<std::shared_ptr<S>>>;
  // A ring storing futures to data. Since the DMatrix iterator is forward-only, we can
  // pre-fetch data in a ring.
  std::unique_ptr<Ring> ring_{new Ring};
  // Catch exceptions in pre-fetch threads to prevent segfaults. This doesn't always work
  // though, an OOM error can be delayed due to lazy commit. On the bright side, if mmap
  // is used then OOM errors should be rare.
  ExceHandler exce_;
  common::Monitor monitor_;

  bool ReadCache() {
    CHECK(!at_end_);
    if (!cache_info_->written) {
      return false;
    }
    if (ring_->empty()) {
      ring_->resize(n_batches_);
    }
    // A heuristic for the number of pre-fetched batches. We can make it part of
    // BatchParam to let the user adjust the number of pre-fetched batches when needed.
    std::int32_t n_prefetches = std::max(nthreads_, 3);
    std::int32_t n_prefetch_batches =
        std::min(static_cast<std::uint32_t>(n_prefetches), n_batches_);
    CHECK_GT(n_prefetch_batches, 0) << "total batches:" << n_batches_;
    std::size_t fetch_it = count_;

    exce_.Rethrow();

    for (std::int32_t i = 0; i < n_prefetch_batches; ++i, ++fetch_it) {
      fetch_it %= n_batches_;  // ring
      if (ring_->at(fetch_it).valid()) {
        continue;
      }
      auto const* self = this;  // make sure it's const
      CHECK_LT(fetch_it, cache_info_->offset.size());
      ring_->at(fetch_it) = std::async(std::launch::async, [fetch_it, self, this]() {
        auto page = std::make_shared<S>();
        this->exce_.Run([&] {
          std::unique_ptr<SparsePageFormat<S>> fmt{CreatePageFormat<S>("raw")};
          auto name = self->cache_info_->ShardName();
          auto [offset, length] = self->cache_info_->View(fetch_it);
          auto fi = std::make_unique<common::PrivateMmapConstStream>(name, offset, length);
          CHECK(fmt->Read(page.get(), fi.get()));
        });
        return page;
      });
    }

    CHECK_EQ(std::count_if(ring_->cbegin(), ring_->cend(), [](auto const& f) { return f.valid(); }),
             n_prefetch_batches)
        << "Sparse DMatrix assumes forward iteration.";

    monitor_.Start("Wait");
    page_ = (*ring_)[count_].get();
    CHECK(!(*ring_)[count_].valid());
    monitor_.Stop("Wait");

    exce_.Rethrow();

    return true;
  }

  void WriteCache() {
    CHECK(!cache_info_->written);
    common::Timer timer;
    timer.Start();
    std::unique_ptr<SparsePageFormat<S>> fmt{CreatePageFormat<S>("raw")};

    auto name = cache_info_->ShardName();
    std::unique_ptr<common::AlignedFileWriteStream> fo;
    if (this->Iter() == 0) {
      fo = std::make_unique<common::AlignedFileWriteStream>(StringView{name}, "wb");
    } else {
      fo = std::make_unique<common::AlignedFileWriteStream>(StringView{name}, "ab");
    }
    auto bytes = fmt->Write(*page_, fo.get());

    timer.Stop();
    // Not entirely accurate, the kernel doesn't have to flush the data.
    LOG(INFO) << static_cast<double>(bytes) / 1024.0 / 1024.0 << " MB written in "
              << timer.ElapsedSeconds() << " seconds.";
    cache_info_->Push(bytes);
  }

  virtual void Fetch() = 0;

 public:
  SparsePageSourceImpl(float missing, int nthreads, bst_feature_t n_features, uint32_t n_batches,
                       std::shared_ptr<Cache> cache)
      : missing_{missing},
        nthreads_{nthreads},
        n_features_{n_features},
        n_batches_{n_batches},
        cache_info_{std::move(cache)} {
    monitor_.Init(typeid(S).name());  // not pretty, but works for basic profiling
  }

  SparsePageSourceImpl(SparsePageSourceImpl const& that) = delete;

  ~SparsePageSourceImpl() override {
    // Don't orphan the threads.
    for (auto& fu : *ring_) {
      if (fu.valid()) {
        fu.get();
      }
    }
  }

  [[nodiscard]] uint32_t Iter() const { return count_; }

  const S& operator*() const override {
    CHECK(page_);
    return *page_;
  }

  [[nodiscard]] std::shared_ptr<S const> Page() const override { return page_; }

  [[nodiscard]] bool AtEnd() const override { return at_end_; }

  virtual void Reset() {
    TryLockGuard guard{single_threaded_};
    at_end_ = false;
    count_ = 0;
    // Pre-fetch for the next round of iterations.
    this->Fetch();
  }
};
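
// Illustrative sketch (a simplification, not part of the header): the prefetch ring in
// `ReadCache` launches up to `n_prefetch_batches` asynchronous reads ahead of the
// consumer and blocks only on the future for the current page. Roughly, with `LoadPage`
// standing in for the format-specific read of one cache shard view:
//
//   std::vector<std::future<std::shared_ptr<S>>> ring(n_batches);
//   for (std::size_t i = count, n = 0; n < n_prefetch_batches; ++n, ++i) {
//     auto it = i % n_batches;  // wrap around: forward-only ring
//     if (!ring[it].valid()) {
//       ring[it] = std::async(std::launch::async, [=] { return LoadPage(it); });
//     }
//   }
//   auto page = ring[count].get();  // wait only for the page needed right now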

#if defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
// Push data from CUDA.
void DevicePush(DMatrixProxy* proxy, float missing, SparsePage* page);
#else
inline void DevicePush(DMatrixProxy*, float, SparsePage*) { common::AssertGPUSupport(); }
#endif

class SparsePageSource : public SparsePageSourceImpl<SparsePage> {
  // This is the source from the user.
  DataIterProxy<DataIterResetCallback, XGDMatrixCallbackNext> iter_;
  DMatrixProxy* proxy_;
  std::size_t base_row_id_{0};

  void Fetch() final {
    page_ = std::make_shared<SparsePage>();
    if (!this->ReadCache()) {
      bool type_error{false};
      CHECK(proxy_);
      HostAdapterDispatch(
          proxy_,
          [&](auto const& adapter_batch) {
            page_->Push(adapter_batch, this->missing_, this->nthreads_);
          },
          &type_error);
      if (type_error) {
        DevicePush(proxy_, missing_, page_.get());
      }

      page_->SetBaseRowId(base_row_id_);
      base_row_id_ += page_->Size();
      n_batches_++;
      this->WriteCache();
    }
  }

 public:
  SparsePageSource(DataIterProxy<DataIterResetCallback, XGDMatrixCallbackNext> iter,
                   DMatrixProxy* proxy, float missing, int nthreads, bst_feature_t n_features,
                   uint32_t n_batches, std::shared_ptr<Cache> cache)
      : SparsePageSourceImpl(missing, nthreads, n_features, n_batches, cache),
        iter_{iter},
        proxy_{proxy} {
    if (!cache_info_->written) {
      iter_.Reset();
      CHECK(iter_.Next()) << "Must have at least 1 batch.";
    }
    this->Fetch();
  }

  SparsePageSource& operator++() final {
    TryLockGuard guard{single_threaded_};
    count_++;

    if (cache_info_->written) {
      at_end_ = (count_ == n_batches_);
    } else {
      at_end_ = !iter_.Next();
    }

    if (at_end_) {
      CHECK_EQ(cache_info_->offset.size(), n_batches_ + 1);
      cache_info_->Commit();
      if (n_batches_ != 0) {
        CHECK_EQ(count_, n_batches_);
      }
      CHECK_GE(count_, 1);
      proxy_ = nullptr;
    } else {
      this->Fetch();
    }
    return *this;
  }

  void Reset() override {
    if (proxy_) {
      TryLockGuard guard{single_threaded_};
      iter_.Reset();
    }
    SparsePageSourceImpl::Reset();

    TryLockGuard guard{single_threaded_};
    base_row_id_ = 0;
  }
};
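
// Illustrative sketch (assumptions about the caller, not part of the header): a page
// source is consumed through the forward-only iterator interface. The first pass pulls
// batches from the user iterator and writes the cache; after `Reset`, pages are served
// from the cache via the prefetch ring:
//
//   auto source = std::make_shared<SparsePageSource>(iter, proxy, missing, nthreads,
//                                                    n_features, n_batches, cache);
//   while (!source->AtEnd()) {
//     SparsePage const& page = **source;  // current page
//     // ... consume the page ...
//     ++(*source);                        // advance, triggering the next Fetch()
//   }
//   source->Reset();                      // rewind for the next epoch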

// A mixin for advancing the iterator.
template <typename S>
class PageSourceIncMixIn : public SparsePageSourceImpl<S> {
 protected:
  std::shared_ptr<SparsePageSource> source_;
  using Super = SparsePageSourceImpl<S>;
  // Synchronize the row page. `hist` and `gpu_hist` don't need the original sparse page,
  // so we avoid fetching it.
  bool sync_{true};

 public:
  PageSourceIncMixIn(float missing, int nthreads, bst_feature_t n_features, uint32_t n_batches,
                     std::shared_ptr<Cache> cache, bool sync)
      : Super::SparsePageSourceImpl{missing, nthreads, n_features, n_batches, cache},
        sync_{sync} {}

  PageSourceIncMixIn& operator++() final {
    TryLockGuard guard{this->single_threaded_};
    if (sync_) {
      ++(*source_);
    }

    ++this->count_;
    this->at_end_ = this->count_ == this->n_batches_;

    if (this->at_end_) {
      this->cache_info_->Commit();
      if (this->n_batches_ != 0) {
        CHECK_EQ(this->count_, this->n_batches_);
      }
      CHECK_GE(this->count_, 1);
    } else {
      this->Fetch();
    }

    if (sync_) {
      CHECK_EQ(source_->Iter(), this->count_);
    }
    return *this;
  }
};

class CSCPageSource : public PageSourceIncMixIn<CSCPage> {
 protected:
  void Fetch() final {
    if (!this->ReadCache()) {
      auto const& csr = source_->Page();
      this->page_.reset(new CSCPage{});
      // We might be able to optimize this by merging transpose and pushcsc.
      this->page_->PushCSC(csr->GetTranspose(n_features_, nthreads_));
      page_->SetBaseRowId(csr->base_rowid);
      this->WriteCache();
    }
  }

 public:
  CSCPageSource(float missing, int nthreads, bst_feature_t n_features, uint32_t n_batches,
                std::shared_ptr<Cache> cache, std::shared_ptr<SparsePageSource> source)
      : PageSourceIncMixIn(missing, nthreads, n_features, n_batches, cache, true) {
    this->source_ = source;
    this->Fetch();
  }
};

class SortedCSCPageSource : public PageSourceIncMixIn<SortedCSCPage> {
 protected:
  void Fetch() final {
    if (!this->ReadCache()) {
      auto const& csr = this->source_->Page();
      this->page_.reset(new SortedCSCPage{});
      // We might be able to optimize this by merging transpose and pushcsc.
      this->page_->PushCSC(csr->GetTranspose(n_features_, nthreads_));
      CHECK_EQ(this->page_->Size(), n_features_);
      CHECK_EQ(this->page_->data.Size(), csr->data.Size());
      this->page_->SortRows(this->nthreads_);
      page_->SetBaseRowId(csr->base_rowid);
      this->WriteCache();
    }
  }

 public:
  SortedCSCPageSource(float missing, int nthreads, bst_feature_t n_features, uint32_t n_batches,
                      std::shared_ptr<Cache> cache, std::shared_ptr<SparsePageSource> source)
      : PageSourceIncMixIn(missing, nthreads, n_features, n_batches, cache, true) {
    this->source_ = source;
    this->Fetch();
  }
};
}  // namespace xgboost::data
#endif  // XGBOOST_DATA_SPARSE_PAGE_SOURCE_H_