[TREE] Move the files to target refactor location

267  src/common/base64.h  Normal file
@@ -0,0 +1,267 @@
/*!
 * Copyright 2014 by Contributors
 * \file base64.h
 * \brief data stream support to input and output from/to base64 stream
 * base64 is easier to store and pass as text format in mapreduce
 * \author Tianqi Chen
 */
#ifndef XGBOOST_UTILS_BASE64_INL_H_
#define XGBOOST_UTILS_BASE64_INL_H_

#include <cctype>
#include <cstdio>
#include <string>
#include "./io.h"

namespace xgboost {
|
||||
namespace utils {
|
||||
/*! \brief buffer reader of the stream that allows you to get characters one at a time */
|
||||
class StreamBufferReader {
|
||||
public:
|
||||
explicit StreamBufferReader(size_t buffer_size)
|
||||
:stream_(NULL),
|
||||
read_len_(1), read_ptr_(1) {
|
||||
buffer_.resize(buffer_size);
|
||||
}
|
||||
/*!
|
||||
* \brief set input stream
|
||||
*/
|
||||
inline void set_stream(IStream *stream) {
|
||||
stream_ = stream;
|
||||
read_len_ = read_ptr_ = 1;
|
||||
}
|
||||
/*!
|
||||
* \brief allows quick read using get char
|
||||
*/
|
||||
inline char GetChar(void) {
|
||||
while (true) {
|
||||
if (read_ptr_ < read_len_) {
|
||||
return buffer_[read_ptr_++];
|
||||
} else {
|
||||
read_len_ = stream_->Read(&buffer_[0], buffer_.length());
|
||||
if (read_len_ == 0) return EOF;
|
||||
read_ptr_ = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
/*! \brief whether we are reaching the end of file */
|
||||
inline bool AtEnd(void) const {
|
||||
return read_len_ == 0;
|
||||
}
|
||||
|
||||
private:
|
||||
/*! \brief the underlying stream */
|
||||
IStream *stream_;
|
||||
/*! \brief buffer to hold data */
|
||||
std::string buffer_;
|
||||
/*! \brief length of valid data in buffer */
|
||||
size_t read_len_;
|
||||
/*! \brief pointer in the buffer */
|
||||
size_t read_ptr_;
|
||||
};
|
||||
|
||||
/*! \brief namespace of base64 decoding and encoding table */
|
||||
namespace base64 {
|
||||
const char DecodeTable[] = {
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
62, // '+'
|
||||
0, 0, 0,
|
||||
63, // '/'
|
||||
52, 53, 54, 55, 56, 57, 58, 59, 60, 61, // '0'-'9'
|
||||
0, 0, 0, 0, 0, 0, 0,
|
||||
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
|
||||
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, // 'A'-'Z'
|
||||
0, 0, 0, 0, 0, 0,
|
||||
26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
|
||||
39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, // 'a'-'z'
|
||||
};
|
||||
static const char EncodeTable[] =
|
||||
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
|
||||
} // namespace base64
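(Editor's aside, not part of this commit.) DecodeTable above is the inverse of EncodeTable over the 64-character base64 alphabet. The following standalone sketch, with an illustrative kChars constant, rebuilds such a decode map and round-trips one 3-byte group through the same bit layout the streams below use:

// Standalone editorial sketch: relationship between the encode alphabet and a
// decode table, and the 3-byte <-> 4-symbol grouping of base64.
#include <cassert>
#include <string>

int main() {
  const std::string kChars =
      "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
  int decode[256] = {0};
  for (int i = 0; i < 64; ++i) decode[static_cast<unsigned char>(kChars[i])] = i;

  // encode the 3-byte group "Man": 24 bits split into four 6-bit symbols
  const unsigned char src[3] = {'M', 'a', 'n'};
  int n = (src[0] << 16) | (src[1] << 8) | src[2];
  std::string enc;
  enc += kChars[(n >> 18) & 0x3F];
  enc += kChars[(n >> 12) & 0x3F];
  enc += kChars[(n >> 6) & 0x3F];
  enc += kChars[n & 0x3F];
  assert(enc == "TWFu");  // the well-known base64 encoding of "Man"

  // decode back with the table, mirroring the shifts in Base64InStream::Read
  int m = (decode[static_cast<unsigned char>(enc[0])] << 18) |
          (decode[static_cast<unsigned char>(enc[1])] << 12) |
          (decode[static_cast<unsigned char>(enc[2])] << 6) |
          decode[static_cast<unsigned char>(enc[3])];
  assert(((m >> 16) & 0xFF) == 'M' && ((m >> 8) & 0xFF) == 'a' && (m & 0xFF) == 'n');
  return 0;
}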
|
||||
/*! \brief the stream that reads from base64, wrapping an underlying input stream */
|
||||
class Base64InStream: public IStream {
|
||||
public:
|
||||
explicit Base64InStream(IStream *fs) : reader_(256) {
|
||||
reader_.set_stream(fs);
|
||||
num_prev = 0; tmp_ch = 0;
|
||||
}
|
||||
/*!
|
||||
* \brief initialize the stream position to beginning of next base64 stream
|
||||
* call this function before actually start read
|
||||
*/
|
||||
inline void InitPosition(void) {
|
||||
// get a character
|
||||
do {
|
||||
tmp_ch = reader_.GetChar();
|
||||
} while (isspace(tmp_ch));
|
||||
}
|
||||
/*! \brief whether current position is end of a base64 stream */
|
||||
inline bool IsEOF(void) const {
|
||||
return num_prev == 0 && (tmp_ch == EOF || isspace(tmp_ch));
|
||||
}
|
||||
virtual size_t Read(void *ptr, size_t size) {
|
||||
using base64::DecodeTable;
|
||||
if (size == 0) return 0;
|
||||
// use tlen to record left size
|
||||
size_t tlen = size;
|
||||
unsigned char *cptr = static_cast<unsigned char*>(ptr);
|
||||
// if anything left, load from previous buffered result
|
||||
if (num_prev != 0) {
|
||||
if (num_prev == 2) {
|
||||
if (tlen >= 2) {
|
||||
*cptr++ = buf_prev[0];
|
||||
*cptr++ = buf_prev[1];
|
||||
tlen -= 2;
|
||||
num_prev = 0;
|
||||
} else {
|
||||
// assert tlen == 1
|
||||
*cptr++ = buf_prev[0]; --tlen;
|
||||
buf_prev[0] = buf_prev[1];
|
||||
num_prev = 1;
|
||||
}
|
||||
} else {
|
||||
// assert num_prev == 1
|
||||
*cptr++ = buf_prev[0]; --tlen; num_prev = 0;
|
||||
}
|
||||
}
|
||||
if (tlen == 0) return size;
|
||||
int nvalue;
|
||||
// note: everything goes with 4 bytes in Base64
|
||||
// so we process 4 bytes a unit
|
||||
while (tlen && tmp_ch != EOF && !isspace(tmp_ch)) {
|
||||
// first byte
|
||||
nvalue = DecodeTable[tmp_ch] << 18;
|
||||
{
|
||||
// second byte
|
||||
utils::Check((tmp_ch = reader_.GetChar(), tmp_ch != EOF && !isspace(tmp_ch)),
|
||||
"invalid base64 format");
|
||||
nvalue |= DecodeTable[tmp_ch] << 12;
|
||||
*cptr++ = (nvalue >> 16) & 0xFF; --tlen;
|
||||
}
|
||||
{
|
||||
// third byte
|
||||
utils::Check((tmp_ch = reader_.GetChar(), tmp_ch != EOF && !isspace(tmp_ch)),
|
||||
"invalid base64 format");
|
||||
// handle termination
|
||||
if (tmp_ch == '=') {
|
||||
utils::Check((tmp_ch = reader_.GetChar(), tmp_ch == '='), "invalid base64 format");
|
||||
utils::Check((tmp_ch = reader_.GetChar(), tmp_ch == EOF || isspace(tmp_ch)),
|
||||
"invalid base64 format");
|
||||
break;
|
||||
}
|
||||
nvalue |= DecodeTable[tmp_ch] << 6;
|
||||
if (tlen) {
|
||||
*cptr++ = (nvalue >> 8) & 0xFF; --tlen;
|
||||
} else {
|
||||
buf_prev[num_prev++] = (nvalue >> 8) & 0xFF;
|
||||
}
|
||||
}
|
||||
{
|
||||
// fourth byte
|
||||
utils::Check((tmp_ch = reader_.GetChar(), tmp_ch != EOF && !isspace(tmp_ch)),
|
||||
"invalid base64 format");
|
||||
if (tmp_ch == '=') {
|
||||
utils::Check((tmp_ch = reader_.GetChar(), tmp_ch == EOF || isspace(tmp_ch)),
|
||||
"invalid base64 format");
|
||||
break;
|
||||
}
|
||||
nvalue |= DecodeTable[tmp_ch];
|
||||
if (tlen) {
|
||||
*cptr++ = nvalue & 0xFF; --tlen;
|
||||
} else {
|
||||
buf_prev[num_prev ++] = nvalue & 0xFF;
|
||||
}
|
||||
}
|
||||
// get next char
|
||||
tmp_ch = reader_.GetChar();
|
||||
}
|
||||
if (kStrictCheck) {
|
||||
utils::Check(tlen == 0, "Base64InStream: read incomplete");
|
||||
}
|
||||
return size - tlen;
|
||||
}
|
||||
virtual void Write(const void *ptr, size_t size) {
|
||||
utils::Error("Base64InStream do not support write");
|
||||
}
|
||||
|
||||
private:
|
||||
StreamBufferReader reader_;
|
||||
int tmp_ch;
|
||||
int num_prev;
|
||||
unsigned char buf_prev[2];
|
||||
// whether we need to do strict check
|
||||
static const bool kStrictCheck = false;
|
||||
};
|
||||
/*! \brief the stream that writes to base64, wrapping an underlying output stream */
|
||||
class Base64OutStream: public IStream {
|
||||
public:
|
||||
explicit Base64OutStream(IStream *fp) : fp(fp) {
|
||||
buf_top = 0;
|
||||
}
|
||||
virtual void Write(const void *ptr, size_t size) {
|
||||
using base64::EncodeTable;
|
||||
size_t tlen = size;
|
||||
const unsigned char *cptr = static_cast<const unsigned char*>(ptr);
|
||||
while (tlen) {
|
||||
while (buf_top < 3 && tlen != 0) {
|
||||
buf[++buf_top] = *cptr++; --tlen;
|
||||
}
|
||||
if (buf_top == 3) {
|
||||
// flush 4 bytes out
|
||||
PutChar(EncodeTable[buf[1] >> 2]);
|
||||
PutChar(EncodeTable[((buf[1] << 4) | (buf[2] >> 4)) & 0x3F]);
|
||||
PutChar(EncodeTable[((buf[2] << 2) | (buf[3] >> 6)) & 0x3F]);
|
||||
PutChar(EncodeTable[buf[3] & 0x3F]);
|
||||
buf_top = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
virtual size_t Read(void *ptr, size_t size) {
|
||||
utils::Error("Base64OutStream do not support read");
|
||||
return 0;
|
||||
}
|
||||
/*!
|
||||
* \brief finish writing of all current base64 stream, do some post processing
|
||||
* \param endch character to put to end of stream, if it is EOF, then nothing will be done
|
||||
*/
|
||||
inline void Finish(char endch = EOF) {
|
||||
using base64::EncodeTable;
|
||||
if (buf_top == 1) {
|
||||
PutChar(EncodeTable[buf[1] >> 2]);
|
||||
PutChar(EncodeTable[(buf[1] << 4) & 0x3F]);
|
||||
PutChar('=');
|
||||
PutChar('=');
|
||||
}
|
||||
if (buf_top == 2) {
|
||||
PutChar(EncodeTable[buf[1] >> 2]);
|
||||
PutChar(EncodeTable[((buf[1] << 4) | (buf[2] >> 4)) & 0x3F]);
|
||||
PutChar(EncodeTable[(buf[2] << 2) & 0x3F]);
|
||||
PutChar('=');
|
||||
}
|
||||
buf_top = 0;
|
||||
if (endch != EOF) PutChar(endch);
|
||||
this->Flush();
|
||||
}
|
||||
|
||||
private:
|
||||
IStream *fp;
|
||||
int buf_top;
|
||||
unsigned char buf[4];
|
||||
std::string out_buf;
|
||||
static const size_t kBufferSize = 256;
|
||||
|
||||
inline void PutChar(char ch) {
|
||||
out_buf += ch;
|
||||
if (out_buf.length() >= kBufferSize) Flush();
|
||||
}
|
||||
inline void Flush(void) {
|
||||
if (out_buf.length() != 0) {
|
||||
fp->Write(&out_buf[0], out_buf.length());
|
||||
out_buf.clear();
|
||||
}
|
||||
}
|
||||
};
|
||||
} // namespace utils
|
||||
} // namespace xgboost
|
||||
#endif // XGBOOST_UTILS_BASE64_INL_H_
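(Editor's aside, not part of this commit.) A hedged usage sketch of the two streams above; it assumes MemoryBufferStream from ./io.h is the in-memory IStream adapter used elsewhere in this tree, and the include path is illustrative:

// Editorial round-trip sketch for Base64OutStream / Base64InStream.
#include <string>
#include "xgboost/src/common/base64.h"  // illustrative include path

void Base64RoundTripExample() {
  std::string encoded;
  xgboost::utils::MemoryBufferStream fs(&encoded);  // assumed in-memory IStream

  // encode: write raw bytes, then flush the trailing partial group
  xgboost::utils::Base64OutStream bo(&fs);
  int value = 42;
  bo.Write(&value, sizeof(value));
  bo.Finish('\n');  // terminate the base64 record with a newline

  // decode: skip leading whitespace, then read the bytes back
  xgboost::utils::MemoryBufferStream fs2(&encoded);
  xgboost::utils::Base64InStream bi(&fs2);
  bi.InitPosition();
  int restored = 0;
  bi.Read(&restored, sizeof(restored));
  // restored should equal 42 if the streams behave as documented above
}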

194  src/common/config.h  Normal file
@@ -0,0 +1,194 @@
/*!
 * Copyright 2014 by Contributors
 * \file config.h
 * \brief helper class to load configuration settings from a file
 * \author Tianqi Chen
 */
#ifndef XGBOOST_UTILS_CONFIG_H_
#define XGBOOST_UTILS_CONFIG_H_

#include <cstdio>
#include <cstring>
#include <string>
#include <istream>
#include <fstream>
#include "./utils.h"

namespace xgboost {
|
||||
namespace utils {
|
||||
/*!
|
||||
* \brief base implementation of config reader
|
||||
*/
|
||||
class ConfigReaderBase {
|
||||
public:
|
||||
/*!
|
||||
* \brief get current name, called after Next returns true
|
||||
* \return current parameter name
|
||||
*/
|
||||
inline const char *name(void) const {
|
||||
return s_name.c_str();
|
||||
}
|
||||
/*!
|
||||
* \brief get current value, called after Next returns true
|
||||
* \return current parameter value
|
||||
*/
|
||||
inline const char *val(void) const {
|
||||
return s_val.c_str();
|
||||
}
|
||||
/*!
|
||||
* \brief move iterator to next position
|
||||
* \return true if there is value in next position
|
||||
*/
|
||||
inline bool Next(void) {
|
||||
while (!this->IsEnd()) {
|
||||
GetNextToken(&s_name);
|
||||
if (s_name == "=") return false;
|
||||
if (GetNextToken(&s_buf) || s_buf != "=") return false;
|
||||
if (GetNextToken(&s_val) || s_val == "=") return false;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
// called before usage
|
||||
inline void Init(void) {
|
||||
ch_buf = this->GetChar();
|
||||
}
|
||||
|
||||
protected:
|
||||
/*!
|
||||
* \brief to be implemented by subclass,
|
||||
* get the next character, return EOF if end of file
|
||||
*/
|
||||
virtual char GetChar(void) = 0;
|
||||
/*! \brief to be implemented by child, check if end of stream */
|
||||
virtual bool IsEnd(void) = 0;
|
||||
|
||||
private:
|
||||
char ch_buf;
|
||||
std::string s_name, s_val, s_buf;
|
||||
|
||||
inline void SkipLine(void) {
|
||||
do {
|
||||
ch_buf = this->GetChar();
|
||||
} while (ch_buf != EOF && ch_buf != '\n' && ch_buf != '\r');
|
||||
}
|
||||
|
||||
inline void ParseStr(std::string *tok) {
|
||||
while ((ch_buf = this->GetChar()) != EOF) {
|
||||
switch (ch_buf) {
|
||||
case '\\': *tok += this->GetChar(); break;
|
||||
case '\"': return;
|
||||
case '\r':
|
||||
case '\n': Error("ConfigReader: unterminated string");
|
||||
default: *tok += ch_buf;
|
||||
}
|
||||
}
|
||||
Error("ConfigReader: unterminated string");
|
||||
}
|
||||
inline void ParseStrML(std::string *tok) {
|
||||
while ((ch_buf = this->GetChar()) != EOF) {
|
||||
switch (ch_buf) {
|
||||
case '\\': *tok += this->GetChar(); break;
|
||||
case '\'': return;
|
||||
default: *tok += ch_buf;
|
||||
}
|
||||
}
|
||||
Error("unterminated string");
|
||||
}
|
||||
// return newline
|
||||
inline bool GetNextToken(std::string *tok) {
|
||||
tok->clear();
|
||||
bool new_line = false;
|
||||
while (ch_buf != EOF) {
|
||||
switch (ch_buf) {
|
||||
case '#' : SkipLine(); new_line = true; break;
|
||||
case '\"':
|
||||
if (tok->length() == 0) {
|
||||
ParseStr(tok); ch_buf = this->GetChar(); return new_line;
|
||||
} else {
|
||||
Error("ConfigReader: token followed directly by string");
|
||||
}
|
||||
case '\'':
|
||||
if (tok->length() == 0) {
|
||||
ParseStrML(tok); ch_buf = this->GetChar(); return new_line;
|
||||
} else {
|
||||
Error("ConfigReader: token followed directly by string");
|
||||
}
|
||||
case '=':
|
||||
if (tok->length() == 0) {
|
||||
ch_buf = this->GetChar();
|
||||
*tok = '=';
|
||||
}
|
||||
return new_line;
|
||||
case '\r':
|
||||
case '\n':
|
||||
if (tok->length() == 0) new_line = true;
|
||||
case '\t':
|
||||
case ' ' :
|
||||
ch_buf = this->GetChar();
|
||||
if (tok->length() != 0) return new_line;
|
||||
break;
|
||||
default:
|
||||
*tok += ch_buf;
|
||||
ch_buf = this->GetChar();
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (tok->length() == 0) {
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
};
|
||||
/*!
|
||||
* \brief an iterator based on an input stream, allowing use of all types of std::istream
|
||||
*/
|
||||
class ConfigStreamReader: public ConfigReaderBase {
|
||||
public:
|
||||
/*!
|
||||
* \brief constructor
|
||||
* \param istream input stream
|
||||
*/
|
||||
explicit ConfigStreamReader(std::istream &fin) : fin(fin) {}
|
||||
|
||||
protected:
|
||||
virtual char GetChar(void) {
|
||||
return fin.get();
|
||||
}
|
||||
/*! \brief to be implemented by child, check if end of stream */
|
||||
virtual bool IsEnd(void) {
|
||||
return fin.eof();
|
||||
}
|
||||
|
||||
private:
|
||||
std::istream &fin;
|
||||
};
|
||||
|
||||
/*!
|
||||
* \brief an iterator that iterates over a configuration file and gets the configuration entries
|
||||
*/
|
||||
class ConfigIterator: public ConfigStreamReader {
|
||||
public:
|
||||
/*!
|
||||
* \brief constructor
|
||||
* \param fname name of configure file
|
||||
*/
|
||||
explicit ConfigIterator(const char *fname) : ConfigStreamReader(fi) {
|
||||
fi.open(fname);
|
||||
if (fi.fail()) {
|
||||
utils::Error("cannot open file %s", fname);
|
||||
}
|
||||
ConfigReaderBase::Init();
|
||||
}
|
||||
/*! \brief destructor */
|
||||
~ConfigIterator(void) {
|
||||
fi.close();
|
||||
}
|
||||
|
||||
private:
|
||||
std::ifstream fi;
|
||||
};
|
||||
} // namespace utils
|
||||
} // namespace xgboost
|
||||
#endif // XGBOOST_UTILS_CONFIG_H_
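(Editor's aside, not part of this commit.) A hedged usage sketch of ConfigStreamReader parsing "name = value" pairs from an in-memory configuration; the include path is illustrative:

// Editorial usage sketch for the config reader defined above.
#include <sstream>
#include <cstdio>
#include "xgboost/src/common/config.h"  // illustrative include path

void ConfigExample() {
  std::istringstream cfg(
      "booster = gbtree\n"
      "max_depth = 6\n"
      "# lines starting with '#' are skipped\n"
      "eval_metric = \"logloss\"\n");
  xgboost::utils::ConfigStreamReader reader(cfg);
  reader.Init();  // prime the one-character lookahead buffer
  while (reader.Next()) {
    std::printf("%s -> %s\n", reader.name(), reader.val());
  }
}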

820  src/common/quantile.h  Normal file
@@ -0,0 +1,820 @@
/*!
 * Copyright 2014 by Contributors
 * \file quantile.h
 * \brief util to compute quantiles
 * \author Tianqi Chen
 */
#ifndef XGBOOST_UTILS_QUANTILE_H_
#define XGBOOST_UTILS_QUANTILE_H_

#include <cmath>
#include <vector>
#include <cstring>
#include <algorithm>
#include <iostream>
#include "./io.h"
#include "./utils.h"

namespace xgboost {
|
||||
namespace utils {
|
||||
/*!
|
||||
* \brief experimental wsummary
|
||||
* \tparam DType type of data content
|
||||
* \tparam RType type of rank
|
||||
*/
|
||||
template<typename DType, typename RType>
|
||||
struct WQSummary {
|
||||
/*! \brief an entry in the sketch summary */
|
||||
struct Entry {
|
||||
/*! \brief minimum rank */
|
||||
RType rmin;
|
||||
/*! \brief maximum rank */
|
||||
RType rmax;
|
||||
/*! \brief maximum weight */
|
||||
RType wmin;
|
||||
/*! \brief the value of data */
|
||||
DType value;
|
||||
// constructor
|
||||
Entry(void) {}
|
||||
// constructor
|
||||
Entry(RType rmin, RType rmax, RType wmin, DType value)
|
||||
: rmin(rmin), rmax(rmax), wmin(wmin), value(value) {}
|
||||
/*!
|
||||
* \brief debug function, check Valid
|
||||
* \param eps the tolerated level for violating the relation
|
||||
*/
|
||||
inline void CheckValid(RType eps = 0) const {
|
||||
utils::Assert(rmin >= 0 && rmax >= 0 && wmin >= 0, "nonneg constraint");
|
||||
utils::Assert(rmax- rmin - wmin > -eps, "relation constraint: min/max");
|
||||
}
|
||||
/*! \return rmin estimation for v strictly bigger than value */
|
||||
inline RType rmin_next(void) const {
|
||||
return rmin + wmin;
|
||||
}
|
||||
/*! \return rmax estimation for v strictly smaller than value */
|
||||
inline RType rmax_prev(void) const {
|
||||
return rmax - wmin;
|
||||
}
|
||||
};
|
||||
/*! \brief input data queue before entering the summary */
|
||||
struct Queue {
|
||||
// entry in the queue
|
||||
struct QEntry {
|
||||
// value of the instance
|
||||
DType value;
|
||||
// weight of instance
|
||||
RType weight;
|
||||
// default constructor
|
||||
QEntry(void) {}
|
||||
// constructor
|
||||
QEntry(DType value, RType weight)
|
||||
: value(value), weight(weight) {}
|
||||
// comparator on value
|
||||
inline bool operator<(const QEntry &b) const {
|
||||
return value < b.value;
|
||||
}
|
||||
};
|
||||
// the input queue
|
||||
std::vector<QEntry> queue;
|
||||
// end of the queue
|
||||
size_t qtail;
|
||||
// push data to the queue
|
||||
inline void Push(DType x, RType w) {
|
||||
if (qtail == 0 || queue[qtail - 1].value != x) {
|
||||
queue[qtail++] = QEntry(x, w);
|
||||
} else {
|
||||
queue[qtail - 1].weight += w;
|
||||
}
|
||||
}
|
||||
inline void MakeSummary(WQSummary *out) {
|
||||
std::sort(queue.begin(), queue.begin() + qtail);
|
||||
out->size = 0;
|
||||
// start update sketch
|
||||
RType wsum = 0;
|
||||
// construct data with unique weights
|
||||
for (size_t i = 0; i < qtail;) {
|
||||
size_t j = i + 1;
|
||||
RType w = queue[i].weight;
|
||||
while (j < qtail && queue[j].value == queue[i].value) {
|
||||
w += queue[j].weight; ++j;
|
||||
}
|
||||
out->data[out->size++] = Entry(wsum, wsum + w, w, queue[i].value);
|
||||
wsum += w; i = j;
|
||||
}
|
||||
}
|
||||
};
|
||||
/*! \brief data field */
|
||||
Entry *data;
|
||||
/*! \brief number of elements in the summary */
|
||||
size_t size;
|
||||
// constructor
|
||||
WQSummary(Entry *data, size_t size)
|
||||
: data(data), size(size) {}
|
||||
/*!
|
||||
* \return the maximum error of the Summary
|
||||
*/
|
||||
inline RType MaxError(void) const {
|
||||
RType res = data[0].rmax - data[0].rmin - data[0].wmin;
|
||||
for (size_t i = 1; i < size; ++i) {
|
||||
res = std::max(data[i].rmax_prev() - data[i - 1].rmin_next(), res);
|
||||
res = std::max(data[i].rmax - data[i].rmin - data[i].wmin, res);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
/*!
|
||||
* \brief query the rank statistics of qvalue, starting from istart
|
||||
* \param qvalue the value we query for
|
||||
* \param istart starting position
|
||||
*/
|
||||
inline Entry Query(DType qvalue, size_t &istart) const { // NOLINT(*)
|
||||
while (istart < size && qvalue > data[istart].value) {
|
||||
++istart;
|
||||
}
|
||||
if (istart == size) {
|
||||
RType rmax = data[size - 1].rmax;
|
||||
return Entry(rmax, rmax, 0.0f, qvalue);
|
||||
}
|
||||
if (qvalue == data[istart].value) {
|
||||
return data[istart];
|
||||
} else {
|
||||
if (istart == 0) {
|
||||
return Entry(0.0f, 0.0f, 0.0f, qvalue);
|
||||
} else {
|
||||
return Entry(data[istart - 1].rmin_next(),
|
||||
data[istart].rmax_prev(),
|
||||
0.0f, qvalue);
|
||||
}
|
||||
}
|
||||
}
|
||||
/*! \return maximum rank in the summary */
|
||||
inline RType MaxRank(void) const {
|
||||
return data[size - 1].rmax;
|
||||
}
|
||||
/*!
|
||||
* \brief copy content from src
|
||||
* \param src source sketch
|
||||
*/
|
||||
inline void CopyFrom(const WQSummary &src) {
|
||||
size = src.size;
|
||||
std::memcpy(data, src.data, sizeof(Entry) * size);
|
||||
}
|
||||
/*!
|
||||
* \brief debug function that runs a consistency check
* to verify that this is a valid summary
|
||||
* \param eps the tolerated error level, used when RType is floating point and
|
||||
* some inconsistency could occur due to rounding error
|
||||
*/
|
||||
inline void CheckValid(RType eps) const {
|
||||
for (size_t i = 0; i < size; ++i) {
|
||||
data[i].CheckValid(eps);
|
||||
if (i != 0) {
|
||||
utils::Assert(data[i].rmin >= data[i - 1].rmin + data[i - 1].wmin, "rmin range constraint");
|
||||
utils::Assert(data[i].rmax >= data[i - 1].rmax + data[i].wmin, "rmax range constraint");
|
||||
}
|
||||
}
|
||||
}
|
||||
/*!
|
||||
* \brief set current summary to be pruned summary of src
|
||||
* assume data field is already allocated to be at least maxsize
|
||||
* \param src source summary
|
||||
* \param maxsize size we can afford in the pruned sketch
|
||||
*/
|
||||
|
||||
inline void SetPrune(const WQSummary &src, size_t maxsize) {
|
||||
if (src.size <= maxsize) {
|
||||
this->CopyFrom(src); return;
|
||||
}
|
||||
const RType begin = src.data[0].rmax;
|
||||
const RType range = src.data[src.size - 1].rmin - src.data[0].rmax;
|
||||
const size_t n = maxsize - 1;
|
||||
data[0] = src.data[0];
|
||||
this->size = 1;
|
||||
// lastidx is used to avoid duplicated records
|
||||
size_t i = 1, lastidx = 0;
|
||||
for (size_t k = 1; k < n; ++k) {
|
||||
RType dx2 = 2 * ((k * range) / n + begin);
|
||||
// find first i such that d < (rmax[i+1] + rmin[i+1]) / 2
|
||||
while (i < src.size - 1
|
||||
&& dx2 >= src.data[i + 1].rmax + src.data[i + 1].rmin) ++i;
|
||||
utils::Assert(i != src.size - 1, "this cannot happen");
|
||||
if (dx2 < src.data[i].rmin_next() + src.data[i + 1].rmax_prev()) {
|
||||
if (i != lastidx) {
|
||||
data[size++] = src.data[i]; lastidx = i;
|
||||
}
|
||||
} else {
|
||||
if (i + 1 != lastidx) {
|
||||
data[size++] = src.data[i + 1]; lastidx = i + 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (lastidx != src.size - 1) {
|
||||
data[size++] = src.data[src.size - 1];
|
||||
}
|
||||
}
|
||||
/*!
|
||||
* \brief set current summary to be merged summary of sa and sb
|
||||
* \param sa first input summary to be merged
|
||||
* \param sb second input summary to be merged
|
||||
*/
|
||||
inline void SetCombine(const WQSummary &sa,
|
||||
const WQSummary &sb) {
|
||||
if (sa.size == 0) {
|
||||
this->CopyFrom(sb); return;
|
||||
}
|
||||
if (sb.size == 0) {
|
||||
this->CopyFrom(sa); return;
|
||||
}
|
||||
utils::Assert(sa.size > 0 && sb.size > 0, "invalid input for merge");
|
||||
const Entry *a = sa.data, *a_end = sa.data + sa.size;
|
||||
const Entry *b = sb.data, *b_end = sb.data + sb.size;
|
||||
// extended rmin value
|
||||
RType aprev_rmin = 0, bprev_rmin = 0;
|
||||
Entry *dst = this->data;
|
||||
while (a != a_end && b != b_end) {
|
||||
// duplicated value entry
|
||||
if (a->value == b->value) {
|
||||
*dst = Entry(a->rmin + b->rmin,
|
||||
a->rmax + b->rmax,
|
||||
a->wmin + b->wmin, a->value);
|
||||
aprev_rmin = a->rmin_next();
|
||||
bprev_rmin = b->rmin_next();
|
||||
++dst; ++a; ++b;
|
||||
} else if (a->value < b->value) {
|
||||
*dst = Entry(a->rmin + bprev_rmin,
|
||||
a->rmax + b->rmax_prev(),
|
||||
a->wmin, a->value);
|
||||
aprev_rmin = a->rmin_next();
|
||||
++dst; ++a;
|
||||
} else {
|
||||
*dst = Entry(b->rmin + aprev_rmin,
|
||||
b->rmax + a->rmax_prev(),
|
||||
b->wmin, b->value);
|
||||
bprev_rmin = b->rmin_next();
|
||||
++dst; ++b;
|
||||
}
|
||||
}
|
||||
if (a != a_end) {
|
||||
RType brmax = (b_end - 1)->rmax;
|
||||
do {
|
||||
*dst = Entry(a->rmin + bprev_rmin, a->rmax + brmax, a->wmin, a->value);
|
||||
++dst; ++a;
|
||||
} while (a != a_end);
|
||||
}
|
||||
if (b != b_end) {
|
||||
RType armax = (a_end - 1)->rmax;
|
||||
do {
|
||||
*dst = Entry(b->rmin + aprev_rmin, b->rmax + armax, b->wmin, b->value);
|
||||
++dst; ++b;
|
||||
} while (b != b_end);
|
||||
}
|
||||
this->size = dst - data;
|
||||
const RType tol = 10;
|
||||
RType err_mingap, err_maxgap, err_wgap;
|
||||
this->FixError(&err_mingap, &err_maxgap, &err_wgap);
|
||||
if (err_mingap > tol || err_maxgap > tol || err_wgap > tol) {
|
||||
utils::Printf("INFO: mingap=%g, maxgap=%g, wgap=%g\n",
|
||||
err_mingap, err_maxgap, err_wgap);
|
||||
}
|
||||
|
||||
utils::Assert(size <= sa.size + sb.size, "bug in combine");
|
||||
}
|
||||
// helper function to print the current content of sketch
|
||||
inline void Print() const {
|
||||
for (size_t i = 0; i < this->size; ++i) {
|
||||
utils::Printf("[%lu] rmin=%g, rmax=%g, wmin=%g, v=%g\n",
|
||||
i, data[i].rmin, data[i].rmax,
|
||||
data[i].wmin, data[i].value);
|
||||
}
|
||||
}
|
||||
// try to fix rounding error
|
||||
// and re-establish invariance
|
||||
inline void FixError(RType *err_mingap,
|
||||
RType *err_maxgap,
|
||||
RType *err_wgap) const {
|
||||
*err_mingap = 0;
|
||||
*err_maxgap = 0;
|
||||
*err_wgap = 0;
|
||||
RType prev_rmin = 0, prev_rmax = 0;
|
||||
for (size_t i = 0; i < this->size; ++i) {
|
||||
if (data[i].rmin < prev_rmin) {
|
||||
data[i].rmin = prev_rmin;
|
||||
*err_mingap = std::max(*err_mingap, prev_rmin - data[i].rmin);
|
||||
} else {
|
||||
prev_rmin = data[i].rmin;
|
||||
}
|
||||
if (data[i].rmax < prev_rmax) {
|
||||
data[i].rmax = prev_rmax;
|
||||
*err_maxgap = std::max(*err_maxgap, prev_rmax - data[i].rmax);
|
||||
}
|
||||
RType rmin_next = data[i].rmin_next();
|
||||
if (data[i].rmax < rmin_next) {
|
||||
data[i].rmax = rmin_next;
|
||||
*err_wgap = std::max(*err_wgap, data[i].rmax - rmin_next);
|
||||
}
|
||||
prev_rmax = data[i].rmax;
|
||||
}
|
||||
}
|
||||
// check consistency of the summary
|
||||
inline bool Check(const char *msg) const {
|
||||
const float tol = 10.0f;
|
||||
for (size_t i = 0; i < this->size; ++i) {
|
||||
if (data[i].rmin + data[i].wmin > data[i].rmax + tol ||
|
||||
data[i].rmin < -1e-6f || data[i].rmax < -1e-6f) {
|
||||
utils::Printf("----%s: Check not Pass------\n", msg);
|
||||
this->Print();
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
};
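(Editor's aside, not part of this commit.) A small hand-built example of what the Entry fields mean: for the weighted data set {1.0 with w=2, 3.0 with w=1, 5.0 with w=3}, Queue::MakeSummary above produces exactly the entries below, where rmin/rmax bound the weighted rank of each value and wmin is its weight. The include path is illustrative:

// Editorial sketch of WQSummary entry semantics on a tiny exact summary.
#include "xgboost/src/common/quantile.h"  // illustrative include path

void WQSummaryExample() {
  typedef xgboost::utils::WQSummary<float, float> Summary;
  Summary::Entry entries[3] = {
    Summary::Entry(0.0f, 2.0f, 2.0f, 1.0f),  // value 1.0 spans weighted ranks [0, 2]
    Summary::Entry(2.0f, 3.0f, 1.0f, 3.0f),  // value 3.0 spans weighted ranks [2, 3]
    Summary::Entry(3.0f, 6.0f, 3.0f, 5.0f),  // value 5.0 spans weighted ranks [3, 6]
  };
  Summary s(entries, 3);
  // for this exact summary MaxError() is 0, and Query(3.0f, istart)
  // returns the middle entry with its rank bounds
  size_t istart = 0;
  Summary::Entry e = s.Query(3.0f, istart);
  (void)e;
}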
|
||||
|
||||
/*! \brief try to do efficient pruning */
|
||||
template<typename DType, typename RType>
|
||||
struct WXQSummary : public WQSummary<DType, RType> {
|
||||
// redefine entry type
|
||||
typedef typename WQSummary<DType, RType>::Entry Entry;
|
||||
// constructor
|
||||
WXQSummary(Entry *data, size_t size)
|
||||
: WQSummary<DType, RType>(data, size) {}
|
||||
// check if the block is large chunk
|
||||
inline static bool CheckLarge(const Entry &e, RType chunk) {
|
||||
return e.rmin_next() > e.rmax_prev() + chunk;
|
||||
}
|
||||
// set prune
|
||||
inline void SetPrune(const WQSummary<DType, RType> &src, size_t maxsize) {
|
||||
if (src.size <= maxsize) {
|
||||
this->CopyFrom(src); return;
|
||||
}
|
||||
RType begin = src.data[0].rmax;
|
||||
size_t n = maxsize - 1, nbig = 0;
|
||||
RType range = src.data[src.size - 1].rmin - begin;
|
||||
// prune off zero weights
|
||||
if (range == 0.0f) {
|
||||
// special case, contain only two effective data pts
|
||||
this->data[0] = src.data[0];
|
||||
this->data[1] = src.data[src.size - 1];
|
||||
this->size = 2;
|
||||
return;
|
||||
} else {
|
||||
range = std::max(range, static_cast<RType>(1e-3f));
|
||||
}
|
||||
const RType chunk = 2 * range / n;
|
||||
// minimized range
|
||||
RType mrange = 0;
|
||||
{
|
||||
// first scan, grab all the big chunk
|
||||
// moving block index
|
||||
size_t bid = 0;
|
||||
for (size_t i = 1; i < src.size; ++i) {
|
||||
if (CheckLarge(src.data[i], chunk)) {
|
||||
if (bid != i - 1) {
|
||||
mrange += src.data[i].rmax_prev() - src.data[bid].rmin_next();
|
||||
}
|
||||
bid = i; ++nbig;
|
||||
}
|
||||
}
|
||||
if (bid != src.size - 2) {
|
||||
mrange += src.data[src.size-1].rmax_prev() - src.data[bid].rmin_next();
|
||||
}
|
||||
}
|
||||
if (nbig >= n - 1) {
|
||||
// see what was the case
|
||||
utils::Printf("LOG: check quantile stats, nbig=%lu, n=%lu\n", nbig, n);
|
||||
utils::Printf("LOG: srcsize=%lu, maxsize=%lu, range=%g, chunk=%g\n",
|
||||
src.size, maxsize, static_cast<double>(range),
|
||||
static_cast<double>(chunk));
|
||||
src.Print();
|
||||
utils::Assert(nbig < n - 1, "quantile: too many large chunk");
|
||||
}
|
||||
this->data[0] = src.data[0];
|
||||
this->size = 1;
|
||||
// use smaller size
|
||||
n = n - nbig;
|
||||
// find the rest of point
|
||||
size_t bid = 0, k = 1, lastidx = 0;
|
||||
for (size_t end = 1; end < src.size; ++end) {
|
||||
if (end == src.size - 1 || CheckLarge(src.data[end], chunk)) {
|
||||
if (bid != end - 1) {
|
||||
size_t i = bid;
|
||||
RType maxdx2 = src.data[end].rmax_prev() * 2;
|
||||
for (; k < n; ++k) {
|
||||
RType dx2 = 2 * ((k * mrange) / n + begin);
|
||||
if (dx2 >= maxdx2) break;
|
||||
while (i < end &&
|
||||
dx2 >= src.data[i + 1].rmax + src.data[i + 1].rmin) ++i;
|
||||
if (i == end) break;
|
||||
if (dx2 < src.data[i].rmin_next() + src.data[i + 1].rmax_prev()) {
|
||||
if (i != lastidx) {
|
||||
this->data[this->size++] = src.data[i]; lastidx = i;
|
||||
}
|
||||
} else {
|
||||
if (i + 1 != lastidx) {
|
||||
this->data[this->size++] = src.data[i + 1]; lastidx = i + 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (lastidx != end) {
|
||||
this->data[this->size++] = src.data[end];
|
||||
lastidx = end;
|
||||
}
|
||||
bid = end;
|
||||
// shift base by the gap
|
||||
begin += src.data[bid].rmin_next() - src.data[bid].rmax_prev();
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
/*!
|
||||
* \brief traditional GK summary
|
||||
*/
|
||||
template<typename DType, typename RType>
|
||||
struct GKSummary {
|
||||
/*! \brief an entry in the sketch summary */
|
||||
struct Entry {
|
||||
/*! \brief minimum rank */
|
||||
RType rmin;
|
||||
/*! \brief maximum rank */
|
||||
RType rmax;
|
||||
/*! \brief the value of data */
|
||||
DType value;
|
||||
// constructor
|
||||
Entry(void) {}
|
||||
// constructor
|
||||
Entry(RType rmin, RType rmax, DType value)
|
||||
: rmin(rmin), rmax(rmax), value(value) {}
|
||||
};
|
||||
/*! \brief input data queue before entering the summary */
|
||||
struct Queue {
|
||||
// the input queue
|
||||
std::vector<DType> queue;
|
||||
// end of the queue
|
||||
size_t qtail;
|
||||
// push data to the queue
|
||||
inline void Push(DType x, RType w) {
|
||||
queue[qtail++] = x;
|
||||
}
|
||||
inline void MakeSummary(GKSummary *out) {
|
||||
std::sort(queue.begin(), queue.begin() + qtail);
|
||||
out->size = qtail;
|
||||
for (size_t i = 0; i < qtail; ++i) {
|
||||
out->data[i] = Entry(i + 1, i + 1, queue[i]);
|
||||
}
|
||||
}
|
||||
};
|
||||
/*! \brief data field */
|
||||
Entry *data;
|
||||
/*! \brief number of elements in the summary */
|
||||
size_t size;
|
||||
GKSummary(Entry *data, size_t size)
|
||||
: data(data), size(size) {}
|
||||
/*! \brief the maximum error of the summary */
|
||||
inline RType MaxError(void) const {
|
||||
RType res = 0;
|
||||
for (size_t i = 1; i < size; ++i) {
|
||||
res = std::max(data[i].rmax - data[i-1].rmin, res);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
/*! \return maximum rank in the summary */
|
||||
inline RType MaxRank(void) const {
|
||||
return data[size - 1].rmax;
|
||||
}
|
||||
/*!
|
||||
* \brief copy content from src
|
||||
* \param src source sketch
|
||||
*/
|
||||
inline void CopyFrom(const GKSummary &src) {
|
||||
size = src.size;
|
||||
std::memcpy(data, src.data, sizeof(Entry) * size);
|
||||
}
|
||||
inline void CheckValid(RType eps) const {
|
||||
// assume always valid
|
||||
}
|
||||
/*! \brief used for debug purpose, print the summary */
|
||||
inline void Print(void) const {
|
||||
for (size_t i = 0; i < size; ++i) {
|
||||
std::cout << "x=" << data[i].value << "\t"
|
||||
<< "[" << data[i].rmin << "," << data[i].rmax << "]"
|
||||
<< std::endl;
|
||||
}
|
||||
}
|
||||
/*!
|
||||
* \brief set current summary to be pruned summary of src
|
||||
* assume data field is already allocated to be at least maxsize
|
||||
* \param src source summary
|
||||
* \param maxsize size we can afford in the pruned sketch
|
||||
*/
|
||||
inline void SetPrune(const GKSummary &src, size_t maxsize) {
|
||||
if (src.size <= maxsize) {
|
||||
this->CopyFrom(src); return;
|
||||
}
|
||||
const RType max_rank = src.MaxRank();
|
||||
this->size = maxsize;
|
||||
data[0] = src.data[0];
|
||||
size_t n = maxsize - 1;
|
||||
RType top = 1;
|
||||
for (size_t i = 1; i < n; ++i) {
|
||||
RType k = (i * max_rank) / n;
|
||||
while (k > src.data[top + 1].rmax) ++top;
|
||||
// assert src.data[top].rmin <= k
|
||||
// because k > src.data[top].rmax >= src.data[top].rmin
|
||||
if ((k - src.data[top].rmin) < (src.data[top+1].rmax - k)) {
|
||||
data[i] = src.data[top];
|
||||
} else {
|
||||
data[i] = src.data[top + 1];
|
||||
}
|
||||
}
|
||||
data[n] = src.data[src.size - 1];
|
||||
}
|
||||
inline void SetCombine(const GKSummary &sa,
|
||||
const GKSummary &sb) {
|
||||
if (sa.size == 0) {
|
||||
this->CopyFrom(sb); return;
|
||||
}
|
||||
if (sb.size == 0) {
|
||||
this->CopyFrom(sa); return;
|
||||
}
|
||||
utils::Assert(sa.size > 0 && sb.size > 0, "invalid input for merge");
|
||||
const Entry *a = sa.data, *a_end = sa.data + sa.size;
|
||||
const Entry *b = sb.data, *b_end = sb.data + sb.size;
|
||||
this->size = sa.size + sb.size;
|
||||
RType aprev_rmin = 0, bprev_rmin = 0;
|
||||
Entry *dst = this->data;
|
||||
while (a != a_end && b != b_end) {
|
||||
if (a->value < b->value) {
|
||||
*dst = Entry(bprev_rmin + a->rmin,
|
||||
a->rmax + b->rmax - 1, a->value);
|
||||
aprev_rmin = a->rmin;
|
||||
++dst; ++a;
|
||||
} else {
|
||||
*dst = Entry(aprev_rmin + b->rmin,
|
||||
b->rmax + a->rmax - 1, b->value);
|
||||
bprev_rmin = b->rmin;
|
||||
++dst; ++b;
|
||||
}
|
||||
}
|
||||
if (a != a_end) {
|
||||
RType bprev_rmax = (b_end - 1)->rmax;
|
||||
do {
|
||||
*dst = Entry(bprev_rmin + a->rmin, bprev_rmax + a->rmax, a->value);
|
||||
++dst; ++a;
|
||||
} while (a != a_end);
|
||||
}
|
||||
if (b != b_end) {
|
||||
RType aprev_rmax = (a_end - 1)->rmax;
|
||||
do {
|
||||
*dst = Entry(aprev_rmin + b->rmin, aprev_rmax + b->rmax, b->value);
|
||||
++dst; ++b;
|
||||
} while (b != b_end);
|
||||
}
|
||||
utils::Assert(dst == data + size, "bug in combine");
|
||||
}
|
||||
};
|
||||
|
||||
/*!
|
||||
* \brief template for all quantile sketch algorithm
|
||||
* that uses merge/prune scheme
|
||||
* \tparam DType type of data content
|
||||
* \tparam RType type of rank
|
||||
* \tparam TSummary actual summary data structure it uses
|
||||
*/
|
||||
template<typename DType, typename RType, class TSummary>
|
||||
class QuantileSketchTemplate {
|
||||
public:
|
||||
/*! \brief type of summary type */
|
||||
typedef TSummary Summary;
|
||||
/*! \brief the entry type */
|
||||
typedef typename Summary::Entry Entry;
|
||||
/*! \brief same as summary, but uses STL to back up the space */
|
||||
struct SummaryContainer : public Summary {
|
||||
std::vector<Entry> space;
|
||||
SummaryContainer(const SummaryContainer &src) : Summary(NULL, src.size) {
|
||||
this->space = src.space;
|
||||
this->data = BeginPtr(this->space);
|
||||
}
|
||||
SummaryContainer(void) : Summary(NULL, 0) {
|
||||
}
|
||||
/*! \brief reserve space for summary */
|
||||
inline void Reserve(size_t size) {
|
||||
if (size > space.size()) {
|
||||
space.resize(size);
|
||||
this->data = BeginPtr(space);
|
||||
}
|
||||
}
|
||||
/*!
|
||||
* \brief set the space to be the merge of all Summary arrays
|
||||
* \param begin beginning position in the summary array
|
||||
* \param end ending position in the Summary array
|
||||
*/
|
||||
inline void SetMerge(const Summary *begin,
|
||||
const Summary *end) {
|
||||
utils::Assert(begin < end, "can not set combine to empty instance");
|
||||
size_t len = end - begin;
|
||||
if (len == 1) {
|
||||
this->Reserve(begin[0].size);
|
||||
this->CopyFrom(begin[0]);
|
||||
} else if (len == 2) {
|
||||
this->Reserve(begin[0].size + begin[1].size);
|
||||
this->SetMerge(begin[0], begin[1]);
|
||||
} else {
|
||||
// recursive merge
|
||||
SummaryContainer lhs, rhs;
|
||||
lhs.SetCombine(begin, begin + len / 2);
|
||||
rhs.SetCombine(begin + len / 2, end);
|
||||
this->Reserve(lhs.size + rhs.size);
|
||||
this->SetCombine(lhs, rhs);
|
||||
}
|
||||
}
|
||||
/*!
|
||||
* \brief do elementwise combination of summary array
|
||||
* this[i] = combine(this[i], src[i]) for each i
|
||||
* \param src the source summary
|
||||
* \param max_nbyte maximum number of bytes allowed in this summary
|
||||
*/
|
||||
inline void Reduce(const Summary &src, size_t max_nbyte) {
|
||||
this->Reserve((max_nbyte - sizeof(this->size)) / sizeof(Entry));
|
||||
SummaryContainer temp;
|
||||
temp.Reserve(this->size + src.size);
|
||||
temp.SetCombine(*this, src);
|
||||
this->SetPrune(temp, space.size());
|
||||
}
|
||||
/*! \brief return the number of bytes this data structure cost in serialization */
|
||||
inline static size_t CalcMemCost(size_t nentry) {
|
||||
return sizeof(size_t) + sizeof(Entry) * nentry;
|
||||
}
|
||||
/*! \brief save the data structure into stream */
|
||||
template<typename TStream>
|
||||
inline void Save(TStream &fo) const { // NOLINT(*)
|
||||
fo.Write(&(this->size), sizeof(this->size));
|
||||
if (this->size != 0) {
|
||||
fo.Write(this->data, this->size * sizeof(Entry));
|
||||
}
|
||||
}
|
||||
/*! \brief load data structure from input stream */
|
||||
template<typename TStream>
|
||||
inline void Load(TStream &fi) { // NOLINT(*)
|
||||
utils::Check(fi.Read(&this->size, sizeof(this->size)) != 0, "invalid SummaryArray 1");
|
||||
this->Reserve(this->size);
|
||||
if (this->size != 0) {
|
||||
utils::Check(fi.Read(this->data, this->size * sizeof(Entry)) != 0,
|
||||
"invalid SummaryArray 2");
|
||||
}
|
||||
}
|
||||
};
|
||||
/*!
|
||||
* \brief initialize the quantile sketch, given the performance specification
|
||||
* \param maxn maximum number of data points can be feed into sketch
|
||||
* \param eps accuracy level of summary
|
||||
*/
|
||||
inline void Init(size_t maxn, double eps) {
|
||||
nlevel = 1;
|
||||
while (true) {
|
||||
limit_size = static_cast<size_t>(ceil(nlevel / eps)) + 1;
|
||||
size_t n = (1UL << nlevel);
|
||||
if (n * limit_size >= maxn) break;
|
||||
++nlevel;
|
||||
}
|
||||
// check invariant
|
||||
size_t n = (1UL << nlevel);
|
||||
utils::Assert(n * limit_size >= maxn, "invalid init parameter");
|
||||
utils::Assert(nlevel <= limit_size * eps, "invalid init parameter");
|
||||
// lazy reserve the space, if there is only one value, no need to allocate space
|
||||
inqueue.queue.resize(1);
|
||||
inqueue.qtail = 0;
|
||||
data.clear();
|
||||
level.clear();
|
||||
}
|
||||
/*!
|
||||
* \brief add an element to a sketch
|
||||
* \param x the element added to the sketch
|
||||
*/
|
||||
inline void Push(DType x, RType w = 1) {
|
||||
if (w == static_cast<RType>(0)) return;
|
||||
if (inqueue.qtail == inqueue.queue.size()) {
|
||||
// jump from lazy one value to limit_size * 2
|
||||
if (inqueue.queue.size() == 1) {
|
||||
inqueue.queue.resize(limit_size * 2);
|
||||
} else {
|
||||
temp.Reserve(limit_size * 2);
|
||||
inqueue.MakeSummary(&temp);
|
||||
// cleanup queue
|
||||
inqueue.qtail = 0;
|
||||
this->PushTemp();
|
||||
}
|
||||
}
|
||||
inqueue.Push(x, w);
|
||||
}
|
||||
/*! \brief push up temp */
|
||||
inline void PushTemp(void) {
|
||||
temp.Reserve(limit_size * 2);
|
||||
for (size_t l = 1; true; ++l) {
|
||||
this->InitLevel(l + 1);
|
||||
// check if level l is empty
|
||||
if (level[l].size == 0) {
|
||||
level[l].SetPrune(temp, limit_size);
|
||||
break;
|
||||
} else {
|
||||
// level 0 is actually temp space
|
||||
level[0].SetPrune(temp, limit_size);
|
||||
temp.SetCombine(level[0], level[l]);
|
||||
if (temp.size > limit_size) {
|
||||
// try next level
|
||||
level[l].size = 0;
|
||||
} else {
|
||||
// if merged record is still smaller, no need to send to next level
|
||||
level[l].CopyFrom(temp); break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
/*! \brief get the summary after finalize */
|
||||
inline void GetSummary(SummaryContainer *out) {
|
||||
if (level.size() != 0) {
|
||||
out->Reserve(limit_size * 2);
|
||||
} else {
|
||||
out->Reserve(inqueue.queue.size());
|
||||
}
|
||||
inqueue.MakeSummary(out);
|
||||
if (level.size() != 0) {
|
||||
level[0].SetPrune(*out, limit_size);
|
||||
for (size_t l = 1; l < level.size(); ++l) {
|
||||
if (level[l].size == 0) continue;
|
||||
if (level[0].size == 0) {
|
||||
level[0].CopyFrom(level[l]);
|
||||
} else {
|
||||
out->SetCombine(level[0], level[l]);
|
||||
level[0].SetPrune(*out, limit_size);
|
||||
}
|
||||
}
|
||||
out->CopyFrom(level[0]);
|
||||
} else {
|
||||
if (out->size > limit_size) {
|
||||
temp.Reserve(limit_size);
|
||||
temp.SetPrune(*out, limit_size);
|
||||
out->CopyFrom(temp);
|
||||
}
|
||||
}
|
||||
}
|
||||
// used for debug, check if the sketch is valid
|
||||
inline void CheckValid(RType eps) const {
|
||||
for (size_t l = 1; l < level.size(); ++l) {
|
||||
level[l].CheckValid(eps);
|
||||
}
|
||||
}
|
||||
// initialize level space to at least nlevel
|
||||
inline void InitLevel(size_t nlevel) {
|
||||
if (level.size() >= nlevel) return;
|
||||
data.resize(limit_size * nlevel);
|
||||
level.resize(nlevel, Summary(NULL, 0));
|
||||
for (size_t l = 0; l < level.size(); ++l) {
|
||||
level[l].data = BeginPtr(data) + l * limit_size;
|
||||
}
|
||||
}
|
||||
// input data queue
|
||||
typename Summary::Queue inqueue;
|
||||
// number of levels
|
||||
size_t nlevel;
|
||||
// size of summary in each level
|
||||
size_t limit_size;
|
||||
// the level of each summaries
|
||||
std::vector<Summary> level;
|
||||
// content of the summary
|
||||
std::vector<Entry> data;
|
||||
// temporal summary, used for temp-merge
|
||||
SummaryContainer temp;
|
||||
};
|
||||
|
||||
/*!
|
||||
* \brief Quantile sketch use WQSummary
|
||||
* \tparam DType type of data content
|
||||
* \tparam RType type of rank
|
||||
*/
|
||||
template<typename DType, typename RType = unsigned>
|
||||
class WQuantileSketch :
|
||||
public QuantileSketchTemplate<DType, RType, WQSummary<DType, RType> >{
|
||||
};
|
||||
|
||||
/*!
|
||||
* \brief Quantile sketch use WXQSummary
|
||||
* \tparam DType type of data content
|
||||
* \tparam RType type of rank
|
||||
*/
|
||||
template<typename DType, typename RType = unsigned>
|
||||
class WXQuantileSketch :
|
||||
public QuantileSketchTemplate<DType, RType, WXQSummary<DType, RType> >{
|
||||
};
|
||||
/*!
|
||||
* \brief Quantile sketch use WQSummary
|
||||
* \tparam DType type of data content
|
||||
* \tparam RType type of rank
|
||||
*/
|
||||
template<typename DType, typename RType = unsigned>
|
||||
class GKQuantileSketch :
|
||||
public QuantileSketchTemplate<DType, RType, GKSummary<DType, RType> >{
|
||||
};
|
||||
|
||||
} // namespace utils
|
||||
} // namespace xgboost
|
||||
#endif // XGBOOST_UTILS_QUANTILE_H_
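(Editor's aside, not part of this commit.) A hedged usage sketch of the merge/prune sketch defined above, using only the Init/Push/GetSummary interface shown in this file; the include path is illustrative:

// Editorial usage sketch for WQuantileSketch.
#include <cstdio>
#include "xgboost/src/common/quantile.h"  // illustrative include path

void QuantileSketchExample() {
  xgboost::utils::WQuantileSketch<float, float> sketch;
  const size_t kMaxPoints = 10000;
  sketch.Init(kMaxPoints, 0.01);  // eps controls summary accuracy and size
  for (size_t i = 0; i < kMaxPoints; ++i) {
    sketch.Push(static_cast<float>(i % 100), 1.0f);  // value, weight
  }
  xgboost::utils::WQuantileSketch<float, float>::SummaryContainer out;
  sketch.GetSummary(&out);
  for (size_t i = 0; i < out.size; ++i) {
    std::printf("value=%g rank in [%g, %g]\n",
                out.data[i].value, out.data[i].rmin, out.data[i].rmax);
  }
}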

427  src/tree/updater_basemaker-inl.h  Normal file
@@ -0,0 +1,427 @@
/*!
 * Copyright 2014 by Contributors
 * \file updater_basemaker-inl.hpp
 * \brief implement a common tree constructor
 * \author Tianqi Chen
 */
#ifndef XGBOOST_TREE_UPDATER_BASEMAKER_INL_HPP_
#define XGBOOST_TREE_UPDATER_BASEMAKER_INL_HPP_
#include <vector>
#include <algorithm>
#include <string>
#include <limits>
#include "../sync/sync.h"
#include "../utils/random.h"
#include "../utils/quantile.h"

namespace xgboost {
|
||||
namespace tree {
|
||||
/*!
|
||||
* \brief base tree maker class that defines common operation
|
||||
* needed in tree making
|
||||
*/
|
||||
class BaseMaker: public IUpdater {
|
||||
public:
|
||||
// destructor
|
||||
virtual ~BaseMaker(void) {}
|
||||
// set training parameter
|
||||
virtual void SetParam(const char *name, const char *val) {
|
||||
param.SetParam(name, val);
|
||||
}
|
||||
|
||||
protected:
|
||||
// helper to collect and query feature meta information
|
||||
struct FMetaHelper {
|
||||
public:
|
||||
/*! \brief find type of each feature, use column format */
|
||||
inline void InitByCol(IFMatrix *p_fmat,
|
||||
const RegTree &tree) {
|
||||
fminmax.resize(tree.param.num_feature * 2);
|
||||
std::fill(fminmax.begin(), fminmax.end(),
|
||||
-std::numeric_limits<bst_float>::max());
|
||||
// start accumulating statistics
|
||||
utils::IIterator<ColBatch> *iter = p_fmat->ColIterator();
|
||||
iter->BeforeFirst();
|
||||
while (iter->Next()) {
|
||||
const ColBatch &batch = iter->Value();
|
||||
for (bst_uint i = 0; i < batch.size; ++i) {
|
||||
const bst_uint fid = batch.col_index[i];
|
||||
const ColBatch::Inst &c = batch[i];
|
||||
if (c.length != 0) {
|
||||
fminmax[fid * 2 + 0] = std::max(-c[0].fvalue, fminmax[fid * 2 + 0]);
|
||||
fminmax[fid * 2 + 1] = std::max(c[c.length - 1].fvalue, fminmax[fid * 2 + 1]);
|
||||
}
|
||||
}
|
||||
}
|
||||
rabit::Allreduce<rabit::op::Max>(BeginPtr(fminmax), fminmax.size());
|
||||
}
|
||||
// get feature type, 0:empty 1:binary 2:real
|
||||
inline int Type(bst_uint fid) const {
|
||||
utils::Assert(fid * 2 + 1 < fminmax.size(),
|
||||
"FeatHelper fid exceed query bound ");
|
||||
bst_float a = fminmax[fid * 2];
|
||||
bst_float b = fminmax[fid * 2 + 1];
|
||||
if (a == -std::numeric_limits<bst_float>::max()) return 0;
|
||||
if (-a == b) {
|
||||
return 1;
|
||||
} else {
|
||||
return 2;
|
||||
}
|
||||
}
|
||||
inline bst_float MaxValue(bst_uint fid) const {
|
||||
return fminmax[fid *2 + 1];
|
||||
}
|
||||
inline void SampleCol(float p, std::vector<bst_uint> *p_findex) const {
|
||||
std::vector<bst_uint> &findex = *p_findex;
|
||||
findex.clear();
|
||||
for (size_t i = 0; i < fminmax.size(); i += 2) {
|
||||
const bst_uint fid = static_cast<bst_uint>(i / 2);
|
||||
if (this->Type(fid) != 0) findex.push_back(fid);
|
||||
}
|
||||
unsigned n = static_cast<unsigned>(p * findex.size());
|
||||
random::Shuffle(findex);
|
||||
findex.resize(n);
|
||||
// sync the findex if it is subsample
|
||||
std::string s_cache;
|
||||
utils::MemoryBufferStream fc(&s_cache);
|
||||
utils::IStream &fs = fc;
|
||||
if (rabit::GetRank() == 0) {
|
||||
fs.Write(findex);
|
||||
}
|
||||
rabit::Broadcast(&s_cache, 0);
|
||||
fs.Read(&findex);
|
||||
}
|
||||
|
||||
private:
|
||||
std::vector<bst_float> fminmax;
|
||||
};
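(Editor's aside, not part of this commit.) InitByCol stores the negated column minimum in fminmax[2*fid] and the column maximum in fminmax[2*fid+1], so a single rabit::Allreduce<Max> aggregates both bounds across workers; a standalone check of that negation trick:

// Editorial sketch: taking the max of negated per-worker minima recovers the global minimum.
#include <cassert>
#include <limits>

int main() {
  const float worker_min[3] = {0.5f, -2.0f, 1.5f};   // per-worker feature minima
  float acc = -std::numeric_limits<float>::max();    // same initial value as fminmax
  for (int w = 0; w < 3; ++w) {
    // each worker contributes -min; Allreduce<Max> plays the role of this loop
    acc = acc > -worker_min[w] ? acc : -worker_min[w];
  }
  assert(-acc == -2.0f);  // -max(-min_w) is the global minimum
  return 0;
}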
|
||||
// ------static helper functions ------
|
||||
// helper function to get to next level of the tree
|
||||
/*! \brief this is a helper function for row-based data */
|
||||
inline static int NextLevel(const RowBatch::Inst &inst, const RegTree &tree, int nid) {
|
||||
const RegTree::Node &n = tree[nid];
|
||||
bst_uint findex = n.split_index();
|
||||
for (unsigned i = 0; i < inst.length; ++i) {
|
||||
if (findex == inst[i].index) {
|
||||
if (inst[i].fvalue < n.split_cond()) {
|
||||
return n.cleft();
|
||||
} else {
|
||||
return n.cright();
|
||||
}
|
||||
}
|
||||
}
|
||||
return n.cdefault();
|
||||
}
|
||||
/*! \brief get number of omp thread in current context */
|
||||
inline static int get_nthread(void) {
|
||||
int nthread;
|
||||
#pragma omp parallel
|
||||
{
|
||||
nthread = omp_get_num_threads();
|
||||
}
|
||||
return nthread;
|
||||
}
|
||||
// ------class member helpers---------
|
||||
/*! \brief initialize temp data structure */
|
||||
inline void InitData(const std::vector<bst_gpair> &gpair,
|
||||
const IFMatrix &fmat,
|
||||
const std::vector<unsigned> &root_index,
|
||||
const RegTree &tree) {
|
||||
utils::Assert(tree.param.num_nodes == tree.param.num_roots,
|
||||
"TreeMaker: can only grow new tree");
|
||||
{
|
||||
// setup position
|
||||
position.resize(gpair.size());
|
||||
if (root_index.size() == 0) {
|
||||
std::fill(position.begin(), position.end(), 0);
|
||||
} else {
|
||||
for (size_t i = 0; i < position.size(); ++i) {
|
||||
position[i] = root_index[i];
|
||||
utils::Assert(root_index[i] < (unsigned)tree.param.num_roots,
|
||||
"root index exceed setting");
|
||||
}
|
||||
}
|
||||
// mark delete for the deleted datas
|
||||
for (size_t i = 0; i < position.size(); ++i) {
|
||||
if (gpair[i].hess < 0.0f) position[i] = ~position[i];
|
||||
}
|
||||
// mark subsample
|
||||
if (param.subsample < 1.0f) {
|
||||
for (size_t i = 0; i < position.size(); ++i) {
|
||||
if (gpair[i].hess < 0.0f) continue;
|
||||
if (random::SampleBinary(param.subsample) == 0) position[i] = ~position[i];
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
// expand query
|
||||
qexpand.reserve(256); qexpand.clear();
|
||||
for (int i = 0; i < tree.param.num_roots; ++i) {
|
||||
qexpand.push_back(i);
|
||||
}
|
||||
this->UpdateNode2WorkIndex(tree);
|
||||
}
|
||||
}
|
||||
/*! \brief update queue expand add in new leaves */
|
||||
inline void UpdateQueueExpand(const RegTree &tree) {
|
||||
std::vector<int> newnodes;
|
||||
for (size_t i = 0; i < qexpand.size(); ++i) {
|
||||
const int nid = qexpand[i];
|
||||
if (!tree[nid].is_leaf()) {
|
||||
newnodes.push_back(tree[nid].cleft());
|
||||
newnodes.push_back(tree[nid].cright());
|
||||
}
|
||||
}
|
||||
// use new nodes for qexpand
|
||||
qexpand = newnodes;
|
||||
this->UpdateNode2WorkIndex(tree);
|
||||
}
|
||||
// return decoded position
|
||||
inline int DecodePosition(bst_uint ridx) const {
|
||||
const int pid = position[ridx];
|
||||
return pid < 0 ? ~pid : pid;
|
||||
}
|
||||
// set the encoded position value for ridx
|
||||
inline void SetEncodePosition(bst_uint ridx, int nid) {
|
||||
if (position[ridx] < 0) {
|
||||
position[ridx] = ~nid;
|
||||
} else {
|
||||
position[ridx] = nid;
|
||||
}
|
||||
}
|
||||
/*!
|
||||
* \brief this is a helper function that uses the column-based data structure,
|
||||
* reset the positions to the latest one
|
||||
* \param nodes the set of nodes that contains the split to be used
|
||||
* \param p_fmat feature matrix needed for tree construction
|
||||
* \param tree the regression tree structure
|
||||
*/
|
||||
inline void ResetPositionCol(const std::vector<int> &nodes,
|
||||
IFMatrix *p_fmat, const RegTree &tree) {
|
||||
// set the positions in the nondefault
|
||||
this->SetNonDefaultPositionCol(nodes, p_fmat, tree);
|
||||
// set rest of instances to default position
|
||||
const std::vector<bst_uint> &rowset = p_fmat->buffered_rowset();
|
||||
// set default direct nodes to default
|
||||
// for leaf nodes that are not fresh, mark them as ~nid,
|
||||
// so that they are ignored in future statistics collection
|
||||
const bst_omp_uint ndata = static_cast<bst_omp_uint>(rowset.size());
|
||||
|
||||
#pragma omp parallel for schedule(static)
|
||||
for (bst_omp_uint i = 0; i < ndata; ++i) {
|
||||
const bst_uint ridx = rowset[i];
|
||||
const int nid = this->DecodePosition(ridx);
|
||||
if (tree[nid].is_leaf()) {
|
||||
// mark finish when it is not a fresh leaf
|
||||
if (tree[nid].cright() == -1) {
|
||||
position[ridx] = ~nid;
|
||||
}
|
||||
} else {
|
||||
// push to default branch
|
||||
if (tree[nid].default_left()) {
|
||||
this->SetEncodePosition(ridx, tree[nid].cleft());
|
||||
} else {
|
||||
this->SetEncodePosition(ridx, tree[nid].cright());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
/*!
|
||||
* \brief this is a helper function that uses the column-based data structure,
|
||||
* update all positions into nondefault branch, if any, ignore the default branch
|
||||
* \param nodes the set of nodes that contains the split to be used
|
||||
* \param p_fmat feature matrix needed for tree construction
|
||||
* \param tree the regression tree structure
|
||||
*/
|
||||
virtual void SetNonDefaultPositionCol(const std::vector<int> &nodes,
|
||||
IFMatrix *p_fmat, const RegTree &tree) {
|
||||
// step 1, classify the non-default data into right places
|
||||
std::vector<unsigned> fsplits;
|
||||
for (size_t i = 0; i < nodes.size(); ++i) {
|
||||
const int nid = nodes[i];
|
||||
if (!tree[nid].is_leaf()) {
|
||||
fsplits.push_back(tree[nid].split_index());
|
||||
}
|
||||
}
|
||||
std::sort(fsplits.begin(), fsplits.end());
|
||||
fsplits.resize(std::unique(fsplits.begin(), fsplits.end()) - fsplits.begin());
|
||||
|
||||
utils::IIterator<ColBatch> *iter = p_fmat->ColIterator(fsplits);
|
||||
while (iter->Next()) {
|
||||
const ColBatch &batch = iter->Value();
|
||||
for (size_t i = 0; i < batch.size; ++i) {
|
||||
ColBatch::Inst col = batch[i];
|
||||
const bst_uint fid = batch.col_index[i];
|
||||
const bst_omp_uint ndata = static_cast<bst_omp_uint>(col.length);
|
||||
#pragma omp parallel for schedule(static)
|
||||
for (bst_omp_uint j = 0; j < ndata; ++j) {
|
||||
const bst_uint ridx = col[j].index;
|
||||
const float fvalue = col[j].fvalue;
|
||||
const int nid = this->DecodePosition(ridx);
|
||||
// go back to parent, correct those who are not default
|
||||
if (!tree[nid].is_leaf() && tree[nid].split_index() == fid) {
|
||||
if (fvalue < tree[nid].split_cond()) {
|
||||
this->SetEncodePosition(ridx, tree[nid].cleft());
|
||||
} else {
|
||||
this->SetEncodePosition(ridx, tree[nid].cright());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
/*! \brief helper function to get statistics from a tree */
|
||||
template<typename TStats>
|
||||
inline void GetNodeStats(const std::vector<bst_gpair> &gpair,
|
||||
const IFMatrix &fmat,
|
||||
const RegTree &tree,
|
||||
const BoosterInfo &info,
|
||||
std::vector< std::vector<TStats> > *p_thread_temp,
|
||||
std::vector<TStats> *p_node_stats) {
|
||||
std::vector< std::vector<TStats> > &thread_temp = *p_thread_temp;
|
||||
thread_temp.resize(this->get_nthread());
|
||||
p_node_stats->resize(tree.param.num_nodes);
|
||||
#pragma omp parallel
|
||||
{
|
||||
const int tid = omp_get_thread_num();
|
||||
thread_temp[tid].resize(tree.param.num_nodes, TStats(param));
|
||||
for (size_t i = 0; i < qexpand.size(); ++i) {
|
||||
const unsigned nid = qexpand[i];
|
||||
thread_temp[tid][nid].Clear();
|
||||
}
|
||||
}
|
||||
const std::vector<bst_uint> &rowset = fmat.buffered_rowset();
|
||||
// setup position
|
||||
const bst_omp_uint ndata = static_cast<bst_omp_uint>(rowset.size());
|
||||
#pragma omp parallel for schedule(static)
|
||||
for (bst_omp_uint i = 0; i < ndata; ++i) {
|
||||
const bst_uint ridx = rowset[i];
|
||||
const int nid = position[ridx];
|
||||
const int tid = omp_get_thread_num();
|
||||
if (nid >= 0) {
|
||||
thread_temp[tid][nid].Add(gpair, info, ridx);
|
||||
}
|
||||
}
|
||||
// sum the per thread statistics together
|
||||
for (size_t j = 0; j < qexpand.size(); ++j) {
|
||||
const int nid = qexpand[j];
|
||||
TStats &s = (*p_node_stats)[nid];
|
||||
s.Clear();
|
||||
for (size_t tid = 0; tid < thread_temp.size(); ++tid) {
|
||||
s.Add(thread_temp[tid][nid]);
|
||||
}
|
||||
}
|
||||
}
|
||||
/*! \brief common helper data structure to build sketch */
|
||||
struct SketchEntry {
|
||||
/*! \brief total sum of amount to be met */
|
||||
double sum_total;
|
||||
/*! \brief statistics used in the sketch */
|
||||
double rmin, wmin;
|
||||
/*! \brief last seen feature value */
|
||||
bst_float last_fvalue;
|
||||
/*! \brief current size of sketch */
|
||||
double next_goal;
|
||||
// pointer to the sketch to put things in
|
||||
utils::WXQuantileSketch<bst_float, bst_float> *sketch;
|
||||
// initialize the space
|
||||
inline void Init(unsigned max_size) {
|
||||
next_goal = -1.0f;
|
||||
rmin = wmin = 0.0f;
|
||||
sketch->temp.Reserve(max_size + 1);
|
||||
sketch->temp.size = 0;
|
||||
}
|
||||
/*!
|
||||
* \brief push a new element to sketch
|
||||
* \param fvalue feature value, comes in sorted ascending order
|
||||
* \param w weight
|
||||
* \param max_size
|
||||
*/
|
||||
inline void Push(bst_float fvalue, bst_float w, unsigned max_size) {
|
||||
if (next_goal == -1.0f) {
|
||||
next_goal = 0.0f;
|
||||
last_fvalue = fvalue;
|
||||
wmin = w;
|
||||
return;
|
||||
}
|
||||
if (last_fvalue != fvalue) {
|
||||
double rmax = rmin + wmin;
|
||||
if (rmax >= next_goal && sketch->temp.size != max_size) {
|
||||
if (sketch->temp.size == 0 ||
|
||||
last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
|
||||
// push to sketch
|
||||
sketch->temp.data[sketch->temp.size] =
|
||||
utils::WXQuantileSketch<bst_float, bst_float>::
|
||||
Entry(static_cast<bst_float>(rmin),
|
||||
static_cast<bst_float>(rmax),
|
||||
static_cast<bst_float>(wmin), last_fvalue);
|
||||
utils::Assert(sketch->temp.size < max_size,
|
||||
"invalid maximum size max_size=%u, stemp.size=%lu\n",
|
||||
max_size, sketch->temp.size);
|
||||
++sketch->temp.size;
|
||||
}
|
||||
if (sketch->temp.size == max_size) {
|
||||
next_goal = sum_total * 2.0f + 1e-5f;
|
||||
} else {
|
||||
next_goal = static_cast<bst_float>(sketch->temp.size * sum_total / max_size);
|
||||
}
|
||||
} else {
|
||||
if (rmax >= next_goal) {
|
||||
rabit::TrackerPrintf("INFO: rmax=%g, sum_total=%g, next_goal=%g, size=%lu\n",
|
||||
rmax, sum_total, next_goal, sketch->temp.size);
|
||||
}
|
||||
}
|
||||
rmin = rmax;
|
||||
wmin = w;
|
||||
last_fvalue = fvalue;
|
||||
} else {
|
||||
wmin += w;
|
||||
}
|
||||
}
|
||||
/*! \brief push final unfinished value to the sketch */
|
||||
inline void Finalize(unsigned max_size) {
|
||||
double rmax = rmin + wmin;
|
||||
if (sketch->temp.size == 0 || last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
|
||||
utils::Assert(sketch->temp.size <= max_size,
|
||||
"Finalize: invalid maximum size, max_size=%u, stemp.size=%lu",
|
||||
max_size, sketch->temp.size);
|
||||
// push to sketch
|
||||
sketch->temp.data[sketch->temp.size] =
|
||||
utils::WXQuantileSketch<bst_float, bst_float>::
|
||||
Entry(static_cast<bst_float>(rmin),
|
||||
static_cast<bst_float>(rmax),
|
||||
static_cast<bst_float>(wmin), last_fvalue);
|
||||
++sketch->temp.size;
|
||||
}
|
||||
sketch->PushTemp();
|
||||
}
|
||||
};
|
||||
/*! \brief training parameter of tree grower */
|
||||
TrainParam param;
|
||||
/*! \brief queue of nodes to be expanded */
|
||||
std::vector<int> qexpand;
|
||||
/*!
|
||||
* \brief map active node to its working index offset in qexpand,
|
||||
* can be -1, which means the node is not actively expanding
|
||||
*/
|
||||
std::vector<int> node2workindex;
|
||||
/*!
|
||||
* \brief position of each instance in the tree
|
||||
* can be negative, which means this position is no longer expanding
|
||||
* see also Decode/EncodePosition
|
||||
*/
|
||||
std::vector<int> position;
|
||||
|
||||
private:
|
||||
inline void UpdateNode2WorkIndex(const RegTree &tree) {
|
||||
// update the node2workindex
|
||||
std::fill(node2workindex.begin(), node2workindex.end(), -1);
|
||||
node2workindex.resize(tree.param.num_nodes);
|
||||
for (size_t i = 0; i < qexpand.size(); ++i) {
|
||||
node2workindex[qexpand[i]] = static_cast<int>(i);
|
||||
}
|
||||
}
|
||||
};
|
||||
} // namespace tree
|
||||
} // namespace xgboost
|
||||
#endif // XGBOOST_TREE_UPDATER_BASEMAKER_INL_HPP_
|
||||
769
src/tree/updater_histmaker.cc
Normal file
@@ -0,0 +1,769 @@
|
||||
/*!
|
||||
* Copyright 2014 by Contributors
|
||||
* \file updater_histmaker-inl.hpp
|
||||
* \brief use histogram counting to construct a tree
|
||||
* \author Tianqi Chen
|
||||
*/
|
||||
#ifndef XGBOOST_TREE_UPDATER_HISTMAKER_INL_HPP_
|
||||
#define XGBOOST_TREE_UPDATER_HISTMAKER_INL_HPP_
|
||||
|
||||
#include <vector>
|
||||
#include <algorithm>
|
||||
#include "../sync/sync.h"
|
||||
#include "../utils/quantile.h"
|
||||
#include "../utils/group_data.h"
|
||||
#include "./updater_basemaker-inl.hpp"
|
||||
|
||||
namespace xgboost {
|
||||
namespace tree {
|
||||
template<typename TStats>
|
||||
class HistMaker: public BaseMaker {
|
||||
public:
|
||||
virtual ~HistMaker(void) {}
|
||||
virtual void Update(const std::vector<bst_gpair> &gpair,
|
||||
IFMatrix *p_fmat,
|
||||
const BoosterInfo &info,
|
||||
const std::vector<RegTree*> &trees) {
|
||||
TStats::CheckInfo(info);
|
||||
// rescale learning rate according to size of trees
|
||||
float lr = param.learning_rate;
|
||||
param.learning_rate = lr / trees.size();
|
||||
// build tree
|
||||
for (size_t i = 0; i < trees.size(); ++i) {
|
||||
this->Update(gpair, p_fmat, info, trees[i]);
|
||||
}
|
||||
param.learning_rate = lr;
|
||||
}
|
||||
|
||||
protected:
|
||||
/*! \brief a single histogram */
|
||||
struct HistUnit {
|
||||
/*! \brief cutting point of histogram, contains maximum point */
|
||||
const bst_float *cut;
|
||||
/*! \brief content of statistics data */
|
||||
TStats *data;
|
||||
/*! \brief size of histogram */
|
||||
unsigned size;
|
||||
// default constructor
|
||||
HistUnit(void) {}
|
||||
// constructor
|
||||
HistUnit(const bst_float *cut, TStats *data, unsigned size)
|
||||
: cut(cut), data(data), size(size) {}
|
||||
/*! \brief add one instance's statistics into the corresponding histogram bin */
|
||||
inline void Add(bst_float fv,
|
||||
const std::vector<bst_gpair> &gpair,
|
||||
const BoosterInfo &info,
|
||||
const bst_uint ridx) {
|
||||
unsigned i = std::upper_bound(cut, cut + size, fv) - cut;
|
||||
utils::Assert(size != 0, "try insert into size=0");
|
||||
utils::Assert(i < size,
|
||||
"maximum value must be in cut, fv = %g, cutmax=%g", fv, cut[size-1]);
|
||||
data[i].Add(gpair, info, ridx);
|
||||
}
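// note: std::upper_bound returns the first cut strictly greater than fv, so its
// offset from cut is the bin index; because the last cut is chosen to exceed the
// column maximum (see ResetPosAndPropose), i < size holds for in-range values.
// For example (illustrative values only), with cut = {1.0, 3.5, 10.0} a value
// fv = 2.0 falls into bin 1 and fv = 9.9 into bin 2.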
|
||||
};
|
||||
/*! \brief a set of histograms from different index */
|
||||
struct HistSet {
|
||||
/*! \brief the index pointer of each histunit */
|
||||
const unsigned *rptr;
|
||||
/*! \brief cutting points in each histunit */
|
||||
const bst_float *cut;
|
||||
/*! \brief data in different hist unit */
|
||||
std::vector<TStats> data;
|
||||
/*! \brief get the histogram unit of the fid-th feature */
|
||||
inline HistUnit operator[](size_t fid) {
|
||||
return HistUnit(cut + rptr[fid],
|
||||
&data[0] + rptr[fid],
|
||||
rptr[fid+1] - rptr[fid]);
|
||||
}
|
||||
};
|
||||
// thread workspace
|
||||
struct ThreadWSpace {
|
||||
/*! \brief actual unit pointer */
|
||||
std::vector<unsigned> rptr;
|
||||
/*! \brief cut field */
|
||||
std::vector<bst_float> cut;
|
||||
// per thread histset
|
||||
std::vector<HistSet> hset;
|
||||
// initialize the hist set
|
||||
inline void Init(const TrainParam ¶m, int nthread) {
|
||||
hset.resize(nthread);
|
||||
// cleanup statistics
|
||||
for (int tid = 0; tid < nthread; ++tid) {
|
||||
for (size_t i = 0; i < hset[tid].data.size(); ++i) {
|
||||
hset[tid].data[i].Clear();
|
||||
}
|
||||
hset[tid].rptr = BeginPtr(rptr);
|
||||
hset[tid].cut = BeginPtr(cut);
|
||||
hset[tid].data.resize(cut.size(), TStats(param));
|
||||
}
|
||||
}
|
||||
// aggregate all statistics to hset[0]
|
||||
inline void Aggregate(void) {
|
||||
bst_omp_uint nsize = static_cast<bst_omp_uint>(cut.size());
|
||||
#pragma omp parallel for schedule(static)
|
||||
for (bst_omp_uint i = 0; i < nsize; ++i) {
|
||||
for (size_t tid = 1; tid < hset.size(); ++tid) {
|
||||
hset[0].data[i].Add(hset[tid].data[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
/*! \brief clear the workspace */
|
||||
inline void Clear(void) {
|
||||
cut.clear(); rptr.resize(1); rptr[0] = 0;
|
||||
}
|
||||
/*! \brief total size */
|
||||
inline size_t Size(void) const {
|
||||
return rptr.size() - 1;
|
||||
}
|
||||
};
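// layout note: rptr/cut describe one histogram per (node in qexpand, working feature),
// plus one trailing unit per node that holds the node's aggregated statistics. This is
// the basis of the indexing used later in this file:
//   hset[0][fid_offset + wid * (num_feature + 1)]    // histogram of one feature
//   hset[0][num_feature + wid * (num_feature + 1)]   // per-node statistics slot
// where wid is the node's working index in qexpand.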
|
||||
// workspace of thread
|
||||
ThreadWSpace wspace;
|
||||
// reducer for histogram
|
||||
rabit::Reducer<TStats, TStats::Reduce> histred;
|
||||
// set of working features
|
||||
std::vector<bst_uint> fwork_set;
|
||||
// update function implementation
|
||||
virtual void Update(const std::vector<bst_gpair> &gpair,
|
||||
IFMatrix *p_fmat,
|
||||
const BoosterInfo &info,
|
||||
RegTree *p_tree) {
|
||||
this->InitData(gpair, *p_fmat, info.root_index, *p_tree);
|
||||
this->InitWorkSet(p_fmat, *p_tree, &fwork_set);
|
||||
for (int depth = 0; depth < param.max_depth; ++depth) {
|
||||
// reset and propose candidate split
|
||||
this->ResetPosAndPropose(gpair, p_fmat, info, fwork_set, *p_tree);
|
||||
// create histogram
|
||||
this->CreateHist(gpair, p_fmat, info, fwork_set, *p_tree);
|
||||
// find split based on histogram statistics
|
||||
this->FindSplit(depth, gpair, p_fmat, info, fwork_set, p_tree);
|
||||
// reset position after split
|
||||
this->ResetPositionAfterSplit(p_fmat, *p_tree);
|
||||
this->UpdateQueueExpand(*p_tree);
|
||||
// if nothing left to expand, break
|
||||
if (qexpand.size() == 0) break;
|
||||
}
|
||||
for (size_t i = 0; i < qexpand.size(); ++i) {
|
||||
const int nid = qexpand[i];
|
||||
(*p_tree)[nid].set_leaf(p_tree->stat(nid).base_weight * param.learning_rate);
|
||||
}
|
||||
}
|
||||
// this function does two jobs
|
||||
// (1) reset the position in array position, to be the latest leaf id
|
||||
// (2) propose a set of candidate cuts and set wspace.rptr wspace.cut correctly
|
||||
virtual void ResetPosAndPropose(const std::vector<bst_gpair> &gpair,
|
||||
IFMatrix *p_fmat,
|
||||
const BoosterInfo &info,
|
||||
const std::vector <bst_uint> &fset,
|
||||
const RegTree &tree) = 0;
|
||||
// initialize the current working set of features in this round
|
||||
virtual void InitWorkSet(IFMatrix *p_fmat,
|
||||
const RegTree &tree,
|
||||
std::vector<bst_uint> *p_fset) {
|
||||
p_fset->resize(tree.param.num_feature);
|
||||
for (size_t i = 0; i < p_fset->size(); ++i) {
|
||||
(*p_fset)[i] = static_cast<unsigned>(i);
|
||||
}
|
||||
}
|
||||
// reset position after split, this is not a must, depending on implementation
|
||||
virtual void ResetPositionAfterSplit(IFMatrix *p_fmat,
|
||||
const RegTree &tree) {
|
||||
}
|
||||
virtual void CreateHist(const std::vector<bst_gpair> &gpair,
|
||||
IFMatrix *p_fmat,
|
||||
const BoosterInfo &info,
|
||||
const std::vector <bst_uint> &fset,
|
||||
const RegTree &tree) = 0;
|
||||
|
||||
private:
|
||||
inline void EnumerateSplit(const HistUnit &hist,
|
||||
const TStats &node_sum,
|
||||
bst_uint fid,
|
||||
SplitEntry *best,
|
||||
TStats *left_sum) {
|
||||
if (hist.size == 0) return;
|
||||
|
||||
double root_gain = node_sum.CalcGain(param);
|
||||
TStats s(param), c(param);
|
||||
for (bst_uint i = 0; i < hist.size; ++i) {
|
||||
s.Add(hist.data[i]);
|
||||
if (s.sum_hess >= param.min_child_weight) {
|
||||
c.SetSubstract(node_sum, s);
|
||||
if (c.sum_hess >= param.min_child_weight) {
|
||||
double loss_chg = s.CalcGain(param) + c.CalcGain(param) - root_gain;
|
||||
if (best->Update(static_cast<float>(loss_chg), fid, hist.cut[i], false)) {
|
||||
*left_sum = s;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
s.Clear();
|
||||
for (bst_uint i = hist.size - 1; i != 0; --i) {
|
||||
s.Add(hist.data[i]);
|
||||
if (s.sum_hess >= param.min_child_weight) {
|
||||
c.SetSubstract(node_sum, s);
|
||||
if (c.sum_hess >= param.min_child_weight) {
|
||||
double loss_chg = s.CalcGain(param) + c.CalcGain(param) - root_gain;
|
||||
if (best->Update(static_cast<float>(loss_chg), fid, hist.cut[i-1], true)) {
|
||||
*left_sum = c;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
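// note: the two scans above differ only in where instances with missing values go.
// The forward scan accumulates bins from the left and proposes splits with missing
// values defaulting to the right branch (default_left = false); the backward scan
// accumulates from the right and re-evaluates the same cut points with missing values
// defaulting to the left branch (default_left = true), recording the implied left-side
// statistics in *left_sum in both cases.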
|
||||
inline void FindSplit(int depth,
|
||||
const std::vector<bst_gpair> &gpair,
|
||||
IFMatrix *p_fmat,
|
||||
const BoosterInfo &info,
|
||||
const std::vector <bst_uint> &fset,
|
||||
RegTree *p_tree) {
|
||||
const size_t num_feature = fset.size();
|
||||
// get the best split condition for each node
|
||||
std::vector<SplitEntry> sol(qexpand.size());
|
||||
std::vector<TStats> left_sum(qexpand.size());
|
||||
bst_omp_uint nexpand = static_cast<bst_omp_uint>(qexpand.size());
|
||||
#pragma omp parallel for schedule(dynamic, 1)
|
||||
for (bst_omp_uint wid = 0; wid < nexpand; ++wid) {
|
||||
const int nid = qexpand[wid];
|
||||
utils::Assert(node2workindex[nid] == static_cast<int>(wid),
|
||||
"node2workindex inconsistent");
|
||||
SplitEntry &best = sol[wid];
|
||||
TStats &node_sum = wspace.hset[0][num_feature + wid * (num_feature + 1)].data[0];
|
||||
for (size_t i = 0; i < fset.size(); ++i) {
|
||||
EnumerateSplit(this->wspace.hset[0][i + wid * (num_feature+1)],
|
||||
node_sum, fset[i], &best, &left_sum[wid]);
|
||||
}
|
||||
}
|
||||
// get the best result, we can synchronize the solution
|
||||
for (bst_omp_uint wid = 0; wid < nexpand; ++wid) {
|
||||
const int nid = qexpand[wid];
|
||||
const SplitEntry &best = sol[wid];
|
||||
const TStats &node_sum = wspace.hset[0][num_feature + wid * (num_feature + 1)].data[0];
|
||||
this->SetStats(p_tree, nid, node_sum);
|
||||
// set up the values
|
||||
p_tree->stat(nid).loss_chg = best.loss_chg;
|
||||
// now we know the solution in snode[nid], set split
|
||||
if (best.loss_chg > rt_eps) {
|
||||
p_tree->AddChilds(nid);
|
||||
(*p_tree)[nid].set_split(best.split_index(),
|
||||
best.split_value, best.default_left());
|
||||
// mark both children as fresh leaves with value 0
|
||||
(*p_tree)[(*p_tree)[nid].cleft()].set_leaf(0.0f, 0);
|
||||
(*p_tree)[(*p_tree)[nid].cright()].set_leaf(0.0f, 0);
|
||||
// right side sum
|
||||
TStats right_sum;
|
||||
right_sum.SetSubstract(node_sum, left_sum[wid]);
|
||||
this->SetStats(p_tree, (*p_tree)[nid].cleft(), left_sum[wid]);
|
||||
this->SetStats(p_tree, (*p_tree)[nid].cright(), right_sum);
|
||||
} else {
|
||||
(*p_tree)[nid].set_leaf(p_tree->stat(nid).base_weight * param.learning_rate);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
inline void SetStats(RegTree *p_tree, int nid, const TStats &node_sum) {
|
||||
p_tree->stat(nid).base_weight = static_cast<float>(node_sum.CalcWeight(param));
|
||||
p_tree->stat(nid).sum_hess = static_cast<float>(node_sum.sum_hess);
|
||||
node_sum.SetLeafVec(param, p_tree->leafvec(nid));
|
||||
}
|
||||
};
|
||||
|
||||
template<typename TStats>
|
||||
class CQHistMaker: public HistMaker<TStats> {
|
||||
protected:
|
||||
struct HistEntry {
|
||||
typename HistMaker<TStats>::HistUnit hist;
|
||||
unsigned istart;
|
||||
/*!
|
||||
* \brief add one instance's statistics into the histogram,
|
||||
* do linear scan, start from istart
|
||||
*/
|
||||
inline void Add(bst_float fv,
|
||||
const std::vector<bst_gpair> &gpair,
|
||||
const BoosterInfo &info,
|
||||
const bst_uint ridx) {
|
||||
while (istart < hist.size && !(fv < hist.cut[istart])) ++istart;
|
||||
utils::Assert(istart != hist.size, "the bound variable must be max");
|
||||
hist.data[istart].Add(gpair, info, ridx);
|
||||
}
|
||||
/*!
|
||||
* \brief add pre-fetched gradient statistics into the histogram,
|
||||
* do linear scan, start from istart
|
||||
*/
|
||||
inline void Add(bst_float fv,
|
||||
bst_gpair gstats) {
|
||||
while (istart < hist.size && !(fv < hist.cut[istart])) ++istart;
|
||||
utils::Assert(istart != hist.size, "the bound variable must be max");
|
||||
hist.data[istart].Add(gstats);
|
||||
}
|
||||
};
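// note: the linear scan in HistEntry::Add is only correct if each column presents its
// entries in ascending feature-value order (which the column-major format is assumed
// to provide); under that assumption istart never needs to move backwards for a given
// node, so repeated Add calls stay O(1) amortized per entry.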
|
||||
// sketch type used for this
|
||||
typedef utils::WXQuantileSketch<bst_float, bst_float> WXQSketch;
|
||||
// initialize the work set of tree
|
||||
virtual void InitWorkSet(IFMatrix *p_fmat,
|
||||
const RegTree &tree,
|
||||
std::vector<bst_uint> *p_fset) {
|
||||
feat_helper.InitByCol(p_fmat, tree);
|
||||
feat_helper.SampleCol(this->param.colsample_bytree, p_fset);
|
||||
}
|
||||
// code to create histogram
|
||||
virtual void CreateHist(const std::vector<bst_gpair> &gpair,
|
||||
IFMatrix *p_fmat,
|
||||
const BoosterInfo &info,
|
||||
const std::vector<bst_uint> &fset,
|
||||
const RegTree &tree) {
|
||||
// fill in reverse map
|
||||
feat2workindex.resize(tree.param.num_feature);
|
||||
std::fill(feat2workindex.begin(), feat2workindex.end(), -1);
|
||||
for (size_t i = 0; i < fset.size(); ++i) {
|
||||
feat2workindex[fset[i]] = static_cast<int>(i);
|
||||
}
|
||||
// start to work
|
||||
this->wspace.Init(this->param, 1);
|
||||
// if it is C++11, use lazy evaluation for Allreduce,
|
||||
// to gain speedup in recovery
|
||||
#if __cplusplus >= 201103L
|
||||
auto lazy_get_hist = [&]()
|
||||
#endif
|
||||
{
|
||||
thread_hist.resize(this->get_nthread());
|
||||
// start accumulating statistics
|
||||
utils::IIterator<ColBatch> *iter = p_fmat->ColIterator(fset);
|
||||
iter->BeforeFirst();
|
||||
while (iter->Next()) {
|
||||
const ColBatch &batch = iter->Value();
|
||||
// start enumeration
|
||||
const bst_omp_uint nsize = static_cast<bst_omp_uint>(batch.size);
|
||||
#pragma omp parallel for schedule(dynamic, 1)
|
||||
for (bst_omp_uint i = 0; i < nsize; ++i) {
|
||||
int offset = feat2workindex[batch.col_index[i]];
|
||||
if (offset >= 0) {
|
||||
this->UpdateHistCol(gpair, batch[i], info, tree,
|
||||
fset, offset,
|
||||
&thread_hist[omp_get_thread_num()]);
|
||||
}
|
||||
}
|
||||
}
|
||||
for (size_t i = 0; i < this->qexpand.size(); ++i) {
|
||||
const int nid = this->qexpand[i];
|
||||
const int wid = this->node2workindex[nid];
|
||||
this->wspace.hset[0][fset.size() + wid * (fset.size()+1)]
|
||||
.data[0] = node_stats[nid];
|
||||
}
|
||||
};
|
||||
// sync the histogram
|
||||
// if it is C++11, use lazy evaluation for Allreduce
|
||||
#if __cplusplus >= 201103L
|
||||
this->histred.Allreduce(BeginPtr(this->wspace.hset[0].data),
|
||||
this->wspace.hset[0].data.size(), lazy_get_hist);
|
||||
#else
|
||||
this->histred.Allreduce(BeginPtr(this->wspace.hset[0].data), this->wspace.hset[0].data.size());
|
||||
#endif
|
||||
}
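// note on the #if blocks above: under C++11 the histogram construction is wrapped in a
// lambda and handed to Allreduce as a lazy preparation function, so rabit can skip
// rebuilding the local histograms when the reduced result can be restored from a
// checkpoint during failure recovery; without C++11 the same work runs eagerly before
// the blocking Allreduce.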
|
||||
virtual void ResetPositionAfterSplit(IFMatrix *p_fmat,
|
||||
const RegTree &tree) {
|
||||
this->ResetPositionCol(this->qexpand, p_fmat, tree);
|
||||
}
|
||||
virtual void ResetPosAndPropose(const std::vector<bst_gpair> &gpair,
|
||||
IFMatrix *p_fmat,
|
||||
const BoosterInfo &info,
|
||||
const std::vector<bst_uint> &fset,
|
||||
const RegTree &tree) {
|
||||
// fill in reverse map
|
||||
feat2workindex.resize(tree.param.num_feature);
|
||||
std::fill(feat2workindex.begin(), feat2workindex.end(), -1);
|
||||
freal_set.clear();
|
||||
for (size_t i = 0; i < fset.size(); ++i) {
|
||||
if (feat_helper.Type(fset[i]) == 2) {
|
||||
feat2workindex[fset[i]] = static_cast<int>(freal_set.size());
|
||||
freal_set.push_back(fset[i]);
|
||||
} else {
|
||||
feat2workindex[fset[i]] = -2;
|
||||
}
|
||||
}
|
||||
this->GetNodeStats(gpair, *p_fmat, tree, info,
|
||||
&thread_stats, &node_stats);
|
||||
sketchs.resize(this->qexpand.size() * freal_set.size());
|
||||
for (size_t i = 0; i < sketchs.size(); ++i) {
|
||||
sketchs[i].Init(info.num_row, this->param.sketch_eps);
|
||||
}
|
||||
// initialize the summary array
|
||||
summary_array.resize(sketchs.size());
|
||||
// setup maximum size
|
||||
unsigned max_size = this->param.max_sketch_size();
|
||||
for (size_t i = 0; i < sketchs.size(); ++i) {
|
||||
summary_array[i].Reserve(max_size);
|
||||
}
|
||||
// if it is C++11, use lazy evaluation for Allreduce
|
||||
#if __cplusplus >= 201103L
|
||||
auto lazy_get_summary = [&]()
|
||||
#endif
|
||||
{
|
||||
// get the summary of each per-node sketch
|
||||
thread_sketch.resize(this->get_nthread());
|
||||
// number of rows in the buffered rowset
|
||||
const size_t nrows = p_fmat->buffered_rowset().size();
|
||||
// start accumulating statistics
|
||||
utils::IIterator<ColBatch> *iter = p_fmat->ColIterator(freal_set);
|
||||
iter->BeforeFirst();
|
||||
while (iter->Next()) {
|
||||
const ColBatch &batch = iter->Value();
|
||||
// start enumeration
|
||||
const bst_omp_uint nsize = static_cast<bst_omp_uint>(batch.size);
|
||||
#pragma omp parallel for schedule(dynamic, 1)
|
||||
for (bst_omp_uint i = 0; i < nsize; ++i) {
|
||||
int offset = feat2workindex[batch.col_index[i]];
|
||||
if (offset >= 0) {
|
||||
this->UpdateSketchCol(gpair, batch[i], tree,
|
||||
node_stats,
|
||||
freal_set, offset,
|
||||
batch[i].length == nrows,
|
||||
&thread_sketch[omp_get_thread_num()]);
|
||||
}
|
||||
}
|
||||
}
|
||||
for (size_t i = 0; i < sketchs.size(); ++i) {
|
||||
utils::WXQuantileSketch<bst_float, bst_float>::SummaryContainer out;
|
||||
sketchs[i].GetSummary(&out);
|
||||
summary_array[i].SetPrune(out, max_size);
|
||||
}
|
||||
utils::Assert(summary_array.size() == sketchs.size(), "shape mismatch");
|
||||
};
|
||||
if (summary_array.size() != 0) {
|
||||
size_t nbytes = WXQSketch::SummaryContainer::CalcMemCost(max_size);
|
||||
#if __cplusplus >= 201103L
|
||||
sreducer.Allreduce(BeginPtr(summary_array), nbytes, summary_array.size(), lazy_get_summary);
|
||||
#else
|
||||
sreducer.Allreduce(BeginPtr(summary_array), nbytes, summary_array.size());
|
||||
#endif
|
||||
}
|
||||
// now we get the final result of sketch, setup the cut
|
||||
this->wspace.cut.clear();
|
||||
this->wspace.rptr.clear();
|
||||
this->wspace.rptr.push_back(0);
|
||||
for (size_t wid = 0; wid < this->qexpand.size(); ++wid) {
|
||||
for (size_t i = 0; i < fset.size(); ++i) {
|
||||
int offset = feat2workindex[fset[i]];
|
||||
if (offset >= 0) {
|
||||
const WXQSketch::Summary &a = summary_array[wid * freal_set.size() + offset];
|
||||
for (size_t i = 1; i < a.size; ++i) {
|
||||
bst_float cpt = a.data[i].value - rt_eps;
|
||||
if (i == 1 || cpt > this->wspace.cut.back()) {
|
||||
this->wspace.cut.push_back(cpt);
|
||||
}
|
||||
}
|
||||
// push a value that is greater than anything
|
||||
if (a.size != 0) {
|
||||
bst_float cpt = a.data[a.size - 1].value;
|
||||
// this sentinel must be strictly bigger than the largest feature value
|
||||
bst_float last = cpt + fabs(cpt) + rt_eps;
|
||||
this->wspace.cut.push_back(last);
|
||||
}
|
||||
this->wspace.rptr.push_back(static_cast<unsigned>(this->wspace.cut.size()));
|
||||
} else {
|
||||
utils::Assert(offset == -2, "BUG in mark");
|
||||
bst_float cpt = feat_helper.MaxValue(fset[i]);
|
||||
this->wspace.cut.push_back(cpt + fabs(cpt) + rt_eps);
|
||||
this->wspace.rptr.push_back(static_cast<unsigned>(this->wspace.cut.size()));
|
||||
}
|
||||
}
|
||||
// reserve last value for global statistics
|
||||
this->wspace.cut.push_back(0.0f);
|
||||
this->wspace.rptr.push_back(static_cast<unsigned>(this->wspace.cut.size()));
|
||||
}
|
||||
utils::Assert(this->wspace.rptr.size() ==
|
||||
(fset.size() + 1) * this->qexpand.size() + 1,
|
||||
"cut space inconsistent");
|
||||
}
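// summary of the proposal step above: for every expanded node and every feature flagged
// as real-valued (type 2), a pruned quantile summary supplies candidate cut points
// (each summary value shifted down by rt_eps), followed by one sentinel cut larger than
// any observed value; features marked -2 get a single sentinel cut derived from the
// feature helper's maximum. One extra zero cut per node reserves the slot used to carry
// the node's global statistics, matching the (fset.size() + 1) stride asserted at the end.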
|
||||
|
||||
private:
|
||||
inline void UpdateHistCol(const std::vector<bst_gpair> &gpair,
|
||||
const ColBatch::Inst &c,
|
||||
const BoosterInfo &info,
|
||||
const RegTree &tree,
|
||||
const std::vector<bst_uint> &fset,
|
||||
bst_uint fid_offset,
|
||||
std::vector<HistEntry> *p_temp) {
|
||||
if (c.length == 0) return;
|
||||
// initialize sbuilder for use
|
||||
std::vector<HistEntry> &hbuilder = *p_temp;
|
||||
hbuilder.resize(tree.param.num_nodes);
|
||||
for (size_t i = 0; i < this->qexpand.size(); ++i) {
|
||||
const unsigned nid = this->qexpand[i];
|
||||
const unsigned wid = this->node2workindex[nid];
|
||||
hbuilder[nid].istart = 0;
|
||||
hbuilder[nid].hist = this->wspace.hset[0][fid_offset + wid * (fset.size()+1)];
|
||||
}
|
||||
if (TStats::kSimpleStats != 0 && this->param.cache_opt != 0) {
|
||||
const bst_uint kBuffer = 32;
|
||||
bst_uint align_length = c.length / kBuffer * kBuffer;
|
||||
int buf_position[kBuffer];
|
||||
bst_gpair buf_gpair[kBuffer];
|
||||
for (bst_uint j = 0; j < align_length; j += kBuffer) {
|
||||
for (bst_uint i = 0; i < kBuffer; ++i) {
|
||||
bst_uint ridx = c[j + i].index;
|
||||
buf_position[i] = this->position[ridx];
|
||||
buf_gpair[i] = gpair[ridx];
|
||||
}
|
||||
for (bst_uint i = 0; i < kBuffer; ++i) {
|
||||
const int nid = buf_position[i];
|
||||
if (nid >= 0) {
|
||||
hbuilder[nid].Add(c[j + i].fvalue, buf_gpair[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
for (bst_uint j = align_length; j < c.length; ++j) {
|
||||
const bst_uint ridx = c[j].index;
|
||||
const int nid = this->position[ridx];
|
||||
if (nid >= 0) {
|
||||
hbuilder[nid].Add(c[j].fvalue, gpair[ridx]);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for (bst_uint j = 0; j < c.length; ++j) {
|
||||
const bst_uint ridx = c[j].index;
|
||||
const int nid = this->position[ridx];
|
||||
if (nid >= 0) {
|
||||
hbuilder[nid].Add(c[j].fvalue, gpair, info, ridx);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
inline void UpdateSketchCol(const std::vector<bst_gpair> &gpair,
|
||||
const ColBatch::Inst &c,
|
||||
const RegTree &tree,
|
||||
const std::vector<TStats> &nstats,
|
||||
const std::vector<bst_uint> &frealset,
|
||||
bst_uint offset,
|
||||
bool col_full,
|
||||
std::vector<BaseMaker::SketchEntry> *p_temp) {
|
||||
if (c.length == 0) return;
|
||||
// initialize sbuilder for use
|
||||
std::vector<BaseMaker::SketchEntry> &sbuilder = *p_temp;
|
||||
sbuilder.resize(tree.param.num_nodes);
|
||||
for (size_t i = 0; i < this->qexpand.size(); ++i) {
|
||||
const unsigned nid = this->qexpand[i];
|
||||
const unsigned wid = this->node2workindex[nid];
|
||||
sbuilder[nid].sum_total = 0.0f;
|
||||
sbuilder[nid].sketch = &sketchs[wid * frealset.size() + offset];
|
||||
}
|
||||
|
||||
if (!col_full) {
|
||||
// first pass, get the sum of weight; TODO: optimize to skip this pass
|
||||
for (bst_uint j = 0; j < c.length; ++j) {
|
||||
const bst_uint ridx = c[j].index;
|
||||
const int nid = this->position[ridx];
|
||||
if (nid >= 0) {
|
||||
sbuilder[nid].sum_total += gpair[ridx].hess;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for (size_t i = 0; i < this->qexpand.size(); ++i) {
|
||||
const unsigned nid = this->qexpand[i];
|
||||
sbuilder[nid].sum_total = static_cast<bst_float>(nstats[nid].sum_hess);
|
||||
}
|
||||
}
|
||||
// if only one value, no need to do second pass
|
||||
if (c[0].fvalue == c[c.length-1].fvalue) {
|
||||
for (size_t i = 0; i < this->qexpand.size(); ++i) {
|
||||
const int nid = this->qexpand[i];
|
||||
sbuilder[nid].sketch->Push(c[0].fvalue, static_cast<bst_float>(sbuilder[nid].sum_total));
|
||||
}
|
||||
return;
|
||||
}
|
||||
// two pass scan
|
||||
unsigned max_size = this->param.max_sketch_size();
|
||||
for (size_t i = 0; i < this->qexpand.size(); ++i) {
|
||||
const int nid = this->qexpand[i];
|
||||
sbuilder[nid].Init(max_size);
|
||||
}
|
||||
// second pass, build the sketch
|
||||
if (TStats::kSimpleStats != 0 && this->param.cache_opt != 0) {
|
||||
const bst_uint kBuffer = 32;
|
||||
bst_uint align_length = c.length / kBuffer * kBuffer;
|
||||
int buf_position[kBuffer];
|
||||
bst_float buf_hess[kBuffer];
|
||||
for (bst_uint j = 0; j < align_length; j += kBuffer) {
|
||||
for (bst_uint i = 0; i < kBuffer; ++i) {
|
||||
bst_uint ridx = c[j + i].index;
|
||||
buf_position[i] = this->position[ridx];
|
||||
buf_hess[i] = gpair[ridx].hess;
|
||||
}
|
||||
for (bst_uint i = 0; i < kBuffer; ++i) {
|
||||
const int nid = buf_position[i];
|
||||
if (nid >= 0) {
|
||||
sbuilder[nid].Push(c[j + i].fvalue, buf_hess[i], max_size);
|
||||
}
|
||||
}
|
||||
}
|
||||
for (bst_uint j = align_length; j < c.length; ++j) {
|
||||
const bst_uint ridx = c[j].index;
|
||||
const int nid = this->position[ridx];
|
||||
if (nid >= 0) {
|
||||
sbuilder[nid].Push(c[j].fvalue, gpair[ridx].hess, max_size);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for (bst_uint j = 0; j < c.length; ++j) {
|
||||
const bst_uint ridx = c[j].index;
|
||||
const int nid = this->position[ridx];
|
||||
if (nid >= 0) {
|
||||
sbuilder[nid].Push(c[j].fvalue, gpair[ridx].hess, max_size);
|
||||
}
|
||||
}
|
||||
}
|
||||
for (size_t i = 0; i < this->qexpand.size(); ++i) {
|
||||
const int nid = this->qexpand[i];
|
||||
sbuilder[nid].Finalize(max_size);
|
||||
}
|
||||
}
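// note: UpdateSketchCol is a two-pass scheme. The first pass obtains each node's total
// weight (sum of hessians), or reuses the precomputed node statistics when the column
// covers every buffered row; the second pass streams the sorted column into each node's
// SketchEntry, with a buffered path (kBuffer = 32) that batches position and gradient
// lookups to improve cache behaviour when simple statistics and cache_opt are enabled.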
|
||||
// feature helper
|
||||
BaseMaker::FMetaHelper feat_helper;
|
||||
// temp space to map feature id to working index
|
||||
std::vector<int> feat2workindex;
|
||||
// set of index from fset that are real
|
||||
std::vector<bst_uint> freal_set;
|
||||
// thread temp data
|
||||
std::vector< std::vector<BaseMaker::SketchEntry> > thread_sketch;
|
||||
// used to hold statistics
|
||||
std::vector< std::vector<TStats> > thread_stats;
|
||||
// used to hold start pointer
|
||||
std::vector< std::vector<HistEntry> > thread_hist;
|
||||
// node statistics
|
||||
std::vector<TStats> node_stats;
|
||||
// summary array
|
||||
std::vector<WXQSketch::SummaryContainer> summary_array;
|
||||
// reducer for summary
|
||||
rabit::SerializeReducer<WXQSketch::SummaryContainer> sreducer;
|
||||
// per node, per feature sketch
|
||||
std::vector< utils::WXQuantileSketch<bst_float, bst_float> > sketchs;
|
||||
};
|
||||
|
||||
template<typename TStats>
|
||||
class QuantileHistMaker: public HistMaker<TStats> {
|
||||
protected:
|
||||
typedef utils::WXQuantileSketch<bst_float, bst_float> WXQSketch;
|
||||
virtual void ResetPosAndPropose(const std::vector<bst_gpair> &gpair,
|
||||
IFMatrix *p_fmat,
|
||||
const BoosterInfo &info,
|
||||
const std::vector <bst_uint> &fset,
|
||||
const RegTree &tree) {
|
||||
// initialize the data structure
|
||||
int nthread = BaseMaker::get_nthread();
|
||||
sketchs.resize(this->qexpand.size() * tree.param.num_feature);
|
||||
for (size_t i = 0; i < sketchs.size(); ++i) {
|
||||
sketchs[i].Init(info.num_row, this->param.sketch_eps);
|
||||
}
|
||||
// start accumulating statistics
|
||||
utils::IIterator<RowBatch> *iter = p_fmat->RowIterator();
|
||||
iter->BeforeFirst();
|
||||
while (iter->Next()) {
|
||||
const RowBatch &batch = iter->Value();
|
||||
// parallel convert to column major format
|
||||
utils::ParallelGroupBuilder<SparseBatch::Entry> builder(&col_ptr, &col_data, &thread_col_ptr);
|
||||
builder.InitBudget(tree.param.num_feature, nthread);
|
||||
|
||||
const bst_omp_uint nbatch = static_cast<bst_omp_uint>(batch.size);
|
||||
#pragma omp parallel for schedule(static)
|
||||
for (bst_omp_uint i = 0; i < nbatch; ++i) {
|
||||
RowBatch::Inst inst = batch[i];
|
||||
const bst_uint ridx = static_cast<bst_uint>(batch.base_rowid + i);
|
||||
int nid = this->position[ridx];
|
||||
if (nid >= 0) {
|
||||
if (!tree[nid].is_leaf()) {
|
||||
this->position[ridx] = nid = HistMaker<TStats>::NextLevel(inst, tree, nid);
|
||||
}
|
||||
if (this->node2workindex[nid] < 0) {
|
||||
this->position[ridx] = ~nid;
|
||||
} else {
|
||||
for (bst_uint j = 0; j < inst.length; ++j) {
|
||||
builder.AddBudget(inst[j].index, omp_get_thread_num());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
builder.InitStorage();
|
||||
#pragma omp parallel for schedule(static)
|
||||
for (bst_omp_uint i = 0; i < nbatch; ++i) {
|
||||
RowBatch::Inst inst = batch[i];
|
||||
const bst_uint ridx = static_cast<bst_uint>(batch.base_rowid + i);
|
||||
const int nid = this->position[ridx];
|
||||
if (nid >= 0) {
|
||||
for (bst_uint j = 0; j < inst.length; ++j) {
|
||||
builder.Push(inst[j].index,
|
||||
SparseBatch::Entry(nid, inst[j].fvalue),
|
||||
omp_get_thread_num());
|
||||
}
|
||||
}
|
||||
}
|
||||
// start putting things into sketch
|
||||
const bst_omp_uint nfeat = col_ptr.size() - 1;
|
||||
#pragma omp parallel for schedule(dynamic, 1)
|
||||
for (bst_omp_uint k = 0; k < nfeat; ++k) {
|
||||
for (size_t i = col_ptr[k]; i < col_ptr[k+1]; ++i) {
|
||||
const SparseBatch::Entry &e = col_data[i];
|
||||
const int wid = this->node2workindex[e.index];
|
||||
sketchs[wid * tree.param.num_feature + k].Push(e.fvalue, gpair[e.index].hess);
|
||||
}
|
||||
}
|
||||
}
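// note: the pass above re-buckets the row-major batch into per-feature groups with
// ParallelGroupBuilder; each stored entry records the instance's current node id and
// feature value, so the final loop over col_ptr just above can feed the sketch of every
// (working node, feature) pair from column-grouped data.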
|
||||
// setup maximum size
|
||||
unsigned max_size = this->param.max_sketch_size();
|
||||
// synchronize sketch
|
||||
summary_array.resize(sketchs.size());
|
||||
for (size_t i = 0; i < sketchs.size(); ++i) {
|
||||
utils::WQuantileSketch<bst_float, bst_float>::SummaryContainer out;
|
||||
sketchs[i].GetSummary(&out);
|
||||
summary_array[i].Reserve(max_size);
|
||||
summary_array[i].SetPrune(out, max_size);
|
||||
}
|
||||
|
||||
size_t nbytes = WXQSketch::SummaryContainer::CalcMemCost(max_size);
|
||||
sreducer.Allreduce(BeginPtr(summary_array), nbytes, summary_array.size());
|
||||
// now we get the final result of sketch, setup the cut
|
||||
this->wspace.cut.clear();
|
||||
this->wspace.rptr.clear();
|
||||
this->wspace.rptr.push_back(0);
|
||||
for (size_t wid = 0; wid < this->qexpand.size(); ++wid) {
|
||||
for (int fid = 0; fid < tree.param.num_feature; ++fid) {
|
||||
const WXQSketch::Summary &a = summary_array[wid * tree.param.num_feature + fid];
|
||||
for (size_t i = 1; i < a.size; ++i) {
|
||||
bst_float cpt = a.data[i].value - rt_eps;
|
||||
if (i == 1 || cpt > this->wspace.cut.back()) {
|
||||
this->wspace.cut.push_back(cpt);
|
||||
}
|
||||
}
|
||||
// push a value that is greater than anything
|
||||
if (a.size != 0) {
|
||||
bst_float cpt = a.data[a.size - 1].value;
|
||||
// this sentinel must be strictly bigger than the largest feature value
|
||||
bst_float last = cpt + fabs(cpt) + rt_eps;
|
||||
this->wspace.cut.push_back(last);
|
||||
}
|
||||
this->wspace.rptr.push_back(this->wspace.cut.size());
|
||||
}
|
||||
// reserve last value for global statistics
|
||||
this->wspace.cut.push_back(0.0f);
|
||||
this->wspace.rptr.push_back(this->wspace.cut.size());
|
||||
}
|
||||
utils::Assert(this->wspace.rptr.size() ==
|
||||
(tree.param.num_feature + 1) * this->qexpand.size() + 1,
|
||||
"cut space inconsistent");
|
||||
}
|
||||
|
||||
private:
|
||||
// summary array
|
||||
std::vector<WXQSketch::SummaryContainer> summary_array;
|
||||
// reducer for summary
|
||||
rabit::SerializeReducer<WXQSketch::SummaryContainer> sreducer;
|
||||
// local temp column data structure
|
||||
std::vector<size_t> col_ptr;
|
||||
// local storage of column data
|
||||
std::vector<SparseBatch::Entry> col_data;
|
||||
std::vector< std::vector<size_t> > thread_col_ptr;
|
||||
// per node, per feature sketch
|
||||
std::vector< utils::WQuantileSketch<bst_float, bst_float> > sketchs;
|
||||
};
|
||||
|
||||
} // namespace tree
|
||||
} // namespace xgboost
|
||||
#endif // XGBOOST_TREE_UPDATER_HISTMAKER_INL_HPP_
|
||||
87
src/tree/updater_prune.cc
Normal file
@@ -0,0 +1,87 @@
|
||||
/*!
|
||||
* Copyright 2014 by Contributors
|
||||
* \file updater_prune-inl.hpp
|
||||
* \brief prune a tree given the statistics
|
||||
* \author Tianqi Chen
|
||||
*/
|
||||
#ifndef XGBOOST_TREE_UPDATER_PRUNE_INL_HPP_
|
||||
#define XGBOOST_TREE_UPDATER_PRUNE_INL_HPP_
|
||||
|
||||
#include <vector>
|
||||
#include "./param.h"
|
||||
#include "./updater.h"
|
||||
#include "./updater_sync-inl.hpp"
|
||||
|
||||
namespace xgboost {
|
||||
namespace tree {
|
||||
/*! \brief pruner that prunes a tree after growing finishes */
|
||||
class TreePruner: public IUpdater {
|
||||
public:
|
||||
virtual ~TreePruner(void) {}
|
||||
// set training parameter
|
||||
virtual void SetParam(const char *name, const char *val) {
|
||||
using namespace std;
|
||||
param.SetParam(name, val);
|
||||
syncher.SetParam(name, val);
|
||||
if (!strcmp(name, "silent")) silent = atoi(val);
|
||||
}
|
||||
// update the tree, do pruning
|
||||
virtual void Update(const std::vector<bst_gpair> &gpair,
|
||||
IFMatrix *p_fmat,
|
||||
const BoosterInfo &info,
|
||||
const std::vector<RegTree*> &trees) {
|
||||
// rescale learning rate according to size of trees
|
||||
float lr = param.learning_rate;
|
||||
param.learning_rate = lr / trees.size();
|
||||
for (size_t i = 0; i < trees.size(); ++i) {
|
||||
this->DoPrune(*trees[i]);
|
||||
}
|
||||
param.learning_rate = lr;
|
||||
syncher.Update(gpair, p_fmat, info, trees);
|
||||
}
|
||||
|
||||
private:
|
||||
// try to prune off current leaf
|
||||
inline int TryPruneLeaf(RegTree &tree, int nid, int depth, int npruned) { // NOLINT(*)
|
||||
if (tree[nid].is_root()) return npruned;
|
||||
int pid = tree[nid].parent();
|
||||
RegTree::NodeStat &s = tree.stat(pid);
|
||||
++s.leaf_child_cnt;
|
||||
if (s.leaf_child_cnt >= 2 && param.need_prune(s.loss_chg, depth - 1)) {
|
||||
// need to be pruned
|
||||
tree.ChangeToLeaf(pid, param.learning_rate * s.base_weight);
|
||||
// tail recursion
|
||||
return this->TryPruneLeaf(tree, pid, depth - 1, npruned+2);
|
||||
} else {
|
||||
return npruned;
|
||||
}
|
||||
}
|
||||
/*! \brief do pruning of a tree */
|
||||
inline void DoPrune(RegTree &tree) { // NOLINT(*)
|
||||
int npruned = 0;
|
||||
// initialize auxiliary statistics
|
||||
for (int nid = 0; nid < tree.param.num_nodes; ++nid) {
|
||||
tree.stat(nid).leaf_child_cnt = 0;
|
||||
}
|
||||
for (int nid = 0; nid < tree.param.num_nodes; ++nid) {
|
||||
if (tree[nid].is_leaf()) {
|
||||
npruned = this->TryPruneLeaf(tree, nid, tree.GetDepth(nid), npruned);
|
||||
}
|
||||
}
|
||||
if (silent == 0) {
|
||||
utils::Printf("tree pruning end, %d roots, %d extra nodes, %d pruned nodes, max_depth=%d\n",
|
||||
tree.param.num_roots, tree.num_extra_nodes(), npruned, tree.MaxDepth());
|
||||
}
|
||||
}
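// note: pruning works bottom-up. Every node starts with leaf_child_cnt = 0; each leaf
// bumps its parent's counter, and once both children of a parent are leaves and
// need_prune reports that the recorded loss_chg does not justify the split, the parent
// is collapsed into a leaf (removing two nodes) and TryPruneLeaf recurses upward.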
|
||||
|
||||
private:
|
||||
// synchronizer
|
||||
TreeSyncher syncher;
|
||||
// whether to suppress output messages
|
||||
int silent;
|
||||
// training parameter
|
||||
TrainParam param;
|
||||
};
|
||||
} // namespace tree
|
||||
} // namespace xgboost
|
||||
#endif // XGBOOST_TREE_UPDATER_PRUNE_INL_HPP_
|
||||
157
src/tree/updater_refresh.cc
Normal file
@@ -0,0 +1,157 @@
|
||||
/*!
|
||||
* Copyright 2014 by Contributors
|
||||
* \file updater_refresh-inl.hpp
|
||||
* \brief refresh the statistics and leaf values of the tree on the dataset
|
||||
* \author Tianqi Chen
|
||||
*/
|
||||
#ifndef XGBOOST_TREE_UPDATER_REFRESH_INL_HPP_
|
||||
#define XGBOOST_TREE_UPDATER_REFRESH_INL_HPP_
|
||||
|
||||
#include <vector>
|
||||
#include <limits>
|
||||
#include "../sync/sync.h"
|
||||
#include "./param.h"
|
||||
#include "./updater.h"
|
||||
#include "../utils/omp.h"
|
||||
|
||||
namespace xgboost {
|
||||
namespace tree {
|
||||
/*! \brief updater that refreshes the statistics and leaf values of trees on a dataset */
|
||||
template<typename TStats>
|
||||
class TreeRefresher: public IUpdater {
|
||||
public:
|
||||
virtual ~TreeRefresher(void) {}
|
||||
// set training parameter
|
||||
virtual void SetParam(const char *name, const char *val) {
|
||||
param.SetParam(name, val);
|
||||
}
|
||||
// update the tree, do pruning
|
||||
virtual void Update(const std::vector<bst_gpair> &gpair,
|
||||
IFMatrix *p_fmat,
|
||||
const BoosterInfo &info,
|
||||
const std::vector<RegTree*> &trees) {
|
||||
if (trees.size() == 0) return;
|
||||
// number of threads
|
||||
// thread temporal space
|
||||
std::vector< std::vector<TStats> > stemp;
|
||||
std::vector<RegTree::FVec> fvec_temp;
|
||||
// setup temp space for each thread
|
||||
int nthread;
|
||||
#pragma omp parallel
|
||||
{
|
||||
nthread = omp_get_num_threads();
|
||||
}
|
||||
fvec_temp.resize(nthread, RegTree::FVec());
|
||||
stemp.resize(nthread, std::vector<TStats>());
|
||||
#pragma omp parallel
|
||||
{
|
||||
int tid = omp_get_thread_num();
|
||||
int num_nodes = 0;
|
||||
for (size_t i = 0; i < trees.size(); ++i) {
|
||||
num_nodes += trees[i]->param.num_nodes;
|
||||
}
|
||||
stemp[tid].resize(num_nodes, TStats(param));
|
||||
std::fill(stemp[tid].begin(), stemp[tid].end(), TStats(param));
|
||||
fvec_temp[tid].Init(trees[0]->param.num_feature);
|
||||
}
|
||||
// if it is C++11, use lazy evaluation for Allreduce,
|
||||
// to gain speedup in recovery
|
||||
#if __cplusplus >= 201103L
|
||||
auto lazy_get_stats = [&]()
|
||||
#endif
|
||||
{
|
||||
// start accumulating statistics
|
||||
utils::IIterator<RowBatch> *iter = p_fmat->RowIterator();
|
||||
iter->BeforeFirst();
|
||||
while (iter->Next()) {
|
||||
const RowBatch &batch = iter->Value();
|
||||
utils::Check(batch.size < std::numeric_limits<unsigned>::max(),
|
||||
"too large batch size ");
|
||||
const bst_omp_uint nbatch = static_cast<bst_omp_uint>(batch.size);
|
||||
#pragma omp parallel for schedule(static)
|
||||
for (bst_omp_uint i = 0; i < nbatch; ++i) {
|
||||
RowBatch::Inst inst = batch[i];
|
||||
const int tid = omp_get_thread_num();
|
||||
const bst_uint ridx = static_cast<bst_uint>(batch.base_rowid + i);
|
||||
RegTree::FVec &feats = fvec_temp[tid];
|
||||
feats.Fill(inst);
|
||||
int offset = 0;
|
||||
for (size_t j = 0; j < trees.size(); ++j) {
|
||||
AddStats(*trees[j], feats, gpair, info, ridx,
|
||||
BeginPtr(stemp[tid]) + offset);
|
||||
offset += trees[j]->param.num_nodes;
|
||||
}
|
||||
feats.Drop(inst);
|
||||
}
|
||||
}
|
||||
// aggregate the statistics
|
||||
int num_nodes = static_cast<int>(stemp[0].size());
|
||||
#pragma omp parallel for schedule(static)
|
||||
for (int nid = 0; nid < num_nodes; ++nid) {
|
||||
for (int tid = 1; tid < nthread; ++tid) {
|
||||
stemp[0][nid].Add(stemp[tid][nid]);
|
||||
}
|
||||
}
|
||||
};
|
||||
#if __cplusplus >= 201103L
|
||||
reducer.Allreduce(BeginPtr(stemp[0]), stemp[0].size(), lazy_get_stats);
|
||||
#else
|
||||
reducer.Allreduce(BeginPtr(stemp[0]), stemp[0].size());
|
||||
#endif
|
||||
// rescale learning rate according to size of trees
|
||||
float lr = param.learning_rate;
|
||||
param.learning_rate = lr / trees.size();
|
||||
int offset = 0;
|
||||
for (size_t i = 0; i < trees.size(); ++i) {
|
||||
for (int rid = 0; rid < trees[i]->param.num_roots; ++rid) {
|
||||
this->Refresh(BeginPtr(stemp[0]) + offset, rid, trees[i]);
|
||||
}
|
||||
offset += trees[i]->param.num_nodes;
|
||||
}
|
||||
// set learning rate back
|
||||
param.learning_rate = lr;
|
||||
}
|
||||
|
||||
private:
|
||||
inline static void AddStats(const RegTree &tree,
|
||||
const RegTree::FVec &feat,
|
||||
const std::vector<bst_gpair> &gpair,
|
||||
const BoosterInfo &info,
|
||||
const bst_uint ridx,
|
||||
TStats *gstats) {
|
||||
// start from the root group that the current instance belongs to
|
||||
int pid = static_cast<int>(info.GetRoot(ridx));
|
||||
gstats[pid].Add(gpair, info, ridx);
|
||||
// traverse the tree
|
||||
while (!tree[pid].is_leaf()) {
|
||||
unsigned split_index = tree[pid].split_index();
|
||||
pid = tree.GetNext(pid, feat.fvalue(split_index), feat.is_missing(split_index));
|
||||
gstats[pid].Add(gpair, info, ridx);
|
||||
}
|
||||
}
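// note: AddStats walks each instance from its root group down to a leaf, adding the
// instance's gradient statistics to every node on that path; Refresh below can then
// recompute each internal node's loss_chg from its children's accumulated gains and
// reset leaf weights, all without re-growing the tree.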
|
||||
inline void Refresh(const TStats *gstats,
|
||||
int nid, RegTree *p_tree) {
|
||||
RegTree &tree = *p_tree;
|
||||
tree.stat(nid).base_weight = static_cast<float>(gstats[nid].CalcWeight(param));
|
||||
tree.stat(nid).sum_hess = static_cast<float>(gstats[nid].sum_hess);
|
||||
gstats[nid].SetLeafVec(param, tree.leafvec(nid));
|
||||
if (tree[nid].is_leaf()) {
|
||||
tree[nid].set_leaf(tree.stat(nid).base_weight * param.learning_rate);
|
||||
} else {
|
||||
tree.stat(nid).loss_chg = static_cast<float>(
|
||||
gstats[tree[nid].cleft()].CalcGain(param) +
|
||||
gstats[tree[nid].cright()].CalcGain(param) -
|
||||
gstats[nid].CalcGain(param));
|
||||
this->Refresh(gstats, tree[nid].cleft(), p_tree);
|
||||
this->Refresh(gstats, tree[nid].cright(), p_tree);
|
||||
}
|
||||
}
|
||||
// training parameter
|
||||
TrainParam param;
|
||||
// reducer
|
||||
rabit::Reducer<TStats, TStats::Reduce> reducer;
|
||||
};
|
||||
|
||||
} // namespace tree
|
||||
} // namespace xgboost
|
||||
#endif // XGBOOST_TREE_UPDATER_REFRESH_INL_HPP_
|
||||
399
src/tree/updater_skmaker.cc
Normal file
@@ -0,0 +1,399 @@
|
||||
/*!
|
||||
* Copyright 2014 by Contributors
|
||||
* \file updater_skmaker-inl.hpp
|
||||
* \brief use approximation sketch to construct a tree,
|
||||
a refresh is needed to make the statistics exactly correct
|
||||
* \author Tianqi Chen
|
||||
*/
|
||||
#ifndef XGBOOST_TREE_UPDATER_SKMAKER_INL_HPP_
|
||||
#define XGBOOST_TREE_UPDATER_SKMAKER_INL_HPP_
|
||||
|
||||
#include <vector>
|
||||
#include <algorithm>
|
||||
#include "../sync/sync.h"
|
||||
#include "../utils/quantile.h"
|
||||
#include "./updater_basemaker-inl.hpp"
|
||||
|
||||
namespace xgboost {
|
||||
namespace tree {
|
||||
class SketchMaker: public BaseMaker {
|
||||
public:
|
||||
virtual ~SketchMaker(void) {}
|
||||
virtual void Update(const std::vector<bst_gpair> &gpair,
|
||||
IFMatrix *p_fmat,
|
||||
const BoosterInfo &info,
|
||||
const std::vector<RegTree*> &trees) {
|
||||
// rescale learning rate according to size of trees
|
||||
float lr = param.learning_rate;
|
||||
param.learning_rate = lr / trees.size();
|
||||
// build tree
|
||||
for (size_t i = 0; i < trees.size(); ++i) {
|
||||
this->Update(gpair, p_fmat, info, trees[i]);
|
||||
}
|
||||
param.learning_rate = lr;
|
||||
}
|
||||
|
||||
protected:
|
||||
inline void Update(const std::vector<bst_gpair> &gpair,
|
||||
IFMatrix *p_fmat,
|
||||
const BoosterInfo &info,
|
||||
RegTree *p_tree) {
|
||||
this->InitData(gpair, *p_fmat, info.root_index, *p_tree);
|
||||
for (int depth = 0; depth < param.max_depth; ++depth) {
|
||||
this->GetNodeStats(gpair, *p_fmat, *p_tree, info,
|
||||
&thread_stats, &node_stats);
|
||||
this->BuildSketch(gpair, p_fmat, info, *p_tree);
|
||||
this->SyncNodeStats();
|
||||
this->FindSplit(depth, gpair, p_fmat, info, p_tree);
|
||||
this->ResetPositionCol(qexpand, p_fmat, *p_tree);
|
||||
this->UpdateQueueExpand(*p_tree);
|
||||
// if nothing left to expand, break
|
||||
if (qexpand.size() == 0) break;
|
||||
}
|
||||
if (qexpand.size() != 0) {
|
||||
this->GetNodeStats(gpair, *p_fmat, *p_tree, info,
|
||||
&thread_stats, &node_stats);
|
||||
this->SyncNodeStats();
|
||||
}
|
||||
// set all statistics correctly
|
||||
for (int nid = 0; nid < p_tree->param.num_nodes; ++nid) {
|
||||
this->SetStats(nid, node_stats[nid], p_tree);
|
||||
if (!(*p_tree)[nid].is_leaf()) {
|
||||
p_tree->stat(nid).loss_chg = static_cast<float>(
|
||||
node_stats[(*p_tree)[nid].cleft()].CalcGain(param) +
|
||||
node_stats[(*p_tree)[nid].cright()].CalcGain(param) -
|
||||
node_stats[nid].CalcGain(param));
|
||||
}
|
||||
}
|
||||
// set left leaves
|
||||
for (size_t i = 0; i < qexpand.size(); ++i) {
|
||||
const int nid = qexpand[i];
|
||||
(*p_tree)[nid].set_leaf(p_tree->stat(nid).base_weight * param.learning_rate);
|
||||
}
|
||||
}
|
||||
// define the sketch we want to use
|
||||
typedef utils::WXQuantileSketch<bst_float, bst_float> WXQSketch;
|
||||
|
||||
private:
|
||||
// statistics needed in the gradient calculation
|
||||
struct SKStats {
|
||||
/*! \brief sum of all positive gradient */
|
||||
double pos_grad;
|
||||
/*! \brief sum of all negative gradient */
|
||||
double neg_grad;
|
||||
/*! \brief sum of hessian statistics */
|
||||
double sum_hess;
|
||||
SKStats(void) {}
|
||||
// constructor
|
||||
explicit SKStats(const TrainParam ¶m) {
|
||||
this->Clear();
|
||||
}
|
||||
/*! \brief clear the statistics */
|
||||
inline void Clear(void) {
|
||||
neg_grad = pos_grad = sum_hess = 0.0f;
|
||||
}
|
||||
// accumulate statistics
|
||||
inline void Add(const std::vector<bst_gpair> &gpair,
|
||||
const BoosterInfo &info,
|
||||
bst_uint ridx) {
|
||||
const bst_gpair &b = gpair[ridx];
|
||||
if (b.grad >= 0.0f) {
|
||||
pos_grad += b.grad;
|
||||
} else {
|
||||
neg_grad -= b.grad;
|
||||
}
|
||||
sum_hess += b.hess;
|
||||
}
|
||||
/*! \brief calculate gain of the solution */
|
||||
inline double CalcGain(const TrainParam ¶m) const {
|
||||
return param.CalcGain(pos_grad - neg_grad, sum_hess);
|
||||
}
|
||||
/*! \brief set current value to a - b */
|
||||
inline void SetSubstract(const SKStats &a, const SKStats &b) {
|
||||
pos_grad = a.pos_grad - b.pos_grad;
|
||||
neg_grad = a.neg_grad - b.neg_grad;
|
||||
sum_hess = a.sum_hess - b.sum_hess;
|
||||
}
|
||||
// calculate leaf weight
|
||||
inline double CalcWeight(const TrainParam ¶m) const {
|
||||
return param.CalcWeight(pos_grad - neg_grad, sum_hess);
|
||||
}
|
||||
/*! \brief add statistics to the data */
|
||||
inline void Add(const SKStats &b) {
|
||||
pos_grad += b.pos_grad;
|
||||
neg_grad += b.neg_grad;
|
||||
sum_hess += b.sum_hess;
|
||||
}
|
||||
/*! \brief same as add, reduce is used in All Reduce */
|
||||
inline static void Reduce(SKStats &a, const SKStats &b) { // NOLINT(*)
|
||||
a.Add(b);
|
||||
}
|
||||
/*! \brief set leaf vector value based on statistics */
|
||||
inline void SetLeafVec(const TrainParam ¶m, bst_float *vec) const {
|
||||
}
|
||||
};
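// note: SKStats keeps the positive and negative gradient parts separately (pos_grad,
// neg_grad) rather than one signed sum, presumably because the weighted quantile
// sketches below require non-negative, monotonically accumulating weights; the signed
// gradient sum is recovered as pos_grad - neg_grad in CalcGain and CalcWeight.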
|
||||
inline void BuildSketch(const std::vector<bst_gpair> &gpair,
|
||||
IFMatrix *p_fmat,
|
||||
const BoosterInfo &info,
|
||||
const RegTree &tree) {
|
||||
sketchs.resize(this->qexpand.size() * tree.param.num_feature * 3);
|
||||
for (size_t i = 0; i < sketchs.size(); ++i) {
|
||||
sketchs[i].Init(info.num_row, this->param.sketch_eps);
|
||||
}
|
||||
thread_sketch.resize(this->get_nthread());
|
||||
// number of rows in the buffered rowset
|
||||
const size_t nrows = p_fmat->buffered_rowset().size();
|
||||
// start accumulating statistics
|
||||
utils::IIterator<ColBatch> *iter = p_fmat->ColIterator();
|
||||
iter->BeforeFirst();
|
||||
while (iter->Next()) {
|
||||
const ColBatch &batch = iter->Value();
|
||||
// start enumeration
|
||||
const bst_omp_uint nsize = static_cast<bst_omp_uint>(batch.size);
|
||||
#pragma omp parallel for schedule(dynamic, 1)
|
||||
for (bst_omp_uint i = 0; i < nsize; ++i) {
|
||||
this->UpdateSketchCol(gpair, batch[i], tree,
|
||||
node_stats,
|
||||
batch.col_index[i],
|
||||
batch[i].length == nrows,
|
||||
&thread_sketch[omp_get_thread_num()]);
|
||||
}
|
||||
}
|
||||
// setup maximum size
|
||||
unsigned max_size = param.max_sketch_size();
|
||||
// synchronize sketch
|
||||
summary_array.resize(sketchs.size());
|
||||
for (size_t i = 0; i < sketchs.size(); ++i) {
|
||||
utils::WXQuantileSketch<bst_float, bst_float>::SummaryContainer out;
|
||||
sketchs[i].GetSummary(&out);
|
||||
summary_array[i].Reserve(max_size);
|
||||
summary_array[i].SetPrune(out, max_size);
|
||||
}
|
||||
size_t nbytes = WXQSketch::SummaryContainer::CalcMemCost(max_size);
|
||||
sketch_reducer.Allreduce(BeginPtr(summary_array), nbytes, summary_array.size());
|
||||
}
|
||||
// update sketch information in column fid
|
||||
inline void UpdateSketchCol(const std::vector<bst_gpair> &gpair,
|
||||
const ColBatch::Inst &c,
|
||||
const RegTree &tree,
|
||||
const std::vector<SKStats> &nstats,
|
||||
bst_uint fid,
|
||||
bool col_full,
|
||||
std::vector<SketchEntry> *p_temp) {
|
||||
if (c.length == 0) return;
|
||||
// initialize sbuilder for use
|
||||
std::vector<SketchEntry> &sbuilder = *p_temp;
|
||||
sbuilder.resize(tree.param.num_nodes * 3);
|
||||
for (size_t i = 0; i < this->qexpand.size(); ++i) {
|
||||
const unsigned nid = this->qexpand[i];
|
||||
const unsigned wid = this->node2workindex[nid];
|
||||
for (int k = 0; k < 3; ++k) {
|
||||
sbuilder[3 * nid + k].sum_total = 0.0f;
|
||||
sbuilder[3 * nid + k].sketch = &sketchs[(wid * tree.param.num_feature + fid) * 3 + k];
|
||||
}
|
||||
}
|
||||
if (!col_full) {
|
||||
for (bst_uint j = 0; j < c.length; ++j) {
|
||||
const bst_uint ridx = c[j].index;
|
||||
const int nid = this->position[ridx];
|
||||
if (nid >= 0) {
|
||||
const bst_gpair &e = gpair[ridx];
|
||||
if (e.grad >= 0.0f) {
|
||||
sbuilder[3 * nid + 0].sum_total += e.grad;
|
||||
} else {
|
||||
sbuilder[3 * nid + 1].sum_total -= e.grad;
|
||||
}
|
||||
sbuilder[3 * nid + 2].sum_total += e.hess;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for (size_t i = 0; i < this->qexpand.size(); ++i) {
|
||||
const unsigned nid = this->qexpand[i];
|
||||
sbuilder[3 * nid + 0].sum_total = static_cast<bst_float>(nstats[nid].pos_grad);
|
||||
sbuilder[3 * nid + 1].sum_total = static_cast<bst_float>(nstats[nid].neg_grad);
|
||||
sbuilder[3 * nid + 2].sum_total = static_cast<bst_float>(nstats[nid].sum_hess);
|
||||
}
|
||||
}
|
||||
// if only one value, no need to do second pass
|
||||
if (c[0].fvalue == c[c.length-1].fvalue) {
|
||||
for (size_t i = 0; i < this->qexpand.size(); ++i) {
|
||||
const int nid = this->qexpand[i];
|
||||
for (int k = 0; k < 3; ++k) {
|
||||
sbuilder[3 * nid + k].sketch->Push(c[0].fvalue,
|
||||
static_cast<bst_float>(
|
||||
sbuilder[3 * nid + k].sum_total));
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
// two pass scan
|
||||
unsigned max_size = param.max_sketch_size();
|
||||
for (size_t i = 0; i < this->qexpand.size(); ++i) {
|
||||
const int nid = this->qexpand[i];
|
||||
for (int k = 0; k < 3; ++k) {
|
||||
sbuilder[3 * nid + k].Init(max_size);
|
||||
}
|
||||
}
|
||||
// second pass, build the sketch
|
||||
for (bst_uint j = 0; j < c.length; ++j) {
|
||||
const bst_uint ridx = c[j].index;
|
||||
const int nid = this->position[ridx];
|
||||
if (nid >= 0) {
|
||||
const bst_gpair &e = gpair[ridx];
|
||||
if (e.grad >= 0.0f) {
|
||||
sbuilder[3 * nid + 0].Push(c[j].fvalue, e.grad, max_size);
|
||||
} else {
|
||||
sbuilder[3 * nid + 1].Push(c[j].fvalue, -e.grad, max_size);
|
||||
}
|
||||
sbuilder[3 * nid + 2].Push(c[j].fvalue, e.hess, max_size);
|
||||
}
|
||||
}
|
||||
for (size_t i = 0; i < this->qexpand.size(); ++i) {
|
||||
const int nid = this->qexpand[i];
|
||||
for (int k = 0; k < 3; ++k) {
|
||||
sbuilder[3 * nid + k].Finalize(max_size);
|
||||
}
|
||||
}
|
||||
}
|
||||
inline void SyncNodeStats(void) {
|
||||
utils::Assert(qexpand.size() != 0, "qexpand must not be empty");
|
||||
std::vector<SKStats> tmp(qexpand.size());
|
||||
for (size_t i = 0; i < qexpand.size(); ++i) {
|
||||
tmp[i] = node_stats[qexpand[i]];
|
||||
}
|
||||
stats_reducer.Allreduce(BeginPtr(tmp), tmp.size());
|
||||
for (size_t i = 0; i < qexpand.size(); ++i) {
|
||||
node_stats[qexpand[i]] = tmp[i];
|
||||
}
|
||||
}
|
||||
inline void FindSplit(int depth,
|
||||
const std::vector<bst_gpair> &gpair,
|
||||
IFMatrix *p_fmat,
|
||||
const BoosterInfo &info,
|
||||
RegTree *p_tree) {
|
||||
const bst_uint num_feature = p_tree->param.num_feature;
|
||||
// get the best split condition for each node
|
||||
std::vector<SplitEntry> sol(qexpand.size());
|
||||
bst_omp_uint nexpand = static_cast<bst_omp_uint>(qexpand.size());
|
||||
#pragma omp parallel for schedule(dynamic, 1)
|
||||
for (bst_omp_uint wid = 0; wid < nexpand; ++wid) {
|
||||
const int nid = qexpand[wid];
|
||||
utils::Assert(node2workindex[nid] == static_cast<int>(wid),
|
||||
"node2workindex inconsistent");
|
||||
SplitEntry &best = sol[wid];
|
||||
for (bst_uint fid = 0; fid < num_feature; ++fid) {
|
||||
unsigned base = (wid * p_tree->param.num_feature + fid) * 3;
|
||||
EnumerateSplit(summary_array[base + 0],
|
||||
summary_array[base + 1],
|
||||
summary_array[base + 2],
|
||||
node_stats[nid], fid, &best);
|
||||
}
|
||||
}
|
||||
// get the best result, we can synchronize the solution
|
||||
for (bst_omp_uint wid = 0; wid < nexpand; ++wid) {
|
||||
const int nid = qexpand[wid];
|
||||
const SplitEntry &best = sol[wid];
|
||||
// set up the values
|
||||
p_tree->stat(nid).loss_chg = best.loss_chg;
|
||||
this->SetStats(nid, node_stats[nid], p_tree);
|
||||
// now we know the solution in snode[nid], set split
|
||||
if (best.loss_chg > rt_eps) {
|
||||
p_tree->AddChilds(nid);
|
||||
(*p_tree)[nid].set_split(best.split_index(),
|
||||
best.split_value, best.default_left());
|
||||
// mark both children as fresh leaves with value 0
|
||||
(*p_tree)[(*p_tree)[nid].cleft()].set_leaf(0.0f, 0);
|
||||
(*p_tree)[(*p_tree)[nid].cright()].set_leaf(0.0f, 0);
|
||||
} else {
|
||||
(*p_tree)[nid].set_leaf(p_tree->stat(nid).base_weight * param.learning_rate);
|
||||
}
|
||||
}
|
||||
}
|
||||
// set statistics on ptree
|
||||
inline void SetStats(int nid, const SKStats &node_sum, RegTree *p_tree) {
|
||||
p_tree->stat(nid).base_weight = static_cast<float>(node_sum.CalcWeight(param));
|
||||
p_tree->stat(nid).sum_hess = static_cast<float>(node_sum.sum_hess);
|
||||
node_sum.SetLeafVec(param, p_tree->leafvec(nid));
|
||||
}
|
||||
inline void EnumerateSplit(const WXQSketch::Summary &pos_grad,
|
||||
const WXQSketch::Summary &neg_grad,
|
||||
const WXQSketch::Summary &sum_hess,
|
||||
const SKStats &node_sum,
|
||||
bst_uint fid,
|
||||
SplitEntry *best) {
|
||||
if (sum_hess.size == 0) return;
|
||||
double root_gain = node_sum.CalcGain(param);
|
||||
std::vector<bst_float> fsplits;
|
||||
for (size_t i = 0; i < pos_grad.size; ++i) {
|
||||
fsplits.push_back(pos_grad.data[i].value);
|
||||
}
|
||||
for (size_t i = 0; i < neg_grad.size; ++i) {
|
||||
fsplits.push_back(neg_grad.data[i].value);
|
||||
}
|
||||
for (size_t i = 0; i < sum_hess.size; ++i) {
|
||||
fsplits.push_back(sum_hess.data[i].value);
|
||||
}
|
||||
std::sort(fsplits.begin(), fsplits.end());
|
||||
fsplits.resize(std::unique(fsplits.begin(), fsplits.end()) - fsplits.begin());
|
||||
// sum feature
|
||||
SKStats feat_sum;
|
||||
feat_sum.pos_grad = pos_grad.data[pos_grad.size - 1].rmax;
|
||||
feat_sum.neg_grad = neg_grad.data[neg_grad.size - 1].rmax;
|
||||
feat_sum.sum_hess = sum_hess.data[sum_hess.size - 1].rmax;
|
||||
size_t ipos = 0, ineg = 0, ihess = 0;
|
||||
for (size_t i = 1; i < fsplits.size(); ++i) {
|
||||
WXQSketch::Entry pos = pos_grad.Query(fsplits[i], ipos);
|
||||
WXQSketch::Entry neg = neg_grad.Query(fsplits[i], ineg);
|
||||
WXQSketch::Entry hess = sum_hess.Query(fsplits[i], ihess);
|
||||
SKStats s, c;
|
||||
s.pos_grad = 0.5f * (pos.rmin + pos.rmax - pos.wmin);
|
||||
s.neg_grad = 0.5f * (neg.rmin + neg.rmax - neg.wmin);
|
||||
s.sum_hess = 0.5f * (hess.rmin + hess.rmax - hess.wmin);
|
||||
c.SetSubstract(node_sum, s);
|
||||
// forward
|
||||
if (s.sum_hess >= param.min_child_weight &&
|
||||
c.sum_hess >= param.min_child_weight) {
|
||||
double loss_chg = s.CalcGain(param) + c.CalcGain(param) - root_gain;
|
||||
best->Update(static_cast<bst_float>(loss_chg), fid, fsplits[i], false);
|
||||
}
|
||||
// backward
|
||||
c.SetSubstract(feat_sum, s);
|
||||
s.SetSubstract(node_sum, c);
|
||||
if (s.sum_hess >= param.min_child_weight &&
|
||||
c.sum_hess >= param.min_child_weight) {
|
||||
double loss_chg = s.CalcGain(param) + c.CalcGain(param) - root_gain;
|
||||
best->Update(static_cast<bst_float>(loss_chg), fid, fsplits[i], true);
|
||||
}
|
||||
}
|
||||
{
|
||||
// all including
|
||||
SKStats s = feat_sum, c;
|
||||
c.SetSubstract(node_sum, s);
|
||||
if (s.sum_hess >= param.min_child_weight &&
|
||||
c.sum_hess >= param.min_child_weight) {
|
||||
bst_float cpt = fsplits.back();
|
||||
double loss_chg = s.CalcGain(param) + c.CalcGain(param) - root_gain;
|
||||
best->Update(static_cast<bst_float>(loss_chg), fid, cpt + fabsf(cpt) + 1.0f, false);
|
||||
}
|
||||
}
|
||||
}
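// note: candidate split values are the union of the value lists of the three summaries
// (pos_grad, neg_grad, sum_hess). For each candidate, Query gives rank bounds and
// 0.5 * (rmin + rmax - wmin) serves as an approximate total of that statistic over
// values strictly below the candidate; the forward and backward evaluations then mirror
// the exact enumerator by defaulting missing values to the right and to the left
// respectively, and the final block considers sending all present values left with
// missing values right.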
|
||||
|
||||
// thread temp data
|
||||
// used to hold temporal sketch
|
||||
std::vector< std::vector<SketchEntry> > thread_sketch;
|
||||
// used to hold statistics
|
||||
std::vector< std::vector<SKStats> > thread_stats;
|
||||
// node statistics
|
||||
std::vector<SKStats> node_stats;
|
||||
// summary array
|
||||
std::vector<WXQSketch::SummaryContainer> summary_array;
|
||||
// reducer for summary
|
||||
rabit::Reducer<SKStats, SKStats::Reduce> stats_reducer;
|
||||
// reducer for summary
|
||||
rabit::SerializeReducer<WXQSketch::SummaryContainer> sketch_reducer;
|
||||
// per node, per feature sketch
|
||||
std::vector< utils::WXQuantileSketch<bst_float, bst_float> > sketchs;
|
||||
};
|
||||
} // namespace tree
|
||||
} // namespace xgboost
|
||||
#endif // XGBOOST_TREE_UPDATER_SKMAKER_INL_HPP_
|
||||