Refactor linear modelling and add new coordinate descent updater (#3103)

* Refactor linear modelling and add new coordinate descent updater

* Allow unsorted column iterator

* Add prediction caching to gblinear
This commit is contained in:
Rory Mitchell
2018-02-17 09:17:01 +13:00
committed by GitHub
parent 9ffe8596f2
commit 10eb05a63a
23 changed files with 1252 additions and 271 deletions

View File

@@ -0,0 +1,69 @@
#pylint: skip-file
import sys, argparse
import xgboost as xgb
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
import time
import ast
rng = np.random.RandomState(1994)
def run_benchmark(args):
    """Benchmark a gblinear updater on a synthetic binary-classification task.

    Reuses the cached DMatrix files ('dtest.dm' / 'dtrain.dm') from a previous
    run when their shapes match the requested configuration; otherwise the
    dataset is regenerated, split, and the caches rewritten.
    """
    try:
        dtest = xgb.DMatrix('dtest.dm')
        dtrain = xgb.DMatrix('dtrain.dm')
        if not (dtest.num_col() == args.columns
                and dtrain.num_col() == args.columns):
            raise ValueError("Wrong cols")
        if not (dtest.num_row() == args.rows * args.test_size
                and dtrain.num_row() == args.rows * (1 - args.test_size)):
            raise ValueError("Wrong rows")
    except Exception:
        # BUG FIX: was a bare 'except:', which also swallows KeyboardInterrupt
        # and SystemExit. Any failure here (missing cache file, shape mismatch)
        # simply means the dataset must be regenerated.
        print("Generating dataset: {} rows * {} columns".format(args.rows, args.columns))
        print("{}/{} test/train split".format(args.test_size, 1.0 - args.test_size))
        tmp = time.time()
        X, y = make_classification(args.rows, n_features=args.columns,
                                   n_redundant=0, n_informative=args.columns,
                                   n_repeated=0, random_state=7)
        if args.sparsity < 1.0:
            # Blank out entries at random to reach the requested sparsity.
            X = np.array([[np.nan if rng.uniform(0, 1) < args.sparsity else x
                           for x in x_row] for x_row in X])
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=args.test_size, random_state=7)
        print("Generate Time: %s seconds" % (str(time.time() - tmp)))
        tmp = time.time()
        print("DMatrix Start")
        dtrain = xgb.DMatrix(X_train, y_train)
        dtest = xgb.DMatrix(X_test, y_test, nthread=-1)
        print("DMatrix Time: %s seconds" % (str(time.time() - tmp)))
        dtest.save_binary('dtest.dm')
        dtrain.save_binary('dtrain.dm')

    param = {'objective': 'binary:logistic', 'booster': 'gblinear'}
    # BUG FIX: was "args.params is not ''" -- 'is' compares object identity,
    # not string equality, so the branch could behave unpredictably.
    if args.params != '':
        param.update(ast.literal_eval(args.params))
    param['updater'] = args.updater
    print("Training with '%s'" % param['updater'])
    tmp = time.time()
    xgb.train(param, dtrain, args.iterations, evals=[(dtrain, "train")],
              early_stopping_rounds=args.columns)
    print("Train Time: %s seconds" % (str(time.time() - tmp)))
def _str2bool(value):
    """Parse a command-line boolean.

    BUG FIX: the original used type=bool, under which any non-empty string
    (including the literal 'False') is truthy, so '--standardise False'
    silently enabled standardisation.
    """
    return str(value).lower() in ('true', '1', 'yes')


parser = argparse.ArgumentParser()
parser.add_argument('--updater', default='coord_descent')
parser.add_argument('--sparsity', type=float, default=0.0)
# NOTE(review): 'lambda' is a Python keyword, so this is only reachable via
# getattr(args, 'lambda'); the script currently forwards it through --params.
parser.add_argument('--lambda', type=float, default=1.0)
parser.add_argument('--tol', type=float, default=1e-5)
parser.add_argument('--alpha', type=float, default=1.0)
parser.add_argument('--rows', type=int, default=1000000)
parser.add_argument('--iterations', type=int, default=10000)
parser.add_argument('--columns', type=int, default=50)
parser.add_argument('--test_size', type=float, default=0.25)
parser.add_argument('--standardise', type=_str2bool, default=False)
parser.add_argument('--params', default='',
                    help='Provide additional parameters as a Python dict string, '
                         'e.g. --params "{\'max_depth\':2}"')
args = parser.parse_args()
run_benchmark(args)

View File

@@ -42,11 +42,18 @@ TEST(SimpleDMatrix, ColAccessWithoutBatches) {
xgboost::DMatrix * dmat = xgboost::DMatrix::Load(tmp_file, true, false);
std::remove(tmp_file.c_str());
EXPECT_EQ(dmat->HaveColAccess(), false);
// Unsorted column access
const std::vector<bool> enable(dmat->info().num_col, true);
dmat->InitColAccess(enable, 1, dmat->info().num_row);
dmat->InitColAccess(enable, 0, 0); // Calling it again should not change it
ASSERT_EQ(dmat->HaveColAccess(), true);
EXPECT_EQ(dmat->HaveColAccess(false), false);
dmat->InitColAccess(enable, 1, dmat->info().num_row, false);
dmat->InitColAccess(enable, 0, 0, false); // Calling it again should not change it
ASSERT_EQ(dmat->HaveColAccess(false), true);
// Sorted column access
EXPECT_EQ(dmat->HaveColAccess(true), false);
dmat->InitColAccess(enable, 1, dmat->info().num_row, true);
dmat->InitColAccess(enable, 0, 0, true); // Calling it again should not change it
ASSERT_EQ(dmat->HaveColAccess(true), true);
EXPECT_EQ(dmat->GetColSize(0), 2);
EXPECT_EQ(dmat->GetColSize(1), 1);
@@ -86,11 +93,18 @@ TEST(SimpleDMatrix, ColAccessWithBatches) {
xgboost::DMatrix * dmat = xgboost::DMatrix::Load(tmp_file, true, false);
std::remove(tmp_file.c_str());
EXPECT_EQ(dmat->HaveColAccess(), false);
// Unsorted column access
const std::vector<bool> enable(dmat->info().num_col, true);
dmat->InitColAccess(enable, 1, 1); // Max 1 row per patch
dmat->InitColAccess(enable, 0, 0); // Calling it again should not change it
ASSERT_EQ(dmat->HaveColAccess(), true);
EXPECT_EQ(dmat->HaveColAccess(false), false);
dmat->InitColAccess(enable, 1, 1, false);
dmat->InitColAccess(enable, 0, 0, false); // Calling it again should not change it
ASSERT_EQ(dmat->HaveColAccess(false), true);
// Sorted column access
EXPECT_EQ(dmat->HaveColAccess(true), false);
dmat->InitColAccess(enable, 1, 1, true); // Max 1 row per patch
dmat->InitColAccess(enable, 0, 0, true); // Calling it again should not change it
ASSERT_EQ(dmat->HaveColAccess(true), true);
EXPECT_EQ(dmat->GetColSize(0), 2);
EXPECT_EQ(dmat->GetColSize(1), 1);

View File

@@ -56,10 +56,10 @@ TEST(SparsePageDMatrix, ColAcess) {
std::remove(tmp_file.c_str());
EXPECT_FALSE(FileExists(tmp_file + ".cache.col.page"));
EXPECT_EQ(dmat->HaveColAccess(), false);
EXPECT_EQ(dmat->HaveColAccess(true), false);
const std::vector<bool> enable(dmat->info().num_col, true);
dmat->InitColAccess(enable, 1, 1); // Max 1 row per patch
ASSERT_EQ(dmat->HaveColAccess(), true);
dmat->InitColAccess(enable, 1, 1, true); // Max 1 row per patch
ASSERT_EQ(dmat->HaveColAccess(true), true);
EXPECT_TRUE(FileExists(tmp_file + ".cache.col.page"));
EXPECT_EQ(dmat->GetColSize(0), 2);

View File

@@ -0,0 +1,44 @@
// Copyright by Contributors
#include <xgboost/linear_updater.h>
#include "../helpers.h"
#include "xgboost/gbm.h"
typedef std::pair<std::string, std::string> arg;
TEST(Linear, shotgun) {
  // NOTE: removed a function-local 'typedef ... arg' that duplicated the
  // file-scope typedef and was never used.
  auto mat = CreateDMatrix(10, 10, 0);
  // The shotgun updater iterates columns, so (unsorted) column access
  // must be initialised first.
  std::vector<bool> enabled(mat->info().num_col, true);
  mat->InitColAccess(enabled, 1.0f, 1 << 16, false);
  auto updater = std::unique_ptr<xgboost::LinearUpdater>(
      xgboost::LinearUpdater::Create("shotgun"));
  updater->Init({});
  // Constant gradient -5 with hessian 1 on every row: one exact Newton step
  // should move the bias to +5.
  std::vector<xgboost::bst_gpair> gpair(mat->info().num_row,
                                        xgboost::bst_gpair(-5, 1.0));
  xgboost::gbm::GBLinearModel model;
  model.param.num_feature = mat->info().num_col;
  model.param.num_output_group = 1;
  model.LazyInitModel();
  updater->Update(&gpair, mat.get(), &model, gpair.size());
  ASSERT_EQ(model.bias()[0], 5.0f);
}
TEST(Linear, coordinate) {
  // NOTE: removed a function-local 'typedef ... arg' that duplicated the
  // file-scope typedef and was never used.
  auto mat = CreateDMatrix(10, 10, 0);
  // Coordinate descent walks columns, so (unsorted) column access
  // must be initialised first.
  std::vector<bool> enabled(mat->info().num_col, true);
  mat->InitColAccess(enabled, 1.0f, 1 << 16, false);
  auto updater = std::unique_ptr<xgboost::LinearUpdater>(
      xgboost::LinearUpdater::Create("coord_descent"));
  updater->Init({});
  // Constant gradient -5 with hessian 1 on every row: one exact Newton step
  // should move the bias to +5.
  std::vector<xgboost::bst_gpair> gpair(mat->info().num_row,
                                        xgboost::bst_gpair(-5, 1.0));
  xgboost::gbm::GBLinearModel model;
  model.param.num_feature = mat->info().num_col;
  model.param.num_output_group = 1;
  model.LazyInitModel();
  updater->Update(&gpair, mat.get(), &model, gpair.size());
  ASSERT_EQ(model.bias()[0], 5.0f);
}

133
tests/python/test_linear.py Normal file
View File

@@ -0,0 +1,133 @@
from __future__ import print_function
import itertools as it
import numpy as np
import sys
import testing as tm
import unittest
import xgboost as xgb
rng = np.random.RandomState(199)
num_rounds = 1000
def is_float(s):
    """Return True if *s* parses as a float, False otherwise.

    Returns real booleans instead of the original 1/0 ints; callers use the
    result only in boolean context, and True == 1 anyway, so this is
    backward-compatible.
    """
    try:
        float(s)
        return True
    except ValueError:
        return False
def xgb_get_weights(bst):
    """Extract the numeric weights from a fitted gblinear booster's text dump."""
    dump = bst.get_dump()[0]
    return [float(token) for token in dump.split() if is_float(token)]
def check_least_squares_solution(X, y, pred, tol, reg_alpha, reg_lambda, weights):
    """Assert the KKT optimality conditions of elastic-net least squares.

    At an optimum, the gradient of the squared loss plus the L2 term must
    either vanish into the L1 subgradient (for zero weights) or equal the
    signed L1 penalty exactly (for nonzero weights).
    """
    n = len(y)
    alpha = reg_alpha * n
    lam = reg_lambda * n
    residual = np.subtract(y, pred)
    grad = X.T.dot(residual) - lam * np.asarray(weights)
    for w, g in zip(weights, grad):
        if w == 0.0:
            # Subgradient condition: |g| may be anywhere inside [-alpha, alpha].
            assert abs(g) <= alpha
        else:
            # Active weight: gradient must match the L1 penalty's sign term.
            assert np.isclose(g, np.sign(w) * alpha, rtol=tol, atol=tol)
def train_diabetes(param_in):
    """Fit gblinear on the diabetes regression data and verify optimality."""
    from sklearn import datasets
    data = datasets.load_diabetes()
    dtrain = xgb.DMatrix(data.data, label=data.target)
    param = dict(param_in)
    booster = xgb.train(param, dtrain, num_rounds)
    predictions = booster.predict(dtrain)
    # Skip the leading bias term when checking the coefficient conditions.
    check_least_squares_solution(data.data, data.target, predictions, 1e-2,
                                 param['alpha'], param['lambda'],
                                 xgb_get_weights(booster)[1:])
def train_breast_cancer(param_in):
    """Fit gblinear on breast-cancer data and require >= 80% train accuracy."""
    from sklearn import metrics, datasets
    data = datasets.load_breast_cancer()
    dtrain = xgb.DMatrix(data.data, label=data.target)
    param = {'objective': 'binary:logistic'}
    param.update(param_in)
    booster = xgb.train(param, dtrain, num_rounds)
    predictions = np.round(booster.predict(dtrain))
    accuracy = metrics.accuracy_score(data.target, predictions)
    assert accuracy >= 0.8
def train_classification(param_in):
    """Fit gblinear on a synthetic binary task and require >= 80% train accuracy."""
    from sklearn import metrics, datasets
    # Scale is necessary otherwise regularisation parameters will force all
    # coefficients to 0.
    X, y = datasets.make_classification(random_state=rng, scale=100)
    dtrain = xgb.DMatrix(X, label=y)
    param = {'objective': 'binary:logistic'}
    param.update(param_in)
    booster = xgb.train(param, dtrain, num_rounds)
    predictions = np.round(booster.predict(dtrain))
    accuracy = metrics.accuracy_score(y, predictions)
    assert accuracy >= 0.8
def train_classification_multi(param_in):
    """Fit gblinear on a tiny 3-class task and require >= 50% train accuracy."""
    from sklearn import metrics, datasets
    num_class = 3
    X, y = datasets.make_classification(n_samples=10, random_state=rng,
                                        scale=100, n_classes=num_class,
                                        n_informative=4, n_features=4,
                                        n_redundant=0)
    dtrain = xgb.DMatrix(X, label=y)
    param = {'objective': 'multi:softmax', 'num_class': num_class}
    param.update(param_in)
    booster = xgb.train(param, dtrain, num_rounds)
    predictions = np.round(booster.predict(dtrain))
    accuracy = metrics.accuracy_score(y, predictions)
    assert accuracy >= 0.50
def train_boston(param_in):
    """Fit gblinear on the Boston housing data and verify optimality."""
    from sklearn import datasets
    data = datasets.load_boston()
    dtrain = xgb.DMatrix(data.data, label=data.target)
    param = dict(param_in)
    booster = xgb.train(param, dtrain, num_rounds)
    predictions = booster.predict(dtrain)
    # Skip the leading bias term when checking the coefficient conditions.
    check_least_squares_solution(data.data, data.target, predictions, 1e-2,
                                 param['alpha'], param['lambda'],
                                 xgb_get_weights(booster)[1:])
def assert_updater_accuracy(linear_updater, variable_param):
    """Run every combination of *variable_param* values through *linear_updater*.

    Enumerates the Cartesian product of the parameter grid and trains each
    combination on all the benchmark datasets, which assert internally.
    """
    base_param = {'booster': 'gblinear', 'updater': linear_updater,
                  'tolerance': 1e-8}
    names = sorted(variable_param)
    # BUG FIX: the loop variable was named 'set', shadowing the builtin;
    # 'Name' also violated the lower_snake_case convention.
    for combination in it.product(*(variable_param[name] for name in names)):
        param_tmp = base_param.copy()
        param_tmp.update(zip(names, combination))
        print(param_tmp, file=sys.stderr)
        train_boston(param_tmp)
        train_diabetes(param_tmp)
        train_classification(param_tmp)
        train_classification_multi(param_tmp)
        train_breast_cancer(param_tmp)
class TestLinear(unittest.TestCase):
    """Integration tests for the gblinear 'coord_descent' and 'shotgun' updaters."""

    def test_coordinate(self):
        tm._skip_if_no_sklearn()
        grid = {
            'alpha': [1.0, 5.0],
            'lambda': [1.0, 5.0],
            'coordinate_selection': ['cyclic', 'random', 'greedy'],
        }
        assert_updater_accuracy('coord_descent', grid)

    def test_shotgun(self):
        tm._skip_if_no_sklearn()
        grid = {
            'alpha': [1.0, 5.0],
            'lambda': [1.0, 5.0],
        }
        assert_updater_accuracy('shotgun', grid)