Compare commits

...

15 Commits

Author SHA1 Message Date
Hyunsu Cho
ea6b117a57 [jvm-packages] [CI] Publish XGBoost4J JARs with Scala 2.11 and 2.12 2020-04-15 08:39:36 -07:00
Hyunsu Cho
1830a5c5cb Add link to file listing 2020-04-14 16:20:53 -07:00
Hyunsu Cho
163149cb10 Document how to install from xgboost-maven-repo 2020-04-14 16:11:23 -07:00
Hyunsu Cho
40b4a45770 [CI] Deploy SNAPSHOT JARs to S3 bucket 2020-04-14 14:52:32 -07:00
Hyunsu Cho
d83db4844b Support 32-bit Solaris target for R package 2020-03-24 17:10:48 -07:00
Hyunsu Cho
3550b16a34 Don't use memset to set struct when compiling for R 2020-03-21 21:47:51 -07:00
Hyunsu Cho
917b0a7b46 Bump version 2020-03-04 00:39:03 +00:00
Jiaming Yuan
58ebbab979 Define lazy isinstance for Python compat. (#5364) (#5369)
* Avoid importing datatable.
* Fix #5363.
2020-02-26 20:39:38 +08:00
Jiaming Yuan
2bc5d8d449 Restore loading model from buffer. (#5360) (#5366) 2020-02-26 14:23:10 +08:00
Philip Hyunsu Cho
7d178cbd25 Fix a small typo in sklearn.py that broke multiple eval metrics (#5341) 2020-02-22 19:04:48 +08:00
Hyunsu Cho
74e2f652de Enforce only major version in JSON model schema 2020-02-21 07:57:45 +00:00
Hyunsu Cho
e02fff53f2 Change version_config.h too 2020-02-21 07:50:41 +00:00
Hyunsu Cho
fcb2efbadd Fix a unit test that mistook MINOR ver for PATCH ver 2020-02-21 07:11:59 +00:00
Hyunsu Cho
f4621f09c7 Release 1.0.1 to add #5330 2020-02-20 22:56:32 -08:00
Philip Hyunsu Cho
bf1b2cbfa2 Remove f-string, since it's not supported by Python 3.5 (#5330)
* Remove f-string, since it's not supported by Python 3.5

* Add Python 3.5 to CI, to ensure compatibility

* Remove duplicated matplotlib

* Show deprecation notice for Python 3.5

* Fix lint

* Fix lint
2020-02-20 22:47:05 -08:00
25 changed files with 346 additions and 172 deletions

View File

@@ -1,5 +1,5 @@
cmake_minimum_required(VERSION 3.12)
project(xgboost LANGUAGES CXX C VERSION 1.0.0)
project(xgboost LANGUAGES CXX C VERSION 1.0.2)
include(cmake/Utils.cmake)
list(APPEND CMAKE_MODULE_PATH "${xgboost_SOURCE_DIR}/cmake/modules")
cmake_policy(SET CMP0022 NEW)

27
Jenkinsfile vendored
View File

@@ -95,6 +95,17 @@ pipeline {
milestone ordinal: 4
}
}
stage('Jenkins Linux: Deploy') {
agent none
steps {
script {
parallel ([
'deploy-jvm-packages': { DeployJVMPackages(spark_version: '2.4.3') }
])
}
milestone ordinal: 5
}
}
}
}
@@ -273,6 +284,7 @@ def TestPythonCPU() {
def docker_binary = "docker"
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/test_python.sh cpu
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/test_python.sh cpu-py35
"""
deleteDir()
}
@@ -379,3 +391,18 @@ def TestR(args) {
deleteDir()
}
}
def DeployJVMPackages(args) {
node('linux && cpu') {
unstash name: 'srcs'
if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) {
echo 'Deploying to xgboost-maven-repo S3 repo...'
def container_type = "jvm"
def docker_binary = "docker"
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/deploy_jvm_packages.sh ${args.spark_version}
"""
}
deleteDir()
}
}

View File

@@ -7,8 +7,8 @@ require(vcd, quietly = TRUE)
float_tolerance = 5e-6
# disable some tests for Win32
win32_flag = .Platform$OS.type == "windows" && .Machine$sizeof.pointer != 8
# disable some tests for 32-bit environment
flag_32bit = .Machine$sizeof.pointer != 8
set.seed(1982)
data(Arthritis)
@@ -44,7 +44,7 @@ mbst.GLM <- xgboost(data = as.matrix(iris[, -5]), label = mlabel, verbose = 0,
test_that("xgb.dump works", {
if (!win32_flag)
if (!flag_32bit)
expect_length(xgb.dump(bst.Tree), 200)
dump_file = file.path(tempdir(), 'xgb.model.dump')
expect_true(xgb.dump(bst.Tree, dump_file, with_stats = T))
@@ -54,7 +54,7 @@ test_that("xgb.dump works", {
# JSON format
dmp <- xgb.dump(bst.Tree, dump_format = "json")
expect_length(dmp, 1)
if (!win32_flag)
if (!flag_32bit)
expect_length(grep('nodeid', strsplit(dmp, '\n')[[1]]), 188)
})
@@ -256,7 +256,7 @@ test_that("xgb.model.dt.tree works with and without feature names", {
names.dt.trees <- c("Tree", "Node", "ID", "Feature", "Split", "Yes", "No", "Missing", "Quality", "Cover")
dt.tree <- xgb.model.dt.tree(feature_names = feature.names, model = bst.Tree)
expect_equal(names.dt.trees, names(dt.tree))
if (!win32_flag)
if (!flag_32bit)
expect_equal(dim(dt.tree), c(188, 10))
expect_output(str(dt.tree), 'Feature.*\\"Age\\"')
@@ -283,7 +283,7 @@ test_that("xgb.model.dt.tree throws error for gblinear", {
test_that("xgb.importance works with and without feature names", {
importance.Tree <- xgb.importance(feature_names = feature.names, model = bst.Tree)
if (!win32_flag)
if (!flag_32bit)
expect_equal(dim(importance.Tree), c(7, 4))
expect_equal(colnames(importance.Tree), c("Feature", "Gain", "Cover", "Frequency"))
expect_output(str(importance.Tree), 'Feature.*\\"Age\\"')

View File

@@ -8,15 +8,143 @@ XGBoost JVM Package
<img alt="Build Status" src="https://travis-ci.org/dmlc/xgboost.svg?branch=master">
</a>
<a href="https://github.com/dmlc/xgboost/blob/master/LICENSE">
<img alt="GitHub license" src="http://dmlc.github.io/img/apache2.svg">
<img alt="GitHub license" src="https://dmlc.github.io/img/apache2.svg">
</a>
You have found the XGBoost JVM Package!
.. _install_jvm_packages:
************
Installation
************
.. contents::
:local:
:backlinks: none
Installation from Maven repository
==================================
Access release version
----------------------
You can use XGBoost4J in your Java/Scala application by adding XGBoost4J as a dependency:
.. code-block:: xml
:caption: Maven
<properties>
...
<!-- Specify Scala version in package name -->
<scala.binary.version>2.12</scala.binary.version>
</properties>
<dependencies>
...
<dependency>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost4j_${scala.binary.version}</artifactId>
<version>latest_version_num</version>
</dependency>
<dependency>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost4j-spark_${scala.binary.version}</artifactId>
<version>latest_version_num</version>
</dependency>
</dependencies>
.. code-block:: scala
:caption: sbt
libraryDependencies ++= Seq(
"ml.dmlc" %% "xgboost4j" % "latest_version_num",
"ml.dmlc" %% "xgboost4j-spark" % "latest_version_num"
)
This will check out the latest stable version from Maven Central.
For the latest release version number, please check `here <https://github.com/dmlc/xgboost/releases>`_.
.. note:: Using Maven repository hosted by the XGBoost project
There may be some delay until a new release becomes available to Maven Central. If you would like to access the latest release immediately, add the Maven repository hosted by the XGBoost project:
.. code-block:: xml
:caption: Maven
<repository>
<id>XGBoost4J Release Repo</id>
<name>XGBoost4J Release Repo</name>
<url>https://s3-us-west-2.amazonaws.com/xgboost-maven-repo/release/</url>
</repository>
.. code-block:: scala
:caption: sbt
resolvers += "XGBoost4J Release Repo" at "https://s3-us-west-2.amazonaws.com/xgboost-maven-repo/release/"
Access SNAPSHOT version
-----------------------
First add the following Maven repository hosted by the XGBoost project:
.. code-block:: xml
:caption: Maven
<repository>
<id>XGBoost4J Snapshot Repo</id>
<name>XGBoost4J Snapshot Repo</name>
<url>https://s3-us-west-2.amazonaws.com/xgboost-maven-repo/snapshot/</url>
</repository>
.. code-block:: scala
:caption: sbt
resolvers += "XGBoost4J Snapshot Repo" at "https://s3-us-west-2.amazonaws.com/xgboost-maven-repo/snapshot/"
Then add XGBoost4J as a dependency:
.. code-block:: xml
:caption: maven
<properties>
...
<!-- Specify Scala version in package name -->
<scala.binary.version>2.12</scala.binary.version>
</properties>
<dependencies>
...
<dependency>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost4j_${scala.binary.version}</artifactId>
<version>latest_version_num-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost4j-spark_${scala.binary.version}</artifactId>
<version>latest_version_num-SNAPSHOT</version>
</dependency>
</dependencies>
.. code-block:: scala
:caption: sbt
libraryDependencies ++= Seq(
"ml.dmlc" %% "xgboost4j" % "latest_version_num-SNAPSHOT",
"ml.dmlc" %% "xgboost4j-spark" % "latest_version_num-SNAPSHOT"
)
Look up the ``version`` field in `pom.xml <https://github.com/dmlc/xgboost/blob/master/jvm-packages/pom.xml>`_ to get the correct version number.
The SNAPSHOT JARs are hosted by the XGBoost project. Every commit in the ``master`` branch will automatically trigger generation of a new SNAPSHOT JAR. You can control how often Maven should upgrade your SNAPSHOT installation by specifying ``updatePolicy``. See `here <http://maven.apache.org/pom.html#Repositories>`_ for details.
You can browse the file listing of the Maven repository at https://s3-us-west-2.amazonaws.com/xgboost-maven-repo/list.html.
.. note:: Windows not supported by published JARs
The published JARs from Maven Central and GitHub currently only support Linux and MacOS. Windows users should consider building XGBoost4J / XGBoost4J-Spark from source. Alternatively, check out pre-built JARs from `criteo-forks/xgboost-jars <https://github.com/criteo-forks/xgboost-jars>`_.
Installation from source
========================
@@ -64,73 +192,6 @@ If you want to use XGBoost4J-Spark, replace ``xgboost4j`` with ``xgboost4j-spark
Also, make sure to install Spark directly from `Apache website <https://spark.apache.org/>`_. **Upstream XGBoost is not guaranteed to work with third-party distributions of Spark, such as Cloudera Spark.** Consult appropriate third parties to obtain their distribution of XGBoost.
Installation from maven repo
============================
Access release version
----------------------
.. code-block:: xml
:caption: maven
<dependency>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost4j</artifactId>
<version>latest_version_num</version>
</dependency>
.. code-block:: scala
:caption: sbt
"ml.dmlc" % "xgboost4j" % "latest_version_num"
This will check out the latest stable version from Maven Central.
For the latest release version number, please check `here <https://github.com/dmlc/xgboost/releases>`_.
If you want to use XGBoost4J-Spark, replace ``xgboost4j`` with ``xgboost4j-spark``.
Access SNAPSHOT version
-----------------------
You need to add GitHub as repo:
.. code-block:: xml
:caption: maven
<repository>
<id>GitHub Repo</id>
<name>GitHub Repo</name>
<url>https://raw.githubusercontent.com/CodingCat/xgboost/maven-repo/</url>
</repository>
.. code-block:: scala
:caption: sbt
resolvers += "GitHub Repo" at "https://raw.githubusercontent.com/CodingCat/xgboost/maven-repo/"
Then add the dependency as follows:
.. code-block:: xml
:caption: maven
<dependency>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost4j</artifactId>
<version>latest_version_num</version>
</dependency>
.. code-block:: scala
:caption: sbt
"ml.dmlc" % "xgboost4j" % "latest_version_num"
For the latest release version number, please check `here <https://github.com/CodingCat/xgboost/tree/maven-repo/ml/dmlc/xgboost4j>`_.
.. note:: Windows not supported by published JARs
The published JARs from Maven Central and GitHub currently only support Linux and MacOS. Windows users should consider building XGBoost4J / XGBoost4J-Spark from source. Alternatively, check out pre-built JARs from `criteo-forks/xgboost-jars <https://github.com/criteo-forks/xgboost-jars>`_.
Enabling OpenMP for Mac OS
--------------------------
If you are on Mac OS and using a compiler that supports OpenMP, you need to go to the file ``xgboost/jvm-packages/create_jni.py`` and comment out the line

View File

@@ -27,39 +27,7 @@ Build an ML Application with XGBoost4J-Spark
Refer to XGBoost4J-Spark Dependency
===================================
Before we go into the tour of how to use XGBoost4J-Spark, we will give a brief introduction to building a machine learning application with XGBoost4J-Spark. The first thing you need to do is refer to the dependency in Maven Central.
You can add the following dependency in your ``pom.xml``.
.. code-block:: xml
<dependency>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost4j-spark</artifactId>
<version>latest_version_num</version>
</dependency>
For the latest release version number, please check `here <https://github.com/dmlc/xgboost/releases>`_.
We also publish some functionality that will be included in the upcoming release in the form of snapshot versions. To access this functionality, you can add a dependency on the snapshot artifacts. We publish snapshot versions in a GitHub-based repository, so you can add the following repository to your ``pom.xml``:
.. code-block:: xml
<repository>
<id>XGBoost4J-Spark Snapshot Repo</id>
<name>XGBoost4J-Spark Snapshot Repo</name>
<url>https://raw.githubusercontent.com/CodingCat/xgboost/maven-repo/</url>
</repository>
and then refer to the snapshot dependency by adding:
.. code-block:: xml
<dependency>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost4j-spark</artifactId>
<version>next_version_num-SNAPSHOT</version>
</dependency>
Before we go into the tour of how to use XGBoost4J-Spark, you should first consult :ref:`Installation from Maven repository <install_jvm_packages>` in order to add XGBoost4J-Spark as a dependency for your project. We provide both stable releases and snapshots.
.. note:: XGBoost4J-Spark requires Apache Spark 2.4+

View File

@@ -195,12 +195,22 @@
"properties": {
"version": {
"type": "array",
"const": [
1,
0,
0
"items": [
{
"type": "number",
"const": 1
},
{
"type": "number",
"minimum": 0
},
{
"type": "number",
"minimum": 0
}
],
"additionalItems": false
"minItems": 3,
"maxItems": 3
},
"learner": {
"type": "object",

View File

@@ -6,6 +6,6 @@
#define XGBOOST_VER_MAJOR 1
#define XGBOOST_VER_MINOR 0
#define XGBOOST_VER_PATCH 0
#define XGBOOST_VER_PATCH 1
#endif // XGBOOST_VERSION_CONFIG_H_

View File

@@ -205,6 +205,29 @@
</plugins>
</build>
</profile>
<profile>
<id>release-to-s3</id>
<distributionManagement>
<snapshotRepository>
<id>maven-s3-snapshot-repo</id>
<url>s3://xgboost-maven-repo/snapshot</url>
</snapshotRepository>
<repository>
<id>maven-s3-release-repo</id>
<url>s3://xgboost-maven-repo/release</url>
</repository>
</distributionManagement>
<repositories>
<repository>
<id>maven-s3-snapshot-repo</id>
<url>https://s3.amazonaws.com/xgboost-maven-repo/snapshot</url>
</repository>
<repository>
<id>maven-s3-release-repo</id>
<url>https://s3.amazonaws.com/xgboost-maven-repo/release</url>
</repository>
</repositories>
</profile>
</profiles>
<distributionManagement>
<snapshotRepository>
@@ -324,6 +347,13 @@
</executions>
</plugin>
</plugins>
<extensions>
<extension>
<groupId>org.kuali.maven.wagons</groupId>
<artifactId>maven-s3-wagon</artifactId>
<version>1.2.1</version>
</extension>
</extensions>
</build>
<reporting>
<plugins>

View File

@@ -1 +1 @@
1.0.0
1.0.2

View File

@@ -5,6 +5,8 @@ Contributors: https://github.com/dmlc/xgboost/blob/master/CONTRIBUTORS.md
"""
import os
import sys
import warnings
from .core import DMatrix, Booster
from .training import train, cv
@@ -19,6 +21,12 @@ try:
except ImportError:
pass
if sys.version_info[:2] == (3, 5):
warnings.warn(
'Python 3.5 support is deprecated; XGBoost will require Python 3.6+ in the near future. ' +
'Consider upgrading to Python 3.6+.',
FutureWarning)
VERSION_FILE = os.path.join(os.path.dirname(__file__), 'VERSION')
with open(VERSION_FILE) as f:
__version__ = f.read().strip()

View File

@@ -79,6 +79,14 @@ else:
# END NUMPY PATHLIB ATTRIBUTION
###############################################################################
def lazy_isinstance(instance, module, name):
'''Use string representation to identify a type.'''
module = type(instance).__module__ == module
name = type(instance).__name__ == name
return module and name
# pandas
try:
from pandas import DataFrame, Series
@@ -95,27 +103,6 @@ except ImportError:
pandas_concat = None
PANDAS_INSTALLED = False
# dt
try:
# Workaround for #4473, compatibility with dask
if sys.__stdin__ is not None and sys.__stdin__.closed:
sys.__stdin__ = None
import datatable
if hasattr(datatable, "Frame"):
DataTable = datatable.Frame
else:
DataTable = datatable.DataTable
DT_INSTALLED = True
except ImportError:
# pylint: disable=too-few-public-methods
class DataTable(object):
""" dummy for datatable.DataTable """
DT_INSTALLED = False
# cudf
try:
from cudf import DataFrame as CUDF_DataFrame

View File

@@ -19,9 +19,9 @@ import scipy.sparse
from .compat import (
STRING_TYPES, DataFrame, MultiIndex, Int64Index, py_str,
PANDAS_INSTALLED, DataTable,
CUDF_INSTALLED, CUDF_DataFrame, CUDF_Series, CUDF_MultiIndex,
os_fspath, os_PathLike)
PANDAS_INSTALLED, CUDF_INSTALLED,
CUDF_DataFrame, CUDF_Series, CUDF_MultiIndex,
os_fspath, os_PathLike, lazy_isinstance)
from .libpath import find_lib_path
# c_bst_ulong corresponds to bst_ulong defined in xgboost/c_api.h
@@ -319,7 +319,8 @@ DT_TYPE_MAPPER2 = {'bool': 'i', 'int': 'int', 'real': 'float'}
def _maybe_dt_data(data, feature_names, feature_types,
meta=None, meta_type=None):
"""Validate feature names and types if data table"""
if not isinstance(data, DataTable):
if (not lazy_isinstance(data, 'datatable', 'Frame') and
not lazy_isinstance(data, 'datatable', 'DataTable')):
return data, feature_names, feature_types
if meta and data.shape[1] > 1:
@@ -470,7 +471,7 @@ class DMatrix(object):
self._init_from_csc(data)
elif isinstance(data, np.ndarray):
self._init_from_npy2d(data, missing, nthread)
elif isinstance(data, DataTable):
elif lazy_isinstance(data, 'datatable', 'Frame'):
self._init_from_dt(data, nthread)
elif hasattr(data, "__cuda_array_interface__"):
self._init_from_array_interface(data, missing, nthread)
@@ -1052,7 +1053,7 @@ class Booster(object):
_check_call(
_LIB.XGBoosterUnserializeFromBuffer(self.handle, ptr, length))
self.__dict__.update(state)
elif isinstance(model_file, (STRING_TYPES, os_PathLike)):
elif isinstance(model_file, (STRING_TYPES, os_PathLike, bytearray)):
self.load_model(model_file)
elif model_file is None:
pass
@@ -1512,7 +1513,8 @@ class Booster(object):
return ctypes2buffer(cptr, length.value)
def load_model(self, fname):
"""Load the model from a file, local or as URI.
"""Load the model from a file or bytearray. Path to file can be local
or as an URI.
The model is loaded from an XGBoost format which is universal among the
various XGBoost interfaces. Auxiliary attributes of the Python Booster
@@ -1530,6 +1532,12 @@ class Booster(object):
# from URL.
_check_call(_LIB.XGBoosterLoadModel(
self.handle, c_str(os_fspath(fname))))
elif isinstance(fname, bytearray):
buf = fname
length = c_bst_ulong(len(buf))
ptr = (ctypes.c_char * len(buf)).from_buffer(buf)
_check_call(_LIB.XGBoosterLoadModelFromBuffer(self.handle, ptr,
length))
else:
raise TypeError('Unknown file type: ', fname)

View File

@@ -434,8 +434,8 @@ class XGBModel(XGBModelBase):
self.classes_ = np.array(v)
continue
if k == 'type' and type(self).__name__ != v:
msg = f'Current model type: {type(self).__name__}, ' + \
f'type of model in file: {v}'
msg = 'Current model type: {}, '.format(type(self).__name__) + \
'type of model in file: {}'.format(v)
raise TypeError(msg)
if k == 'type':
continue

View File

@@ -38,7 +38,7 @@ def _train_internal(params, dtrain,
_params = dict(params) if isinstance(params, list) else params
if 'num_parallel_tree' in _params and params[
if 'num_parallel_tree' in _params and _params[
'num_parallel_tree'] is not None:
num_parallel_tree = _params['num_parallel_tree']
nboost //= num_parallel_tree

View File

@@ -663,7 +663,11 @@ void GHistIndexBlockMatrix::Init(const GHistIndexMatrix& gmat,
* \brief fill a histogram by zeroes
*/
void InitilizeHistByZeroes(GHistRow hist, size_t begin, size_t end) {
#if defined(XGBOOST_STRICT_R_MODE) && XGBOOST_STRICT_R_MODE == 1
std::fill(hist.begin() + begin, hist.begin() + end, tree::GradStats());
#else // defined(XGBOOST_STRICT_R_MODE) && XGBOOST_STRICT_R_MODE == 1
memset(hist.data() + begin, '\0', (end-begin)*sizeof(tree::GradStats));
#endif // defined(XGBOOST_STRICT_R_MODE) && XGBOOST_STRICT_R_MODE == 1
}
/*!

View File

@@ -117,14 +117,16 @@ std::string LoadSequentialFile(std::string fname) {
size_t f_size_bytes = fs.st_size;
buffer.resize(f_size_bytes + 1);
int32_t fd = open(fname.c_str(), O_RDONLY);
#if defined(__linux__)
posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL);
#endif // defined(__linux__)
ssize_t bytes_read = read(fd, &buffer[0], f_size_bytes);
if (bytes_read < 0) {
close(fd);
ReadErr();
}
close(fd);
#else
#else // defined(__unix__)
FILE *f = fopen(fname.c_str(), "r");
if (f == NULL) {
std::string msg;

View File

@@ -15,15 +15,15 @@
#include "xgboost/base.h"
#include "xgboost/tree_model.h"
#if defined(XGBOOST_STRICT_R_MODE)
#if defined(XGBOOST_STRICT_R_MODE) && XGBOOST_STRICT_R_MODE == 1
#define OBSERVER_PRINT LOG(INFO)
#define OBSERVER_ENDL ""
#define OBSERVER_NEWLINE ""
#else
#else // defined(XGBOOST_STRICT_R_MODE) && XGBOOST_STRICT_R_MODE == 1
#define OBSERVER_PRINT std::cout
#define OBSERVER_ENDL std::endl
#define OBSERVER_NEWLINE "\n"
#endif // defined(XGBOOST_STRICT_R_MODE)
#endif // defined(XGBOOST_STRICT_R_MODE) && XGBOOST_STRICT_R_MODE == 1
namespace xgboost {
/*\brief An observer for logging internal data structures.

View File

@@ -3,6 +3,7 @@ ARG CMAKE_VERSION=3.12
# Environment
ENV DEBIAN_FRONTEND noninteractive
SHELL ["/bin/bash", "-c"] # Use Bash as shell
# Install all basic requirements
RUN \
@@ -19,10 +20,16 @@ ENV PATH=/opt/python/bin:$PATH
ENV GOSU_VERSION 1.10
# Install Python packages
# Create new Conda environment with Python 3.5
RUN conda create -n py35 python=3.5 && \
source activate py35 && \
pip install numpy pytest scipy scikit-learn pandas matplotlib wheel kubernetes urllib3 graphviz && \
source deactivate
# Install Python packages in default env
RUN \
pip install pyyaml cpplint pylint astroid sphinx numpy scipy pandas matplotlib sh \
recommonmark guzzle_sphinx_theme mock breathe matplotlib graphviz \
recommonmark guzzle_sphinx_theme mock breathe graphviz \
pytest scikit-learn wheel kubernetes urllib3 jsonschema boto3 && \
pip install https://h2o-release.s3.amazonaws.com/datatable/stable/datatable-0.7.0/datatable-0.7.0-cp37-cp37m-linux_x86_64.whl && \
pip install "dask[complete]"

View File

@@ -0,0 +1,35 @@
#!/bin/bash
set -e
set -x
if [ $# -ne 1 ]; then
echo "Usage: $0 [spark version]"
exit 1
fi
spark_version=$1
# Initialize local Maven repository
./tests/ci_build/initialize_maven.sh
rm -rf build/
cd jvm-packages
# Re-build package without Mock Rabit
# Deploy to S3 bucket xgboost-maven-repo
mvn --no-transfer-progress package deploy -P release-to-s3 -Dspark.version=${spark_version} -DskipTests
# Compile XGBoost4J with Scala 2.11 too
mvn clean
# Rename artifactId of all XGBoost4J packages with suffix _2.11
sed -i -e 's/<artifactId>xgboost\(.*\)_[0-9\.]\+/<artifactId>xgboost\1_2.11/' $(find . -name pom.xml)
# Modify scala.version and scala.binary.version fields
sed -i -e 's/<scala\.version>[0-9\.]\+/<scala.version>2.11.12/' $(find . -name pom.xml)
sed -i -e 's/<scala\.binary\.version>[0-9\.]\+/<scala.binary.version>2.11/' $(find . -name pom.xml)
# Re-build and deploy
mvn --no-transfer-progress package deploy -P release-to-s3 -Dspark.version=${spark_version} -DskipTests
set +x
set +e

View File

@@ -5,31 +5,35 @@ set -x
suite=$1
# Install XGBoost Python package
wheel_found=0
for file in python-package/dist/*.whl
do
if [ -e "${file}" ]
function install_xgboost {
wheel_found=0
for file in python-package/dist/*.whl
do
if [ -e "${file}" ]
then
pip install --user "${file}"
wheel_found=1
break # need just one
fi
done
if [ "$wheel_found" -eq 0 ]
then
pip install --user "${file}"
wheel_found=1
break # need just one
pushd .
cd python-package
python setup.py install --user
popd
fi
done
if [ "$wheel_found" -eq 0 ]
then
pushd .
cd python-package
python setup.py install --user
popd
fi
}
# Run specified test suite
case "$suite" in
gpu)
install_xgboost
pytest -v -s --fulltrace -m "not mgpu" tests/python-gpu
;;
mgpu)
install_xgboost
pytest -v -s --fulltrace -m "mgpu" tests/python-gpu
cd tests/distributed
./runtests-gpu.sh
@@ -39,17 +43,25 @@ case "$suite" in
cudf)
source activate cudf_test
install_xgboost
pytest -v -s --fulltrace -m "not mgpu" tests/python-gpu/test_from_columnar.py tests/python-gpu/test_from_cupy.py
;;
cpu)
install_xgboost
pytest -v -s --fulltrace tests/python
cd tests/distributed
./runtests.sh
;;
cpu-py35)
source activate py35
install_xgboost
pytest -v -s --fulltrace tests/python
;;
*)
echo "Usage: $0 {gpu|mgpu|cudf|cpu}"
echo "Usage: $0 {gpu|mgpu|cudf|cpu|cpu-py35}"
exit 1
;;
esac

View File

@@ -54,7 +54,7 @@ TEST(Version, Basic) {
ptr = 0;
v = std::stoi(str, &ptr);
ASSERT_EQ(v, XGBOOST_VER_MINOR) << "patch: " << v;;
ASSERT_EQ(v, XGBOOST_VER_PATCH) << "patch: " << v;;
str = str.substr(ptr);
ASSERT_EQ(str.size(), 0);

View File

@@ -35,6 +35,11 @@ def captured_output():
class TestBasic(unittest.TestCase):
def test_compat(self):
from xgboost.compat import lazy_isinstance
a = np.array([1, 2, 3])
assert lazy_isinstance(a, 'numpy', 'ndarray')
assert not lazy_isinstance(a, 'numpy', 'dataframe')
def test_basic(self):
dtrain = xgb.DMatrix(dpath + 'agaricus.txt.train')

View File

@@ -300,6 +300,13 @@ class TestModels(unittest.TestCase):
assert float(config['learner']['objective'][
'reg_loss_param']['scale_pos_weight']) == 0.5
buf = bst.save_raw()
from_raw = xgb.Booster()
from_raw.load_model(buf)
buf_from_raw = from_raw.save_raw()
assert buf == buf_from_raw
def test_model_json_io(self):
loc = locale.getpreferredencoding(False)
model_path = 'test_model_json_io.json'

View File

@@ -34,7 +34,8 @@ def test_binary_classification():
kf = KFold(n_splits=2, shuffle=True, random_state=rng)
for cls in (xgb.XGBClassifier, xgb.XGBRFClassifier):
for train_index, test_index in kf.split(X, y):
xgb_model = cls(random_state=42).fit(X[train_index], y[train_index])
clf = cls(random_state=42)
xgb_model = clf.fit(X[train_index], y[train_index], eval_metric=['auc', 'logloss'])
preds = xgb_model.predict(X[test_index])
labels = y[test_index]
err = sum(1 for i in range(len(preds))

View File

@@ -1,5 +1,5 @@
# coding: utf-8
from xgboost.compat import SKLEARN_INSTALLED, PANDAS_INSTALLED, DT_INSTALLED
from xgboost.compat import SKLEARN_INSTALLED, PANDAS_INSTALLED
from xgboost.compat import CUDF_INSTALLED, DASK_INSTALLED
@@ -19,7 +19,9 @@ def no_pandas():
def no_dt():
return {'condition': not DT_INSTALLED,
import importlib.util
spec = importlib.util.find_spec('datatable')
return {'condition': spec is None,
'reason': 'Datatable is not installed.'}