Compare commits
11 Commits
release_1.2.0 ... v1.2.0
| SHA1 |
|---|
| 738786680b |
| 04232c01b2 |
| 0353a78ab7 |
| 0089a0e6bf |
| 03a68a1714 |
| a0da8a7e0a |
| eee4eff49b |
| 936a854baa |
| 7856da5827 |
| 50a0def6c3 |
| 9116a0ec10 |
Jenkinsfile (vendored)
```diff
@@ -92,7 +92,7 @@ pipeline {
         'test-python-gpu-cuda10.2': { TestPythonGPU(host_cuda_version: '10.2') },
         'test-python-gpu-cuda11.0-cross': { TestPythonGPU(artifact_cuda_version: '10.0', host_cuda_version: '11.0') },
         'test-python-gpu-cuda11.0': { TestPythonGPU(artifact_cuda_version: '11.0', host_cuda_version: '11.0') },
-        'test-python-mgpu-cuda10.2': { TestPythonGPU(artifact_cuda_version: '10.2', host_cuda_version: '10.2', multi_gpu: true) },
+        'test-python-mgpu-cuda10.2': { TestPythonGPU(artifact_cuda_version: '10.0', host_cuda_version: '10.2', multi_gpu: true) },
         'test-cpp-gpu-cuda10.2': { TestCppGPU(artifact_cuda_version: '10.2', host_cuda_version: '10.2') },
         'test-cpp-gpu-cuda11.0': { TestCppGPU(artifact_cuda_version: '11.0', host_cuda_version: '11.0') },
         'test-jvm-jdk8-cuda10.0': { CrossTestJVMwithJDKGPU(artifact_cuda_version: '10.0', host_cuda_version: '10.0') },
@@ -285,7 +285,7 @@ def BuildCUDA(args) {
 }
 
 def BuildJVMPackagesWithCUDA(args) {
-  node('linux && gpu') {
+  node('linux && mgpu') {
     unstash name: 'srcs'
     echo "Build XGBoost4J-Spark with Spark ${args.spark_version}, CUDA ${args.cuda_version}"
     def container_type = "jvm_gpu_build"
@@ -472,10 +472,11 @@ def DeployJVMPackages(args) {
     unstash name: 'srcs'
     if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) {
       echo 'Deploying to xgboost-maven-repo S3 repo...'
-      def container_type = "jvm"
-      def docker_binary = "docker"
       sh """
-      ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/deploy_jvm_packages.sh ${args.spark_version}
+      ${dockerRun} jvm docker tests/ci_build/deploy_jvm_packages.sh ${args.spark_version} 0
+      """
+      sh """
+      ${dockerRun} jvm_gpu_build docker --build-arg CUDA_VERSION=10.0 tests/ci_build/deploy_jvm_packages.sh ${args.spark_version} 1
       """
     }
     deleteDir()
```
```diff
@@ -1 +1 @@
-@xgboost_VERSION_MAJOR@.@xgboost_VERSION_MINOR@.@xgboost_VERSION_PATCH@-SNAPSHOT
+@xgboost_VERSION_MAJOR@.@xgboost_VERSION_MINOR@.@xgboost_VERSION_PATCH@
```
jvm-packages/pom.xml

```diff
@@ -6,7 +6,7 @@
 
     <groupId>ml.dmlc</groupId>
     <artifactId>xgboost-jvm_2.12</artifactId>
-    <version>1.2.0-SNAPSHOT</version>
+    <version>1.2.0</version>
    <packaging>pom</packaging>
    <name>XGBoost JVM Package</name>
    <description>JVM Package for XGBoost</description>
```
jvm-packages/xgboost4j-example/pom.xml

```diff
@@ -6,10 +6,10 @@
     <parent>
         <groupId>ml.dmlc</groupId>
         <artifactId>xgboost-jvm_2.12</artifactId>
-        <version>1.2.0-SNAPSHOT</version>
+        <version>1.2.0</version>
     </parent>
     <artifactId>xgboost4j-example_2.12</artifactId>
-    <version>1.2.0-SNAPSHOT</version>
+    <version>1.2.0</version>
     <packaging>jar</packaging>
     <build>
         <plugins>
@@ -26,7 +26,7 @@
         <dependency>
             <groupId>ml.dmlc</groupId>
             <artifactId>xgboost4j-spark_${scala.binary.version}</artifactId>
-            <version>1.2.0-SNAPSHOT</version>
+            <version>1.2.0</version>
         </dependency>
         <dependency>
             <groupId>org.apache.spark</groupId>
@@ -37,7 +37,7 @@
         <dependency>
             <groupId>ml.dmlc</groupId>
             <artifactId>xgboost4j-flink_${scala.binary.version}</artifactId>
-            <version>1.2.0-SNAPSHOT</version>
+            <version>1.2.0</version>
         </dependency>
         <dependency>
             <groupId>org.apache.commons</groupId>
```
jvm-packages/xgboost4j-flink/pom.xml

```diff
@@ -6,10 +6,10 @@
     <parent>
         <groupId>ml.dmlc</groupId>
         <artifactId>xgboost-jvm_2.12</artifactId>
-        <version>1.2.0-SNAPSHOT</version>
+        <version>1.2.0</version>
     </parent>
     <artifactId>xgboost4j-flink_2.12</artifactId>
-    <version>1.2.0-SNAPSHOT</version>
+    <version>1.2.0</version>
     <build>
         <plugins>
             <plugin>
@@ -26,7 +26,7 @@
         <dependency>
             <groupId>ml.dmlc</groupId>
             <artifactId>xgboost4j_${scala.binary.version}</artifactId>
-            <version>1.2.0-SNAPSHOT</version>
+            <version>1.2.0</version>
         </dependency>
         <dependency>
             <groupId>org.apache.commons</groupId>
```
jvm-packages/xgboost4j-spark/pom.xml

```diff
@@ -6,7 +6,7 @@
     <parent>
         <groupId>ml.dmlc</groupId>
         <artifactId>xgboost-jvm_2.12</artifactId>
-        <version>1.2.0-SNAPSHOT</version>
+        <version>1.2.0</version>
     </parent>
     <artifactId>xgboost4j-spark_2.12</artifactId>
     <build>
@@ -24,7 +24,7 @@
         <dependency>
             <groupId>ml.dmlc</groupId>
             <artifactId>xgboost4j_${scala.binary.version}</artifactId>
-            <version>1.2.0-SNAPSHOT</version>
+            <version>1.2.0</version>
         </dependency>
         <dependency>
             <groupId>org.apache.spark</groupId>
```
jvm-packages/xgboost4j/pom.xml

```diff
@@ -6,10 +6,10 @@
     <parent>
         <groupId>ml.dmlc</groupId>
         <artifactId>xgboost-jvm_2.12</artifactId>
-        <version>1.2.0-SNAPSHOT</version>
+        <version>1.2.0</version>
     </parent>
     <artifactId>xgboost4j_2.12</artifactId>
-    <version>1.2.0-SNAPSHOT</version>
+    <version>1.2.0</version>
     <packaging>jar</packaging>
 
     <dependencies>
```
python-package/xgboost/VERSION

```diff
@@ -1 +1 @@
-1.2.0-SNAPSHOT
+1.2.0
```
python-package/xgboost/dask.py

```diff
@@ -738,7 +738,8 @@ async def _predict_async(client: Client, model, data, *args,
             predt = booster.predict(data=local_x,
                                     validate_features=local_x.num_row() != 0,
                                     *args)
-            ret = (delayed(predt), order)
+            columns = 1 if len(predt.shape) == 1 else predt.shape[1]
+            ret = ((delayed(predt), columns), order)
             predictions.append(ret)
         return predictions
 
@@ -775,7 +776,9 @@ async def _predict_async(client: Client, model, data, *args,
     # See https://docs.dask.org/en/latest/array-creation.html
     arrays = []
     for i, shape in enumerate(shapes):
-        arrays.append(da.from_delayed(results[i], shape=(shape[0], ),
+        arrays.append(da.from_delayed(
+            results[i][0], shape=(shape[0],)
+            if results[i][1] == 1 else (shape[0], results[i][1]),
             dtype=numpy.float32))
     predictions = await da.concatenate(arrays, axis=0)
     return predictions
```
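The two hunks above teach `_predict_async` to carry the per-partition column count alongside each delayed prediction, so the dask array assembled afterwards can be 2-D for multi-class output instead of always flat. A standalone sketch of the column rule (plain numpy, no cluster needed; `infer_columns` is an illustrative name, not part of the library):

```python
import numpy as np

def infer_columns(predt):
    # Mirrors the rule added above: 1-D local predictions (regression,
    # binary) map to a single column; 2-D predictions (e.g. multi:softprob)
    # keep their class dimension.
    return 1 if len(predt.shape) == 1 else predt.shape[1]

assert infer_columns(np.zeros(8)) == 1        # regression / binary output
assert infer_columns(np.zeros((8, 3))) == 3   # 3-class probabilities
```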
python-package/xgboost/dask.py

```diff
@@ -978,6 +981,7 @@ class DaskScikitLearnBase(XGBModel):
     def client(self, clt):
         self._client = clt
 
+
 @xgboost_model_doc("""Implementation of the Scikit-Learn API for XGBoost.""",
                    ['estimators', 'model'])
 class DaskXGBRegressor(DaskScikitLearnBase, XGBRegressorBase):
@@ -1032,9 +1036,6 @@ class DaskXGBRegressor(DaskScikitLearnBase, XGBRegressorBase):
     ['estimators', 'model']
 )
 class DaskXGBClassifier(DaskScikitLearnBase, XGBClassifierBase):
-    # pylint: disable=missing-docstring
-    _client = None
-
     async def _fit_async(self, X, y,
                          sample_weights=None,
                          eval_set=None,
@@ -1078,13 +1079,34 @@ class DaskXGBClassifier(DaskScikitLearnBase, XGBClassifierBase):
         return self.client.sync(self._fit_async, X, y, sample_weights,
                                 eval_set, sample_weight_eval_set, verbose)
 
-    async def _predict_async(self, data):
+    async def _predict_proba_async(self, data):
+        _assert_dask_support()
+
         test_dmatrix = await DaskDMatrix(client=self.client, data=data,
                                          missing=self.missing)
         pred_probs = await predict(client=self.client,
                                    model=self.get_booster(), data=test_dmatrix)
         return pred_probs
 
+    def predict_proba(self, data):  # pylint: disable=arguments-differ,missing-docstring
+        _assert_dask_support()
+        return self.client.sync(self._predict_proba_async, data)
+
+    async def _predict_async(self, data):
+        _assert_dask_support()
+
+        test_dmatrix = await DaskDMatrix(client=self.client, data=data,
+                                         missing=self.missing)
+        pred_probs = await predict(client=self.client,
+                                   model=self.get_booster(), data=test_dmatrix)
+
+        if self.n_classes_ == 2:
+            preds = (pred_probs > 0.5).astype(int)
+        else:
+            preds = da.argmax(pred_probs, axis=1)
+
+        return preds
+
     def predict(self, data):  # pylint: disable=arguments-differ
         _assert_dask_support()
         return self.client.sync(self._predict_async, data)
```
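With this change `DaskXGBClassifier` exposes `predict_proba`, and `predict` now returns class labels (a 0.5 threshold for binary problems, `argmax` over the class probabilities otherwise) rather than raw probabilities. A minimal usage sketch, not from the diff, assuming a throwaway local cluster and synthetic data:

```python
import dask.array as da
import xgboost as xgb
from dask.distributed import Client, LocalCluster

with LocalCluster(n_workers=2) as cluster, Client(cluster) as client:
    X = da.random.random((1000, 10), chunks=(100, 10))
    y = da.random.randint(0, 3, size=1000, chunks=100)   # 3 classes

    clf = xgb.dask.DaskXGBClassifier(n_estimators=2)
    clf.client = client
    clf.fit(X, y)

    probas = clf.predict_proba(X)   # lazy dask array of probabilities
    labels = clf.predict(X)         # class labels via argmax
    assert probas.compute().shape == (1000, 3)
    assert labels.compute().shape == (1000,)
```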
python-package/xgboost/sklearn.py

```diff
@@ -77,7 +77,7 @@ __model_doc = '''
     gamma : float
         Minimum loss reduction required to make a further partition on a leaf
         node of the tree.
-    min_child_weight : int
+    min_child_weight : float
         Minimum sum of instance weight(hessian) needed in a child.
     max_delta_step : int
         Maximum delta step we allow each tree's weight estimation to be.
@@ -750,7 +750,10 @@ class XGBModel(XGBModelBase):
 
 @xgboost_model_doc(
     "Implementation of the scikit-learn API for XGBoost classification.",
-    ['model', 'objective'])
+    ['model', 'objective'], extra_parameters='''
+    n_estimators : int
+        Number of boosting rounds.
+''')
 class XGBClassifier(XGBModel, XGBClassifierBase):
     # pylint: disable=missing-docstring,invalid-name,too-many-instance-attributes
     def __init__(self, objective="binary:logistic", **kwargs):
@@ -1033,7 +1036,10 @@ class XGBRegressor(XGBModel, XGBRegressorBase):
 
 @xgboost_model_doc(
     "scikit-learn API for XGBoost random forest regression.",
-    ['model', 'objective'])
+    ['model', 'objective'], extra_parameters='''
+    n_estimators : int
+        Number of trees in random forest to fit.
+''')
 class XGBRFRegressor(XGBRegressor):
     # pylint: disable=missing-docstring
     def __init__(self, learning_rate=1, subsample=0.8, colsample_bynode=0.8,
```
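The decorator now injects an `n_estimators` entry into each class's docstring via `extra_parameters`, since the parameter means different things for plain boosting and the random-forest wrappers. A hedged illustration of the distinction (not part of the diff):

```python
import xgboost as xgb

# For XGBClassifier, n_estimators is the number of boosting rounds;
# for XGBRFRegressor, it is the number of trees in the random forest.
clf = xgb.XGBClassifier(n_estimators=10)
rf = xgb.XGBRFRegressor(n_estimators=100)

# The injected documentation is visible in the generated docstrings.
print(xgb.XGBClassifier.__doc__[:200])
```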
src/gbm/gbtree_model.cc

```diff
@@ -1,6 +1,8 @@
 /*!
- * Copyright 2019 by Contributors
+ * Copyright 2019-2020 by Contributors
  */
+#include <utility>
+
 #include "xgboost/json.h"
 #include "xgboost/logging.h"
 #include "gbtree_model.h"
@@ -41,15 +43,14 @@ void GBTreeModel::SaveModel(Json* p_out) const {
   auto& out = *p_out;
   CHECK_EQ(param.num_trees, static_cast<int>(trees.size()));
   out["gbtree_model_param"] = ToJson(param);
-  std::vector<Json> trees_json;
-  size_t t = 0;
-  for (auto const& tree : trees) {
+  std::vector<Json> trees_json(trees.size());
+  for (size_t t = 0; t < trees.size(); ++t) {
+    auto const& tree = trees[t];
     Json tree_json{Object()};
     tree->SaveModel(&tree_json);
     // The field is not used in XGBoost, but might be useful for external project.
-    tree_json["id"] = Integer(t);
-    trees_json.emplace_back(tree_json);
-    t++;
+    tree_json["id"] = Integer(static_cast<Integer::Int>(t));
+    trees_json[t] = std::move(tree_json);
   }
 
   std::vector<Json> tree_info_json(tree_info.size());
@@ -70,9 +71,10 @@ void GBTreeModel::LoadModel(Json const& in) {
   auto const& trees_json = get<Array const>(in["trees"]);
   trees.resize(trees_json.size());
 
-  for (size_t t = 0; t < trees.size(); ++t) {
-    trees[t].reset( new RegTree() );
-    trees[t]->LoadModel(trees_json[t]);
+  for (size_t t = 0; t < trees_json.size(); ++t) {  // NOLINT
+    auto tree_id = get<Integer>(trees_json[t]["id"]);
+    trees.at(tree_id).reset(new RegTree());
+    trees.at(tree_id)->LoadModel(trees_json[t]);
   }
 
   tree_info.resize(param.num_trees);
```
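`SaveModel` now writes each tree into its slot by index and records that index in the tree's `"id"` field, and `LoadModel` places each tree at its stored id rather than its array position, so tree ordering survives the JSON round trip. A Python-level sketch of the resulting layout (the path into the document assumes xgboost 1.2's gbtree JSON serialization):

```python
import json
import numpy as np
import xgboost as xgb

X, y = np.random.rand(100, 5), np.random.rand(100)
booster = xgb.train({}, xgb.DMatrix(X, y), num_boost_round=3)
booster.save_model("model.json")  # JSON format, selected by the extension

with open("model.json") as fd:
    model = json.load(fd)
trees = model["learner"]["gradient_booster"]["model"]["trees"]
# Each tree carries its position in the "id" field that LoadModel relies on.
print([t["id"] for t in trees])   # [0, 1, 2]
```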
tests/ci_build/Dockerfile.gpu

```diff
@@ -17,8 +17,8 @@ ENV PATH=/opt/python/bin:$PATH
 
 # Create new Conda environment with cuDF, Dask, and cuPy
 RUN \
-    conda create -n gpu_test -c rapidsai -c nvidia -c conda-forge -c defaults \
-        python=3.7 cudf=0.14 cudatoolkit=$CUDA_VERSION dask dask-cuda dask-cudf cupy \
+    conda create -n gpu_test -c rapidsai-nightly -c rapidsai -c nvidia -c conda-forge -c defaults \
+        python=3.7 cudf=0.15* cudatoolkit=$CUDA_VERSION dask dask-cuda dask-cudf cupy \
         numpy pytest scipy scikit-learn pandas matplotlib wheel python-kubernetes urllib3 graphviz hypothesis
 
 ENV GOSU_VERSION 1.10
```
tests/ci_build/deploy_jvm_packages.sh

```diff
@@ -3,22 +3,32 @@
 set -e
 set -x
 
-if [ $# -ne 1 ]; then
-  echo "Usage: $0 [spark version]"
+if [ $# -ne 2 ]; then
+  echo "Usage: $0 [spark version] [build_gpu? 0 or 1]"
   exit 1
 fi
 
 spark_version=$1
+build_gpu=$2
 
 # Initialize local Maven repository
 ./tests/ci_build/initialize_maven.sh
 
-rm -rf build/
 cd jvm-packages
+rm -rf $(find . -name target)
+rm -rf ../build/
 
 # Re-build package without Mock Rabit
 # Deploy to S3 bucket xgboost-maven-repo
-mvn --no-transfer-progress package deploy -P release-to-s3 -Dspark.version=${spark_version} -DskipTests
+if [[ "$build_gpu" == "0" ]]
+then
+  # Build CPU artifact
+  mvn --no-transfer-progress package deploy -P release-to-s3 -Dspark.version=${spark_version} -DskipTests
+else
+  # Build GPU artifact
+  sed -i -e 's/<artifactId>xgboost\(.*\)_\(.*\)<\/artifactId>/<artifactId>xgboost\1-gpu_\2<\/artifactId>/' $(find . -name pom.xml)
+  mvn --no-transfer-progress package deploy -Duse.cuda=ON -P release-to-s3 -Dspark.version=${spark_version} -DskipTests
+fi
 
 set +x
 set +e
```
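The script now takes a second argument selecting a CPU (`0`) or GPU (`1`) artifact; for the GPU pass it rewrites every `artifactId` in the pom files to a `-gpu` variant before deploying with `-Duse.cuda=ON`. A sketch of what the `sed` expression does, translated to Python for illustration:

```python
import re

# Rewrites artifact ids such as "xgboost4j-spark_2.12"
# to "xgboost4j-spark-gpu_2.12", as the sed command above does in place.
pattern = re.compile(r"<artifactId>xgboost(.*)_(.*)</artifactId>")
line = "<artifactId>xgboost4j-spark_2.12</artifactId>"
print(pattern.sub(r"<artifactId>xgboost\1-gpu_\2</artifactId>", line))
# -> <artifactId>xgboost4j-spark-gpu_2.12</artifactId>
```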
tests/cpp/test_learner.cc

```diff
@@ -148,7 +148,16 @@ TEST(Learner, JsonModelIO) {
   Json out { Object() };
   learner->SaveModel(&out);
 
-  learner->LoadModel(out);
+  dmlc::TemporaryDirectory tmpdir;
+
+  std::ofstream fout (tmpdir.path + "/model.json");
+  fout << out;
+  fout.close();
+
+  auto loaded_str = common::LoadSequentialFile(tmpdir.path + "/model.json");
+  Json loaded = Json::Load(StringView{loaded_str.c_str(), loaded_str.size()});
+
+  learner->LoadModel(loaded);
   learner->Configure();
 
   Json new_in { Object() };
```
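The test previously fed the in-memory `Json` object straight back into `LoadModel`; it now writes the model to a real file and re-parses it, so the textual round trip is exercised as well. The same property can be checked from Python (a hedged analogue, not part of the diff):

```python
import numpy as np
import xgboost as xgb

X, y = np.random.rand(50, 4), np.random.rand(50)
dtrain = xgb.DMatrix(X, y)
booster = xgb.train({"tree_method": "hist"}, dtrain, num_boost_round=2)

booster.save_model("model.json")
reloaded = xgb.Booster(model_file="model.json")
# Predictions must be identical after the save/parse/load round trip.
np.testing.assert_allclose(booster.predict(dtrain), reloaded.predict(dtrain))
```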
tests/python/test_cli.py

```diff
@@ -121,6 +121,8 @@ eval[test] = {data_path}
         v = xgboost.__version__
         if v.find('SNAPSHOT') != -1:
             assert msg.split(':')[1].strip() == v.split('-')[0]
+        elif v.find('rc') != -1:
+            assert msg.split(':')[1].strip() == v.split('rc')[0]
         else:
             assert msg.split(':')[1].strip() == v
 
```
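Release candidates carry versions like `1.2.0rc1`, which neither the SNAPSHOT branch nor the exact-match branch accepted; the new `elif` strips the `rc` suffix before comparing. The comparison it performs, in isolation (the version string here is a hypothetical example):

```python
v = "1.2.0rc1"                        # hypothetical release-candidate version
assert v.split("rc")[0] == "1.2.0"    # what the new branch compares against
```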
tests/python/test_with_dask.py

```diff
@@ -5,6 +5,7 @@ import sys
 import numpy as np
 import json
 import asyncio
+from sklearn.datasets import make_classification
 
 if sys.platform.startswith("win"):
     pytest.skip("Skipping dask tests on Windows", allow_module_level=True)
@@ -36,7 +37,7 @@ def generate_array():
 
 
 def test_from_dask_dataframe():
-    with LocalCluster(n_workers=5) as cluster:
+    with LocalCluster(n_workers=kWorkers) as cluster:
         with Client(cluster) as client:
             X, y = generate_array()
 
@@ -74,7 +75,7 @@ def test_from_dask_dataframe():
 
 
 def test_from_dask_array():
-    with LocalCluster(n_workers=5, threads_per_worker=5) as cluster:
+    with LocalCluster(n_workers=kWorkers, threads_per_worker=5) as cluster:
         with Client(cluster) as client:
             X, y = generate_array()
             dtrain = DaskDMatrix(client, X, y)
@@ -104,8 +105,28 @@ def test_from_dask_array():
             assert np.all(single_node_predt == from_arr.compute())
 
 
+def test_dask_predict_shape_infer():
+    with LocalCluster(n_workers=kWorkers) as cluster:
+        with Client(cluster) as client:
+            X, y = make_classification(n_samples=1000, n_informative=5,
+                                       n_classes=3)
+            X_ = dd.from_array(X, chunksize=100)
+            y_ = dd.from_array(y, chunksize=100)
+            dtrain = xgb.dask.DaskDMatrix(client, data=X_, label=y_)
+
+            model = xgb.dask.train(
+                client,
+                {"objective": "multi:softprob", "num_class": 3},
+                dtrain=dtrain
+            )
+
+            preds = xgb.dask.predict(client, model, dtrain)
+            assert preds.shape[0] == preds.compute().shape[0]
+            assert preds.shape[1] == preds.compute().shape[1]
+
+
 def test_dask_missing_value_reg():
-    with LocalCluster(n_workers=5) as cluster:
+    with LocalCluster(n_workers=kWorkers) as cluster:
         with Client(cluster) as client:
             X_0 = np.ones((20 // 2, kCols))
             X_1 = np.zeros((20 // 2, kCols))
@@ -144,19 +165,19 @@ def test_dask_missing_value_cls():
                                               missing=0.0)
             cls.client = client
             cls.fit(X, y, eval_set=[(X, y)])
-            dd_predt = cls.predict(X).compute()
+            dd_pred_proba = cls.predict_proba(X).compute()
 
             np_X = X.compute()
-            np_predt = cls.get_booster().predict(
+            np_pred_proba = cls.get_booster().predict(
                 xgb.DMatrix(np_X, missing=0.0))
-            np.testing.assert_allclose(np_predt, dd_predt)
+            np.testing.assert_allclose(np_pred_proba, dd_pred_proba)
 
             cls = xgb.dask.DaskXGBClassifier()
             assert hasattr(cls, 'missing')
 
 
 def test_dask_regressor():
-    with LocalCluster(n_workers=5) as cluster:
+    with LocalCluster(n_workers=kWorkers) as cluster:
         with Client(cluster) as client:
             X, y = generate_array()
             regressor = xgb.dask.DaskXGBRegressor(verbosity=1, n_estimators=2)
@@ -178,7 +199,7 @@ def test_dask_regressor():
 
 
 def test_dask_classifier():
-    with LocalCluster(n_workers=5) as cluster:
+    with LocalCluster(n_workers=kWorkers) as cluster:
         with Client(cluster) as client:
             X, y = generate_array()
             y = (y * 10).astype(np.int32)
@@ -201,7 +222,18 @@ def test_dask_classifier():
             assert len(list(history['validation_0'])) == 1
             assert len(history['validation_0']['merror']) == 2
 
+            # Test .predict_proba()
+            probas = classifier.predict_proba(X)
             assert classifier.n_classes_ == 10
+            assert probas.ndim == 2
+            assert probas.shape[0] == kRows
+            assert probas.shape[1] == 10
+
+            cls_booster = classifier.get_booster()
+            single_node_proba = cls_booster.inplace_predict(X.compute())
+
+            np.testing.assert_allclose(single_node_proba,
+                                       probas.compute())
 
             # Test with dataframe.
             X_d = dd.from_dask_array(X)
@@ -218,7 +250,7 @@ def test_dask_classifier():
 @pytest.mark.skipif(**tm.no_sklearn())
 def test_sklearn_grid_search():
     from sklearn.model_selection import GridSearchCV
-    with LocalCluster(n_workers=4) as cluster:
+    with LocalCluster(n_workers=kWorkers) as cluster:
         with Client(cluster) as client:
             X, y = generate_array()
             reg = xgb.dask.DaskXGBRegressor(learning_rate=0.1,
@@ -292,7 +324,9 @@ def run_empty_dmatrix_cls(client, parameters):
                          evals=[(dtrain, 'validation')],
                          num_boost_round=2)
     predictions = xgb.dask.predict(client=client, model=out,
-                                   data=dtrain).compute()
+                                   data=dtrain)
+    assert predictions.shape[1] == n_classes
+    predictions = predictions.compute()
    _check_outputs(out, predictions)
 
     # train has more rows than evals
```
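Because multi-class predictions now come back as a lazy 2-D dask array, the test above can assert the column count from shape metadata before materializing anything, and `compute()` is only called afterwards for the value checks. The same behaviour with a plain dask array, for illustration:

```python
import dask.array as da
import numpy as np

lazy = da.zeros((100, 3), chunks=(10, 3), dtype=np.float32)
assert lazy.shape[1] == 3   # shape metadata only, no computation triggered
values = lazy.compute()     # data is produced only here
assert values.shape == (100, 3)
```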
tests/python/test_with_dask.py

```diff
@@ -315,7 +349,7 @@ def run_empty_dmatrix_cls(client, parameters):
 # environment and Exact doesn't support it.
 
 def test_empty_dmatrix_hist():
-    with LocalCluster(n_workers=5) as cluster:
+    with LocalCluster(n_workers=kWorkers) as cluster:
         with Client(cluster) as client:
             parameters = {'tree_method': 'hist'}
             run_empty_dmatrix_reg(client, parameters)
@@ -323,7 +357,7 @@ def test_empty_dmatrix_hist():
 
 
 def test_empty_dmatrix_approx():
-    with LocalCluster(n_workers=5) as cluster:
+    with LocalCluster(n_workers=kWorkers) as cluster:
         with Client(cluster) as client:
             parameters = {'tree_method': 'approx'}
             run_empty_dmatrix_reg(client, parameters)
@@ -397,7 +431,13 @@ async def run_dask_classifier_asyncio(scheduler_address):
     assert len(list(history['validation_0'])) == 1
     assert len(history['validation_0']['merror']) == 2
 
+    # Test .predict_proba()
+    probas = await classifier.predict_proba(X)
     assert classifier.n_classes_ == 10
+    assert probas.ndim == 2
+    assert probas.shape[0] == kRows
+    assert probas.shape[1] == 10
+
 
     # Test with dataframe.
     X_d = dd.from_dask_array(X)
```