diff --git a/Jenkinsfile b/Jenkinsfile new file mode 100644 index 000000000..b63ffc91c --- /dev/null +++ b/Jenkinsfile @@ -0,0 +1,55 @@ +// -*- mode: groovy -*- +// Jenkins pipeline +// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/ + +// command to start a docker container +docker_run = 'tests/ci_build/ci_build.sh' + +// timeout in minutes +max_time = 60 + +// initialize source codes +def init_git() { + retry(5) { + try { + timeout(time: 2, unit: 'MINUTES') { + checkout scm + sh 'git submodule update --init' + } + } catch (exc) { + deleteDir() + error "Failed to fetch source codes" + } + } +} + +stage('Build') { + node('GPU && linux') { + ws('workspace/xgboost/build-gpu-cmake') { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + sh "${docker_run} gpu tests/ci_build/build_gpu_cmake.sh" + } + } + } + node('GPU && linux') { + ws('workspace/xgboost/build-gpu-make') { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + sh "${docker_run} gpu make PLUGIN_UPDATER_GPU=ON" + } + } + } +} + + +stage('Unit Test') { + node('GPU && linux') { + ws('workspace/xgboost/unit-test') { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + sh "${docker_run} gpu tests/ci_build/test_gpu.sh" + } + } + } +} diff --git a/tests/ci_build/Dockerfile.gpu b/tests/ci_build/Dockerfile.gpu new file mode 100644 index 000000000..4ca3a7b82 --- /dev/null +++ b/tests/ci_build/Dockerfile.gpu @@ -0,0 +1,16 @@ +FROM nvidia/cuda:8.0-devel-ubuntu14.04 + +RUN apt-get update && apt-get -y upgrade +# CMAKE +RUN sudo apt-get install -y build-essential +RUN apt-get install -y wget +RUN wget http://www.cmake.org/files/v3.5/cmake-3.5.2.tar.gz +RUN tar -xvzf cmake-3.5.2.tar.gz +RUN cd cmake-3.5.2/ && ./configure && make && sudo make install + +# BLAS +RUN apt-get install -y libatlas-base-dev + +# PYTHON2 +RUN apt-get install -y python-setuptools python-pip python-dev unzip gfortran +RUN pip install numpy nose scipy scikit-learn diff --git 
a/tests/ci_build/build_gpu_cmake.sh b/tests/ci_build/build_gpu_cmake.sh new file mode 100644 index 000000000..d8d0b9726 --- /dev/null +++ b/tests/ci_build/build_gpu_cmake.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash +set -e +mkdir -p build +cd build +cmake .. -DPLUGIN_UPDATER_GPU=ON +make diff --git a/tests/ci_build/ci_build.sh b/tests/ci_build/ci_build.sh new file mode 100644 index 000000000..46a9d8880 --- /dev/null +++ b/tests/ci_build/ci_build.sh @@ -0,0 +1,135 @@ +#!/usr/bin/env bash +# +# Execute command within a docker container +# +# Usage: ci_build.sh [--dockerfile ] [-it] +# +# +# CONTAINER_TYPE: Type of the docker container used to run the build: e.g., +# (cpu | gpu) +# +# DOCKERFILE_PATH: (Optional) Path to the Dockerfile used for docker build. If +# this optional value is not supplied (via the --dockerfile +# flag), will use Dockerfile.CONTAINER_TYPE in default +# +# COMMAND: Command to be executed in the docker container +# +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# Get the command line arguments. +CONTAINER_TYPE=$( echo "$1" | tr '[:upper:]' '[:lower:]' ) +shift 1 + +# Dockerfile to be used in docker build +DOCKERFILE_PATH="${SCRIPT_DIR}/Dockerfile.${CONTAINER_TYPE}" +DOCKER_CONTEXT_PATH="${SCRIPT_DIR}" + +if [[ "$1" == "--dockerfile" ]]; then + DOCKERFILE_PATH="$2" + DOCKER_CONTEXT_PATH=$(dirname "${DOCKERFILE_PATH}") + echo "Using custom Dockerfile path: ${DOCKERFILE_PATH}" + echo "Using custom docker build context path: ${DOCKER_CONTEXT_PATH}" + shift 2 +fi + +if [[ "$1" == "-it" ]]; then + CI_DOCKER_EXTRA_PARAMS+=('-it') + shift 1 +fi + +if [[ ! -f "${DOCKERFILE_PATH}" ]]; then + echo "Invalid Dockerfile path: \"${DOCKERFILE_PATH}\"" + exit 1 +fi + +COMMAND=("$@") + +# Validate command line arguments. +if [ "$#" -lt 1 ] || [ ! 
-e "${SCRIPT_DIR}/Dockerfile.${CONTAINER_TYPE}" ]; then + supported_container_types=$( ls -1 ${SCRIPT_DIR}/Dockerfile.* | \ + sed -n 's/.*Dockerfile\.\([^\/]*\)/\1/p' | tr '\n' ' ' ) + echo "Usage: $(basename $0) CONTAINER_TYPE COMMAND" + echo " CONTAINER_TYPE can be one of [${supported_container_types}]" + echo " COMMAND is a command (with arguments) to run inside" + echo " the container." + exit 1 +fi + +# Use nvidia-docker if the container is GPU. +if [[ "${CONTAINER_TYPE}" == *"gpu"* ]]; then + DOCKER_BINARY="nvidia-docker" +else + DOCKER_BINARY="docker" +fi + +# Helper function to traverse directories up until given file is found. +function upsearch () { + test / == "$PWD" && return || \ + test -e "$1" && echo "$PWD" && return || \ + cd .. && upsearch "$1" +} + +# Set up WORKSPACE. Jenkins will set them for you or we pick +# reasonable defaults if you run it outside of Jenkins. +WORKSPACE="${WORKSPACE:-${SCRIPT_DIR}/../../}" + +# Determine the docker image name +DOCKER_IMG_NAME="xgb-ci.${CONTAINER_TYPE}" + +# Under Jenkins matrix build, the build tag may contain characters such as +# commas (,) and equal signs (=), which are not valid inside docker image names. +DOCKER_IMG_NAME=$(echo "${DOCKER_IMG_NAME}" | sed -e 's/=/_/g' -e 's/,/-/g') + +# Convert to all lower-case, as per requirement of Docker image names +DOCKER_IMG_NAME=$(echo "${DOCKER_IMG_NAME}" | tr '[:upper:]' '[:lower:]') + +# skip with_the_same_user for non-linux +uname=`uname` +if [[ "$uname" == "Linux" ]]; then + PRE_COMMAND="tests/ci_build/with_the_same_user" +else + PRE_COMMAND="" +fi + +# Print arguments. +echo "WORKSPACE: ${WORKSPACE}" +echo "CI_DOCKER_EXTRA_PARAMS: ${CI_DOCKER_EXTRA_PARAMS[@]}" +echo "COMMAND: ${COMMAND[@]}" +echo "CONTAINER_TYPE: ${CONTAINER_TYPE}" +echo "BUILD_TAG: ${BUILD_TAG}" +echo "NODE_NAME: ${NODE_NAME}" +echo "DOCKER CONTAINER NAME: ${DOCKER_IMG_NAME}" +echo "PRE_COMMAND: ${PRE_COMMAND}" +echo "" + + +# Build the docker container. 
+echo "Building container (${DOCKER_IMG_NAME})..." +docker build -t ${DOCKER_IMG_NAME} \ + -f "${DOCKERFILE_PATH}" "${DOCKER_CONTEXT_PATH}" + +# Check docker build status +if [[ $? != "0" ]]; then + echo "ERROR: docker build failed." + exit 1 +fi + + +# Run the command inside the container. +echo "Running '${COMMAND[@]}' inside ${DOCKER_IMG_NAME}..." + +# By default we cleanup - remove the container once it finish running (--rm) +# and share the PID namespace (--pid=host) so the process inside does not have +# pid 1 and SIGKILL is propagated to the process inside (jenkins can kill it). +${DOCKER_BINARY} run --rm --pid=host \ + -v "${WORKSPACE}":/workspace \ + -w /workspace \ + -e "CI_BUILD_HOME=${WORKSPACE}" \ + -e "CI_BUILD_USER=$(id -u -n)" \ + -e "CI_BUILD_UID=$(id -u)" \ + -e "CI_BUILD_GROUP=$(id -g -n)" \ + -e "CI_BUILD_GID=$(id -g)" \ + "${CI_DOCKER_EXTRA_PARAMS[@]}" \ + ${DOCKER_IMG_NAME} \ + ${PRE_COMMAND} \ + "${COMMAND[@]}" diff --git a/tests/ci_build/test_gpu.sh b/tests/ci_build/test_gpu.sh new file mode 100644 index 000000000..52124505f --- /dev/null +++ b/tests/ci_build/test_gpu.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash +set -e +mkdir -p build +cd build +cmake .. -DPLUGIN_UPDATER_GPU=ON +make +cd .. +cd python-package +python setup.py install --user +cd ../plugin/updater_gpu +python -m nose test/python diff --git a/tests/ci_build/with_the_same_user b/tests/ci_build/with_the_same_user new file mode 100644 index 000000000..922015602 --- /dev/null +++ b/tests/ci_build/with_the_same_user @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +# This script is a wrapper creating the same user inside container as the one +# running the ci_build.sh outside the container. It also sets the home directory +# for the user inside container to match the same absolute path as the workspace +# outside of container. Do not run this manually. It does not make sense. It is +# intended to be called by ci_build.sh only. + +set -e + +COMMAND=("$@") + +if ! 
touch /this_is_writable_file_system; then + echo "You can't write to your filesystem!" + echo "If you are in Docker you should check you do not have too many images" \ + "with too many files in them. Docker has some issue with it." + exit 1 +else + rm /this_is_writable_file_system +fi + +getent group "${CI_BUILD_GID}" || addgroup --gid "${CI_BUILD_GID}" "${CI_BUILD_GROUP}" +getent passwd "${CI_BUILD_UID}" || adduser --gid "${CI_BUILD_GID}" --uid "${CI_BUILD_UID}" \ + --gecos "${CI_BUILD_USER} (generated by with_the_same_user script)" \ + --disabled-password --home "${CI_BUILD_HOME}" --quiet "${CI_BUILD_USER}" +usermod -a -G sudo "${CI_BUILD_USER}" +echo "${CI_BUILD_USER} ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/90-nopasswd-sudo + +sudo -u "#${CI_BUILD_UID}" --preserve-env "LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" \ +"HOME=${CI_BUILD_HOME}" "${COMMAND[@]}"