From 4f26d98150866702704a4cc99529bafeccc19749 Mon Sep 17 00:00:00 2001 From: tqchen Date: Sat, 2 Jan 2016 02:52:13 -0800 Subject: [PATCH] [Update] remove rabit subtree, use submodule, move code --- {old_src/gbm => include/xgboost}/gbm.h | 0 old_src/gbm/gbm.cpp | 21 - old_src/tree/updater.cpp | 35 - .../gblinear-inl.hpp => src/gbm/gblinear.cc | 0 .../gbm/gbtree-inl.hpp => src/gbm/gbtree.cc | 0 subtree/README.md | 5 - subtree/rabit/.gitignore | 39 - subtree/rabit/.travis.yml | 51 - subtree/rabit/LICENSE | 28 - subtree/rabit/Makefile | 76 -- subtree/rabit/README.md | 39 - subtree/rabit/doc/.gitignore | 5 - subtree/rabit/doc/Doxyfile | 287 ---- subtree/rabit/doc/Makefile | 192 --- subtree/rabit/doc/conf.py | 184 --- subtree/rabit/doc/cpp_api.md | 9 - subtree/rabit/doc/guide.md | 413 ------ subtree/rabit/doc/index.md | 24 - subtree/rabit/doc/parameters.md | 21 - subtree/rabit/doc/python-requirements.txt | 4 - subtree/rabit/doc/python_api.md | 11 - subtree/rabit/doc/sphinx_util.py | 16 - subtree/rabit/guide/Makefile | 26 - subtree/rabit/guide/README | 1 - subtree/rabit/guide/basic.cc | 35 - subtree/rabit/guide/basic.py | 25 - subtree/rabit/guide/broadcast.cc | 16 - subtree/rabit/guide/broadcast.py | 22 - subtree/rabit/guide/lazy_allreduce.cc | 33 - subtree/rabit/guide/lazy_allreduce.py | 31 - subtree/rabit/include/README.md | 7 - subtree/rabit/include/dmlc/README.md | 4 - subtree/rabit/include/dmlc/io.h | 423 ------ subtree/rabit/include/rabit.h | 342 ----- subtree/rabit/include/rabit/engine.h | 260 ---- subtree/rabit/include/rabit/io.h | 106 -- subtree/rabit/include/rabit/rabit-inl.h | 328 ----- subtree/rabit/include/rabit/timer.h | 41 - subtree/rabit/include/rabit/utils.h | 191 --- subtree/rabit/include/rabit_serializable.h | 27 - subtree/rabit/lib/README.md | 15 - subtree/rabit/scripts/travis_runtest.sh | 8 - subtree/rabit/scripts/travis_script.sh | 22 - subtree/rabit/src/README.md | 6 - subtree/rabit/src/allreduce_base.cc | 892 ------------- subtree/rabit/src/allreduce_base.h | 527 -------- subtree/rabit/src/allreduce_mock.h | 178 --- subtree/rabit/src/allreduce_robust-inl.h | 169 --- subtree/rabit/src/allreduce_robust.cc | 1183 ----------------- subtree/rabit/src/allreduce_robust.h | 553 -------- subtree/rabit/src/engine.cc | 84 -- subtree/rabit/src/engine_base.cc | 15 - subtree/rabit/src/engine_empty.cc | 118 -- subtree/rabit/src/engine_mock.cc | 16 - subtree/rabit/src/engine_mpi.cc | 211 --- subtree/rabit/src/socket.h | 523 -------- subtree/rabit/test/.gitignore | 4 - subtree/rabit/test/Makefile | 41 - subtree/rabit/test/README.md | 18 - subtree/rabit/test/lazy_recover.cc | 126 -- subtree/rabit/test/local_recover.cc | 138 -- subtree/rabit/test/local_recover.py | 25 - subtree/rabit/test/model_recover.cc | 127 -- subtree/rabit/test/speed_runner.py | 34 - subtree/rabit/test/speed_test.cc | 100 -- subtree/rabit/test/test.mk | 29 - subtree/rabit/tracker/README.md | 12 - subtree/rabit/tracker/rabit_demo.py | 96 -- .../rabit/tracker/rabit_hadoop_streaming.py | 165 --- subtree/rabit/tracker/rabit_mpi.py | 43 - subtree/rabit/tracker/rabit_sge.py | 70 - subtree/rabit/tracker/rabit_tracker.py | 317 ----- subtree/rabit/tracker/rabit_yarn.py | 140 -- subtree/rabit/windows/.gitignore | 9 - subtree/rabit/windows/README.md | 12 - subtree/rabit/windows/basic/basic.vcxproj | 118 -- subtree/rabit/windows/rabit.sln | 50 - subtree/rabit/windows/rabit/rabit.vcxproj | 133 -- .../rabit_wrapper/rabit_wrapper.vcxproj | 121 -- subtree/rabit/wrapper/rabit.py | 327 ----- subtree/rabit/wrapper/rabit_wrapper.cc | 240 
---- subtree/rabit/wrapper/rabit_wrapper.h | 126 -- subtree/rabit/yarn/.gitignore | 4 - subtree/rabit/yarn/README.md | 5 - subtree/rabit/yarn/build.sh | 8 - subtree/rabit/yarn/run_hdfs_prog.py | 45 - .../hadoop/yarn/rabit/ApplicationMaster.java | 570 -------- .../org/apache/hadoop/yarn/rabit/Client.java | 269 ---- .../apache/hadoop/yarn/rabit/TaskRecord.java | 24 - 89 files changed, 11444 deletions(-) rename {old_src/gbm => include/xgboost}/gbm.h (100%) delete mode 100644 old_src/gbm/gbm.cpp delete mode 100644 old_src/tree/updater.cpp rename old_src/gbm/gblinear-inl.hpp => src/gbm/gblinear.cc (100%) rename old_src/gbm/gbtree-inl.hpp => src/gbm/gbtree.cc (100%) delete mode 100644 subtree/README.md delete mode 100644 subtree/rabit/.gitignore delete mode 100644 subtree/rabit/.travis.yml delete mode 100644 subtree/rabit/LICENSE delete mode 100644 subtree/rabit/Makefile delete mode 100644 subtree/rabit/README.md delete mode 100644 subtree/rabit/doc/.gitignore delete mode 100644 subtree/rabit/doc/Doxyfile delete mode 100644 subtree/rabit/doc/Makefile delete mode 100644 subtree/rabit/doc/conf.py delete mode 100644 subtree/rabit/doc/cpp_api.md delete mode 100644 subtree/rabit/doc/guide.md delete mode 100644 subtree/rabit/doc/index.md delete mode 100644 subtree/rabit/doc/parameters.md delete mode 100644 subtree/rabit/doc/python-requirements.txt delete mode 100644 subtree/rabit/doc/python_api.md delete mode 100644 subtree/rabit/doc/sphinx_util.py delete mode 100644 subtree/rabit/guide/Makefile delete mode 100644 subtree/rabit/guide/README delete mode 100644 subtree/rabit/guide/basic.cc delete mode 100755 subtree/rabit/guide/basic.py delete mode 100644 subtree/rabit/guide/broadcast.cc delete mode 100755 subtree/rabit/guide/broadcast.py delete mode 100644 subtree/rabit/guide/lazy_allreduce.cc delete mode 100755 subtree/rabit/guide/lazy_allreduce.py delete mode 100644 subtree/rabit/include/README.md delete mode 100644 subtree/rabit/include/dmlc/README.md delete mode 100644 subtree/rabit/include/dmlc/io.h delete mode 100644 subtree/rabit/include/rabit.h delete mode 100644 subtree/rabit/include/rabit/engine.h delete mode 100644 subtree/rabit/include/rabit/io.h delete mode 100644 subtree/rabit/include/rabit/rabit-inl.h delete mode 100644 subtree/rabit/include/rabit/timer.h delete mode 100644 subtree/rabit/include/rabit/utils.h delete mode 100644 subtree/rabit/include/rabit_serializable.h delete mode 100644 subtree/rabit/lib/README.md delete mode 100755 subtree/rabit/scripts/travis_runtest.sh delete mode 100755 subtree/rabit/scripts/travis_script.sh delete mode 100644 subtree/rabit/src/README.md delete mode 100644 subtree/rabit/src/allreduce_base.cc delete mode 100644 subtree/rabit/src/allreduce_base.h delete mode 100644 subtree/rabit/src/allreduce_mock.h delete mode 100644 subtree/rabit/src/allreduce_robust-inl.h delete mode 100644 subtree/rabit/src/allreduce_robust.cc delete mode 100644 subtree/rabit/src/allreduce_robust.h delete mode 100644 subtree/rabit/src/engine.cc delete mode 100644 subtree/rabit/src/engine_base.cc delete mode 100644 subtree/rabit/src/engine_empty.cc delete mode 100644 subtree/rabit/src/engine_mock.cc delete mode 100644 subtree/rabit/src/engine_mpi.cc delete mode 100644 subtree/rabit/src/socket.h delete mode 100644 subtree/rabit/test/.gitignore delete mode 100644 subtree/rabit/test/Makefile delete mode 100644 subtree/rabit/test/README.md delete mode 100644 subtree/rabit/test/lazy_recover.cc delete mode 100644 subtree/rabit/test/local_recover.cc delete mode 100755 
subtree/rabit/test/local_recover.py delete mode 100644 subtree/rabit/test/model_recover.cc delete mode 100644 subtree/rabit/test/speed_runner.py delete mode 100644 subtree/rabit/test/speed_test.cc delete mode 100644 subtree/rabit/test/test.mk delete mode 100644 subtree/rabit/tracker/README.md delete mode 100755 subtree/rabit/tracker/rabit_demo.py delete mode 100755 subtree/rabit/tracker/rabit_hadoop_streaming.py delete mode 100755 subtree/rabit/tracker/rabit_mpi.py delete mode 100755 subtree/rabit/tracker/rabit_sge.py delete mode 100644 subtree/rabit/tracker/rabit_tracker.py delete mode 100755 subtree/rabit/tracker/rabit_yarn.py delete mode 100644 subtree/rabit/windows/.gitignore delete mode 100644 subtree/rabit/windows/README.md delete mode 100644 subtree/rabit/windows/basic/basic.vcxproj delete mode 100644 subtree/rabit/windows/rabit.sln delete mode 100644 subtree/rabit/windows/rabit/rabit.vcxproj delete mode 100644 subtree/rabit/windows/rabit_wrapper/rabit_wrapper.vcxproj delete mode 100644 subtree/rabit/wrapper/rabit.py delete mode 100644 subtree/rabit/wrapper/rabit_wrapper.cc delete mode 100644 subtree/rabit/wrapper/rabit_wrapper.h delete mode 100644 subtree/rabit/yarn/.gitignore delete mode 100644 subtree/rabit/yarn/README.md delete mode 100755 subtree/rabit/yarn/build.sh delete mode 100755 subtree/rabit/yarn/run_hdfs_prog.py delete mode 100644 subtree/rabit/yarn/src/org/apache/hadoop/yarn/rabit/ApplicationMaster.java delete mode 100644 subtree/rabit/yarn/src/org/apache/hadoop/yarn/rabit/Client.java delete mode 100644 subtree/rabit/yarn/src/org/apache/hadoop/yarn/rabit/TaskRecord.java diff --git a/old_src/gbm/gbm.h b/include/xgboost/gbm.h similarity index 100% rename from old_src/gbm/gbm.h rename to include/xgboost/gbm.h diff --git a/old_src/gbm/gbm.cpp b/old_src/gbm/gbm.cpp deleted file mode 100644 index 13ad44c57..000000000 --- a/old_src/gbm/gbm.cpp +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright by Contributors -#define _CRT_SECURE_NO_WARNINGS -#define _CRT_SECURE_NO_DEPRECATE -#define NOMINMAX -#include -#include "./gbm.h" -#include "./gbtree-inl.hpp" -#include "./gblinear-inl.hpp" - -namespace xgboost { -namespace gbm { -IGradBooster* CreateGradBooster(const char *name) { - using namespace std; - if (!strcmp("gbtree", name)) return new GBTree(); - if (!strcmp("gblinear", name)) return new GBLinear(); - utils::Error("unknown booster type: %s", name); - return NULL; -} -} // namespace gbm -} // namespace xgboost - diff --git a/old_src/tree/updater.cpp b/old_src/tree/updater.cpp deleted file mode 100644 index eb2e06925..000000000 --- a/old_src/tree/updater.cpp +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2014 by Contributors -#define _CRT_SECURE_NO_WARNINGS -#define _CRT_SECURE_NO_DEPRECATE -#define NOMINMAX -#include -#include "./updater.h" -#include "./updater_prune-inl.hpp" -#include "./updater_refresh-inl.hpp" -#include "./updater_colmaker-inl.hpp" -#ifndef XGBOOST_STRICT_CXX98_ -#include "./updater_sync-inl.hpp" -#include "./updater_distcol-inl.hpp" -#include "./updater_histmaker-inl.hpp" -#include "./updater_skmaker-inl.hpp" -#endif - -namespace xgboost { -namespace tree { -IUpdater* CreateUpdater(const char *name) { - using namespace std; - if (!strcmp(name, "prune")) return new TreePruner(); - if (!strcmp(name, "refresh")) return new TreeRefresher(); - if (!strcmp(name, "grow_colmaker")) return new ColMaker(); -#ifndef XGBOOST_STRICT_CXX98_ - if (!strcmp(name, "sync")) return new TreeSyncher(); - if (!strcmp(name, "grow_histmaker")) return new CQHistMaker(); - if 
(!strcmp(name, "grow_skmaker")) return new SketchMaker(); - if (!strcmp(name, "distcol")) return new DistColMaker(); -#endif - utils::Error("unknown updater:%s", name); - return NULL; -} - -} // namespace tree -} // namespace xgboost diff --git a/old_src/gbm/gblinear-inl.hpp b/src/gbm/gblinear.cc similarity index 100% rename from old_src/gbm/gblinear-inl.hpp rename to src/gbm/gblinear.cc diff --git a/old_src/gbm/gbtree-inl.hpp b/src/gbm/gbtree.cc similarity index 100% rename from old_src/gbm/gbtree-inl.hpp rename to src/gbm/gbtree.cc diff --git a/subtree/README.md b/subtree/README.md deleted file mode 100644 index 9c3df6609..000000000 --- a/subtree/README.md +++ /dev/null @@ -1,5 +0,0 @@ -This folder contains git subtree projects of xgboost. -Do not make changes to the subtree projects in xgboost, -push changes to the original project instead and changes will be pulled back to this folder - -* rabit: https://github.com/tqchen/rabit diff --git a/subtree/rabit/.gitignore b/subtree/rabit/.gitignore deleted file mode 100644 index 121caaafe..000000000 --- a/subtree/rabit/.gitignore +++ /dev/null @@ -1,39 +0,0 @@ -# Compiled Object files -*.slo -*.lo -*.o -*.obj - -# Precompiled Headers -*.gch -*.pch -*.lnk -# Compiled Dynamic libraries -*.so -*.dylib -*.dll - -# Fortran module files -*.mod - -# Compiled Static libraries -*.lai -*.la -*.a -*.lib - -# Executables -*.exe -*.out -*.app -*~ -*.pyc -*.mpi -*.exe -*.txt -*tmp* -*.rabit -*.mock -dmlc-core -recommonmark -recom diff --git a/subtree/rabit/.travis.yml b/subtree/rabit/.travis.yml deleted file mode 100644 index 339f5c692..000000000 --- a/subtree/rabit/.travis.yml +++ /dev/null @@ -1,51 +0,0 @@ -# disable sudo to use container based build -sudo: false - -# Use Build Matrix to do lint and build seperately -env: - matrix: - - TASK=lint LINT_LANG=cpp - - TASK=lint LINT_LANG=python - - TASK=doc - - TASK=build CXX=g++ - - TASK=test CXX=g++ - -# dependent apt packages -addons: - apt: - packages: - - doxygen - - libopenmpi-dev - - wget - - git - - libcurl4-openssl-dev - - unzip - - python-numpy - -before_install: - - git clone https://github.com/dmlc/dmlc-core - - export TRAVIS=dmlc-core/scripts/travis/ - - source ${TRAVIS}/travis_setup_env.sh - -install: - - pip install cpplint pylint --user `whoami` - -script: scripts/travis_script.sh - - -before_cache: - - ${TRAVIS}/travis_before_cache.sh - - -cache: - directories: - - ${HOME}/.cache/usr - - -notifications: -# Emails are sent to the committer's git-configured email address by default, - email: - on_success: change - on_failure: always - - diff --git a/subtree/rabit/LICENSE b/subtree/rabit/LICENSE deleted file mode 100644 index 2485f4eaa..000000000 --- a/subtree/rabit/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2014 by Contributors -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of rabit nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/subtree/rabit/Makefile b/subtree/rabit/Makefile deleted file mode 100644 index 8c9d9f403..000000000 --- a/subtree/rabit/Makefile +++ /dev/null @@ -1,76 +0,0 @@ -ifndef CXX -export CXX = g++ -endif -export MPICXX = mpicxx -export LDFLAGS= -Llib -lrt -export WARNFLAGS= -Wall -Wextra -Wno-unused-parameter -Wno-unknown-pragmas -std=c++0x -export CFLAGS = -O3 -msse2 $(WARNFLAGS) - -ifndef WITH_FPIC - WITH_FPIC = 1 -endif -ifeq ($(WITH_FPIC), 1) - CFLAGS += -fPIC -endif - -ifndef LINT_LANG - LINT_LANG="all" -endif - -# build path -BPATH=. -# objectives that makes up rabit library -MPIOBJ= $(BPATH)/engine_mpi.o -OBJ= $(BPATH)/allreduce_base.o $(BPATH)/allreduce_robust.o $(BPATH)/engine.o $(BPATH)/engine_empty.o $(BPATH)/engine_mock.o\ - $(BPATH)/rabit_wrapper.o $(BPATH)/engine_base.o -SLIB= wrapper/librabit_wrapper.so wrapper/librabit_wrapper_mock.so wrapper/librabit_wrapper_mpi.so -ALIB= lib/librabit.a lib/librabit_mpi.a lib/librabit_empty.a lib/librabit_mock.a lib/librabit_base.a -HEADERS=src/*.h include/*.h include/rabit/*.h -DMLC=dmlc-core - -.PHONY: clean all install mpi python lint doc doxygen - -all: lib/librabit.a lib/librabit_mock.a wrapper/librabit_wrapper.so wrapper/librabit_wrapper_mock.so lib/librabit_base.a -mpi: lib/librabit_mpi.a wrapper/librabit_wrapper_mpi.so -python: wrapper/librabit_wrapper.so wrapper/librabit_wrapper_mock.so - -$(BPATH)/allreduce_base.o: src/allreduce_base.cc $(HEADERS) -$(BPATH)/engine.o: src/engine.cc $(HEADERS) -$(BPATH)/allreduce_robust.o: src/allreduce_robust.cc $(HEADERS) -$(BPATH)/engine_mpi.o: src/engine_mpi.cc $(HEADERS) -$(BPATH)/engine_empty.o: src/engine_empty.cc $(HEADERS) -$(BPATH)/engine_mock.o: src/engine_mock.cc $(HEADERS) -$(BPATH)/engine_base.o: src/engine_base.cc $(HEADERS) - -lib/librabit.a: $(BPATH)/allreduce_base.o $(BPATH)/allreduce_robust.o $(BPATH)/engine.o -lib/librabit_base.a: $(BPATH)/allreduce_base.o $(BPATH)/engine_base.o -lib/librabit_mock.a: $(BPATH)/allreduce_base.o $(BPATH)/allreduce_robust.o $(BPATH)/engine_mock.o -lib/librabit_empty.a: $(BPATH)/engine_empty.o -lib/librabit_mpi.a: $(MPIOBJ) -# wrapper code -$(BPATH)/rabit_wrapper.o: wrapper/rabit_wrapper.cc -wrapper/librabit_wrapper.so: $(BPATH)/rabit_wrapper.o lib/librabit.a -wrapper/librabit_wrapper_mock.so: $(BPATH)/rabit_wrapper.o lib/librabit_mock.a -wrapper/librabit_wrapper_mpi.so: $(BPATH)/rabit_wrapper.o lib/librabit_mpi.a - -$(OBJ) : - $(CXX) -c $(CFLAGS) -o $@ $(firstword $(filter %.cpp %.c %.cc, $^) ) - -$(MPIOBJ) : - $(MPICXX) -c $(CFLAGS) -o $@ $(firstword $(filter %.cpp %.c %.cc, $^) ) - -$(ALIB): - ar cr $@ $+ - -$(SLIB) : - $(CXX) $(CFLAGS) -shared -o $@ $(filter %.cpp %.o %.c %.cc %.a, $^) $(LDFLAGS) - -lint: - $(DMLC)/scripts/lint.py rabit $(LINT_LANG) src include wrapper - -doc doxygen: 
-	cd include; doxygen ../doc/Doxyfile; cd -
-
-clean:
-	$(RM) $(OBJ) $(MPIOBJ) $(ALIB) $(MPIALIB) $(SLIB) *~ src/*~ include/*~ include/*/*~ wrapper/*~
-
diff --git a/subtree/rabit/README.md b/subtree/rabit/README.md
deleted file mode 100644
index 9302a2199..000000000
--- a/subtree/rabit/README.md
+++ /dev/null
@@ -1,39 +0,0 @@
-## rabit: Reliable Allreduce and Broadcast Interface
-[![Build Status](https://travis-ci.org/dmlc/rabit.svg?branch=master)](https://travis-ci.org/dmlc/rabit)
-[![Documentation Status](https://readthedocs.org/projects/rabit/badge/?version=latest)](http://rabit.readthedocs.org/)
-
-rabit is a lightweight library that provides a fault-tolerant interface for Allreduce and Broadcast. It is designed to support easy implementations of distributed machine learning programs, many of which fall naturally under the Allreduce abstraction. The goal of rabit is to support ***portable***, ***scalable*** and ***reliable*** distributed machine learning programs.
-
-* [Tutorial](guide)
-* [API Documentation](http://homes.cs.washington.edu/~tqchen/rabit/doc)
-* You can also directly read the [interface header](include/rabit.h)
-* [Distributed Machine Learning Tools](https://github.com/dmlc/wormhole)
-  - Rabit is one of the backbone libraries supporting the wormhole machine learning tools
-
-Features
-====
-All these features come from facts about small rabbits :)
-* Portable: rabit is lightweight and runs everywhere
-  - Rabit is a library instead of a framework; a program only needs to link the library to run
-  - Rabit only relies on a mechanism to start programs, which is provided by most frameworks
-  - You can run rabit programs on many platforms, including Yarn (Hadoop) and MPI, using the same code
-* Scalable and Flexible: rabit runs fast
-  * Rabit programs use Allreduce to communicate, and do not suffer the between-iteration cost of the MapReduce abstraction.
-  - Programs can call rabit functions in any order, as opposed to frameworks where callbacks are offered and called by the framework, i.e. the inversion of control principle.
-  - Programs persist over all the iterations, unless they fail and recover.
-* Reliable: rabit digs burrows to avoid disasters
-  - Rabit programs can recover the model and results using synchronous function calls.
-
-Use Rabit
-====
-* Typing make in the root folder compiles the rabit library into the lib folder
-* Add lib to the library path and include to the include path of the compiler
-* Languages: You can use rabit in C++ and python
-  - It is also possible to port the library to other languages
-
-Contributing
-====
-Rabit is an open-source library; contributions are welcome, including:
-* The rabit core library.
-* Customized tracker scripts for new platforms and interfaces for new languages.
-* Tutorials and examples about the library.
diff --git a/subtree/rabit/doc/.gitignore b/subtree/rabit/doc/.gitignore deleted file mode 100644 index 95f88be43..000000000 --- a/subtree/rabit/doc/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -html -latex -*.sh -_* -doxygen diff --git a/subtree/rabit/doc/Doxyfile b/subtree/rabit/doc/Doxyfile deleted file mode 100644 index 2c9c64ea7..000000000 --- a/subtree/rabit/doc/Doxyfile +++ /dev/null @@ -1,287 +0,0 @@ -# Doxyfile 1.7.6.1 - -#--------------------------------------------------------------------------- -# Project related configuration options -#--------------------------------------------------------------------------- -DOXYFILE_ENCODING = UTF-8 -PROJECT_NAME = "rabit" -PROJECT_NUMBER = -PROJECT_BRIEF = -PROJECT_LOGO = -OUTPUT_DIRECTORY = ../doc/doxygen -CREATE_SUBDIRS = NO -OUTPUT_LANGUAGE = English -BRIEF_MEMBER_DESC = YES -REPEAT_BRIEF = YES -ABBREVIATE_BRIEF = -ALWAYS_DETAILED_SEC = NO -INLINE_INHERITED_MEMB = NO -FULL_PATH_NAMES = YES -STRIP_FROM_PATH = -STRIP_FROM_INC_PATH = -SHORT_NAMES = NO -JAVADOC_AUTOBRIEF = NO -QT_AUTOBRIEF = NO -MULTILINE_CPP_IS_BRIEF = NO -INHERIT_DOCS = YES -SEPARATE_MEMBER_PAGES = NO -TAB_SIZE = 8 -ALIASES = -TCL_SUBST = -OPTIMIZE_OUTPUT_FOR_C = YES -OPTIMIZE_OUTPUT_JAVA = NO -OPTIMIZE_FOR_FORTRAN = NO -OPTIMIZE_OUTPUT_VHDL = NO -EXTENSION_MAPPING = -BUILTIN_STL_SUPPORT = NO -CPP_CLI_SUPPORT = NO -SIP_SUPPORT = NO -IDL_PROPERTY_SUPPORT = YES -DISTRIBUTE_GROUP_DOC = NO -SUBGROUPING = YES -INLINE_GROUPED_CLASSES = NO -INLINE_SIMPLE_STRUCTS = NO -TYPEDEF_HIDES_STRUCT = NO -SYMBOL_CACHE_SIZE = 0 -LOOKUP_CACHE_SIZE = 0 -#--------------------------------------------------------------------------- -# Build related configuration options -#--------------------------------------------------------------------------- -EXTRACT_ALL = NO -EXTRACT_PRIVATE = NO -EXTRACT_STATIC = NO -EXTRACT_LOCAL_CLASSES = YES -EXTRACT_LOCAL_METHODS = NO -EXTRACT_ANON_NSPACES = NO -HIDE_UNDOC_MEMBERS = NO -HIDE_UNDOC_CLASSES = YES -HIDE_FRIEND_COMPOUNDS = NO -HIDE_IN_BODY_DOCS = NO -INTERNAL_DOCS = NO -CASE_SENSE_NAMES = YES -HIDE_SCOPE_NAMES = NO -SHOW_INCLUDE_FILES = YES -FORCE_LOCAL_INCLUDES = NO -INLINE_INFO = YES -SORT_MEMBER_DOCS = YES -SORT_BRIEF_DOCS = NO -SORT_MEMBERS_CTORS_1ST = NO -SORT_GROUP_NAMES = NO -SORT_BY_SCOPE_NAME = NO -STRICT_PROTO_MATCHING = NO -GENERATE_TODOLIST = YES -GENERATE_TESTLIST = YES -GENERATE_BUGLIST = YES -GENERATE_DEPRECATEDLIST= YES -ENABLED_SECTIONS = -MAX_INITIALIZER_LINES = 30 -SHOW_USED_FILES = YES -SHOW_DIRECTORIES = NO -SHOW_FILES = YES -SHOW_NAMESPACES = YES -FILE_VERSION_FILTER = -LAYOUT_FILE = -CITE_BIB_FILES = -#--------------------------------------------------------------------------- -# configuration options related to warning and progress messages -#--------------------------------------------------------------------------- -QUIET = NO -WARNINGS = YES -WARN_IF_UNDOCUMENTED = YES -WARN_IF_DOC_ERROR = YES -WARN_NO_PARAMDOC = YES -WARN_FORMAT = "$file:$line: $text" -WARN_LOGFILE = -#--------------------------------------------------------------------------- -# configuration options related to the input files -#--------------------------------------------------------------------------- -INPUT = . 
dmlc -INPUT_ENCODING = UTF-8 -FILE_PATTERNS = -RECURSIVE = NO -EXCLUDE = -EXCLUDE_SYMLINKS = NO -EXCLUDE_PATTERNS = *-inl.hpp -EXCLUDE_SYMBOLS = -EXAMPLE_PATH = -EXAMPLE_PATTERNS = -EXAMPLE_RECURSIVE = NO -IMAGE_PATH = -INPUT_FILTER = -FILTER_PATTERNS = -FILTER_SOURCE_FILES = NO -FILTER_SOURCE_PATTERNS = -#--------------------------------------------------------------------------- -# configuration options related to source browsing -#--------------------------------------------------------------------------- -SOURCE_BROWSER = NO -INLINE_SOURCES = NO -STRIP_CODE_COMMENTS = YES -REFERENCED_BY_RELATION = NO -REFERENCES_RELATION = NO -REFERENCES_LINK_SOURCE = YES -USE_HTAGS = NO -VERBATIM_HEADERS = YES -#--------------------------------------------------------------------------- -# configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- -ALPHABETICAL_INDEX = YES -COLS_IN_ALPHA_INDEX = 5 -IGNORE_PREFIX = -#--------------------------------------------------------------------------- -# configuration options related to the HTML output -#--------------------------------------------------------------------------- -GENERATE_HTML = YES -HTML_OUTPUT = html -HTML_FILE_EXTENSION = .html -HTML_HEADER = -HTML_FOOTER = -HTML_STYLESHEET = -HTML_EXTRA_FILES = -HTML_COLORSTYLE_HUE = 220 -HTML_COLORSTYLE_SAT = 100 -HTML_COLORSTYLE_GAMMA = 80 -HTML_TIMESTAMP = YES -HTML_ALIGN_MEMBERS = YES -HTML_DYNAMIC_SECTIONS = NO -GENERATE_DOCSET = NO -DOCSET_FEEDNAME = "Doxygen generated docs" -DOCSET_BUNDLE_ID = org.doxygen.Project -DOCSET_PUBLISHER_ID = org.doxygen.Publisher -DOCSET_PUBLISHER_NAME = Publisher -GENERATE_HTMLHELP = NO -CHM_FILE = -HHC_LOCATION = -GENERATE_CHI = NO -CHM_INDEX_ENCODING = -BINARY_TOC = NO -TOC_EXPAND = NO -GENERATE_QHP = NO -QCH_FILE = -QHP_NAMESPACE = org.doxygen.Project -QHP_VIRTUAL_FOLDER = doc -QHP_CUST_FILTER_NAME = -QHP_CUST_FILTER_ATTRS = -QHP_SECT_FILTER_ATTRS = -QHG_LOCATION = -GENERATE_ECLIPSEHELP = NO -ECLIPSE_DOC_ID = org.doxygen.Project -DISABLE_INDEX = NO -GENERATE_TREEVIEW = NO -ENUM_VALUES_PER_LINE = 4 -USE_INLINE_TREES = NO -TREEVIEW_WIDTH = 250 -EXT_LINKS_IN_WINDOW = NO -FORMULA_FONTSIZE = 10 -FORMULA_TRANSPARENT = YES -USE_MATHJAX = NO -MATHJAX_RELPATH = http://www.mathjax.org/mathjax -MATHJAX_EXTENSIONS = -SEARCHENGINE = YES -SERVER_BASED_SEARCH = NO -#--------------------------------------------------------------------------- -# configuration options related to the LaTeX output -#--------------------------------------------------------------------------- -GENERATE_LATEX = YES -LATEX_OUTPUT = latex -LATEX_CMD_NAME = latex -MAKEINDEX_CMD_NAME = makeindex -COMPACT_LATEX = NO -PAPER_TYPE = a4 -EXTRA_PACKAGES = -LATEX_HEADER = -LATEX_FOOTER = -PDF_HYPERLINKS = YES -USE_PDFLATEX = YES -LATEX_BATCHMODE = NO -LATEX_HIDE_INDICES = NO -LATEX_SOURCE_CODE = NO -LATEX_BIB_STYLE = plain -#--------------------------------------------------------------------------- -# configuration options related to the RTF output -#--------------------------------------------------------------------------- -GENERATE_RTF = NO -RTF_OUTPUT = rtf -COMPACT_RTF = NO -RTF_HYPERLINKS = NO -RTF_STYLESHEET_FILE = -RTF_EXTENSIONS_FILE = -#--------------------------------------------------------------------------- -# configuration options related to the man page output -#--------------------------------------------------------------------------- -GENERATE_MAN = NO -MAN_OUTPUT = man -MAN_EXTENSION = .3 -MAN_LINKS = NO 
-#--------------------------------------------------------------------------- -# configuration options related to the XML output -#--------------------------------------------------------------------------- -GENERATE_XML = YES -XML_OUTPUT = xml -XML_SCHEMA = -XML_DTD = -XML_PROGRAMLISTING = YES -#--------------------------------------------------------------------------- -# configuration options for the AutoGen Definitions output -#--------------------------------------------------------------------------- -GENERATE_AUTOGEN_DEF = NO -#--------------------------------------------------------------------------- -# configuration options related to the Perl module output -#--------------------------------------------------------------------------- -GENERATE_PERLMOD = NO -PERLMOD_LATEX = NO -PERLMOD_PRETTY = YES -PERLMOD_MAKEVAR_PREFIX = -#--------------------------------------------------------------------------- -# Configuration options related to the preprocessor -#--------------------------------------------------------------------------- -ENABLE_PREPROCESSING = NO -MACRO_EXPANSION = NO -EXPAND_ONLY_PREDEF = NO -SEARCH_INCLUDES = YES -INCLUDE_PATH = -INCLUDE_FILE_PATTERNS = -PREDEFINED = -EXPAND_AS_DEFINED = -SKIP_FUNCTION_MACROS = YES -#--------------------------------------------------------------------------- -# Configuration::additions related to external references -#--------------------------------------------------------------------------- -TAGFILES = -GENERATE_TAGFILE = -ALLEXTERNALS = NO -EXTERNAL_GROUPS = YES -PERL_PATH = /usr/bin/perl -#--------------------------------------------------------------------------- -# Configuration options related to the dot tool -#--------------------------------------------------------------------------- -CLASS_DIAGRAMS = YES -MSCGEN_PATH = -HIDE_UNDOC_RELATIONS = YES -HAVE_DOT = NO -DOT_NUM_THREADS = 0 -DOT_FONTNAME = Helvetica -DOT_FONTSIZE = 10 -DOT_FONTPATH = -CLASS_GRAPH = YES -COLLABORATION_GRAPH = YES -GROUP_GRAPHS = YES -UML_LOOK = NO -TEMPLATE_RELATIONS = NO -INCLUDE_GRAPH = YES -INCLUDED_BY_GRAPH = YES -CALL_GRAPH = NO -CALLER_GRAPH = NO -GRAPHICAL_HIERARCHY = YES -DIRECTORY_GRAPH = YES -DOT_IMAGE_FORMAT = png -INTERACTIVE_SVG = NO -DOT_PATH = -DOTFILE_DIRS = -MSCFILE_DIRS = -DOT_GRAPH_MAX_NODES = 50 -MAX_DOT_GRAPH_DEPTH = 0 -DOT_TRANSPARENT = NO -DOT_MULTI_TARGETS = YES -GENERATE_LEGEND = YES -DOT_CLEANUP = YES diff --git a/subtree/rabit/doc/Makefile b/subtree/rabit/doc/Makefile deleted file mode 100644 index 40bba2a28..000000000 --- a/subtree/rabit/doc/Makefile +++ /dev/null @@ -1,192 +0,0 @@ -# Makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -PAPER = -BUILDDIR = _build - -# User-friendly check for sphinx-build -ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) -$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) -endif - -# Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . -# the i18n builder cannot share the environment and doctrees with the others -I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
- -.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext - -help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " applehelp to make an Apple Help Book" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " texinfo to make Texinfo files" - @echo " info to make Texinfo files and run them through makeinfo" - @echo " gettext to make PO message catalogs" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " xml to make Docutils-native XML files" - @echo " pseudoxml to make pseudoxml-XML files for display purposes" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - @echo " coverage to run coverage check of the documentation (if enabled)" - -clean: - rm -rf $(BUILDDIR)/* - -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -singlehtml: - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/rabit.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/rabit.qhc" - -applehelp: - $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp - @echo - @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." - @echo "N.B. You won't be able to view it unless you put it in" \ - "~/Library/Documentation/Help or install it in your application" \ - "bundle." - -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." 
- @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/rabit" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/rabit" - @echo "# devhelp" - -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." - -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." - -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -latexpdfja: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through platex and dvipdfmx..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -texinfo: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo - @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." - @echo "Run \`make' in that directory to run these through makeinfo" \ - "(use \`make info' here to do that automatically)." - -info: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo "Running Texinfo files through makeinfo..." - make -C $(BUILDDIR)/texinfo info - @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." - -gettext: - $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale - @echo - @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." - -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." - -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." - -coverage: - $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage - @echo "Testing of coverage in the sources finished, look at the " \ - "results in $(BUILDDIR)/coverage/python.txt." - -xml: - $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml - @echo - @echo "Build finished. The XML files are in $(BUILDDIR)/xml." - -pseudoxml: - $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml - @echo - @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." diff --git a/subtree/rabit/doc/conf.py b/subtree/rabit/doc/conf.py deleted file mode 100644 index ef89de489..000000000 --- a/subtree/rabit/doc/conf.py +++ /dev/null @@ -1,184 +0,0 @@ -# -*- coding: utf-8 -*- -# -# documentation build configuration file, created by -# sphinx-quickstart on Thu Jul 23 19:40:08 2015. -# -# This file is execfile()d with the current directory set to its -# containing dir. 
-# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. -import sys -import os, subprocess -import shlex -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) -libpath = os.path.join(curr_path, '../wrapper/') -sys.path.insert(0, os.path.join(curr_path, '../wrapper/')) -sys.path.insert(0, curr_path) -from sphinx_util import MarkdownParser, AutoStructify - -# -- General configuration ------------------------------------------------ - -# General information about the project. -project = u'rabit' -copyright = u'2015, rabit developers' -author = u'rabit developers' -github_doc_root = 'https://github.com/dmlc/rabit/tree/master/doc/' - -# add markdown parser -MarkdownParser.github_doc_root = github_doc_root -source_parsers = { - '.md': MarkdownParser, -} -# Version information. -import rabit - -version = rabit.__version__ -release = rabit.__version__ - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones -extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.napoleon', - 'sphinx.ext.mathjax', - 'breathe', -] - -# Use breathe to include doxygen documents -breathe_projects = {'rabit' : 'doxygen/xml/'} -breathe_default_project = 'rabit' - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# source_suffix = ['.rst', '.md'] -source_suffix = ['.rst', '.md'] - -# The encoding of source files. -#source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ['_build'] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. 
-#keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = False - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# html_theme = 'alabaster' - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Output file base name for HTML help builder. -htmlhelp_basename = project + 'doc' - -# -- Options for LaTeX output --------------------------------------------- -latex_elements = { -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, 'rabit.tex', project, - author, 'manual'), -] - -# hook for doxygen -def run_doxygen(folder): - """Run the doxygen make command in the designated folder.""" - try: - retcode = subprocess.call("cd %s; make doxygen" % folder, shell=True) - if retcode < 0: - sys.stderr.write("doxygen terminated by signal %s" % (-retcode)) - except OSError as e: - sys.stderr.write("doxygen execution failed: %s" % e) - - -def run_build_lib(folder): - """Run the doxygen make command in the designated folder.""" - try: - retcode = subprocess.call("cd %s; make" % folder, shell=True) - retcode = subprocess.call("rm -rf _build/html/doxygen", shell=True) - retcode = subprocess.call("mkdir _build", shell=True) - retcode = subprocess.call("mkdir _build/html", shell=True) - retcode = subprocess.call("cp -rf doxygen/html _build/html/doxygen", shell=True) - if retcode < 0: - sys.stderr.write("build terminated by signal %s" % (-retcode)) - except OSError as e: - sys.stderr.write("build execution failed: %s" % e) - - -def generate_doxygen_xml(app): - """Run the doxygen make commands if we're on the ReadTheDocs server""" - read_the_docs_build = os.environ.get('READTHEDOCS', None) == 'True' - if read_the_docs_build: - run_doxygen('..') - sys.stderr.write('Check if shared lib exists\n') - run_build_lib('..') - sys.stderr.write('The wrapper path: %s\n' % str(os.listdir('../wrapper'))) - rabit._loadlib() - - -def setup(app): - # Add hook for building doxygen xml when needed - app.connect("builder-inited", generate_doxygen_xml) - app.add_config_value('recommonmark_config', { - 'url_resolver': lambda url: github_doc_root + url, - }, True) - app.add_transform(AutoStructify) diff --git a/subtree/rabit/doc/cpp_api.md b/subtree/rabit/doc/cpp_api.md deleted file mode 100644 index c6184aa08..000000000 --- a/subtree/rabit/doc/cpp_api.md +++ /dev/null @@ -1,9 +0,0 @@ -C++ Library API of Rabit -======================== -This page contains document of Library API of rabit. - -```eval_rst -.. toctree:: - -.. doxygennamespace:: rabit -``` diff --git a/subtree/rabit/doc/guide.md b/subtree/rabit/doc/guide.md deleted file mode 100644 index e2bfa5ce8..000000000 --- a/subtree/rabit/doc/guide.md +++ /dev/null @@ -1,413 +0,0 @@ -Tutorial -======== -This is rabit's tutorial, a ***Reliable Allreduce and Broadcast Interface***. -All the example codes are in the [guide](https://github.com/dmlc/rabit/blob/master/guide/) folder of the project. -To run the examples locally, you will need to build them with ```make```. 
-
-**List of Topics**
-* [What is Allreduce](#what-is-allreduce)
-* [Common Use Case](#common-use-case)
-* [Use Rabit API](#use-rabit-api)
-  - [Structure of a Rabit Program](#structure-of-a-rabit-program)
-  - [Allreduce and Lazy Preparation](#allreduce-and-lazy-preparation)
-  - [Checkpoint and LazyCheckpoint](#checkpoint-and-lazycheckpoint)
-* [Compile Programs with Rabit](#compile-programs-with-rabit)
-* [Running Rabit Jobs](#running-rabit-jobs)
-  - [Running Rabit on Hadoop](#running-rabit-on-hadoop)
-  - [Running Rabit using MPI](#running-rabit-using-mpi)
-  - [Customize Tracker Script](#customize-tracker-script)
-* [Fault Tolerance](#fault-tolerance)
-
-What is Allreduce
------------------
-The main methods provided by rabit are Allreduce and Broadcast. Allreduce performs reduction across different computation nodes,
-and returns the result to every node. To understand the behavior of the function, consider the following example in [basic.cc](../guide/basic.cc) (there is a python example right after this if you are more familiar with python).
-```c++
-#include <rabit.h>
-using namespace rabit;
-const int N = 3;
-int main(int argc, char *argv[]) {
-  int a[N];
-  rabit::Init(argc, argv);
-  for (int i = 0; i < N; ++i) {
-    a[i] = rabit::GetRank() + i;
-  }
-  printf("@node[%d] before-allreduce: a={%d, %d, %d}\n",
-         rabit::GetRank(), a[0], a[1], a[2]);
-  // allreduce takes the max of each element across all processes
-  Allreduce<op::Max>(&a[0], N);
-  printf("@node[%d] after-allreduce-max: a={%d, %d, %d}\n",
-         rabit::GetRank(), a[0], a[1], a[2]);
-  // second allreduce that sums everything up
-  Allreduce<op::Sum>(&a[0], N);
-  printf("@node[%d] after-allreduce-sum: a={%d, %d, %d}\n",
-         rabit::GetRank(), a[0], a[1], a[2]);
-  rabit::Finalize();
-  return 0;
-}
-```
-You can run the example using the rabit_demo.py script. The following command
-starts the rabit program with two worker processes.
-```bash
-../tracker/rabit_demo.py -n 2 basic.rabit
-```
-This will start two processes, one process with rank 0 and the other with rank 1; both processes run the same code.
-The ```rabit::GetRank()``` function returns the rank of the current process.
-
-Before the call to Allreduce, process 0 contains the array ```a = {0, 1, 2}```, while process 1 has the array
-```a = {1, 2, 3}```. After the call to Allreduce, the array contents in all processes are replaced by the
-reduction result (in this case, the maximum value in each position across all the processes). So, after the
-Allreduce call, the result will become ```a = {1, 2, 3}```.
-Rabit provides different reduction operators; for example, if you change ```op::Max``` to ```op::Sum```,
-the reduction operation will be a summation, and the result will become ```a = {1, 3, 5}```.
-You can also run the example with a different number of processes by setting -n to different values.
-
-If you are more familiar with python, you can also use rabit in python.
The same example as before can be found in [basic.py](../guide/basic.py):
-
-```python
-import numpy as np
-import rabit
-
-rabit.init()
-n = 3
-rank = rabit.get_rank()
-a = np.zeros(n)
-for i in xrange(n):
-    a[i] = rank + i
-
-print '@node[%d] before-allreduce: a=%s' % (rank, str(a))
-a = rabit.allreduce(a, rabit.MAX)
-print '@node[%d] after-allreduce-max: a=%s' % (rank, str(a))
-a = rabit.allreduce(a, rabit.SUM)
-print '@node[%d] after-allreduce-sum: a=%s' % (rank, str(a))
-rabit.finalize()
-```
-You can run the program using the following command
-```bash
-../tracker/rabit_demo.py -n 2 basic.py
-```
-
-Broadcast is another method provided by rabit besides Allreduce. This function allows one node to broadcast its
-local data to all other nodes. The following code in [broadcast.cc](../guide/broadcast.cc) broadcasts a string from
-node 0 to all other nodes.
-```c++
-#include <rabit.h>
-using namespace rabit;
-const int N = 3;
-int main(int argc, char *argv[]) {
-  rabit::Init(argc, argv);
-  std::string s;
-  if (rabit::GetRank() == 0) s = "hello world";
-  printf("@node[%d] before-broadcast: s=\"%s\"\n",
-         rabit::GetRank(), s.c_str());
-  // broadcast s from node 0 to all other nodes
-  rabit::Broadcast(&s, 0);
-  printf("@node[%d] after-broadcast: s=\"%s\"\n",
-         rabit::GetRank(), s.c_str());
-  rabit::Finalize();
-  return 0;
-}
-```
-The following command starts the program with three worker processes.
-```bash
-../tracker/rabit_demo.py -n 3 broadcast.rabit
-```
-Besides strings, rabit also allows broadcasting constant-size arrays and vectors.
-
-The counterpart in python can be found in [broadcast.py](../guide/broadcast.py). Here is a snippet so that you can get a better sense of how simple it is to use the python library:
-
-```python
-import rabit
-rabit.init()
-n = 3
-rank = rabit.get_rank()
-s = None
-if rank == 0:
-    s = {'hello world':100, 2:3}
-print '@node[%d] before-broadcast: s=\"%s\"' % (rank, str(s))
-s = rabit.broadcast(s, 0)
-print '@node[%d] after-broadcast: s=\"%s\"' % (rank, str(s))
-rabit.finalize()
-```
-
-Common Use Case
----------------
-Many distributed machine learning algorithms involve splitting the data across different nodes,
-computing statistics locally, and finally aggregating them. Such a workflow is usually repeated through many iterations before the algorithm converges. Allreduce naturally fits the structure of such programs; common use cases include:
-
-* Aggregation of gradient values, which can be used in optimization methods such as L-BFGS.
-* Aggregation of other statistics, which can be used in KMeans and Gaussian Mixture Models.
-* Finding the best split candidate and aggregating split statistics, used in tree-based models.
-
-Rabit is a reliable and portable library for distributed machine learning programs that allows programs to run reliably on different platforms.
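-As a concrete illustration of the first use case, here is a minimal sketch (in the style of [basic.cc](../guide/basic.cc)) that sums locally computed gradients across all nodes; the gradient values here are synthetic stand-ins for a real per-shard computation.
-```c++
-#include <rabit.h>
-using namespace rabit;
-const int kDim = 4;
-int main(int argc, char *argv[]) {
-  double grad[kDim];
-  rabit::Init(argc, argv);
-  // stand-in for computing a gradient over this node's data shard
-  for (int i = 0; i < kDim; ++i) {
-    grad[i] = 0.1 * (rabit::GetRank() + 1) * i;
-  }
-  // sum the partial gradients; every node receives the global gradient
-  Allreduce<op::Sum>(&grad[0], kDim);
-  rabit::Finalize();
-  return 0;
-}
-```
-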
-Use Rabit API
--------------
-This section introduces how to use the rabit API. You can always refer to the [API Documentation](http://homes.cs.washington.edu/~tqchen/rabit/doc) for the definition of each function.
-This section tries to give examples of different aspects of the rabit API.
-
-#### Structure of a Rabit Program
-The following code illustrates the common structure of a rabit program. This is an abstract example;
-you can also refer to [wormhole](https://github.com/dmlc/wormhole/blob/master/learn/kmeans/kmeans.cc) for an example implementation of the kmeans algorithm.
-
-```c++
-#include <rabit.h>
-int main(int argc, char *argv[]) {
-  ...
-  rabit::Init(argc, argv);
-  // load the latest checkpointed model
-  int version = rabit::LoadCheckPoint(&model);
-  // initialize the model if it is the first version
-  if (version == 0) model.InitModel();
-  // the version number marks the iteration to resume
-  for (int iter = version; iter < max_iter; ++iter) {
-    // at this point, the model object should allow us to recover the program state
-    ...
-    // each iteration can contain multiple calls of allreduce/broadcast
-    rabit::Allreduce<rabit::op::Sum>(&data[0], n);
-    ...
-    // checkpoint the model after one iteration finishes
-    rabit::CheckPoint(&model);
-  }
-  rabit::Finalize();
-  return 0;
-}
-```
-
-Besides the common Allreduce and Broadcast functions, there are two additional functions: ```LoadCheckPoint```
-and ```CheckPoint```. These two functions are used for fault-tolerance purposes.
-As mentioned before, traditional machine learning programs involve several iterations. In each iteration, we start with a model, make some calls
-to Allreduce or Broadcast and update the model. The calling sequence in each iteration does not need to be the same.
-
-* When the nodes start from the beginning (i.e. iteration 0), ```LoadCheckPoint``` returns 0, so we can initialize the model.
-* ```CheckPoint``` saves the model after each iteration.
-  - Efficiency note: the model is only kept in local memory and no save to disk is performed when calling Checkpoint.
-* When a node goes down and restarts, ```LoadCheckPoint``` will recover the latest saved model.
-* When a node goes down, the rest of the nodes will block in the call of Allreduce/Broadcast and wait for
-  the recovery of the failed node until it catches up.
-
-Please see the [Fault Tolerance](#fault-tolerance) section to understand the recovery procedure executed by rabit.
-
-#### Allreduce and Lazy Preparation
-Allreduce is one of the most important functions provided by rabit. You can call Allreduce by specifying the
-reduction operator, a pointer to the data, and the size of the buffer, as follows
-```c++
-Allreduce<operator>(pointer_of_data, size_of_data);
-```
-This is the basic use case of the Allreduce function. It is common for the user to write code that prepares the needed data
-in the data buffer, passes the data to the Allreduce function, and gets the reduced result. However, when a node restarts
-from failure, we can directly recover the result from other nodes (see also [Fault Tolerance](#fault-tolerance)), and
-the data preparation procedure is no longer necessary. Rabit's Allreduce adds an optional preparation-function parameter
-to support such a scenario. Users can pass a function corresponding to the data preparation procedure to Allreduce
-calls, and the data preparation function will only be called when necessary. We use [lazy_allreduce.cc](../guide/lazy_allreduce.cc)
-as an example to demonstrate this feature. It is modified from [basic.cc](../guide/basic.cc), and you can compare the two programs.
-```c++
-#include <rabit.h>
-using namespace rabit;
-const int N = 3;
-int main(int argc, char *argv[]) {
-  int a[N] = {0};
-  rabit::Init(argc, argv);
-  // lazy preparation function
-  auto prepare = [&]() {
-    printf("@node[%d] run prepare function\n", rabit::GetRank());
-    for (int i = 0; i < N; ++i) {
-      a[i] = rabit::GetRank() + i;
-    }
-  };
-  printf("@node[%d] before-allreduce: a={%d, %d, %d}\n",
-         rabit::GetRank(), a[0], a[1], a[2]);
-  // allreduce takes the max of each element across all processes
-  Allreduce<op::Max>(&a[0], N, prepare);
-  printf("@node[%d] after-allreduce-max: a={%d, %d, %d}\n",
-         rabit::GetRank(), a[0], a[1], a[2]);
-  // run a second allreduce
-  Allreduce<op::Sum>(&a[0], N);
-  printf("@node[%d] after-allreduce-sum: a={%d, %d, %d}\n",
-         rabit::GetRank(), a[0], a[1], a[2]);
-  rabit::Finalize();
-  return 0;
-}
-```
-Here we use features of C++11, because the lambda function makes things much shorter.
-There is also a C++-compatible callback interface provided in the [API](http://homes.cs.washington.edu/~tqchen/rabit/doc).
-You can compile the program by typing ```make lazy_allreduce.mock```. We link against the mock library so that we can see
-the effect when a process goes down. You can run the program using the following command
-```bash
-../tracker/rabit_demo.py -n 2 lazy_allreduce.mock mock=0,0,1,0
-```
-The additional arguments ```mock=0,0,1,0``` will cause node 0 to kill itself before the second call of Allreduce (see also [mock test](#link-against-mock-test-rabit-library)).
-You will find that the prepare function's output is printed only once, and that node 0 does not execute the preparation function again when it restarts after the failure.
-
-You can also find a python version of the example in [lazy_allreduce.py](../guide/lazy_allreduce.py), and run it using the following command
-```bash
-../tracker/rabit_demo.py -n 2 lazy_allreduce.py mock=0,0,1,0
-```
-
-Since the lazy preparation function may not be called during execution, users should be careful when using this feature. For example, a possible mistake
-is to put some memory allocation code in the lazy preparation function; the memory needed by the computation is then never allocated when the lazy preparation function is skipped.
-The example in [lazy_allreduce.cc](../guide/lazy_allreduce.cc) shows a simple way to migrate normal preparation code ([basic.cc](../guide/basic.cc)) to the lazy version: wrap the preparation
-code in a lambda function, and pass it to Allreduce.
-
-#### Checkpoint and LazyCheckpoint
-Common machine learning algorithms usually involve iterative computation. As mentioned in the section ([Structure of a Rabit Program](#structure-of-a-rabit-program)),
-users can and should use Checkpoint to ```save``` the progress so far, so that when a node fails, the latest checkpointed model can be loaded.
-
-There are two model arguments you can pass to Checkpoint and LoadCheckpoint: ```global_model``` and ```local_model```:
-* ```global_model``` refers to the model that is commonly shared across all the nodes
-  - For example, the centroids of clusters in kmeans are shared across all nodes
-* ```local_model``` refers to the model that is specifically tied to the current node
-  - For example, in topic modeling, the topic assignments of the subset of documents on the current node are the local model
-
-Because of the different nature of the two types of models, different strategies are used for them.
-```global_model``` is simply saved in the local memory of each node, while ```local_model``` is replicated to some other
-nodes (selected using a ring replication strategy). The checkpoint is only saved in memory without touching the disk, which makes rabit programs more efficient.
-Users are encouraged to use ```global_model``` only, when it is sufficient, for better efficiency. A minimal sketch of this checkpointing pattern follows.
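-The sketch below illustrates this pattern for a ```global_model```, assuming a hypothetical ```Model``` class holding kmeans-style centroids; the vector read/write helpers used here are the STL serialization functions mentioned below.
-```c++
-#include <vector>
-#include <rabit.h>
-using namespace rabit;
-struct Model : public Serializable {
-  std::vector<float> centroids;  // the global model: cluster centroids
-  virtual void Load(Stream *fi) { fi->Read(&centroids); }
-  virtual void Save(Stream *fo) const { fo->Write(centroids); }
-};
-int main(int argc, char *argv[]) {
-  rabit::Init(argc, argv);
-  Model model;
-  // returns 0 on a fresh start, or the version of the recovered model
-  int version = rabit::LoadCheckPoint(&model);
-  if (version == 0) model.centroids.assign(8, 0.0f);
-  for (int iter = version; iter < 10; ++iter) {
-    // ... update model.centroids using Allreduce calls ...
-    rabit::CheckPoint(&model);  // kept in memory only, no disk I/O
-  }
-  rabit::Finalize();
-  return 0;
-}
-```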
-
-#### Checkpoint and LazyCheckpoint
-Common machine learning algorithms usually involve iterative computation. As mentioned in the section [Structure of a Rabit Program](#structure-of-a-rabit-program),
-the user can and should use CheckPoint to ```save``` the progress so far, so that when a node fails, the latest checkpointed model can be loaded.
-
-There are two model arguments you can pass to CheckPoint and LoadCheckPoint: ```global_model``` and ```local_model```:
-* ```global_model``` refers to the model that is commonly shared across all the nodes
-  - For example, the centroids of the clusters in kmeans are shared across all nodes
-* ```local_model``` refers to the model that is specifically tied to the current node
-  - For example, in topic modeling, the topic assignments of the subset of documents on the current node are a local model
-
-Because of the different nature of the two types of models, different strategies are used for them.
-```global_model``` is simply saved in the local memory of each node, while ```local_model``` is replicated to some other
-nodes (selected using a ring replication strategy). The checkpoint is only saved in memory and never touches the disk, which makes rabit programs efficient.
-The user is encouraged to use ```global_model``` only, when it is sufficient, for better efficiency.
-
-To enable a model class to be checkpointed, the user can implement a [serialization interface](../include/rabit_serializable.h). The serialization interface already
-provides serialization functions for the STL vector and string. For the python API, the user can checkpoint any python object that can be pickled.
-
-There is a special Checkpoint function called [LazyCheckpoint](http://homes.cs.washington.edu/~tqchen/rabit/doc/namespacerabit.html#a99f74c357afa5fba2c80cc0363e4e459),
-which can be used for ```global_model``` only cases, under a certain condition.
-When LazyCheckpoint is called, no action is taken; the rabit engine only remembers the pointer to the model.
-The serialization will only happen when another node fails and the recovery starts, so the user pays essentially no extra cost for calling LazyCheckpoint.
-To use this function, the user needs to ensure that the model remains unchanged until the last call of Allreduce/Broadcast in the current version finishes,
-so that when the recovery procedure happens during these function calls, the serialized model will be the same.
-
-For example, consider the following calling sequence
-```
-LazyCheckPoint, code1, Allreduce, code2, Broadcast, code3, LazyCheckPoint
-```
-The user must only change the model in code3. Such a condition can usually be satisfied in many scenarios, and the user can use LazyCheckpoint to further
-improve the efficiency of the program.
-
-
-Compile Programs with Rabit
----------------------------
-Rabit is a portable library; to use it, you only need to include the rabit header file.
-* You will need to add the path to [../include](../include) to the header search path of the compiler
-  - Solution 1: add ```-I/path/to/rabit/include``` to the compiler flags in gcc or clang
-  - Solution 2: add the path to the environment variable CPLUS_INCLUDE_PATH
-* You will need to add the path to [../lib](../lib) to the library search path of the compiler
-  - Solution 1: add ```-L/path/to/rabit/lib``` to the linker flags
-  - Solution 2: add the path to the environment variables LIBRARY_PATH and LD_LIBRARY_PATH
-* Link against lib/rabit.a
-  - Add ```-lrabit``` to the linker flags
-
-The procedure above allows you to compile a program with rabit. The following two sections contain additional
-options you can use to link against different backends other than the normal one.
-
-#### Link against MPI Allreduce
-You can instead link against ```rabit_mpi.a```, which uses MPI Allreduce; however, the resulting program is backed by MPI and
-is no longer fault tolerant.
-* Simply change the linker flag from ```-lrabit``` to ```-lrabit_mpi```
-* The final linking needs to be done by the mpi wrapper compiler ```mpicxx```
-
-#### Link against Mock Test Rabit Library
-If you want to use a mock to test the program, in order to see the behavior of the code when some nodes go down, you can link against ```rabit_mock.a```.
-* Simply change the linker flag from ```-lrabit``` to ```-lrabit_mock```
-
-The resulting rabit mock program can take additional arguments in the following format
-```
-mock=rank,version,seq,ndeath
-```
-
-The four integers specify an event that will cause the program to ```commit suicide``` (exit with -2)
-* rank specifies the rank of the node to kill
-* version specifies the version (iteration) of the model where you want the process to die
-* seq specifies the sequence number of the Allreduce/Broadcast call since the last checkpoint, at which the process will be killed
-* ndeath specifies how many times this node has died already
-
-For example, consider the following script in the test case
-```bash
-../tracker/rabit_demo.py -n 10 test_model_recover 10000\
-    mock=0,0,1,0 mock=1,1,1,0 mock=1,1,1,1
-```
-* The first mock will cause node 0 to exit when calling the second Allreduce/Broadcast (seq = 1) in iteration 0
-* The second mock will cause node 1 to exit when calling the second Allreduce/Broadcast (seq = 1) in iteration 1
-* The third mock will cause node 1 to exit again when calling the second Allreduce/Broadcast (seq = 1) in iteration 1
-  - Note that ndeath = 1 means this will happen only if node 1 has died once already, which is the case here
-
-Running Rabit Jobs
-------------------
-Rabit is a portable library that can run on multiple platforms.
-
-#### Running Rabit Locally
-* You can use [../tracker/rabit_demo.py](https://github.com/dmlc/rabit/blob/master/tracker/rabit_demo.py) to start n processes locally
-* This script will restart the program when it exits with -2, so it can be used for [mock tests](#link-against-mock-test-rabit-library)
-
-#### Running Rabit on Hadoop
-* You can use [../tracker/rabit_yarn.py](https://github.com/dmlc/rabit/blob/master/tracker/rabit_yarn.py) to run rabit programs as Yarn applications
-  - This allows multi-threaded programs in each node, which can be more efficient
-  - An easy multi-threading solution is to use OpenMP together with rabit code
-* It is also possible to run rabit programs via hadoop streaming, but YARN is highly recommended.
-
-#### Running Rabit using MPI
-* You can submit rabit programs to an MPI cluster using [../tracker/rabit_mpi.py](https://github.com/dmlc/rabit/blob/master/tracker/rabit_mpi.py).
-* If you linked your code against librabit_mpi.a, you can directly use mpirun to submit the job
-
-#### Customize Tracker Script
-You can also modify the tracker script to allow rabit to run on other platforms. To do so, refer to existing
-tracker scripts, such as [../tracker/rabit_yarn.py](../tracker/rabit_yarn.py) and [../tracker/rabit_mpi.py](https://github.com/dmlc/rabit/blob/master/tracker/rabit_mpi.py), to get a sense of how it is done.
-
-You will need to implement a platform dependent submission function with the following definition
-```python
-def fun_submit(nworkers, worker_args, worker_envs):
-    """
-    customized submit script that submits nworkers jobs,
-    each of which must contain worker_args as parameters;
-    note this can be a lambda closure
-    Parameters
-      nworkers    number of worker processes to start
-      worker_args additional arguments that need to be passed to the worker
-      worker_envs environment variables that need to be set for the worker
-    """
-```
-The submission function should start nworkers processes on the platform, and append worker_args to the end of the other arguments.
-Then you can simply call ```tracker.submit``` with fun_submit to submit jobs to the target platform.
-
-Note that the current rabit tracker does not restart a worker when it dies; the restart of a node is handled by the platform instead. Otherwise, the fail-restart logic would have to be written into the custom script.
-* Fail-restart is usually provided by most platforms.
-  - rabit-yarn provides such functionality in YARN
-
-Fault Tolerance
----------------
-This section introduces how fault tolerance works in rabit.
-The following figure shows how rabit deals with failures.
-
-![](http://homes.cs.washington.edu/~tqchen/rabit/fig/fault-tol.png)
-
-The scenario is as follows:
-* Node 1 fails between the first and second call of Allreduce after the second checkpoint
-* The other nodes wait in the call of the second Allreduce in order to help node 1 recover.
-* When node 1 restarts, it will call ```LoadCheckPoint```, and get the latest checkpoint from one of the existing nodes.
-* Then node 1 can start from the latest checkpoint and continue running.
-* When node 1 calls the first Allreduce again, as the other nodes already know the result, node 1 can get it from one of them.
-* When node 1 reaches the second Allreduce, the other nodes find out that node 1 has caught up and they can continue the program normally.
-
-This fault tolerance model is based on a key property of Allreduce and
-Broadcast: all the nodes get the same result after calling Allreduce/Broadcast.
-Because of this property, any node can record the results of past
-Allreduce/Broadcast calls. When a node is recovered, it can fetch the lost
-results from some alive nodes and rebuild its model.
-
-The checkpoint is introduced so that we can discard the results of
-Allreduce/Broadcast calls made before the latest checkpoint. This reduces the memory
-consumed by the backups. The checkpoint of each node is a model defined by the
-user and can be split into 2 parts: a global model and a local model. The
-global model is shared by all nodes and can be backed up by any node. The
-local model of a node is replicated to some other nodes (selected using a ring
-replication strategy). The checkpoint is only saved in memory and never touches
-the disk, which makes rabit programs more efficient. The strategy of
-rabit is different from the fail-restart strategy, where all the nodes restart
-from the same checkpoint when any of them fails. In rabit, all the alive nodes
-will block in the Allreduce call and help the recovery. To catch up, the
-recovered node fetches its latest checkpoint and the results of the
-Allreduce/Broadcast calls made after that checkpoint from some alive nodes.
-
-This is just a conceptual introduction to rabit's fault tolerance model. The actual implementation is more sophisticated,
-and can deal with more complicated cases such as multiple node failures and node failure during the recovery phase.
diff --git a/subtree/rabit/doc/index.md b/subtree/rabit/doc/index.md
deleted file mode 100644
index d209d95ba..000000000
--- a/subtree/rabit/doc/index.md
+++ /dev/null
@@ -1,24 +0,0 @@
-Rabit Documentation
-=====================
-rabit is a lightweight library that provides a fault tolerant interface for Allreduce and Broadcast. It is designed to support easy implementations of distributed machine learning programs, many of which fall naturally under the Allreduce abstraction. The goal of rabit is to support **portable**, **scalable** and **reliable** distributed machine learning programs.
-
-API Documents
--------------
-```eval_rst
-
-.. toctree::
-   :maxdepth: 2
-
-   python_api.md
-   cpp_api.md
-   parameters.md
-   guide.md
-```
-Indices and tables
-------------------
-
-```eval_rst
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
-```
\ No newline at end of file
diff --git a/subtree/rabit/doc/parameters.md b/subtree/rabit/doc/parameters.md
deleted file mode 100644
index 37580d5a1..000000000
--- a/subtree/rabit/doc/parameters.md
+++ /dev/null
@@ -1,21 +0,0 @@
-Parameters
-==========
-This section lists all the parameters that can be passed to the rabit::Init function as argv.
-All the parameters are passed in as strings in the format ``parameter-name=parameter-value``.
-In most settings these parameters have default values or are automatically detected,
-and do not need to be manually configured.
-
-* rabit_tracker_uri [passed in automatically by the tracker]
-  - The uri/ip of the rabit tracker
-* rabit_tracker_port [passed in automatically by the tracker]
-  - The port of the rabit tracker
-* rabit_task_id [automatically detected]
-  - The unique identifier of the computing process
-  - When running on hadoop, this is automatically extracted from an environment variable
-* rabit_reduce_buffer [default = 256MB]
-  - The memory buffer used to store intermediate reduction results
-  - Format: digits + unit, e.g. 128M, 1G
-* rabit_global_replica [default = 5]
-  - Number of replication copies of the result kept for each Allreduce/Broadcast call
-* rabit_local_replica [default = 2]
-  - Number of replication copies of the local model kept at each checkpoint
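-
-As an illustration (not part of the original document), the sketch below shows how such parameters could be forwarded to ```rabit::Init``` by hand; the parameter values here are made-up examples, and normally the tracker supplies them automatically.
-
-```c++
-#include <rabit.h>
-
-int main(int argc, char *argv[]) {
-  // parameters are plain "name=value" strings appended to argv
-  char buffer_opt[]  = "rabit_reduce_buffer=128M";
-  char replica_opt[] = "rabit_global_replica=3";
-  char *args[] = {argv[0], buffer_opt, replica_opt};
-  rabit::Init(3, args);
-  // ... Allreduce/Broadcast calls ...
-  rabit::Finalize();
-  return 0;
-}
-```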
diff --git a/subtree/rabit/doc/python-requirements.txt b/subtree/rabit/doc/python-requirements.txt
deleted file mode 100644
index 5970c4367..000000000
--- a/subtree/rabit/doc/python-requirements.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-numpy
-breathe
-commonmark
-
diff --git a/subtree/rabit/doc/python_api.md b/subtree/rabit/doc/python_api.md
deleted file mode 100644
index 8a0eda921..000000000
--- a/subtree/rabit/doc/python_api.md
+++ /dev/null
@@ -1,11 +0,0 @@
-Python API of Rabit
-===================
-This page contains the documentation of the python API of rabit.
-
-```eval_rst
-.. toctree::
-
-.. automodule:: rabit
-    :members:
-    :show-inheritance:
-```
diff --git a/subtree/rabit/doc/sphinx_util.py b/subtree/rabit/doc/sphinx_util.py
deleted file mode 100644
index f6a33ffa3..000000000
--- a/subtree/rabit/doc/sphinx_util.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Helper utility functions for customization."""
-import sys
-import os
-import docutils
-import subprocess
-
-if os.environ.get('READTHEDOCS', None) == 'True':
-    subprocess.call('cd ..; rm -rf recommonmark;' +
-                    'git clone https://github.com/tqchen/recommonmark', shell=True)
-
-sys.path.insert(0, os.path.abspath('../recommonmark/'))
-from recommonmark import parser, transform
-
-MarkdownParser = parser.CommonMarkParser
-AutoStructify = transform.AutoStructify
diff --git a/subtree/rabit/guide/Makefile b/subtree/rabit/guide/Makefile
deleted file mode 100644
index 7213e1bf7..000000000
--- a/subtree/rabit/guide/Makefile
+++ /dev/null
@@ -1,26 +0,0 @@
-export CC  = gcc
-export CXX = g++
-export MPICXX = mpicxx
-export LDFLAGS= -pthread -lm -L../lib
-export CFLAGS = -Wall -O3 -msse2 -Wno-unknown-pragmas -fPIC -I../include
-
-.PHONY: clean all lib libmpi
-BIN = basic.rabit broadcast.rabit
-MOCKBIN= lazy_allreduce.mock
-
-all: $(BIN)
-basic.rabit: basic.cc lib
-broadcast.rabit: broadcast.cc lib
-lazy_allreduce.mock: lazy_allreduce.cc lib
-
-$(BIN) :
-	$(CXX) $(CFLAGS) -o $@ $(filter %.cpp %.o %.c %.cc, $^) $(LDFLAGS) -lrabit
-
-$(MOCKBIN) :
-	$(CXX) $(CFLAGS) -std=c++11 -o $@ $(filter %.cpp %.o %.c %.cc, $^) $(LDFLAGS) -lrabit_mock
-
-$(OBJ) :
-	$(CXX) -c $(CFLAGS) -o $@ $(firstword $(filter %.cpp %.c %.cc, $^) )
-
-clean:
-	$(RM) $(OBJ) $(BIN) $(MOCKBIN) *~ ../src/*~
\ No newline at end of file
diff --git a/subtree/rabit/guide/README b/subtree/rabit/guide/README
deleted file mode 100644
index 2483d683f..000000000
--- a/subtree/rabit/guide/README
+++ /dev/null
@@ -1 +0,0 @@
-See the tutorial at ../doc/guide.md
\ No newline at end of file
diff --git a/subtree/rabit/guide/basic.cc b/subtree/rabit/guide/basic.cc
deleted file mode 100644
index a9a729170..000000000
--- a/subtree/rabit/guide/basic.cc
+++ /dev/null
@@ -1,35 +0,0 @@
-/*!
- * Copyright (c) 2014 by Contributors
- * \file basic.cc
- * \brief This is an example demonstrating what Allreduce does
- *
- * \author Tianqi Chen
- */
-#define _CRT_SECURE_NO_WARNINGS
-#define _CRT_SECURE_NO_DEPRECATE
-#include <rabit.h>
-#include <vector>
-#include <cstdlib>
-using namespace rabit;
-int main(int argc, char *argv[]) {
-  int N = 3;
-  if (argc > 1) {
-    N = atoi(argv[1]);
-  }
-  std::vector<int> a(N);
-  rabit::Init(argc, argv);
-  for (int i = 0; i < N; ++i) {
-    a[i] = rabit::GetRank() + i;
-  }
-  printf("@node[%d] before-allreduce: a={%d, %d, %d}\n",
-         rabit::GetRank(), a[0], a[1], a[2]);
-  // the first allreduce takes the max of each element across all processes
-  Allreduce<op::Max>(&a[0], N);
-  printf("@node[%d] after-allreduce-max: a={%d, %d, %d}\n",
-         rabit::GetRank(), a[0], a[1], a[2]);
-  // the second allreduce sums everything up
-  Allreduce<op::Sum>(&a[0], N);
-  printf("@node[%d] after-allreduce-sum: a={%d, %d, %d}\n",
-         rabit::GetRank(), a[0], a[1], a[2]);
-  rabit::Finalize();
-  return 0;
-}
diff --git a/subtree/rabit/guide/basic.py b/subtree/rabit/guide/basic.py
deleted file mode 100755
index becdae07d..000000000
--- a/subtree/rabit/guide/basic.py
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/python
-"""
-demo python script of rabit
-"""
-import os
-import sys
-import numpy as np
-# import rabit; the tracker script will set up the lib path correctly
-# for a normal run without the tracker script, add the following line
-# sys.path.append(os.path.dirname(__file__) + '/../wrapper')
-import rabit
-
-rabit.init()
-n = 3
-rank = rabit.get_rank()
-a = np.zeros(n)
-for i in xrange(n):
-    a[i] = rank + i
-
-print '@node[%d] before-allreduce: a=%s' % (rank, str(a))
-a = rabit.allreduce(a, rabit.MAX)
-print '@node[%d] after-allreduce-max: a=%s' % (rank, str(a))
-a = rabit.allreduce(a, rabit.SUM)
-print '@node[%d] after-allreduce-sum: a=%s' % (rank, str(a))
-rabit.finalize()
diff --git a/subtree/rabit/guide/broadcast.cc b/subtree/rabit/guide/broadcast.cc
deleted file mode 100644
index 83dbe67fe..000000000
--- a/subtree/rabit/guide/broadcast.cc
+++ /dev/null
@@ -1,16 +0,0 @@
-#include <rabit.h>
-using namespace rabit;
-const int N = 3;
-int main(int argc, char *argv[]) {
-  rabit::Init(argc, argv);
-  std::string s;
-  if (rabit::GetRank() == 0) s = "hello world";
-  printf("@node[%d] before-broadcast: s=\"%s\"\n",
-         rabit::GetRank(), s.c_str());
-  // broadcast s from node 0 to all other nodes
-  rabit::Broadcast(&s, 0);
-  printf("@node[%d] after-broadcast: s=\"%s\"\n",
-         rabit::GetRank(), s.c_str());
-  rabit::Finalize();
-  return 0;
-}
diff --git a/subtree/rabit/guide/broadcast.py b/subtree/rabit/guide/broadcast.py
deleted file mode 100755
index defe69eaa..000000000
--- a/subtree/rabit/guide/broadcast.py
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/python
-"""
-demo python script of rabit
-"""
-import os
-import sys
-# add path to wrapper
-# for a normal run without the tracker script, add the following line
-# sys.path.append(os.path.dirname(__file__) + '/../wrapper')
-import rabit
-
-rabit.init()
-n = 3
-rank = rabit.get_rank()
-s = None
-if rank == 0:
-    s = {'hello world': 100, 2: 3}
-print '@node[%d] before-broadcast: s=\"%s\"' % (rank, str(s))
-s = rabit.broadcast(s, 0)
-
-print '@node[%d] after-broadcast: s=\"%s\"' % (rank, str(s))
-rabit.finalize()
diff --git a/subtree/rabit/guide/lazy_allreduce.cc b/subtree/rabit/guide/lazy_allreduce.cc
deleted file mode 100644
index b54776ecc..000000000
--- a/subtree/rabit/guide/lazy_allreduce.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/*!
- * Copyright (c) 2014 by Contributors
- * \file lazy_allreduce.cc
- * \brief This is an example demonstrating lazy Allreduce
- *
- * \author Tianqi Chen
- */
-#include <rabit.h>
-using namespace rabit;
-const int N = 3;
-int main(int argc, char *argv[]) {
-  int a[N] = {0};
-  rabit::Init(argc, argv);
-  // lazy preparation function
-  auto prepare = [&]() {
-    printf("@node[%d] run prepare function\n", rabit::GetRank());
-    for (int i = 0; i < N; ++i) {
-      a[i] = rabit::GetRank() + i;
-    }
-  };
-  printf("@node[%d] before-allreduce: a={%d, %d, %d}\n",
-         rabit::GetRank(), a[0], a[1], a[2]);
-  // the first allreduce takes the max of each element across all processes
-  Allreduce<op::Max>(&a[0], N, prepare);
-  printf("@node[%d] after-allreduce-max: a={%d, %d, %d}\n",
-         rabit::GetRank(), a[0], a[1], a[2]);
-  // run the second allreduce
-  Allreduce<op::Sum>(&a[0], N);
-  printf("@node[%d] after-allreduce-sum: a={%d, %d, %d}\n",
-         rabit::GetRank(), a[0], a[1], a[2]);
-  rabit::Finalize();
-  return 0;
-}
diff --git a/subtree/rabit/guide/lazy_allreduce.py b/subtree/rabit/guide/lazy_allreduce.py
deleted file mode 100755
index a195f58d2..000000000
--- a/subtree/rabit/guide/lazy_allreduce.py
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/python
-"""
-demo python script of rabit: lazy preparation function
"""
-import os
-import sys
-import numpy as np
-# import rabit; the tracker script will set up the lib path correctly
-# for a normal run without the tracker script, add the following line
-# sys.path.append(os.path.dirname(__file__) + '/../wrapper')
-import rabit
-
-
-# use the mock library so that we can run the failure test
-rabit.init(lib = 'mock')
-n = 3
-rank = rabit.get_rank()
-a = np.zeros(n)
-
-def prepare(a):
-    print '@node[%d] run prepare function' % rank
-    # must take the array by reference and modify it in place
-    for i in xrange(n):
-        a[i] = rank + i
-
-print '@node[%d] before-allreduce: a=%s' % (rank, str(a))
-a = rabit.allreduce(a, rabit.MAX, prepare_fun = prepare)
-print '@node[%d] after-allreduce-max: a=%s' % (rank, str(a))
-a = rabit.allreduce(a, rabit.SUM)
-print '@node[%d] after-allreduce-sum: a=%s' % (rank, str(a))
-rabit.finalize()
diff --git a/subtree/rabit/include/README.md b/subtree/rabit/include/README.md
deleted file mode 100644
index 2512edc78..000000000
--- a/subtree/rabit/include/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-Library Header Files
-====
-* This folder contains all the headers needed to use the library
-* To use them, add the "include" folder to the search path of the compiler
-* The user only needs to know [rabit.h](rabit.h) and [rabit_serializable.h](rabit_serializable.h) in order to use the library
-* The folder [rabit](rabit) contains headers for the internal engine and template implementations
-* Not all .h files of the project are in the "include" folder; .h files that are only used internally by the library remain in [src](../src)
diff --git a/subtree/rabit/include/dmlc/README.md b/subtree/rabit/include/dmlc/README.md
deleted file mode 100644
index 846cec006..000000000
--- a/subtree/rabit/include/dmlc/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-This folder is part of the dmlc-core library; it allows rabit to use a unified stream interface with other dmlc projects.
-
-- Since this is only an interface dependency, dmlc-core is not required to compile rabit
-- To compile a project that uses dmlc-core functions, linking against libdmlc.a (provided by dmlc-core) is required.
diff --git a/subtree/rabit/include/dmlc/io.h b/subtree/rabit/include/dmlc/io.h
deleted file mode 100644
index 66d590b2d..000000000
--- a/subtree/rabit/include/dmlc/io.h
+++ /dev/null
@@ -1,423 +0,0 @@
-/*!
- * Copyright (c) 2015 by Contributors - * \file io.h - * \brief defines serializable interface of dmlc - */ -#ifndef DMLC_IO_H_ -#define DMLC_IO_H_ -#include -#include -#include -#include -#include -#include - -// include uint64_t only to make io standalone -#ifdef _MSC_VER -/*! \brief uint64 */ -typedef unsigned __int64 uint64_t; -#else -#include -#endif - -/*! \brief namespace for dmlc */ -namespace dmlc { -/*! - * \brief interface of stream I/O for serialization - */ -class Stream { // NOLINT(*) - public: - /*! - * \brief reads data from a stream - * \param ptr pointer to a memory buffer - * \param size block size - * \return the size of data read - */ - virtual size_t Read(void *ptr, size_t size) = 0; - /*! - * \brief writes data to a stream - * \param ptr pointer to a memory buffer - * \param size block size - */ - virtual void Write(const void *ptr, size_t size) = 0; - /*! \brief virtual destructor */ - virtual ~Stream(void) {} - /*! - * \brief generic factory function - * create an stream, the stream will close the underlying files upon deletion - * - * \param uri the uri of the input currently we support - * hdfs://, s3://, and file:// by default file:// will be used - * \param flag can be "w", "r", "a" - * \param allow_null whether NULL can be returned, or directly report error - * \return the created stream, can be NULL when allow_null == true and file do not exist - */ - static Stream *Create(const char *uri, - const char* const flag, - bool allow_null = false); - // helper functions to write/read different data structures - /*! - * \brief writes a vector - * \param vec vector to be written/serialized - */ - template - inline void Write(const std::vector &vec); - /*! - * \brief loads a vector - * \param out_vec vector to be loaded/deserialized - * \return whether the load was successful - */ - template - inline bool Read(std::vector *out_vec); - /*! - * \brief writes a string - * \param str the string to be written/serialized - */ - inline void Write(const std::string &str); - /*! - * \brief loads a string - * \param out_str string to be loaded/deserialized - * \return whether the load/deserialization was successful - */ - inline bool Read(std::string *out_str); -}; - -/*! \brief interface of i/o stream that support seek */ -class SeekStream: public Stream { - public: - // virtual destructor - virtual ~SeekStream(void) {} - /*! \brief seek to certain position of the file */ - virtual void Seek(size_t pos) = 0; - /*! \brief tell the position of the stream */ - virtual size_t Tell(void) = 0; - /*! - * \brief generic factory function - * create an SeekStream for read only, - * the stream will close the underlying files upon deletion - * error will be reported and the system will exit when create failed - * \param uri the uri of the input currently we support - * hdfs://, s3://, and file:// by default file:// will be used - * \param allow_null whether NULL can be returned, or directly report error - * \return the created stream, can be NULL when allow_null == true and file do not exist - */ - static SeekStream *CreateForRead(const char *uri, - bool allow_null = false); -}; - -/*! \brief interface for serializable objects */ -class Serializable { - public: - /*! - * \brief load the model from a stream - * \param fi stream where to load the model from - */ - virtual void Load(Stream *fi) = 0; - /*! - * \brief saves the model to a stream - * \param fo stream where to save the model to - */ - virtual void Save(Stream *fo) const = 0; -}; - -/*! 
- * \brief input split creates that allows reading - * of records from split of data, - * independent part that covers all the dataset - * - * see InputSplit::Create for definition of record - */ -class InputSplit { - public: - /*! \brief a blob of memory region */ - struct Blob { - /*! \brief points to start of the memory region */ - void *dptr; - /*! \brief size of the memory region */ - size_t size; - }; - /*! - * \brief hint the inputsplit how large the chunk size - * it should return when implementing NextChunk - * this is a hint so may not be enforced, - * but InputSplit will try adjust its internal buffer - * size to the hinted value - * \param chunk_size the chunk size - */ - virtual void HintChunkSize(size_t chunk_size) {} - /*! \brief reset the position of InputSplit to beginning */ - virtual void BeforeFirst(void) = 0; - /*! - * \brief get the next record, the returning value - * is valid until next call to NextRecord or NextChunk - * caller can modify the memory content of out_rec - * - * For text, out_rec contains a single line - * For recordio, out_rec contains one record content(with header striped) - * - * \param out_rec used to store the result - * \return true if we can successfully get next record - * false if we reached end of split - * \sa InputSplit::Create for definition of record - */ - virtual bool NextRecord(Blob *out_rec) = 0; - /*! - * \brief get a chunk of memory that can contain multiple records, - * the caller needs to parse the content of the resulting chunk, - * for text file, out_chunk can contain data of multiple lines - * for recordio, out_chunk can contain multiple records(including headers) - * - * This function ensures there won't be partial record in the chunk - * caller can modify the memory content of out_chunk, - * the memory is valid until next call to NextRecord or NextChunk - * - * Usually NextRecord is sufficient, NextChunk can be used by some - * multi-threaded parsers to parse the input content - * - * \param out_chunk used to store the result - * \return true if we can successfully get next record - * false if we reached end of split - * \sa InputSplit::Create for definition of record - * \sa RecordIOChunkReader to parse recordio content from out_chunk - */ - virtual bool NextChunk(Blob *out_chunk) = 0; - /*! \brief destructor*/ - virtual ~InputSplit(void) {} - /*! - * \brief factory function: - * create input split given a uri - * \param uri the uri of the input, can contain hdfs prefix - * \param part_index the part id of current input - * \param num_parts total number of splits - * \param type type of record - * List of possible types: "text", "recordio" - * - "text": - * text file, each line is treated as a record - * input split will split on '\\n' or '\\r' - * - "recordio": - * binary recordio file, see recordio.h - * \return a new input split - * \sa InputSplit::Type - */ - static InputSplit* Create(const char *uri, - unsigned part_index, - unsigned num_parts, - const char *type); -}; - -/*! - * \brief a std::ostream class that can can wrap Stream objects, - * can use ostream with that output to underlying Stream - * - * Usage example: - * \code - * - * Stream *fs = Stream::Create("hdfs:///test.txt", "w"); - * dmlc::ostream os(fs); - * os << "hello world" << std::endl; - * delete fs; - * \endcode - */ -class ostream : public std::basic_ostream { - public: - /*! 
- * \brief construct std::ostream type - * \param stream the Stream output to be used - * \param buffer_size internal streambuf size - */ - explicit ostream(Stream *stream, - size_t buffer_size = (1 << 10)) - : std::basic_ostream(NULL), buf_(buffer_size) { - this->set_stream(stream); - } - // explictly synchronize the buffer - virtual ~ostream() { - buf_.pubsync(); - } - /*! - * \brief set internal stream to be stream, reset states - * \param stream new stream as output - */ - inline void set_stream(Stream *stream) { - buf_.set_stream(stream); - this->rdbuf(&buf_); - } - - private: - // internal streambuf - class OutBuf : public std::streambuf { - public: - explicit OutBuf(size_t buffer_size) - : stream_(NULL), buffer_(buffer_size) { - if (buffer_size == 0) buffer_.resize(2); - } - // set stream to the buffer - inline void set_stream(Stream *stream); - - private: - /*! \brief internal stream by StreamBuf */ - Stream *stream_; - /*! \brief internal buffer */ - std::vector buffer_; - // override sync - inline int_type sync(void); - // override overflow - inline int_type overflow(int c); - }; - /*! \brief buffer of the stream */ - OutBuf buf_; -}; - -/*! - * \brief a std::istream class that can can wrap Stream objects, - * can use istream with that output to underlying Stream - * - * Usage example: - * \code - * - * Stream *fs = Stream::Create("hdfs:///test.txt", "r"); - * dmlc::istream is(fs); - * is >> mydata; - * delete fs; - * \endcode - */ -class istream : public std::basic_istream { - public: - /*! - * \brief construct std::ostream type - * \param stream the Stream output to be used - * \param buffer_size internal buffer size - */ - explicit istream(Stream *stream, - size_t buffer_size = (1 << 10)) - : std::basic_istream(NULL), buf_(buffer_size) { - this->set_stream(stream); - } - virtual ~istream() {} - /*! - * \brief set internal stream to be stream, reset states - * \param stream new stream as output - */ - inline void set_stream(Stream *stream) { - buf_.set_stream(stream); - this->rdbuf(&buf_); - } - /*! \return how many bytes we read so far */ - inline size_t bytes_read(void) const { - return buf_.bytes_read(); - } - - private: - // internal streambuf - class InBuf : public std::streambuf { - public: - explicit InBuf(size_t buffer_size) - : stream_(NULL), bytes_read_(0), - buffer_(buffer_size) { - if (buffer_size == 0) buffer_.resize(2); - } - // set stream to the buffer - inline void set_stream(Stream *stream); - // return how many bytes read so far - inline size_t bytes_read(void) const { - return bytes_read_; - } - private: - /*! \brief internal stream by StreamBuf */ - Stream *stream_; - /*! \brief how many bytes we read so far */ - size_t bytes_read_; - /*! \brief internal buffer */ - std::vector buffer_; - // override underflow - inline int_type underflow(); - }; - /*! 
\brief input buffer */ - InBuf buf_; -}; - -// implementations of inline functions -template -inline void Stream::Write(const std::vector &vec) { - uint64_t sz = static_cast(vec.size()); - this->Write(&sz, sizeof(sz)); - if (sz != 0) { - this->Write(&vec[0], sizeof(T) * vec.size()); - } -} -template -inline bool Stream::Read(std::vector *out_vec) { - uint64_t sz; - if (this->Read(&sz, sizeof(sz)) == 0) return false; - size_t size = static_cast(sz); - out_vec->resize(size); - if (sz != 0) { - if (this->Read(&(*out_vec)[0], sizeof(T) * size) == 0) return false; - } - return true; -} -inline void Stream::Write(const std::string &str) { - uint64_t sz = static_cast(str.length()); - this->Write(&sz, sizeof(sz)); - if (sz != 0) { - this->Write(&str[0], sizeof(char) * str.length()); - } -} -inline bool Stream::Read(std::string *out_str) { - uint64_t sz; - if (this->Read(&sz, sizeof(sz)) == 0) return false; - size_t size = static_cast(sz); - out_str->resize(size); - if (sz != 0) { - if (this->Read(&(*out_str)[0], sizeof(char) * size) == 0) { - return false; - } - } - return true; -} - -// implementations for ostream -inline void ostream::OutBuf::set_stream(Stream *stream) { - if (stream_ != NULL) this->pubsync(); - this->stream_ = stream; - this->setp(&buffer_[0], &buffer_[0] + buffer_.size() - 1); -} -inline int ostream::OutBuf::sync(void) { - if (stream_ == NULL) return -1; - std::ptrdiff_t n = pptr() - pbase(); - stream_->Write(pbase(), n); - this->pbump(-static_cast(n)); - return 0; -} -inline int ostream::OutBuf::overflow(int c) { - *(this->pptr()) = c; - std::ptrdiff_t n = pptr() - pbase(); - this->pbump(-static_cast(n)); - if (c == EOF) { - stream_->Write(pbase(), n); - } else { - stream_->Write(pbase(), n + 1); - } - return c; -} - -// implementations for istream -inline void istream::InBuf::set_stream(Stream *stream) { - stream_ = stream; - this->setg(&buffer_[0], &buffer_[0], &buffer_[0]); -} -inline int istream::InBuf::underflow() { - char *bhead = &buffer_[0]; - if (this->gptr() == this->egptr()) { - size_t sz = stream_->Read(bhead, buffer_.size()); - this->setg(bhead, bhead, bhead + sz); - bytes_read_ += sz; - } - if (this->gptr() == this->egptr()) { - return traits_type::eof(); - } else { - return traits_type::to_int_type(*gptr()); - } -} -} // namespace dmlc -#endif // DMLC_IO_H_ diff --git a/subtree/rabit/include/rabit.h b/subtree/rabit/include/rabit.h deleted file mode 100644 index b0f1df39c..000000000 --- a/subtree/rabit/include/rabit.h +++ /dev/null @@ -1,342 +0,0 @@ -/*! 
- * Copyright (c) 2014 by Contributors - * \file rabit.h - * \brief This file defines rabit's Allreduce/Broadcast interface - * The rabit engine contains the actual implementation - * Code that only uses this header can also be compiled with MPI Allreduce (non fault-tolerant), - * - * rabit.h and serializable.h is all what the user needs to use the rabit interface - * \author Tianqi Chen, Ignacio Cano, Tianyi Zhou - */ -#ifndef RABIT_RABIT_H_ // NOLINT(*) -#define RABIT_RABIT_H_ // NOLINT(*) -#include -#include - -// whether or not use c++11 support -#ifndef DMLC_USE_CXX11 -#define DMLC_USE_CXX11 (defined(__GXX_EXPERIMENTAL_CXX0X__) ||\ - __cplusplus >= 201103L || defined(_MSC_VER)) -#endif -// optionally support of lambda functions in C++11, if available -#if DMLC_USE_CXX11 -#include -#endif // C++11 -// contains definition of Serializable -#include "./rabit_serializable.h" -// engine definition of rabit, defines internal implementation -// to use rabit interface, there is no need to read engine.h -// rabit.h and serializable.h are enough to use the interface -#include "./rabit/engine.h" - -/*! \brief rabit namespace */ -namespace rabit { -/*! - * \brief reduction operators namespace - */ -namespace op { -/*! - * \class rabit::op::Max - * \brief maximum reduction operator - */ -struct Max; -/*! - * \class rabit::op::Min - * \brief minimum reduction operator - */ -struct Min; -/*! - * \class rabit::op::Sum - * \brief sum reduction operator - */ -struct Sum; -/*! - * \class rabit::op::BitOR - * \brief bitwise OR reduction operator - */ -struct BitOR; -} // namespace op -/*! - * \brief initializes rabit, call this once at the beginning of your program - * \param argc number of arguments in argv - * \param argv the array of input arguments - */ -inline void Init(int argc, char *argv[]); -/*! - * \brief finalizes the rabit engine, call this function after you finished with all the jobs - */ -inline void Finalize(void); -/*! \brief gets rank of the current process */ -inline int GetRank(void); -/*! \brief gets total number of processes */ -inline int GetWorldSize(void); -/*! \brief whether rabit env is in distributed mode */ -inline bool IsDistributed(void); - -/*! \brief gets processor's name */ -inline std::string GetProcessorName(void); -/*! - * \brief prints the msg to the tracker, - * this function can be used to communicate progress information to - * the user who monitors the tracker - * \param msg the message to be printed - */ -inline void TrackerPrint(const std::string &msg); -#ifndef RABIT_STRICT_CXX98_ -/*! - * \brief prints the msg to the tracker, this function may not be available - * in very strict c++98 compilers, though it usually is. - * this function can be used to communicate progress information to - * the user who monitors the tracker - * \param fmt the format string - */ -inline void TrackerPrintf(const char *fmt, ...); -#endif -/*! - * \brief broadcasts a memory region to every node from the root - * - * Example: int a = 1; Broadcast(&a, sizeof(a), root); - * \param sendrecv_data the pointer to the send/receive buffer, - * \param size the data size - * \param root the process root - */ -inline void Broadcast(void *sendrecv_data, size_t size, int root); -/*! 
- * \brief broadcasts an std::vector to every node from root - * \param sendrecv_data the pointer to send/receive vector, - * for the receiver, the vector does not need to be pre-allocated - * \param root the process root - * \tparam DType the data type stored in the vector, has to be a simple data type - * that can be directly transmitted by sending the sizeof(DType) - */ -template -inline void Broadcast(std::vector *sendrecv_data, int root); -/*! - * \brief broadcasts a std::string to every node from the root - * \param sendrecv_data the pointer to the send/receive buffer, - * for the receiver, the vector does not need to be pre-allocated - * \param root the process root - */ -inline void Broadcast(std::string *sendrecv_data, int root); -/*! - * \brief performs in-place Allreduce on sendrecvbuf - * this function is NOT thread-safe - * - * Example Usage: the following code does an Allreduce and outputs the sum as the result - * \code{.cpp} - * vector data(10); - * ... - * Allreduce(&data[0], data.size()); - * ... - * \endcode - * - * \param sendrecvbuf buffer for both sending and receiving data - * \param count number of elements to be reduced - * \param prepare_fun Lazy preprocessing function, if it is not NULL, prepare_fun(prepare_arg) - * will be called by the function before performing Allreduce in order to initialize the data in sendrecvbuf. - * If the result of Allreduce can be recovered directly, then prepare_func will NOT be called - * \param prepare_arg argument used to pass into the lazy preprocessing function - * \tparam OP see namespace op, reduce operator - * \tparam DType data type - */ -template -inline void Allreduce(DType *sendrecvbuf, size_t count, - void (*prepare_fun)(void *) = NULL, - void *prepare_arg = NULL); -// C++11 support for lambda prepare function -#if DMLC_USE_CXX11 -/*! - * \brief performs in-place Allreduce, on sendrecvbuf - * with a prepare function specified by a lambda function - * - * Example Usage: - * \code{.cpp} - * // the following code does an Allreduce and outputs the sum as the result - * vector data(10); - * ... - * Allreduce(&data[0], data.size(), [&]() { - * for (int i = 0; i < 10; ++i) { - * data[i] = i; - * } - * }); - * ... - * \endcode - * \param sendrecvbuf buffer for both sending and receiving data - * \param count number of elements to be reduced - * \param prepare_fun Lazy lambda preprocessing function, prepare_fun() will be invoked - * by the function before performing Allreduce in order to initialize the data in sendrecvbuf. - * If the result of Allreduce can be recovered directly, then prepare_func will NOT be called - * \tparam OP see namespace op, reduce operator - * \tparam DType data type - */ -template -inline void Allreduce(DType *sendrecvbuf, size_t count, - std::function prepare_fun); -#endif // C++11 -/*! 
- * \brief loads the latest check point - * \param global_model pointer to the globally shared model/state - * when calling this function, the caller needs to guarantee that the global_model - * is the same in every node - * \param local_model pointer to the local model that is specific to the current node/rank - * this can be NULL when no local model is needed - * - * \return the version number of the check point loaded - * if returned version == 0, this means no model has been CheckPointed - * the p_model is not touched, users should do the necessary initialization by themselves - * - * \code{.cpp} - * // Example usage code of LoadCheckPoint - * int iter = rabit::LoadCheckPoint(&model); - * if (iter == 0) model.InitParameters(); - * for (i = iter; i < max_iter; ++i) { - * // do many things, include allreduce - * rabit::CheckPoint(model); - * } - * \endcode - * \sa CheckPoint, VersionNumber - */ -inline int LoadCheckPoint(Serializable *global_model, - Serializable *local_model = NULL); -/*! - * \brief checkpoints the model, meaning a stage of execution has finished. - * every time we call check point, a version number will be increased by one - * - * \param global_model pointer to the globally shared model/state - * when calling this function, the caller needs to guarantee that the global_model - * is the same in every node - * \param local_model pointer to the local model that is specific to the current node/rank - * this can be NULL when no local state is needed - * NOTE: local_model requires explicit replication of the model for fault-tolerance, which will - * bring replication cost in the CheckPoint function. global_model does not need explicit replication. - * So, only CheckPoint with the global_model if possible - * \sa LoadCheckPoint, VersionNumber - */ -inline void CheckPoint(const Serializable *global_model, - const Serializable *local_model = NULL); -/*! - * \brief This function can be used to replace CheckPoint for global_model only, - * when certain condition is met (see detailed explanation). - * - * This is a "lazy" checkpoint such that only the pointer to the global_model is - * remembered and no memory copy is taken. To use this function, the user MUST ensure that: - * The global_model must remain unchanged until the last call of Allreduce/Broadcast in the current version finishes. - * In other words, the global_model model can be changed only between the last call of - * Allreduce/Broadcast and LazyCheckPoint, both in the same version - * - * For example, suppose the calling sequence is: - * LazyCheckPoint, code1, Allreduce, code2, Broadcast, code3, LazyCheckPoint/(or can be CheckPoint) - * - * Then the user MUST only change the global_model in code3. - * - * The use of LazyCheckPoint instead of CheckPoint will improve the efficiency of the program. - * \param global_model pointer to the globally shared model/state - * when calling this function, the caller needs to guarantee that the global_model - * is the same in every node - * \sa LoadCheckPoint, CheckPoint, VersionNumber - */ -inline void LazyCheckPoint(const Serializable *global_model); -/*! - * \return version number of the current stored model, - * which means how many calls to CheckPoint we made so far - * \sa LoadCheckPoint, CheckPoint - */ -inline int VersionNumber(void); -// ----- extensions that allow customized reducer ------ -// helper class to do customized reduce, user do not need to know the type -namespace engine { -class ReduceHandle; -} // namespace engine -/*! 
- * \brief template class to make customized reduce and all reduce easy - * Do not use reducer directly in the function you call Finalize, - * because the destructor can execute after Finalize - * \tparam DType data type that to be reduced - * \tparam freduce the customized reduction function - * DType must be a struct, with no pointer - */ -template // NOLINT(*) -class Reducer { - public: - Reducer(void); - /*! - * \brief customized in-place all reduce operation - * \param sendrecvbuf the in place send-recv buffer - * \param count number of elements to be reduced - * \param prepare_fun Lazy preprocessing function, if it is not NULL, prepare_fun(prepare_arg) - * will be called by the function before performing Allreduce, to initialize the data in sendrecvbuf. - * If the result of Allreduce can be recovered directly, then prepare_func will NOT be called - * \param prepare_arg argument used to pass into the lazy preprocessing function - */ - inline void Allreduce(DType *sendrecvbuf, size_t count, - void (*prepare_fun)(void *) = NULL, - void *prepare_arg = NULL); -#if DMLC_USE_CXX11 - /*! - * \brief customized in-place all reduce operation, with lambda function as preprocessor - * \param sendrecvbuf pointer to the array of objects to be reduced - * \param count number of elements to be reduced - * \param prepare_fun lambda function executed to prepare the data, if necessary - */ - inline void Allreduce(DType *sendrecvbuf, size_t count, - std::function prepare_fun); -#endif - - private: - /*! \brief function handle to do reduce */ - engine::ReduceHandle handle_; -}; -/*! - * \brief template class to make customized reduce, - * this class defines complex reducer handles all the data structure that can be - * serialized/deserialized into fixed size buffer - * Do not use reducer directly in the function you call Finalize, because the destructor can execute after Finalize - * - * \tparam DType data type that to be reduced, DType must contain the following functions: - * \tparam freduce the customized reduction function - * (1) Save(IStream &fs) (2) Load(IStream &fs) (3) Reduce(const DType &src, size_t max_nbyte) - */ -template -class SerializeReducer { - public: - SerializeReducer(void); - /*! - * \brief customized in-place all reduce operation - * \param sendrecvobj pointer to the array of objects to be reduced - * \param max_nbyte maximum amount of memory needed to serialize each object - * this includes budget limit for intermediate and final result - * \param count number of elements to be reduced - * \param prepare_fun Lazy preprocessing function, if it is not NULL, prepare_fun(prepare_arg) - * will be called by the function before performing Allreduce, to initialize the data in sendrecvbuf. - * If the result of Allreduce can be recovered directly, then the prepare_func will NOT be called - * \param prepare_arg argument used to pass into the lazy preprocessing function - */ - inline void Allreduce(DType *sendrecvobj, - size_t max_nbyte, size_t count, - void (*prepare_fun)(void *) = NULL, - void *prepare_arg = NULL); -// C++11 support for lambda prepare function -#if DMLC_USE_CXX11 - /*! 
- * \brief customized in-place all reduce operation, with lambda function as preprocessor - * \param sendrecvobj pointer to the array of objects to be reduced - * \param max_nbyte maximum amount of memory needed to serialize each object - * this includes budget limit for intermediate and final result - * \param count number of elements to be reduced - * \param prepare_fun lambda function executed to prepare the data, if necessary - */ - inline void Allreduce(DType *sendrecvobj, - size_t max_nbyte, size_t count, - std::function prepare_fun); -#endif - - private: - /*! \brief function handle to do reduce */ - engine::ReduceHandle handle_; - /*! \brief temporal buffer used to do reduce*/ - std::string buffer_; -}; -} // namespace rabit -// implementation of template functions -#include "./rabit/rabit-inl.h" -#endif // RABIT_RABIT_H_ // NOLINT(*) diff --git a/subtree/rabit/include/rabit/engine.h b/subtree/rabit/include/rabit/engine.h deleted file mode 100644 index 272bbb8ef..000000000 --- a/subtree/rabit/include/rabit/engine.h +++ /dev/null @@ -1,260 +0,0 @@ -/*! - * Copyright (c) 2014 by Contributors - * \file engine.h - * \brief This file defines the core interface of rabit library - * \author Tianqi Chen, Nacho, Tianyi - */ -#ifndef RABIT_ENGINE_H_ -#define RABIT_ENGINE_H_ -#include -#include "../rabit_serializable.h" - -namespace MPI { -/*! \brief MPI data type just to be compatible with MPI reduce function*/ -class Datatype; -} - -/*! \brief namespace of rabit */ -namespace rabit { -/*! \brief core interface of the engine */ -namespace engine { -/*! \brief interface of core Allreduce engine */ -class IEngine { - public: - /*! - * \brief Preprocessing function, that is called before AllReduce, - * used to prepare the data used by AllReduce - * \param arg additional possible argument used to invoke the preprocessor - */ - typedef void (PreprocFunction) (void *arg); - /*! - * \brief reduce function, the same form of MPI reduce function is used, - * to be compatible with MPI interface - * In all the functions, the memory is ensured to aligned to 64-bit - * which means it is OK to cast src,dst to double* int* etc - * \param src pointer to source space - * \param dst pointer to destination reduction - * \param count total number of elements to be reduced (note this is total number of elements instead of bytes) - * the definition of the reduce function should be type aware - * \param dtype the data type object, to be compatible with MPI reduce - */ - typedef void (ReduceFunction) (const void *src, - void *dst, int count, - const MPI::Datatype &dtype); - /*! - * \brief performs in-place Allreduce, on sendrecvbuf - * this function is NOT thread-safe - * \param sendrecvbuf_ buffer for both sending and receiving data - * \param type_nbytes the number of bytes the type has - * \param count number of elements to be reduced - * \param reducer reduce function - * \param prepare_func Lazy preprocessing function, if it is not NULL, prepare_fun(prepare_arg) - * will be called by the function before performing Allreduce in order to initialize the data in sendrecvbuf. - * If the result of Allreduce can be recovered directly, then prepare_func will NOT be called - * \param prepare_arg argument used to pass into the lazy preprocessing function - */ - virtual void Allreduce(void *sendrecvbuf_, - size_t type_nbytes, - size_t count, - ReduceFunction reducer, - PreprocFunction prepare_fun = NULL, - void *prepare_arg = NULL) = 0; - /*! 
- * \brief broadcasts data from root to every other node - * \param sendrecvbuf_ buffer for both sending and receiving data - * \param size the size of the data to be broadcasted - * \param root the root worker id to broadcast the data - */ - virtual void Broadcast(void *sendrecvbuf_, size_t size, int root) = 0; - /*! - * \brief explicitly re-initialize everything before calling LoadCheckPoint - * call this function when IEngine throws an exception, - * this function should only be used for test purposes - */ - virtual void InitAfterException(void) = 0; - /*! - * \brief loads the latest check point - * \param global_model pointer to the globally shared model/state - * when calling this function, the caller needs to guarantee that the global_model - * is the same in all nodes - * \param local_model pointer to the local model that is specific to current node/rank - * this can be NULL when no local model is needed - * - * \return the version number of the model loaded - * if returned version == 0, this means no model has been CheckPointed - * the p_model is not touched, users should do necessary initialization by themselves - * - * Common usage example: - * int iter = rabit::LoadCheckPoint(&model); - * if (iter == 0) model.InitParameters(); - * for (i = iter; i < max_iter; ++i) { - * do many things, include allreduce - * rabit::CheckPoint(model); - * } - * - * \sa CheckPoint, VersionNumber - */ - virtual int LoadCheckPoint(Serializable *global_model, - Serializable *local_model = NULL) = 0; - /*! - * \brief checkpoints the model, meaning a stage of execution was finished - * every time we call check point, a version number increases by ones - * - * \param global_model pointer to the globally shared model/state - * when calling this function, the caller needs to guarantee that the global_model - * is the same in every node - * \param local_model pointer to the local model that is specific to current node/rank - * this can be NULL when no local state is needed - * - * NOTE: local_model requires explicit replication of the model for fault-tolerance, which will - * bring replication cost in CheckPoint function. global_model does not need explicit replication. - * So, only CheckPoint with global_model if possible - * - * \sa LoadCheckPoint, VersionNumber - */ - virtual void CheckPoint(const Serializable *global_model, - const Serializable *local_model = NULL) = 0; - /*! - * \brief This function can be used to replace CheckPoint for global_model only, - * when certain condition is met (see detailed explanation). - * - * This is a "lazy" checkpoint such that only the pointer to global_model is - * remembered and no memory copy is taken. To use this function, the user MUST ensure that: - * The global_model must remain unchanged until the last call of Allreduce/Broadcast in the current version finishes. - * In other words, global_model can be changed only between the last call of - * Allreduce/Broadcast and LazyCheckPoint in the current version - * - * For example, suppose the calling sequence is: - * LazyCheckPoint, code1, Allreduce, code2, Broadcast, code3, LazyCheckPoint - * - * If the user can only change global_model in code3, then LazyCheckPoint can be used to - * improve the efficiency of the program. 
- * \param global_model pointer to the globally shared model/state - * when calling this function, the caller needs to guarantee that global_model - * is the same in every node - * \sa LoadCheckPoint, CheckPoint, VersionNumber - */ - virtual void LazyCheckPoint(const Serializable *global_model) = 0; - /*! - * \return version number of the current stored model, - * which means how many calls to CheckPoint we made so far - * \sa LoadCheckPoint, CheckPoint - */ - virtual int VersionNumber(void) const = 0; - /*! \brief gets rank of current node */ - virtual int GetRank(void) const = 0; - /*! \brief gets total number of nodes */ - virtual int GetWorldSize(void) const = 0; - /*! \brief whether we run in distribted mode */ - virtual bool IsDistributed(void) const = 0; - /*! \brief gets the host name of the current node */ - virtual std::string GetHost(void) const = 0; - /*! - * \brief prints the msg in the tracker, - * this function can be used to communicate progress information to - * the user who monitors the tracker - * \param msg message to be printed in the tracker - */ - virtual void TrackerPrint(const std::string &msg) = 0; -}; - -/*! \brief initializes the engine module */ -void Init(int argc, char *argv[]); -/*! \brief finalizes the engine module */ -void Finalize(void); -/*! \brief singleton method to get engine */ -IEngine *GetEngine(void); - -/*! \brief namespace that contains stubs to be compatible with MPI */ -namespace mpi { -/*!\brief enum of all operators */ -enum OpType { - kMax = 0, - kMin = 1, - kSum = 2, - kBitwiseOR = 3 -}; -/*!\brief enum of supported data types */ -enum DataType { - kChar = 0, - kUChar = 1, - kInt = 2, - kUInt = 3, - kLong = 4, - kULong = 5, - kFloat = 6, - kDouble = 7, - kLongLong = 8, - kULongLong = 9 -}; -} // namespace mpi -/*! - * \brief perform in-place Allreduce, on sendrecvbuf - * this is an internal function used by rabit to be able to compile with MPI - * do not use this function directly - * \param sendrecvbuf buffer for both sending and receiving data - * \param type_nbytes the number of bytes the type has - * \param count number of elements to be reduced - * \param reducer reduce function - * \param dtype the data type - * \param op the reduce operator type - * \param prepare_func Lazy preprocessing function, lazy prepare_fun(prepare_arg) - * will be called by the function before performing Allreduce, to initialize the data in sendrecvbuf_. - * If the result of Allreduce can be recovered directly, then prepare_func will NOT be called - * \param prepare_arg argument used to pass into the lazy preprocessing function. - */ -void Allreduce_(void *sendrecvbuf, - size_t type_nbytes, - size_t count, - IEngine::ReduceFunction red, - mpi::DataType dtype, - mpi::OpType op, - IEngine::PreprocFunction prepare_fun = NULL, - void *prepare_arg = NULL); - -/*! - * \brief handle for customized reducer, used to handle customized reduce - * this class is mainly created for compatiblity issues with MPI's customized reduce - */ -class ReduceHandle { - public: - // constructor - ReduceHandle(void); - // destructor - ~ReduceHandle(void); - /*! - * \brief initialize the reduce function, - * with the type the reduce function needs to deal with - * the reduce function MUST be communicative - */ - void Init(IEngine::ReduceFunction redfunc, size_t type_nbytes); - /*! 
- * \brief customized in-place all reduce operation - * \param sendrecvbuf the in place send-recv buffer - * \param type_n4bytes size of the type, in terms of 4bytes - * \param count number of elements to send - * \param prepare_func Lazy preprocessing function, lazy prepare_fun(prepare_arg) - * will be called by the function before performing Allreduce in order to initialize the data in sendrecvbuf_. - * If the result of Allreduce can be recovered directly, then prepare_func will NOT be called - * \param prepare_arg argument used to pass into the lazy preprocessing function - */ - void Allreduce(void *sendrecvbuf, - size_t type_nbytes, size_t count, - IEngine::PreprocFunction prepare_fun = NULL, - void *prepare_arg = NULL); - /*! \return the number of bytes occupied by the type */ - static int TypeSize(const MPI::Datatype &dtype); - - protected: - // handle function field - void *handle_; - // reduce function of the reducer - IEngine::ReduceFunction *redfunc_; - // handle to the type field - void *htype_; - // the created type in 4 bytes - size_t created_type_nbytes_; -}; -} // namespace engine -} // namespace rabit -#endif // RABIT_ENGINE_H_ diff --git a/subtree/rabit/include/rabit/io.h b/subtree/rabit/include/rabit/io.h deleted file mode 100644 index 7ffca38f2..000000000 --- a/subtree/rabit/include/rabit/io.h +++ /dev/null @@ -1,106 +0,0 @@ -/*! - * Copyright (c) 2014 by Contributors - * \file io.h - * \brief utilities with different serializable implementations - * \author Tianqi Chen - */ -#ifndef RABIT_IO_H_ -#define RABIT_IO_H_ -#include -#include -#include -#include -#include -#include "./utils.h" -#include "../rabit_serializable.h" - -namespace rabit { -namespace utils { -/*! \brief re-use definition of dmlc::SeekStream */ -typedef dmlc::SeekStream SeekStream; -/*! \brief fixed size memory buffer */ -struct MemoryFixSizeBuffer : public SeekStream { - public: - MemoryFixSizeBuffer(void *p_buffer, size_t buffer_size) - : p_buffer_(reinterpret_cast(p_buffer)), - buffer_size_(buffer_size) { - curr_ptr_ = 0; - } - virtual ~MemoryFixSizeBuffer(void) {} - virtual size_t Read(void *ptr, size_t size) { - utils::Assert(curr_ptr_ + size <= buffer_size_, - "read can not have position excceed buffer length"); - size_t nread = std::min(buffer_size_ - curr_ptr_, size); - if (nread != 0) std::memcpy(ptr, p_buffer_ + curr_ptr_, nread); - curr_ptr_ += nread; - return nread; - } - virtual void Write(const void *ptr, size_t size) { - if (size == 0) return; - utils::Assert(curr_ptr_ + size <= buffer_size_, - "write position exceed fixed buffer size"); - std::memcpy(p_buffer_ + curr_ptr_, ptr, size); - curr_ptr_ += size; - } - virtual void Seek(size_t pos) { - curr_ptr_ = static_cast(pos); - } - virtual size_t Tell(void) { - return curr_ptr_; - } - virtual bool AtEnd(void) const { - return curr_ptr_ == buffer_size_; - } - - private: - /*! \brief in memory buffer */ - char *p_buffer_; - /*! \brief current pointer */ - size_t buffer_size_; - /*! \brief current pointer */ - size_t curr_ptr_; -}; // class MemoryFixSizeBuffer - -/*! 
-/*! \brief an in-memory buffer that can be read and written through the stream interface */
-struct MemoryBufferStream : public SeekStream {
- public:
-  explicit MemoryBufferStream(std::string *p_buffer)
-      : p_buffer_(p_buffer) {
-    curr_ptr_ = 0;
-  }
-  virtual ~MemoryBufferStream(void) {}
-  virtual size_t Read(void *ptr, size_t size) {
-    utils::Assert(curr_ptr_ <= p_buffer_->length(),
-                  "read can not have position exceed buffer length");
-    size_t nread = std::min(p_buffer_->length() - curr_ptr_, size);
-    if (nread != 0) std::memcpy(ptr, &(*p_buffer_)[0] + curr_ptr_, nread);
-    curr_ptr_ += nread;
-    return nread;
-  }
-  virtual void Write(const void *ptr, size_t size) {
-    if (size == 0) return;
-    if (curr_ptr_ + size > p_buffer_->length()) {
-      p_buffer_->resize(curr_ptr_ + size);
-    }
-    std::memcpy(&(*p_buffer_)[0] + curr_ptr_, ptr, size);
-    curr_ptr_ += size;
-  }
-  virtual void Seek(size_t pos) {
-    curr_ptr_ = static_cast<size_t>(pos);
-  }
-  virtual size_t Tell(void) {
-    return curr_ptr_;
-  }
-  virtual bool AtEnd(void) const {
-    return curr_ptr_ == p_buffer_->length();
-  }
-
- private:
-  /*! \brief in memory buffer */
-  std::string *p_buffer_;
-  /*! \brief current pointer */
-  size_t curr_ptr_;
-};  // class MemoryBufferStream
-}  // namespace utils
-}  // namespace rabit
-#endif  // RABIT_IO_H_
diff --git a/subtree/rabit/include/rabit/rabit-inl.h b/subtree/rabit/include/rabit/rabit-inl.h
deleted file mode 100644
index e82b5a9a0..000000000
--- a/subtree/rabit/include/rabit/rabit-inl.h
+++ /dev/null
@@ -1,328 +0,0 @@
-/*!
- * Copyright by Contributors
- * \file rabit-inl.h
- * \brief implementation of inline template function for rabit interface
- *
- * \author Tianqi Chen
- */
-#ifndef RABIT_RABIT_INL_H_
-#define RABIT_RABIT_INL_H_
-// use engine for implementation
-#include <vector>
-#include <string>
-#include "./io.h"
-#include "./utils.h"
-#include "../rabit.h"
-
-namespace rabit {
-namespace engine {
-namespace mpi {
-// template function to translate type to enum indicator
-template<typename DType>
-inline DataType GetType(void);
-template<>
-inline DataType GetType<char>(void) {
-  return kChar;
-}
-template<>
-inline DataType GetType<unsigned char>(void) {
-  return kUChar;
-}
-template<>
-inline DataType GetType<int>(void) {
-  return kInt;
-}
-template<>
-inline DataType GetType<unsigned int>(void) {  // NOLINT(*)
-  return kUInt;
-}
-template<>
-inline DataType GetType<long>(void) {  // NOLINT(*)
-  return kLong;
-}
-template<>
-inline DataType GetType<unsigned long>(void) {  // NOLINT(*)
-  return kULong;
-}
-template<>
-inline DataType GetType<float>(void) {
-  return kFloat;
-}
-template<>
-inline DataType GetType<double>(void) {
-  return kDouble;
-}
-template<>
-inline DataType GetType<long long>(void) {  // NOLINT(*)
-  return kLongLong;
-}
-template<>
-inline DataType GetType<unsigned long long>(void) {  // NOLINT(*)
-  return kULongLong;
-}
-}  // namespace mpi
-}  // namespace engine
-
-namespace op {
-struct Max {
-  static const engine::mpi::OpType kType = engine::mpi::kMax;
-  template<typename DType>
-  inline static void Reduce(DType &dst, const DType &src) {  // NOLINT(*)
-    if (dst < src) dst = src;
-  }
-};
-struct Min {
-  static const engine::mpi::OpType kType = engine::mpi::kMin;
-  template<typename DType>
-  inline static void Reduce(DType &dst, const DType &src) {  // NOLINT(*)
-    if (dst > src) dst = src;
-  }
-};
-struct Sum {
-  static const engine::mpi::OpType kType = engine::mpi::kSum;
-  template<typename DType>
-  inline static void Reduce(DType &dst, const DType &src) {  // NOLINT(*)
-    dst += src;
-  }
-};
-struct BitOR {
-  static const engine::mpi::OpType kType = engine::mpi::kBitwiseOR;
-  template<typename DType>
-  inline static void Reduce(DType &dst, const DType &src) {  // NOLINT(*)
-    dst |= src;
-  }
-};
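These functors are consumed by the `op::Reducer` template that follows in this hunk. A standalone sketch of that dispatch pattern, with no rabit dependency and all names local to the example:

```cpp
// Standalone sketch: how an op functor like op::Max plugs into a
// type-erased element-wise reduction loop.
#include <cstdio>

struct Max {
  template<typename DType>
  inline static void Reduce(DType &dst, const DType &src) {
    if (dst < src) dst = src;
  }
};

// mirrors op::Reducer<OP, DType> below: recover the element type from
// void*, then apply OP::Reduce element by element
template<typename OP, typename DType>
void Reducer(const void *src_, void *dst_, int len) {
  const DType *src = static_cast<const DType*>(src_);
  DType *dst = static_cast<DType*>(dst_);
  for (int i = 0; i < len; ++i) OP::Reduce(dst[i], src[i]);
}

int main(void) {
  int incoming[3] = {5, 1, 9};  // data arriving from a peer
  int local[3]    = {3, 4, 7};  // this node's partial result
  Reducer<Max, int>(incoming, local, 3);
  std::printf("%d %d %d\n", local[0], local[1], local[2]);  // 5 4 9
  return 0;
}
```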
-template<typename OP, typename DType>
-inline void Reducer(const void *src_, void *dst_, int len, const MPI::Datatype &dtype) {
-  const DType *src = (const DType*)src_;
-  DType *dst = (DType*)dst_;  // NOLINT(*)
-  for (int i = 0; i < len; ++i) {
-    OP::Reduce(dst[i], src[i]);
-  }
-}
-}  // namespace op
-
-// initialize the rabit engine
-inline void Init(int argc, char *argv[]) {
-  engine::Init(argc, argv);
-}
-// finalize the rabit engine
-inline void Finalize(void) {
-  engine::Finalize();
-}
-// get the rank of the current process
-inline int GetRank(void) {
-  return engine::GetEngine()->GetRank();
-}
-// get the size of the world
-inline int GetWorldSize(void) {
-  return engine::GetEngine()->GetWorldSize();
-}
-// whether rabit is distributed
-inline bool IsDistributed(void) {
-  return engine::GetEngine()->IsDistributed();
-}
-// get the name of current processor
-inline std::string GetProcessorName(void) {
-  return engine::GetEngine()->GetHost();
-}
-// broadcast data to all other nodes from root
-inline void Broadcast(void *sendrecv_data, size_t size, int root) {
-  engine::GetEngine()->Broadcast(sendrecv_data, size, root);
-}
-template<typename DType>
-inline void Broadcast(std::vector<DType> *sendrecv_data, int root) {
-  size_t size = sendrecv_data->size();
-  Broadcast(&size, sizeof(size), root);
-  if (sendrecv_data->size() != size) {
-    sendrecv_data->resize(size);
-  }
-  if (size != 0) {
-    Broadcast(&(*sendrecv_data)[0], size * sizeof(DType), root);
-  }
-}
-inline void Broadcast(std::string *sendrecv_data, int root) {
-  size_t size = sendrecv_data->length();
-  Broadcast(&size, sizeof(size), root);
-  if (sendrecv_data->length() != size) {
-    sendrecv_data->resize(size);
-  }
-  if (size != 0) {
-    Broadcast(&(*sendrecv_data)[0], size * sizeof(char), root);
-  }
-}
-
-// perform in-place Allreduce
-template<typename OP, typename DType>
-inline void Allreduce(DType *sendrecvbuf, size_t count,
-                      void (*prepare_fun)(void *arg),
-                      void *prepare_arg) {
-  engine::Allreduce_(sendrecvbuf, sizeof(DType), count, op::Reducer<OP, DType>,
-                     engine::mpi::GetType<DType>(), OP::kType, prepare_fun, prepare_arg);
-}
-
-// C++11 support for lambda prepare function
-#if DMLC_USE_CXX11
-inline void InvokeLambda_(void *fun) {
-  (*static_cast<std::function<void()>*>(fun))();
-}
-template<typename OP, typename DType>
-inline void Allreduce(DType *sendrecvbuf, size_t count, std::function<void()> prepare_fun) {
-  engine::Allreduce_(sendrecvbuf, sizeof(DType), count, op::Reducer<OP, DType>,
-                     engine::mpi::GetType<DType>(), OP::kType, InvokeLambda_, &prepare_fun);
-}
-#endif  // C++11
-
-// print message to the tracker
-inline void TrackerPrint(const std::string &msg) {
-  engine::GetEngine()->TrackerPrint(msg);
-}
-#ifndef RABIT_STRICT_CXX98_
-inline void TrackerPrintf(const char *fmt, ...)
{ - const int kPrintBuffer = 1 << 10; - std::string msg(kPrintBuffer, '\0'); - va_list args; - va_start(args, fmt); - vsnprintf(&msg[0], kPrintBuffer, fmt, args); - va_end(args); - msg.resize(strlen(msg.c_str())); - TrackerPrint(msg); -} -#endif -// load latest check point -inline int LoadCheckPoint(Serializable *global_model, - Serializable *local_model) { - return engine::GetEngine()->LoadCheckPoint(global_model, local_model); -} -// checkpoint the model, meaning we finished a stage of execution -inline void CheckPoint(const Serializable *global_model, - const Serializable *local_model) { - engine::GetEngine()->CheckPoint(global_model, local_model); -} -// lazy checkpoint the model, only remember the pointer to global_model -inline void LazyCheckPoint(const Serializable *global_model) { - engine::GetEngine()->LazyCheckPoint(global_model); -} -// return the version number of currently stored model -inline int VersionNumber(void) { - return engine::GetEngine()->VersionNumber(); -} -// --------------------------------- -// Code to handle customized Reduce -// --------------------------------- -// function to perform reduction for Reducer -template -inline void ReducerSafe_(const void *src_, void *dst_, int len_, const MPI::Datatype &dtype) { - const size_t kUnit = sizeof(DType); - const char *psrc = reinterpret_cast(src_); - char *pdst = reinterpret_cast(dst_); - DType tdst, tsrc; - for (int i = 0; i < len_; ++i) { - // use memcpy to avoid alignment issue - std::memcpy(&tdst, pdst + i * kUnit, sizeof(tdst)); - std::memcpy(&tsrc, psrc + i * kUnit, sizeof(tsrc)); - freduce(tdst, tsrc); - std::memcpy(pdst + i * kUnit, &tdst, sizeof(tdst)); - } -} -// function to perform reduction for Reducer -template // NOLINT(*) -inline void ReducerAlign_(const void *src_, void *dst_, - int len_, const MPI::Datatype &dtype) { - const DType *psrc = reinterpret_cast(src_); - DType *pdst = reinterpret_cast(dst_); - for (int i = 0; i < len_; ++i) { - freduce(pdst[i], psrc[i]); - } -} -template // NOLINT(*) -inline Reducer::Reducer(void) { - // it is safe to directly use handle for aligned data types - if (sizeof(DType) == 8 || sizeof(DType) == 4 || sizeof(DType) == 1) { - this->handle_.Init(ReducerAlign_, sizeof(DType)); - } else { - this->handle_.Init(ReducerSafe_, sizeof(DType)); - } -} -template // NOLINT(*) -inline void Reducer::Allreduce(DType *sendrecvbuf, size_t count, - void (*prepare_fun)(void *arg), - void *prepare_arg) { - handle_.Allreduce(sendrecvbuf, sizeof(DType), count, prepare_fun, prepare_arg); -} -// function to perform reduction for SerializeReducer -template -inline void SerializeReducerFunc_(const void *src_, void *dst_, - int len_, const MPI::Datatype &dtype) { - int nbytes = engine::ReduceHandle::TypeSize(dtype); - // temp space - DType tsrc, tdst; - for (int i = 0; i < len_; ++i) { - utils::MemoryFixSizeBuffer fsrc((char*)(src_) + i * nbytes, nbytes); // NOLINT(*) - utils::MemoryFixSizeBuffer fdst((char*)(dst_) + i * nbytes, nbytes); // NOLINT(*) - tsrc.Load(fsrc); - tdst.Load(fdst); - // govern const check - tdst.Reduce(static_cast(tsrc), nbytes); - fdst.Seek(0); - tdst.Save(fdst); - } -} -template -inline SerializeReducer::SerializeReducer(void) { - handle_.Init(SerializeReducerFunc_, sizeof(DType)); -} -// closure to call Allreduce -template -struct SerializeReduceClosure { - DType *sendrecvobj; - size_t max_nbyte, count; - void (*prepare_fun)(void *arg); - void *prepare_arg; - std::string *p_buffer; - // invoke the closure - inline void Run(void) { - if (prepare_fun != NULL) 
prepare_fun(prepare_arg); - for (size_t i = 0; i < count; ++i) { - utils::MemoryFixSizeBuffer fs(BeginPtr(*p_buffer) + i * max_nbyte, max_nbyte); - sendrecvobj[i].Save(fs); - } - } - inline static void Invoke(void *c) { - static_cast*>(c)->Run(); - } -}; -template -inline void SerializeReducer::Allreduce(DType *sendrecvobj, - size_t max_nbyte, size_t count, - void (*prepare_fun)(void *arg), - void *prepare_arg) { - buffer_.resize(max_nbyte * count); - // setup closure - SerializeReduceClosure c; - c.sendrecvobj = sendrecvobj; c.max_nbyte = max_nbyte; c.count = count; - c.prepare_fun = prepare_fun; c.prepare_arg = prepare_arg; c.p_buffer = &buffer_; - // invoke here - handle_.Allreduce(BeginPtr(buffer_), max_nbyte, count, - SerializeReduceClosure::Invoke, &c); - for (size_t i = 0; i < count; ++i) { - utils::MemoryFixSizeBuffer fs(BeginPtr(buffer_) + i * max_nbyte, max_nbyte); - sendrecvobj[i].Load(fs); - } -} - -#if DMLC_USE_CXX11 -template // NOLINT(*)g -inline void Reducer::Allreduce(DType *sendrecvbuf, size_t count, - std::function prepare_fun) { - this->Allreduce(sendrecvbuf, count, InvokeLambda_, &prepare_fun); -} -template -inline void SerializeReducer::Allreduce(DType *sendrecvobj, - size_t max_nbytes, size_t count, - std::function prepare_fun) { - this->Allreduce(sendrecvobj, max_nbytes, count, InvokeLambda_, &prepare_fun); -} -#endif -} // namespace rabit -#endif // RABIT_RABIT_INL_H_ diff --git a/subtree/rabit/include/rabit/timer.h b/subtree/rabit/include/rabit/timer.h deleted file mode 100644 index 1f135add6..000000000 --- a/subtree/rabit/include/rabit/timer.h +++ /dev/null @@ -1,41 +0,0 @@ -/*! - * Copyright by Contributors - * \file timer.h - * \brief This file defines the utils for timing - * \author Tianqi Chen, Nacho, Tianyi - */ -#ifndef RABIT_TIMER_H_ -#define RABIT_TIMER_H_ -#include -#ifdef __MACH__ -#include -#include -#endif -#include "./utils.h" - -namespace rabit { -namespace utils { -/*! - * \brief return time in seconds, not cross platform, avoid to use this in most places - */ -inline double GetTime(void) { - #ifdef __MACH__ - clock_serv_t cclock; - mach_timespec_t mts; - host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); - utils::Check(clock_get_time(cclock, &mts) == 0, "failed to get time"); - mach_port_deallocate(mach_task_self(), cclock); - return static_cast(mts.tv_sec) + static_cast(mts.tv_nsec) * 1e-9; - #else - #if defined(__unix__) || defined(__linux__) - timespec ts; - utils::Check(clock_gettime(CLOCK_REALTIME, &ts) == 0, "failed to get time"); - return static_cast(ts.tv_sec) + static_cast(ts.tv_nsec) * 1e-9; - #else - return static_cast(time(NULL)); - #endif - #endif -} -} // namespace utils -} // namespace rabit -#endif // RABIT_TIMER_H_ diff --git a/subtree/rabit/include/rabit/utils.h b/subtree/rabit/include/rabit/utils.h deleted file mode 100644 index 28709ee7d..000000000 --- a/subtree/rabit/include/rabit/utils.h +++ /dev/null @@ -1,191 +0,0 @@ -/*! 
- * Copyright (c) 2014 by Contributors - * \file utils.h - * \brief simple utils to support the code - * \author Tianqi Chen - */ -#ifndef RABIT_UTILS_H_ -#define RABIT_UTILS_H_ -#define _CRT_SECURE_NO_WARNINGS -#include -#include -#include -#include - -#ifndef RABIT_STRICT_CXX98_ -#include -#endif - -#if !defined(__GNUC__) -#define fopen64 std::fopen -#endif -#ifdef _MSC_VER -// NOTE: sprintf_s is not equivalent to snprintf, -// they are equivalent when success, which is sufficient for our case -#define snprintf sprintf_s -#define vsnprintf vsprintf_s -#else -#ifdef _FILE_OFFSET_BITS -#if _FILE_OFFSET_BITS == 32 -#pragma message("Warning: FILE OFFSET BITS defined to be 32 bit") -#endif -#endif - -#ifdef __APPLE__ -#define off64_t off_t -#define fopen64 std::fopen -#endif - -extern "C" { -#include -} -#endif - -#ifdef _MSC_VER -typedef unsigned char uint8_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; -typedef __int64 int64_t; -#else -#include -#endif - -namespace rabit { -/*! \brief namespace for helper utils of the project */ -namespace utils { - -/*! \brief error message buffer length */ -const int kPrintBuffer = 1 << 12; - -#ifndef RABIT_CUSTOMIZE_MSG_ -/*! - * \brief handling of Assert error, caused by inappropriate input - * \param msg error message - */ -inline void HandleAssertError(const char *msg) { - fprintf(stderr, "AssertError:%s\n", msg); - exit(-1); -} -/*! - * \brief handling of Check error, caused by inappropriate input - * \param msg error message - */ -inline void HandleCheckError(const char *msg) { - fprintf(stderr, "%s\n", msg); - exit(-1); -} -inline void HandlePrint(const char *msg) { - printf("%s", msg); -} -inline void HandleLogPrint(const char *msg) { - fprintf(stderr, "%s", msg); - fflush(stderr); -} -#else -#ifndef RABIT_STRICT_CXX98_ -// include declarations, some one must implement this -void HandleAssertError(const char *msg); -void HandleCheckError(const char *msg); -void HandlePrint(const char *msg); -#endif -#endif -#ifdef RABIT_STRICT_CXX98_ -// these function pointers are to be assigned -extern "C" void (*Printf)(const char *fmt, ...); -extern "C" int (*SPrintf)(char *buf, size_t size, const char *fmt, ...); -extern "C" void (*Assert)(int exp, const char *fmt, ...); -extern "C" void (*Check)(int exp, const char *fmt, ...); -extern "C" void (*Error)(const char *fmt, ...); -#else -/*! \brief printf, prints messages to the console */ -inline void Printf(const char *fmt, ...) { - std::string msg(kPrintBuffer, '\0'); - va_list args; - va_start(args, fmt); - vsnprintf(&msg[0], kPrintBuffer, fmt, args); - va_end(args); - HandlePrint(msg.c_str()); -} -/*! \brief portable version of snprintf */ -inline int SPrintf(char *buf, size_t size, const char *fmt, ...) { - va_list args; - va_start(args, fmt); - int ret = vsnprintf(buf, size, fmt, args); - va_end(args); - return ret; -} - -/*! \brief assert a condition is true, use this to handle debug information */ -inline void Assert(bool exp, const char *fmt, ...) { - if (!exp) { - std::string msg(kPrintBuffer, '\0'); - va_list args; - va_start(args, fmt); - vsnprintf(&msg[0], kPrintBuffer, fmt, args); - va_end(args); - HandleAssertError(msg.c_str()); - } -} - -/*!\brief same as assert, but this is intended to be used as a message for users */ -inline void Check(bool exp, const char *fmt, ...) 
-{
-  if (!exp) {
-    std::string msg(kPrintBuffer, '\0');
-    va_list args;
-    va_start(args, fmt);
-    vsnprintf(&msg[0], kPrintBuffer, fmt, args);
-    va_end(args);
-    HandleCheckError(msg.c_str());
-  }
-}
-
-/*! \brief report error message, same as check */
-inline void Error(const char *fmt, ...) {
-  {
-    std::string msg(kPrintBuffer, '\0');
-    va_list args;
-    va_start(args, fmt);
-    vsnprintf(&msg[0], kPrintBuffer, fmt, args);
-    va_end(args);
-    HandleCheckError(msg.c_str());
-  }
-}
-#endif
-
-/*! \brief replace fopen, report error when the file open fails */
-inline std::FILE *FopenCheck(const char *fname, const char *flag) {
-  std::FILE *fp = fopen64(fname, flag);
-  Check(fp != NULL, "can not open file \"%s\"\n", fname);
-  return fp;
-}
-}  // namespace utils
-// easy utils that can be directly accessed in xgboost
-/*! \brief get the beginning address of a vector */
-template<typename T>
-inline T *BeginPtr(std::vector<T> &vec) {  // NOLINT(*)
-  if (vec.size() == 0) {
-    return NULL;
-  } else {
-    return &vec[0];
-  }
-}
-/*! \brief get the beginning address of a vector */
-template<typename T>
-inline const T *BeginPtr(const std::vector<T> &vec) {  // NOLINT(*)
-  if (vec.size() == 0) {
-    return NULL;
-  } else {
-    return &vec[0];
-  }
-}
-inline char* BeginPtr(std::string &str) {  // NOLINT(*)
-  if (str.length() == 0) return NULL;
-  return &str[0];
-}
-inline const char* BeginPtr(const std::string &str) {
-  if (str.length() == 0) return NULL;
-  return &str[0];
-}
-}  // namespace rabit
-#endif  // RABIT_UTILS_H_
diff --git a/subtree/rabit/include/rabit_serializable.h b/subtree/rabit/include/rabit_serializable.h
deleted file mode 100644
index c9199bba1..000000000
--- a/subtree/rabit/include/rabit_serializable.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*!
- * Copyright (c) 2014 by Contributors
- * \file rabit_serializable.h
- * \brief defines serializable interface of rabit
- * \author Tianqi Chen
- */
-#ifndef RABIT_SERIALIZABLE_H_
-#define RABIT_SERIALIZABLE_H_
-#include <vector>
-#include <string>
-#include "./rabit/utils.h"
-#include "./dmlc/io.h"
-
-namespace rabit {
-/*!
- * \brief defines stream used in rabit
- *  see definition of Stream in dmlc/io.h
- */
-typedef dmlc::Stream Stream;
-/*!
- * \brief defines serializable objects used in rabit
- *  see definition of Serializable in dmlc/io.h
- */
-typedef dmlc::Serializable Serializable;
-
-}  // namespace rabit
-#endif  // RABIT_SERIALIZABLE_H_
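For context on these typedefs, a toy model implementing the interface. This sketch assumes `dmlc::Serializable` declares `Load(Stream*)` and `Save(Stream*) const` as in dmlc/io.h (that header is not part of this hunk), and the include path is hypothetical:

```cpp
// Toy model implementing the Serializable interface aliased above.
#include "rabit_serializable.h"  // assumed include path

struct ToyModel : public rabit::Serializable {
  double weight;
  ToyModel(void) : weight(0.0) {}
  virtual void Load(rabit::Stream *fi) {
    fi->Read(&weight, sizeof(weight));    // raw-byte restore
  }
  virtual void Save(rabit::Stream *fo) const {
    fo->Write(&weight, sizeof(weight));   // raw-byte snapshot
  }
};
// A model like this is what LoadCheckPoint/CheckPoint take: rabit snapshots
// it into an in-memory stream and restores it after a worker failure.
```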
diff --git a/subtree/rabit/lib/README.md b/subtree/rabit/lib/README.md
deleted file mode 100644
index b6a5aa8b2..000000000
--- a/subtree/rabit/lib/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
-Rabit Library
-=====
-This folder holds the library files generated by the compiler. To generate them, type ```make``` in the project root folder.
-If you want the MPI-compatible library, type ```make mpi```
-
-***List of Files***
-* rabit.a The rabit package library
-  - Normally you need to link with this one
-* rabit_mock.a The rabit package library with mock test
-  - This library allows additional mock tests
-* rabit_mpi.a The MPI-backed library
-  - Linking against this library makes the program use MPI Allreduce
-  - This library is not fault-tolerant
-* rabit_empty.a Dummy package implementation
-  - This is an empty library that does not provide anything
-  - Only introduced to minimize code dependency for projects that only need single-machine code
diff --git a/subtree/rabit/scripts/travis_runtest.sh b/subtree/rabit/scripts/travis_runtest.sh
deleted file mode 100755
index f57141c6c..000000000
--- a/subtree/rabit/scripts/travis_runtest.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-make -f test.mk model_recover_10_10k || exit -1
-make -f test.mk model_recover_10_10k_die_same || exit -1
-make -f test.mk local_recover_10_10k || exit -1
-make -f test.mk pylocal_recover_10_10k || exit -1
-make -f test.mk lazy_recover_10_10k_die_hard || exit -1
-make -f test.mk lazy_recover_10_10k_die_same || exit -1
-make -f test.mk ringallreduce_10_10k || exit -1
\ No newline at end of file
diff --git a/subtree/rabit/scripts/travis_script.sh b/subtree/rabit/scripts/travis_script.sh
deleted file mode 100755
index 664582906..000000000
--- a/subtree/rabit/scripts/travis_script.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-# main script of travis
-if [ ${TASK} == "lint" ]; then
-    make lint || exit -1
-fi
-
-if [ ${TASK} == "doc" ]; then
-    make doc 2>log.txt
-    (cat log.txt| grep -v ENABLE_PREPROCESSING |grep -v "unsupported tag" |grep warning) && exit -1
-fi
-
-if [ ${TASK} == "build" ]; then
-    make all || exit -1
-fi
-
-if [ ${TASK} == "test" ]; then
-    cd test
-    make all || exit -1
-    ../scripts/travis_runtest.sh || exit -1
-fi
-
diff --git a/subtree/rabit/src/README.md b/subtree/rabit/src/README.md
deleted file mode 100644
index 5e55d9210..000000000
--- a/subtree/rabit/src/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Source Files of Rabit
-====
-* This folder contains the source files of the rabit library
-* The library headers are in folder [include](../include)
-* The .h files in this folder are internal header files that are only used by rabit and will not be seen by users
-
diff --git a/subtree/rabit/src/allreduce_base.cc b/subtree/rabit/src/allreduce_base.cc
deleted file mode 100644
index d3b7502ff..000000000
--- a/subtree/rabit/src/allreduce_base.cc
+++ /dev/null
@@ -1,892 +0,0 @@
-/*!
- * Copyright (c) 2014 by Contributors - * \file allreduce_base.cc - * \brief Basic implementation of AllReduce - * - * \author Tianqi Chen, Ignacio Cano, Tianyi Zhou - */ -#define _CRT_SECURE_NO_WARNINGS -#define _CRT_SECURE_NO_DEPRECATE -#define NOMINMAX -#include -#include -#include -#include "./allreduce_base.h" - -namespace rabit { -namespace engine { -// constructor -AllreduceBase::AllreduceBase(void) { - tracker_uri = "NULL"; - tracker_port = 9000; - host_uri = ""; - slave_port = 9010; - nport_trial = 1000; - rank = 0; - world_size = -1; - connect_retry = 5; - hadoop_mode = 0; - version_number = 0; - // 32 K items - reduce_ring_mincount = 32 << 10; - // tracker URL - task_id = "NULL"; - err_link = NULL; - dmlc_role = "worker"; - this->SetParam("rabit_reduce_buffer", "256MB"); - // setup possible enviroment variable of intrest - env_vars.push_back("rabit_task_id"); - env_vars.push_back("rabit_num_trial"); - env_vars.push_back("rabit_reduce_buffer"); - env_vars.push_back("rabit_reduce_ring_mincount"); - env_vars.push_back("rabit_tracker_uri"); - env_vars.push_back("rabit_tracker_port"); - // also include dmlc support direct variables - env_vars.push_back("DMLC_TASK_ID"); - env_vars.push_back("DMLC_ROLE"); - env_vars.push_back("DMLC_NUM_ATTEMPT"); - env_vars.push_back("DMLC_TRACKER_URI"); - env_vars.push_back("DMLC_TRACKER_PORT"); - env_vars.push_back("DMLC_WORKER_CONNECT_RETRY"); -} - -// initialization function -void AllreduceBase::Init(void) { - // setup from enviroment variables - // handler to get variables from env - for (size_t i = 0; i < env_vars.size(); ++i) { - const char *value = getenv(env_vars[i].c_str()); - if (value != NULL) { - this->SetParam(env_vars[i].c_str(), value); - } - } - { - // handling for hadoop - const char *task_id = getenv("mapred_tip_id"); - if (task_id == NULL) { - task_id = getenv("mapreduce_task_id"); - } - if (hadoop_mode != 0) { - utils::Check(task_id != NULL, - "hadoop_mode is set but cannot find mapred_task_id"); - } - if (task_id != NULL) { - this->SetParam("rabit_task_id", task_id); - this->SetParam("rabit_hadoop_mode", "1"); - } - const char *attempt_id = getenv("mapred_task_id"); - if (attempt_id != 0) { - const char *att = strrchr(attempt_id, '_'); - int num_trial; - if (att != NULL && sscanf(att + 1, "%d", &num_trial) == 1) { - this->SetParam("rabit_num_trial", att + 1); - } - } - // handling for hadoop - const char *num_task = getenv("mapred_map_tasks"); - if (num_task == NULL) { - num_task = getenv("mapreduce_job_maps"); - } - if (hadoop_mode != 0) { - utils::Check(num_task != NULL, - "hadoop_mode is set but cannot find mapred_map_tasks"); - } - if (num_task != NULL) { - this->SetParam("rabit_world_size", num_task); - } - } - if (dmlc_role != "worker") { - fprintf(stderr, "Rabit Module currently only work with dmlc worker"\ - ", quit this program by exit 0\n"); - exit(0); - } - // clear the setting before start reconnection - this->rank = -1; - //--------------------- - // start socket - utils::Socket::Startup(); - utils::Assert(all_links.size() == 0, "can only call Init once"); - this->host_uri = utils::SockAddr::GetHostName(); - // get information from tracker - this->ReConnectLinks(); -} - -void AllreduceBase::Shutdown(void) { - for (size_t i = 0; i < all_links.size(); ++i) { - all_links[i].sock.Close(); - } - all_links.clear(); - tree_links.plinks.clear(); - - if (tracker_uri == "NULL") return; - // notify tracker rank i have shutdown - utils::TCPSocket tracker = this->ConnectTracker(); - tracker.SendStr(std::string("shutdown")); - 
tracker.Close(); - utils::TCPSocket::Finalize(); -} -void AllreduceBase::TrackerPrint(const std::string &msg) { - if (tracker_uri == "NULL") { - utils::Printf("%s", msg.c_str()); return; - } - utils::TCPSocket tracker = this->ConnectTracker(); - tracker.SendStr(std::string("print")); - tracker.SendStr(msg); - tracker.Close(); -} -// util to parse data with unit suffix -inline size_t ParseUnit(const char *name, const char *val) { - char unit; - unsigned long amt; // NOLINT(*) - int n = sscanf(val, "%lu%c", &amt, &unit); - size_t amount = amt; - if (n == 2) { - switch (unit) { - case 'B': return amount; - case 'K': return amount << 10UL; - case 'M': return amount << 20UL; - case 'G': return amount << 30UL; - default: utils::Error("invalid format for %s", name); return 0; - } - } else if (n == 1) { - return amount; - } else { - utils::Error("invalid format for %s," \ - "shhould be {integer}{unit}, unit can be {B, KB, MB, GB}", name); - return 0; - } -} -/*! - * \brief set parameters to the engine - * \param name parameter name - * \param val parameter value - */ -void AllreduceBase::SetParam(const char *name, const char *val) { - if (!strcmp(name, "rabit_tracker_uri")) tracker_uri = val; - if (!strcmp(name, "rabit_tracker_port")) tracker_port = atoi(val); - if (!strcmp(name, "rabit_task_id")) task_id = val; - if (!strcmp(name, "DMLC_TRACKER_URI")) tracker_uri = val; - if (!strcmp(name, "DMLC_TRACKER_PORT")) tracker_port = atoi(val); - if (!strcmp(name, "DMLC_TASK_ID")) task_id = val; - if (!strcmp(name, "DMLC_ROLE")) dmlc_role = val; - if (!strcmp(name, "rabit_world_size")) world_size = atoi(val); - if (!strcmp(name, "rabit_hadoop_mode")) hadoop_mode = atoi(val); - if (!strcmp(name, "rabit_reduce_ring_mincount")) { - reduce_ring_mincount = ParseUnit(name, val); - } - if (!strcmp(name, "rabit_reduce_buffer")) { - reduce_buffer_size = (ParseUnit(name, val) + 7) >> 3; - } - if (!strcmp(name, "DMLC_WORKER_CONNECT_RETRY")) { - connect_retry = atoi(val); - } -} -/*! - * \brief initialize connection to the tracker - * \return a socket that initializes the connection - */ -utils::TCPSocket AllreduceBase::ConnectTracker(void) const { - int magic = kMagic; - // get information from tracker - utils::TCPSocket tracker; - tracker.Create(); - - int retry = 0; - do { - fprintf(stderr, "connect to ip: [%s]\n", tracker_uri.c_str()); - if (!tracker.Connect(utils::SockAddr(tracker_uri.c_str(), tracker_port))) { - if (++retry >= connect_retry) { - fprintf(stderr, "connect to (failed): [%s]\n", tracker_uri.c_str()); - utils::Socket::Error("Connect"); - } else { - fprintf(stderr, "retry connect to ip(retry time %d): [%s]\n", retry, tracker_uri.c_str()); - #ifdef _MSC_VER - Sleep(1); - #else - sleep(1); - #endif - continue; - } - } - break; - } while (1); - - using utils::Assert; - Assert(tracker.SendAll(&magic, sizeof(magic)) == sizeof(magic), - "ReConnectLink failure 1"); - Assert(tracker.RecvAll(&magic, sizeof(magic)) == sizeof(magic), - "ReConnectLink failure 2"); - utils::Check(magic == kMagic, "sync::Invalid tracker message, init failure"); - Assert(tracker.SendAll(&rank, sizeof(rank)) == sizeof(rank), - "ReConnectLink failure 3"); - Assert(tracker.SendAll(&world_size, sizeof(world_size)) == sizeof(world_size), - "ReConnectLink failure 3"); - tracker.SendStr(task_id); - return tracker; -} -/*! 
- * \brief connect to the tracker to fix the the missing links - * this function is also used when the engine start up - */ -void AllreduceBase::ReConnectLinks(const char *cmd) { - // single node mode - if (tracker_uri == "NULL") { - rank = 0; world_size = 1; return; - } - utils::TCPSocket tracker = this->ConnectTracker(); - tracker.SendStr(std::string(cmd)); - - // the rank of previous link, next link in ring - int prev_rank, next_rank; - // the rank of neighbors - std::map tree_neighbors; - using utils::Assert; - // get new ranks - int newrank, num_neighbors; - Assert(tracker.RecvAll(&newrank, sizeof(newrank)) == sizeof(newrank), - "ReConnectLink failure 4"); - Assert(tracker.RecvAll(&parent_rank, sizeof(parent_rank)) ==\ - sizeof(parent_rank), "ReConnectLink failure 4"); - Assert(tracker.RecvAll(&world_size, sizeof(world_size)) == sizeof(world_size), - "ReConnectLink failure 4"); - Assert(rank == -1 || newrank == rank, - "must keep rank to same if the node already have one"); - rank = newrank; - Assert(tracker.RecvAll(&num_neighbors, sizeof(num_neighbors)) == \ - sizeof(num_neighbors), "ReConnectLink failure 4"); - for (int i = 0; i < num_neighbors; ++i) { - int nrank; - Assert(tracker.RecvAll(&nrank, sizeof(nrank)) == sizeof(nrank), - "ReConnectLink failure 4"); - tree_neighbors[nrank] = 1; - } - Assert(tracker.RecvAll(&prev_rank, sizeof(prev_rank)) == sizeof(prev_rank), - "ReConnectLink failure 4"); - Assert(tracker.RecvAll(&next_rank, sizeof(next_rank)) == sizeof(next_rank), - "ReConnectLink failure 4"); - // create listening socket - utils::TCPSocket sock_listen; - sock_listen.Create(); - int port = sock_listen.TryBindHost(slave_port, slave_port + nport_trial); - utils::Check(port != -1, "ReConnectLink fail to bind the ports specified"); - sock_listen.Listen(); - - // get number of to connect and number of to accept nodes from tracker - int num_conn, num_accept, num_error = 1; - do { - // send over good links - std::vector good_link; - for (size_t i = 0; i < all_links.size(); ++i) { - if (!all_links[i].sock.BadSocket()) { - good_link.push_back(static_cast(all_links[i].rank)); - } else { - if (!all_links[i].sock.IsClosed()) all_links[i].sock.Close(); - } - } - int ngood = static_cast(good_link.size()); - Assert(tracker.SendAll(&ngood, sizeof(ngood)) == sizeof(ngood), - "ReConnectLink failure 5"); - for (size_t i = 0; i < good_link.size(); ++i) { - Assert(tracker.SendAll(&good_link[i], sizeof(good_link[i])) == \ - sizeof(good_link[i]), "ReConnectLink failure 6"); - } - Assert(tracker.RecvAll(&num_conn, sizeof(num_conn)) == sizeof(num_conn), - "ReConnectLink failure 7"); - Assert(tracker.RecvAll(&num_accept, sizeof(num_accept)) == \ - sizeof(num_accept), "ReConnectLink failure 8"); - num_error = 0; - for (int i = 0; i < num_conn; ++i) { - LinkRecord r; - int hport, hrank; - std::string hname; - tracker.RecvStr(&hname); - Assert(tracker.RecvAll(&hport, sizeof(hport)) == sizeof(hport), - "ReConnectLink failure 9"); - Assert(tracker.RecvAll(&hrank, sizeof(hrank)) == sizeof(hrank), - "ReConnectLink failure 10"); - r.sock.Create(); - if (!r.sock.Connect(utils::SockAddr(hname.c_str(), hport))) { - num_error += 1; r.sock.Close(); continue; - } - Assert(r.sock.SendAll(&rank, sizeof(rank)) == sizeof(rank), - "ReConnectLink failure 12"); - Assert(r.sock.RecvAll(&r.rank, sizeof(r.rank)) == sizeof(r.rank), - "ReConnectLink failure 13"); - utils::Check(hrank == r.rank, - "ReConnectLink failure, link rank inconsistent"); - bool match = false; - for (size_t i = 0; i < all_links.size(); ++i) { - if 
(all_links[i].rank == hrank) { - Assert(all_links[i].sock.IsClosed(), - "Override a link that is active"); - all_links[i].sock = r.sock; match = true; break; - } - } - if (!match) all_links.push_back(r); - } - Assert(tracker.SendAll(&num_error, sizeof(num_error)) == sizeof(num_error), - "ReConnectLink failure 14"); - } while (num_error != 0); - // send back socket listening port to tracker - Assert(tracker.SendAll(&port, sizeof(port)) == sizeof(port), - "ReConnectLink failure 14"); - // close connection to tracker - tracker.Close(); - // listen to incoming links - for (int i = 0; i < num_accept; ++i) { - LinkRecord r; - r.sock = sock_listen.Accept(); - Assert(r.sock.SendAll(&rank, sizeof(rank)) == sizeof(rank), - "ReConnectLink failure 15"); - Assert(r.sock.RecvAll(&r.rank, sizeof(r.rank)) == sizeof(r.rank), - "ReConnectLink failure 15"); - bool match = false; - for (size_t i = 0; i < all_links.size(); ++i) { - if (all_links[i].rank == r.rank) { - utils::Assert(all_links[i].sock.IsClosed(), - "Override a link that is active"); - all_links[i].sock = r.sock; match = true; break; - } - } - if (!match) all_links.push_back(r); - } - // close listening sockets - sock_listen.Close(); - this->parent_index = -1; - // setup tree links and ring structure - tree_links.plinks.clear(); - for (size_t i = 0; i < all_links.size(); ++i) { - utils::Assert(!all_links[i].sock.BadSocket(), "ReConnectLink: bad socket"); - // set the socket to non-blocking mode, enable TCP keepalive - all_links[i].sock.SetNonBlock(true); - all_links[i].sock.SetKeepAlive(true); - if (tree_neighbors.count(all_links[i].rank) != 0) { - if (all_links[i].rank == parent_rank) { - parent_index = static_cast(tree_links.plinks.size()); - } - tree_links.plinks.push_back(&all_links[i]); - } - if (all_links[i].rank == prev_rank) ring_prev = &all_links[i]; - if (all_links[i].rank == next_rank) ring_next = &all_links[i]; - } - Assert(parent_rank == -1 || parent_index != -1, - "cannot find parent in the link"); - Assert(prev_rank == -1 || ring_prev != NULL, - "cannot find prev ring in the link"); - Assert(next_rank == -1 || ring_next != NULL, - "cannot find next ring in the link"); -} -/*! - * \brief perform in-place allreduce, on sendrecvbuf, this function can fail, and will return the cause of failure - * - * NOTE on Allreduce: - * The kSuccess TryAllreduce does NOT mean every node have successfully finishes TryAllreduce. - * It only means the current node get the correct result of Allreduce. - * However, it means every node finishes LAST call(instead of this one) of Allreduce/Bcast - * - * \param sendrecvbuf_ buffer for both sending and recving data - * \param type_nbytes the unit number of bytes the type have - * \param count number of elements to be reduced - * \param reducer reduce function - * \return this function can return kSuccess, kSockError, kGetExcept, see ReturnType for details - * \sa ReturnType - */ -AllreduceBase::ReturnType -AllreduceBase::TryAllreduce(void *sendrecvbuf_, - size_t type_nbytes, - size_t count, - ReduceFunction reducer) { - if (count > reduce_ring_mincount) { - return this->TryAllreduceRing(sendrecvbuf_, type_nbytes, count, reducer); - } else { - return this->TryAllreduceTree(sendrecvbuf_, type_nbytes, count, reducer); - } -} -/*! 
- * \brief perform in-place allreduce, on sendrecvbuf, - * this function implements tree-shape reduction - * - * \param sendrecvbuf_ buffer for both sending and recving data - * \param type_nbytes the unit number of bytes the type have - * \param count number of elements to be reduced - * \param reducer reduce function - * \return this function can return kSuccess, kSockError, kGetExcept, see ReturnType for details - * \sa ReturnType - */ -AllreduceBase::ReturnType -AllreduceBase::TryAllreduceTree(void *sendrecvbuf_, - size_t type_nbytes, - size_t count, - ReduceFunction reducer) { - RefLinkVector &links = tree_links; - if (links.size() == 0 || count == 0) return kSuccess; - // total size of message - const size_t total_size = type_nbytes * count; - // number of links - const int nlink = static_cast(links.size()); - // send recv buffer - char *sendrecvbuf = reinterpret_cast(sendrecvbuf_); - // size of space that we already performs reduce in up pass - size_t size_up_reduce = 0; - // size of space that we have already passed to parent - size_t size_up_out = 0; - // size of message we received, and send in the down pass - size_t size_down_in = 0; - // initialize the link ring-buffer and pointer - for (int i = 0; i < nlink; ++i) { - if (i != parent_index) { - links[i].InitBuffer(type_nbytes, count, reduce_buffer_size); - } - links[i].ResetSize(); - } - // if no childs, no need to reduce - if (nlink == static_cast(parent_index != -1)) { - size_up_reduce = total_size; - } - // while we have not passed the messages out - while (true) { - // select helper - bool finished = true; - utils::SelectHelper selecter; - for (int i = 0; i < nlink; ++i) { - if (i == parent_index) { - if (size_down_in != total_size) { - selecter.WatchRead(links[i].sock); - // only watch for exception in live channels - selecter.WatchException(links[i].sock); - finished = false; - } - if (size_up_out != total_size && size_up_out < size_up_reduce) { - selecter.WatchWrite(links[i].sock); - } - } else { - if (links[i].size_read != total_size) { - selecter.WatchRead(links[i].sock); - } - // size_write <= size_read - if (links[i].size_write != total_size) { - if (links[i].size_write < size_down_in) { - selecter.WatchWrite(links[i].sock); - } - // only watch for exception in live channels - selecter.WatchException(links[i].sock); - finished = false; - } - } - } - // finish runing allreduce - if (finished) break; - // select must return - selecter.Select(); - // exception handling - for (int i = 0; i < nlink; ++i) { - // recive OOB message from some link - if (selecter.CheckExcept(links[i].sock)) { - return ReportError(&links[i], kGetExcept); - } - } - // read data from childs - for (int i = 0; i < nlink; ++i) { - if (i != parent_index && selecter.CheckRead(links[i].sock)) { - ReturnType ret = links[i].ReadToRingBuffer(size_up_out, total_size); - if (ret != kSuccess) { - return ReportError(&links[i], ret); - } - } - } - // this node have childs, peform reduce - if (nlink > static_cast(parent_index != -1)) { - size_t buffer_size = 0; - // do upstream reduce - size_t max_reduce = total_size; - for (int i = 0; i < nlink; ++i) { - if (i != parent_index) { - max_reduce = std::min(max_reduce, links[i].size_read); - utils::Assert(buffer_size == 0 || buffer_size == links[i].buffer_size, - "buffer size inconsistent"); - buffer_size = links[i].buffer_size; - } - } - utils::Assert(buffer_size != 0, "must assign buffer_size"); - // round to type_n4bytes - max_reduce = (max_reduce / type_nbytes * type_nbytes); - // peform reduce, can be at most 
two rounds - while (size_up_reduce < max_reduce) { - // start position - size_t start = size_up_reduce % buffer_size; - // peform read till end of buffer - size_t nread = std::min(buffer_size - start, - max_reduce - size_up_reduce); - utils::Assert(nread % type_nbytes == 0, "Allreduce: size check"); - for (int i = 0; i < nlink; ++i) { - if (i != parent_index) { - reducer(links[i].buffer_head + start, - sendrecvbuf + size_up_reduce, - static_cast(nread / type_nbytes), - MPI::Datatype(type_nbytes)); - } - } - size_up_reduce += nread; - } - } - if (parent_index != -1) { - // pass message up to parent, can pass data that are already been reduced - if (size_up_out < size_up_reduce) { - ssize_t len = links[parent_index].sock. - Send(sendrecvbuf + size_up_out, size_up_reduce - size_up_out); - if (len != -1) { - size_up_out += static_cast(len); - } else { - ReturnType ret = Errno2Return(); - if (ret != kSuccess) { - return ReportError(&links[parent_index], ret); - } - } - } - // read data from parent - if (selecter.CheckRead(links[parent_index].sock) && - total_size > size_down_in) { - ssize_t len = links[parent_index].sock. - Recv(sendrecvbuf + size_down_in, total_size - size_down_in); - if (len == 0) { - links[parent_index].sock.Close(); - return ReportError(&links[parent_index], kRecvZeroLen); - } - if (len != -1) { - size_down_in += static_cast(len); - utils::Assert(size_down_in <= size_up_out, - "Allreduce: boundary error"); - } else { - ReturnType ret = Errno2Return(); - if (ret != kSuccess) { - return ReportError(&links[parent_index], ret); - } - } - } - } else { - // this is root, can use reduce as most recent point - size_down_in = size_up_out = size_up_reduce; - } - // can pass message down to childs - for (int i = 0; i < nlink; ++i) { - if (i != parent_index && links[i].size_write < size_down_in) { - ReturnType ret = links[i].WriteFromArray(sendrecvbuf, size_down_in); - if (ret != kSuccess) { - return ReportError(&links[i], ret); - } - } - } - } - return kSuccess; -} -/*! 
- * \brief broadcast data from root to all nodes, this function can fail,and will return the cause of failure - * \param sendrecvbuf_ buffer for both sending and recving data - * \param total_size the size of the data to be broadcasted - * \param root the root worker id to broadcast the data - * \return this function can return kSuccess, kSockError, kGetExcept, see ReturnType for details - * \sa ReturnType - */ -AllreduceBase::ReturnType -AllreduceBase::TryBroadcast(void *sendrecvbuf_, size_t total_size, int root) { - RefLinkVector &links = tree_links; - if (links.size() == 0 || total_size == 0) return kSuccess; - utils::Check(root < world_size, - "Broadcast: root should be smaller than world size"); - // number of links - const int nlink = static_cast(links.size()); - // size of space already read from data - size_t size_in = 0; - // input link, -2 means unknown yet, -1 means this is root - int in_link = -2; - - // initialize the link statistics - for (int i = 0; i < nlink; ++i) { - links[i].ResetSize(); - } - // root have all the data - if (this->rank == root) { - size_in = total_size; - in_link = -1; - } - // while we have not passed the messages out - while (true) { - bool finished = true; - // select helper - utils::SelectHelper selecter; - for (int i = 0; i < nlink; ++i) { - if (in_link == -2) { - selecter.WatchRead(links[i].sock); finished = false; - } - if (i == in_link && links[i].size_read != total_size) { - selecter.WatchRead(links[i].sock); finished = false; - } - if (in_link != -2 && i != in_link && links[i].size_write != total_size) { - if (links[i].size_write < size_in) { - selecter.WatchWrite(links[i].sock); - } - finished = false; - } - selecter.WatchException(links[i].sock); - } - // finish running - if (finished) break; - // select - selecter.Select(); - // exception handling - for (int i = 0; i < nlink; ++i) { - // recive OOB message from some link - if (selecter.CheckExcept(links[i].sock)) { - return ReportError(&links[i], kGetExcept); - } - } - if (in_link == -2) { - // probe in-link - for (int i = 0; i < nlink; ++i) { - if (selecter.CheckRead(links[i].sock)) { - ReturnType ret = links[i].ReadToArray(sendrecvbuf_, total_size); - if (ret != kSuccess) { - return ReportError(&links[i], ret); - } - size_in = links[i].size_read; - if (size_in != 0) { - in_link = i; break; - } - } - } - } else { - // read from in link - if (in_link >= 0 && selecter.CheckRead(links[in_link].sock)) { - ReturnType ret = links[in_link].ReadToArray(sendrecvbuf_, total_size); - if (ret != kSuccess) { - return ReportError(&links[in_link], ret); - } - size_in = links[in_link].size_read; - } - } - // send data to all out-link - for (int i = 0; i < nlink; ++i) { - if (i != in_link && links[i].size_write < size_in) { - ReturnType ret = links[i].WriteFromArray(sendrecvbuf_, size_in); - if (ret != kSuccess) { - return ReportError(&links[i], ret); - } - } - } - } - return kSuccess; -} -/*! 
- * \brief internal Allgather function, each node have a segment of data in the ring of sendrecvbuf, - * the data provided by current node k is [slice_begin, slice_end), - * the next node's segment must start with slice_end - * after the call of Allgather, sendrecvbuf_ contains all the contents including all segments - * use a ring based algorithm - * - * \param sendrecvbuf_ buffer for both sending and receiving data, it is a ring conceptually - * \param total_size total size of data to be gathered - * \param slice_begin beginning of the current slice - * \param slice_end end of the current slice - * \param size_prev_slice size of the previous slice i.e. slice of node (rank - 1) % world_size - */ -AllreduceBase::ReturnType -AllreduceBase::TryAllgatherRing(void *sendrecvbuf_, size_t total_size, - size_t slice_begin, - size_t slice_end, - size_t size_prev_slice) { - // read from next link and send to prev one - LinkRecord &prev = *ring_prev, &next = *ring_next; - // need to reply on special rank structure - utils::Assert(next.rank == (rank + 1) % world_size && - rank == (prev.rank + 1) % world_size, - "need to assume rank structure"); - // send recv buffer - char *sendrecvbuf = reinterpret_cast(sendrecvbuf_); - const size_t stop_read = total_size + slice_begin; - const size_t stop_write = total_size + slice_begin - size_prev_slice; - size_t write_ptr = slice_begin; - size_t read_ptr = slice_end; - - while (true) { - // select helper - bool finished = true; - utils::SelectHelper selecter; - if (read_ptr != stop_read) { - selecter.WatchRead(next.sock); - finished = false; - } - if (write_ptr != stop_write) { - if (write_ptr < read_ptr) { - selecter.WatchWrite(prev.sock); - } - finished = false; - } - if (finished) break; - selecter.Select(); - if (read_ptr != stop_read && selecter.CheckRead(next.sock)) { - size_t size = stop_read - read_ptr; - size_t start = read_ptr % total_size; - if (start + size > total_size) { - size = total_size - start; - } - ssize_t len = next.sock.Recv(sendrecvbuf + start, size); - if (len != -1) { - read_ptr += static_cast(len); - } else { - ReturnType ret = Errno2Return(); - if (ret != kSuccess) return ReportError(&next, ret); - } - } - if (write_ptr < read_ptr && write_ptr != stop_write) { - size_t size = std::min(read_ptr, stop_write) - write_ptr; - size_t start = write_ptr % total_size; - if (start + size > total_size) { - size = total_size - start; - } - ssize_t len = prev.sock.Send(sendrecvbuf + start, size); - if (len != -1) { - write_ptr += static_cast(len); - } else { - ReturnType ret = Errno2Return(); - if (ret != kSuccess) return ReportError(&prev, ret); - } - } - } - return kSuccess; -} -/*! 
- * \brief perform in-place allreduce, on sendrecvbuf, this function can fail, - * and will return the cause of failure - * - * Ring-based algorithm - * - * \param sendrecvbuf_ buffer for both sending and recving data - * \param type_nbytes the unit number of bytes the type have - * \param count number of elements to be reduced - * \param reducer reduce function - * \return this function can return kSuccess, kSockError, kGetExcept, see ReturnType for details - * \sa ReturnType, TryAllreduce - */ -AllreduceBase::ReturnType -AllreduceBase::TryReduceScatterRing(void *sendrecvbuf_, - size_t type_nbytes, - size_t count, - ReduceFunction reducer) { - // read from next link and send to prev one - LinkRecord &prev = *ring_prev, &next = *ring_next; - // need to reply on special rank structure - utils::Assert(next.rank == (rank + 1) % world_size && - rank == (prev.rank + 1) % world_size, - "need to assume rank structure"); - // total size of message - const size_t total_size = type_nbytes * count; - size_t n = static_cast(world_size); - size_t step = (count + n - 1) / n; - size_t r = static_cast(next.rank); - size_t write_ptr = std::min(r * step, count) * type_nbytes; - size_t read_ptr = std::min((r + 1) * step, count) * type_nbytes; - size_t reduce_ptr = read_ptr; - // send recv buffer - char *sendrecvbuf = reinterpret_cast(sendrecvbuf_); - // position to stop reading - const size_t stop_read = total_size + write_ptr; - // position to stop writing - size_t stop_write = total_size + std::min(rank * step, count) * type_nbytes; - if (stop_write > stop_read) { - stop_write -= total_size; - utils::Assert(write_ptr <= stop_write, "write ptr boundary check"); - } - // use ring buffer in next position - next.InitBuffer(type_nbytes, step, reduce_buffer_size); - // set size_read to read pointer for ring buffer to work properly - next.size_read = read_ptr; - - while (true) { - // select helper - bool finished = true; - utils::SelectHelper selecter; - if (read_ptr != stop_read) { - selecter.WatchRead(next.sock); - finished = false; - } - if (write_ptr != stop_write) { - if (write_ptr < reduce_ptr) { - selecter.WatchWrite(prev.sock); - } - finished = false; - } - if (finished) break; - selecter.Select(); - if (read_ptr != stop_read && selecter.CheckRead(next.sock)) { - ReturnType ret = next.ReadToRingBuffer(reduce_ptr, stop_read); - if (ret != kSuccess) { - return ReportError(&next, ret); - } - // sync the rate - read_ptr = next.size_read; - utils::Assert(read_ptr <= stop_read, "[%d] read_ptr boundary check", rank); - const size_t buffer_size = next.buffer_size; - size_t max_reduce = (read_ptr / type_nbytes) * type_nbytes; - while (reduce_ptr < max_reduce) { - size_t bstart = reduce_ptr % buffer_size; - size_t nread = std::min(buffer_size - bstart, - max_reduce - reduce_ptr); - size_t rstart = reduce_ptr % total_size; - nread = std::min(nread, total_size - rstart); - reducer(next.buffer_head + bstart, - sendrecvbuf + rstart, - static_cast(nread / type_nbytes), - MPI::Datatype(type_nbytes)); - reduce_ptr += nread; - } - } - if (write_ptr < reduce_ptr && write_ptr != stop_write) { - size_t size = std::min(reduce_ptr, stop_write) - write_ptr; - size_t start = write_ptr % total_size; - if (start + size > total_size) { - size = total_size - start; - } - ssize_t len = prev.sock.Send(sendrecvbuf + start, size); - if (len != -1) { - write_ptr += static_cast(len); - } else { - ReturnType ret = Errno2Return(); - if (ret != kSuccess) return ReportError(&prev, ret); - } - } - } - return kSuccess; -} -/*! 
- * \brief perform in-place allreduce, on sendrecvbuf - * use a ring based algorithm - * - * \param sendrecvbuf_ buffer for both sending and recving data - * \param type_nbytes the unit number of bytes the type have - * \param count number of elements to be reduced - * \param reducer reduce function - * \return this function can return kSuccess, kSockError, kGetExcept, see ReturnType for details - * \sa ReturnType - */ -AllreduceBase::ReturnType -AllreduceBase::TryAllreduceRing(void *sendrecvbuf_, - size_t type_nbytes, - size_t count, - ReduceFunction reducer) { - ReturnType ret = TryReduceScatterRing(sendrecvbuf_, type_nbytes, count, reducer); - if (ret != kSuccess) return ret; - size_t n = static_cast(world_size); - size_t step = (count + n - 1) / n; - size_t begin = std::min(rank * step, count) * type_nbytes; - size_t end = std::min((rank + 1) * step, count) * type_nbytes; - // previous rank - int prank = ring_prev->rank; - // get rank of previous - return TryAllgatherRing - (sendrecvbuf_, type_nbytes * count, - begin, end, - (std::min((prank + 1) * step, count) - - std::min(prank * step, count)) * type_nbytes); -} -} // namespace engine -} // namespace rabit diff --git a/subtree/rabit/src/allreduce_base.h b/subtree/rabit/src/allreduce_base.h deleted file mode 100644 index 63acd75d5..000000000 --- a/subtree/rabit/src/allreduce_base.h +++ /dev/null @@ -1,527 +0,0 @@ -/*! - * Copyright (c) 2014 by Contributors - * \file allreduce_base.h - * \brief Basic implementation of AllReduce - * using TCP non-block socket and tree-shape reduction. - * - * This implementation provides basic utility of AllReduce and Broadcast - * without considering node failure - * - * \author Tianqi Chen, Ignacio Cano, Tianyi Zhou - */ -#ifndef RABIT_ALLREDUCE_BASE_H_ -#define RABIT_ALLREDUCE_BASE_H_ - -#include -#include -#include -#include "../include/rabit/utils.h" -#include "../include/rabit/engine.h" -#include "./socket.h" - -namespace MPI { -// MPI data type to be compatible with existing MPI interface -class Datatype { - public: - size_t type_size; - explicit Datatype(size_t type_size) : type_size(type_size) {} -}; -} -namespace rabit { -namespace engine { -/*! \brief implementation of basic Allreduce engine */ -class AllreduceBase : public IEngine { - public: - // magic number to verify server - static const int kMagic = 0xff99; - // constant one byte out of band message to indicate error happening - AllreduceBase(void); - virtual ~AllreduceBase(void) {} - // initialize the manager - virtual void Init(void); - // shutdown the engine - virtual void Shutdown(void); - /*! - * \brief set parameters to the engine - * \param name parameter name - * \param val parameter value - */ - virtual void SetParam(const char *name, const char *val); - /*! - * \brief print the msg in the tracker, - * this function can be used to communicate the information of the progress to - * the user who monitors the tracker - * \param msg message to be printed in the tracker - */ - virtual void TrackerPrint(const std::string &msg); - /*! \brief get rank */ - virtual int GetRank(void) const { - return rank; - } - /*! \brief get rank */ - virtual int GetWorldSize(void) const { - if (world_size == -1) return 1; - return world_size; - } - /*! \brief whether is distributed or not */ - virtual bool IsDistributed(void) const { - return tracker_uri != "NULL"; - } - /*! \brief get rank */ - virtual std::string GetHost(void) const { - return host_uri; - } - /*! 
- * \brief perform in-place allreduce, on sendrecvbuf - * this function is NOT thread-safe - * \param sendrecvbuf_ buffer for both sending and recving data - * \param type_nbytes the unit number of bytes the type have - * \param count number of elements to be reduced - * \param reducer reduce function - * \param prepare_func Lazy preprocessing function, lazy prepare_fun(prepare_arg) - * will be called by the function before performing Allreduce, to intialize the data in sendrecvbuf_. - * If the result of Allreduce can be recovered directly, then prepare_func will NOT be called - * \param prepare_arg argument used to passed into the lazy preprocessing function - */ - virtual void Allreduce(void *sendrecvbuf_, - size_t type_nbytes, - size_t count, - ReduceFunction reducer, - PreprocFunction prepare_fun = NULL, - void *prepare_arg = NULL) { - if (prepare_fun != NULL) prepare_fun(prepare_arg); - if (world_size == 1) return; - utils::Assert(TryAllreduce(sendrecvbuf_, - type_nbytes, count, reducer) == kSuccess, - "Allreduce failed"); - } - /*! - * \brief broadcast data from root to all nodes - * \param sendrecvbuf_ buffer for both sending and recving data - * \param size the size of the data to be broadcasted - * \param root the root worker id to broadcast the data - */ - virtual void Broadcast(void *sendrecvbuf_, size_t total_size, int root) { - if (world_size == 1) return; - utils::Assert(TryBroadcast(sendrecvbuf_, total_size, root) == kSuccess, - "Broadcast failed"); - } - /*! - * \brief load latest check point - * \param global_model pointer to the globally shared model/state - * when calling this function, the caller need to gauranttees that global_model - * is the same in all nodes - * \param local_model pointer to local model, that is specific to current node/rank - * this can be NULL when no local model is needed - * - * \return the version number of check point loaded - * if returned version == 0, this means no model has been CheckPointed - * the p_model is not touched, user should do necessary initialization by themselves - * - * Common usage example: - * int iter = rabit::LoadCheckPoint(&model); - * if (iter == 0) model.InitParameters(); - * for (i = iter; i < max_iter; ++i) { - * do many things, include allreduce - * rabit::CheckPoint(model); - * } - * - * \sa CheckPoint, VersionNumber - */ - virtual int LoadCheckPoint(Serializable *global_model, - Serializable *local_model = NULL) { - return 0; - } - /*! - * \brief checkpoint the model, meaning we finished a stage of execution - * every time we call check point, there is a version number which will increase by one - * - * \param global_model pointer to the globally shared model/state - * when calling this function, the caller need to gauranttees that global_model - * is the same in all nodes - * \param local_model pointer to local model, that is specific to current node/rank - * this can be NULL when no local state is needed - * - * NOTE: local_model requires explicit replication of the model for fault-tolerance, which will - * bring replication cost in CheckPoint function. global_model do not need explicit replication. - * So only CheckPoint with global_model if possible - * - * \sa LoadCheckPoint, VersionNumber - */ - virtual void CheckPoint(const Serializable *global_model, - const Serializable *local_model = NULL) { - version_number += 1; - } - /*! - * \brief This function can be used to replace CheckPoint for global_model only, - * when certain condition is met(see detailed expplaination). 
- * - * This is a "lazy" checkpoint such that only the pointer to global_model is - * remembered and no memory copy is taken. To use this function, the user MUST ensure that: - * The global_model must remain unchanged util last call of Allreduce/Broadcast in current version finishs. - * In another words, global_model model can be changed only between last call of - * Allreduce/Broadcast and LazyCheckPoint in current version - * - * For example, suppose the calling sequence is: - * LazyCheckPoint, code1, Allreduce, code2, Broadcast, code3, LazyCheckPoint - * - * If user can only changes global_model in code3, then LazyCheckPoint can be used to - * improve efficiency of the program. - * \param global_model pointer to the globally shared model/state - * when calling this function, the caller need to gauranttees that global_model - * is the same in all nodes - * \sa LoadCheckPoint, CheckPoint, VersionNumber - */ - virtual void LazyCheckPoint(const Serializable *global_model) { - version_number += 1; - } - /*! - * \return version number of current stored model, - * which means how many calls to CheckPoint we made so far - * \sa LoadCheckPoint, CheckPoint - */ - virtual int VersionNumber(void) const { - return version_number; - } - /*! - * \brief explicitly re-init everything before calling LoadCheckPoint - * call this function when IEngine throw an exception out, - * this function is only used for test purpose - */ - virtual void InitAfterException(void) { - utils::Error("InitAfterException: not implemented"); - } - /*! - * \brief report current status to the job tracker - * depending on the job tracker we are in - */ - inline void ReportStatus(void) const { - if (hadoop_mode != 0) { - fprintf(stderr, "reporter:status:Rabit Phase[%03d] Operation %03d\n", - version_number, seq_counter); - } - } - - protected: - /*! \brief enumeration of possible returning results from Try functions */ - enum ReturnTypeEnum { - /*! \brief execution is successful */ - kSuccess, - /*! \brief a link was reset by peer */ - kConnReset, - /*! \brief received a zero length message */ - kRecvZeroLen, - /*! \brief a neighbor node go down, the connection is dropped */ - kSockError, - /*! - * \brief another node which is not my neighbor go down, - * get Out-of-Band exception notification from my neighbor - */ - kGetExcept - }; - /*! \brief struct return type to avoid implicit conversion to int/bool */ - struct ReturnType { - /*! \brief internal return type */ - ReturnTypeEnum value; - // constructor - ReturnType() {} - ReturnType(ReturnTypeEnum value) : value(value) {} // NOLINT(*) - inline bool operator==(const ReturnTypeEnum &v) const { - return value == v; - } - inline bool operator!=(const ReturnTypeEnum &v) const { - return value != v; - } - }; - /*! 
-  /*! \brief translate errno to a return type */
-  inline static ReturnType Errno2Return() {
-    int errsv = utils::Socket::GetLastError();
-    if (errsv == EAGAIN || errsv == EWOULDBLOCK || errsv == 0) return kSuccess;
-#ifdef _WIN32
-    if (errsv == WSAEWOULDBLOCK) return kSuccess;
-    if (errsv == WSAECONNRESET) return kConnReset;
-#endif
-    if (errsv == ECONNRESET) return kConnReset;
-    return kSockError;
-  }
-  // link record to a neighbor
-  struct LinkRecord {
-   public:
-    // socket used to get data from/to the link
-    utils::TCPSocket sock;
-    // rank of the node on this link
-    int rank;
-    // size of data read from the link
-    size_t size_read;
-    // size of data sent to the link
-    size_t size_write;
-    // pointer to the buffer head
-    char *buffer_head;
-    // buffer size, in bytes
-    size_t buffer_size;
-    // constructor
-    LinkRecord(void)
-        : buffer_head(NULL), buffer_size(0) {
-    }
-    // initialize the buffer
-    inline void InitBuffer(size_t type_nbytes, size_t count,
-                           size_t reduce_buffer_size) {
-      size_t n = (type_nbytes * count + 7) / 8;
-      buffer_.resize(std::min(reduce_buffer_size, n));
-      // make sure the size aligns to type_nbytes
-      buffer_size =
-          buffer_.size() * sizeof(uint64_t) / type_nbytes * type_nbytes;
-      utils::Assert(type_nbytes <= buffer_size,
-                    "too large type_nbytes=%lu, buffer_size=%lu",
-                    type_nbytes, buffer_size);
-      // set the buffer head
-      buffer_head = reinterpret_cast<char*>(BeginPtr(buffer_));
-    }
-    // reset the recv and sent sizes
-    inline void ResetSize(void) {
-      size_write = size_read = 0;
-    }
-    /*!
-     * \brief read data into the ring buffer, taking care not to override
-     *        existing useful data at positions after protect_start
-     * \param protect_start all data starting from protect_start is still needed in the buffer;
-     *        the read shall not override it
-     * \param max_size_read maximum logical amount we can read; size_read cannot exceed this value
-     * \return the result type of the read
-     */
-    inline ReturnType ReadToRingBuffer(size_t protect_start, size_t max_size_read) {
-      utils::Assert(buffer_head != NULL, "ReadToRingBuffer: buffer not allocated");
-      utils::Assert(size_read <= max_size_read, "ReadToRingBuffer: max_size_read check");
-      size_t ngap = size_read - protect_start;
-      utils::Assert(ngap <= buffer_size, "Allreduce: boundary check");
-      size_t offset = size_read % buffer_size;
-      size_t nmax = max_size_read - size_read;
-      nmax = std::min(nmax, buffer_size - ngap);
-      nmax = std::min(nmax, buffer_size - offset);
-      if (nmax == 0) return kSuccess;
-      ssize_t len = sock.Recv(buffer_head + offset, nmax);
-      // length equals 0, remote disconnected
-      if (len == 0) {
-        sock.Close(); return kRecvZeroLen;
-      }
-      if (len == -1) return Errno2Return();
-      size_read += static_cast<size_t>(len);
-      return kSuccess;
-    }
-    /*!
-     * \brief read data into an array;
-     *        this function cannot be used together with ReadToRingBuffer:
-     *        a link can either read into the ring buffer or into an existing array
-     * \param recvbuf_ the array to read into
-     * \param max_size maximum size of the array
-     * \return kSuccess if the read succeeded, otherwise the error type; check errno on error
-     */
-    inline ReturnType ReadToArray(void *recvbuf_, size_t max_size) {
-      if (max_size == size_read) return kSuccess;
-      char *p = static_cast<char*>(recvbuf_);
-      ssize_t len = sock.Recv(p + size_read, max_size - size_read);
-      // length equals 0, remote disconnected
-      if (len == 0) {
-        sock.Close(); return kRecvZeroLen;
-      }
-      if (len == -1) return Errno2Return();
-      size_read += static_cast<size_t>(len);
-      return kSuccess;
-    }
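The clamping in ReadToRingBuffer above can be sanity-checked with concrete numbers; a standalone sketch (all values hypothetical):

    #include <algorithm>
    #include <cstdio>

    int main() {
      size_t buffer_size = 64, size_read = 70, protect_start = 40, max_size_read = 200;
      size_t ngap   = size_read - protect_start;    // 30: bytes still protected in the ring
      size_t offset = size_read % buffer_size;      // 6: physical position of the next read
      size_t nmax   = max_size_read - size_read;    // 130: logical budget left
      nmax = std::min(nmax, buffer_size - ngap);    // 34: never overwrite protected data
      nmax = std::min(nmax, buffer_size - offset);  // still 34: stop at the physical end
      std::printf("read at most %zu bytes at ring offset %zu\n", nmax, offset);
      return 0;
    }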
-    /*!
-     * \brief write data from an array to the socket
-     * \param sendbuf_ head of the array
-     * \param max_size maximum size of the array
-     * \return kSuccess if the write succeeded, otherwise the error type; check errno on error
-     */
-    inline ReturnType WriteFromArray(const void *sendbuf_, size_t max_size) {
-      const char *p = static_cast<const char*>(sendbuf_);
-      ssize_t len = sock.Send(p + size_write, max_size - size_write);
-      if (len == -1) return Errno2Return();
-      size_write += static_cast<size_t>(len);
-      return kSuccess;
-    }
-
-   private:
-    // recv buffer to get data from the child;
-    // aligned to 64 bits, so 64-bit operations can be performed freely
-    std::vector<uint64_t> buffer_;
-  };
-  /*!
-   * \brief simple data structure that works like a vector
-   *        but holds references instead of owning the storage
-   */
-  struct RefLinkVector {
-    std::vector<LinkRecord*> plinks;
-    inline LinkRecord &operator[](size_t i) {
-      return *plinks[i];
-    }
-    inline size_t size(void) const {
-      return plinks.size();
-    }
-  };
-  /*!
-   * \brief initialize the connection to the tracker
-   * \return a socket that holds the initialized connection
-   */
-  utils::TCPSocket ConnectTracker(void) const;
-  /*!
-   * \brief connect to the tracker to fix the missing links;
-   *        this function is also used when the engine starts up
-   * \param cmd possible command to send to the tracker
-   */
-  void ReConnectLinks(const char *cmd = "start");
-  /*!
-   * \brief perform in-place allreduce on sendrecvbuf; this function can fail and will return the cause of failure
-   *
-   * NOTE on Allreduce:
-   *   a kSuccess from TryAllreduce does NOT mean every node has successfully finished TryAllreduce;
-   *   it only means the current node got the correct result of the Allreduce.
-   *   However, it does mean every node finished the LAST call (instead of this one) to Allreduce/Bcast
-   *
-   * \param sendrecvbuf_ buffer used for both sending and receiving data
-   * \param type_nbytes the number of bytes in the unit type
-   * \param count number of elements to be reduced
-   * \param reducer reduce function
-   * \return this function can return kSuccess, kSockError or kGetExcept, see ReturnType for details
-   * \sa ReturnType
-   */
-  ReturnType TryAllreduce(void *sendrecvbuf_,
-                          size_t type_nbytes,
-                          size_t count,
-                          ReduceFunction reducer);
-  /*!
-   * \brief broadcast data from root to all nodes; this function can fail and will return the cause of failure
-   * \param sendrecvbuf_ buffer used for both sending and receiving data
-   * \param size the size of the data to be broadcast
-   * \param root the id of the root worker that broadcasts the data
-   * \return this function can return kSuccess, kSockError or kGetExcept, see ReturnType for details
-   * \sa ReturnType
-   */
-  ReturnType TryBroadcast(void *sendrecvbuf_, size_t size, int root);
-  /*!
-   * \brief perform in-place allreduce on sendrecvbuf;
-   *        this function implements tree-shaped reduction
-   *
-   * \param sendrecvbuf_ buffer used for both sending and receiving data
-   * \param type_nbytes the number of bytes in the unit type
-   * \param count number of elements to be reduced
-   * \param reducer reduce function
-   * \return this function can return kSuccess, kSockError or kGetExcept, see ReturnType for details
-   * \sa ReturnType
-   */
-  ReturnType TryAllreduceTree(void *sendrecvbuf_,
-                              size_t type_nbytes,
-                              size_t count,
-                              ReduceFunction reducer);
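For context, the corresponding definition in allreduce_base.cc (not part of this hunk) dispatches between the tree and ring implementations based on reduce_ring_mincount, roughly as in this sketch:

    AllreduceBase::ReturnType
    AllreduceBase::TryAllreduce(void *sendrecvbuf_, size_t type_nbytes,
                                size_t count, ReduceFunction reducer) {
      // small payloads go through the tree; large ones use the ring algorithm
      if (count > reduce_ring_mincount) {
        return this->TryAllreduceRing(sendrecvbuf_, type_nbytes, count, reducer);
      }
      return this->TryAllreduceTree(sendrecvbuf_, type_nbytes, count, reducer);
    }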
-  /*!
-   * \brief internal Allgather function; each node has a segment of data in the ring of sendrecvbuf,
-   *        the data provided by the current node k is [slice_begin, slice_end),
-   *        and the next node's segment must start at slice_end;
-   *        after the call, sendrecvbuf_ contains all the contents of all segments;
-   *        uses a ring-based algorithm
-   *
-   * \param sendrecvbuf_ buffer used for both sending and receiving data; conceptually a ring
-   * \param total_size total size of the data to be gathered
-   * \param slice_begin beginning of the current slice
-   * \param slice_end end of the current slice
-   * \param size_prev_slice size of the previous slice, i.e. the slice of node (rank - 1) % world_size
-   * \return this function can return kSuccess, kSockError or kGetExcept, see ReturnType for details
-   * \sa ReturnType
-   */
-  ReturnType TryAllgatherRing(void *sendrecvbuf_, size_t total_size,
-                              size_t slice_begin, size_t slice_end,
-                              size_t size_prev_slice);
-  /*!
-   * \brief perform in-place allreduce, reducing on sendrecvbuf;
-   *
-   *        after the function, node k gets the k-th segment of the reduction result;
-   *        the k-th segment is defined by [k * step, min((k + 1) * step, count))
-   *        where step = ceil(count / world_size)
-   *
-   * \param sendrecvbuf_ buffer used for both sending and receiving data
-   * \param type_nbytes the number of bytes in the unit type
-   * \param count number of elements to be reduced
-   * \param reducer reduce function
-   * \return this function can return kSuccess, kSockError or kGetExcept, see ReturnType for details
-   * \sa ReturnType, TryAllreduce
-   */
-  ReturnType TryReduceScatterRing(void *sendrecvbuf_,
-                                  size_t type_nbytes,
-                                  size_t count,
-                                  ReduceFunction reducer);
-  /*!
-   * \brief perform in-place allreduce on sendrecvbuf
-   *        using a ring-based algorithm: reduce-scatter + allgather
-   *
-   * \param sendrecvbuf_ buffer used for both sending and receiving data
-   * \param type_nbytes the number of bytes in the unit type
-   * \param count number of elements to be reduced
-   * \param reducer reduce function
-   * \return this function can return kSuccess, kSockError or kGetExcept, see ReturnType for details
-   * \sa ReturnType
-   */
-  ReturnType TryAllreduceRing(void *sendrecvbuf_,
-                              size_t type_nbytes,
-                              size_t count,
-                              ReduceFunction reducer);
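The segment layout stated in the TryReduceScatterRing comment can be computed directly; a small standalone sketch with example values:

    #include <cstdio>

    int main() {
      const size_t count = 10, world_size = 4;
      const size_t step = (count + world_size - 1) / world_size;   // ceil(10 / 4) = 3
      for (size_t k = 0; k < world_size; ++k) {
        size_t begin = k * step;                                   // node 3: begin = 9
        size_t end = (k + 1) * step < count ? (k + 1) * step : count;
        std::printf("node %zu owns [%zu, %zu)\n", k, begin, end);  // last segment is shorter
      }
      return 0;
    }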
-  /*!
-   * \brief function used to report an error when a link goes wrong
-   * \param link pointer to the link that caused the error
-   * \param err the error type
-   */
-  inline ReturnType ReportError(LinkRecord *link, ReturnType err) {
-    err_link = link; return err;
-  }
-  //---- data structures related to the model ----
-  // call sequence counter: records how many calls we have made so far
-  // since the last call to CheckPoint or LoadCheckPoint
-  int seq_counter;
-  // version number of the model
-  int version_number;
-  // whether the job is running in hadoop
-  int hadoop_mode;
-  //---- local data related to the links ----
-  // index of the parent link; can be -1, meaning this node is the root of the tree
-  int parent_index;
-  // rank of the parent node, can be -1
-  int parent_rank;
-  // sockets of all links this node connects to
-  std::vector<LinkRecord> all_links;
-  // used to record the link where things went wrong
-  LinkRecord *err_link;
-  // all the links in the reduction tree connection
-  RefLinkVector tree_links;
-  // pointers to the links in the ring
-  LinkRecord *ring_prev, *ring_next;
-  //----- meta information -----
-  // list of environment variables that are of possible interest
-  std::vector<std::string> env_vars;
-  // unique identifier of the job this process is possibly doing;
-  // used to assign ranks, optional, defaults to NULL
-  std::string task_id;
-  // uri of the current host, to be set by Init
-  std::string host_uri;
-  // uri of the tracker
-  std::string tracker_uri;
-  // role in dmlc jobs
-  std::string dmlc_role;
-  // port of the tracker address
-  int tracker_port;
-  // port of the slave process, and the number of ports to try
-  int slave_port, nport_trial;
-  // reduce buffer size
-  size_t reduce_buffer_size;
-  // reduction method
-  int reduce_method;
-  // minimum count of cells needed to use the ring-based method
-  size_t reduce_ring_mincount;
-  // current rank
-  int rank;
-  // world size
-  int world_size;
-  // number of times to retry connecting
-  int connect_retry;
-};
-}  // namespace engine
-}  // namespace rabit
-#endif  // RABIT_ALLREDUCE_BASE_H_
diff --git a/subtree/rabit/src/allreduce_mock.h b/subtree/rabit/src/allreduce_mock.h
deleted file mode 100644
index c3f9f4f1d..000000000
--- a/subtree/rabit/src/allreduce_mock.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/*!
- * Copyright by Contributors
- * \file allreduce_mock.h
- * \brief Mock test module of the AllReduce engine:
- *        inserts failures at certain call points, to test whether the engine is robust to failure
- *
- * \author Ignacio Cano, Tianqi Chen
- */
-#ifndef RABIT_ALLREDUCE_MOCK_H_
-#define RABIT_ALLREDUCE_MOCK_H_
-#include <vector>
-#include <map>
-#include <sstream>
-#include "../include/rabit/engine.h"
-#include "../include/rabit/timer.h"
-#include "./allreduce_robust.h"
-
-namespace rabit {
-namespace engine {
-class AllreduceMock : public AllreduceRobust {
- public:
-  // constructor
-  AllreduceMock(void) {
-    num_trial = 0;
-    force_local = 0;
-    report_stats = 0;
-    tsum_allreduce = 0.0;
-  }
-  // destructor
-  virtual ~AllreduceMock(void) {}
-  virtual void SetParam(const char *name, const char *val) {
-    AllreduceRobust::SetParam(name, val);
-    // additional parameters
-    if (!strcmp(name, "rabit_num_trial")) num_trial = atoi(val);
-    if (!strcmp(name, "DMLC_NUM_ATTEMPT")) num_trial = atoi(val);
-    if (!strcmp(name, "report_stats")) report_stats = atoi(val);
-    if (!strcmp(name, "force_local")) force_local = atoi(val);
-    if (!strcmp(name, "mock")) {
-      MockKey k;
-      utils::Check(sscanf(val, "%d,%d,%d,%d",
-                          &k.rank, &k.version, &k.seqno, &k.ntrial) == 4,
-                   "invalid mock parameter");
-      mock_map[k] = 1;
-    }
-  }
-  virtual void Allreduce(void *sendrecvbuf_,
-                         size_t type_nbytes,
-                         size_t count,
-                         ReduceFunction reducer,
-                         PreprocFunction prepare_fun,
-                         void *prepare_arg) {
-    this->Verify(MockKey(rank, version_number, seq_counter, num_trial), "AllReduce");
-    double tstart = utils::GetTime();
-    AllreduceRobust::Allreduce(sendrecvbuf_, type_nbytes,
-                               count, reducer, prepare_fun, prepare_arg);
-    tsum_allreduce += utils::GetTime() - tstart;
-  }
-  virtual void Broadcast(void *sendrecvbuf_, size_t total_size, int root) {
-    this->Verify(MockKey(rank, version_number, seq_counter, num_trial), "Broadcast");
-    AllreduceRobust::Broadcast(sendrecvbuf_, total_size, root);
-  }
-  virtual int LoadCheckPoint(Serializable *global_model,
-                             Serializable *local_model) {
-    tsum_allreduce = 0.0;
-    time_checkpoint = utils::GetTime();
-    if (force_local == 0) {
-      return AllreduceRobust::LoadCheckPoint(global_model, local_model);
-    } else {
-      DummySerializer dum;
-      ComboSerializer com(global_model, local_model);
-      return AllreduceRobust::LoadCheckPoint(&dum, &com);
-    }
-  }
-  virtual void CheckPoint(const Serializable *global_model,
-                          const Serializable *local_model) {
-    this->Verify(MockKey(rank, version_number, seq_counter, num_trial), "CheckPoint");
-    double tstart = utils::GetTime();
-    double tbet_chkpt = tstart - time_checkpoint;
-    if (force_local == 0) {
-      AllreduceRobust::CheckPoint(global_model, local_model);
-    } else {
-      DummySerializer dum;
-      ComboSerializer com(global_model, local_model);
-      AllreduceRobust::CheckPoint(&dum, &com);
-    }
-    time_checkpoint = utils::GetTime();
-    double tcost = utils::GetTime() - tstart;
-    if (report_stats != 0 && rank == 0) {
-      std::stringstream ss;
-      ss << "[v" << version_number << "] global_size=" << global_checkpoint.length()
-         << ",local_size=" << (local_chkpt[0].length() + local_chkpt[1].length())
-         << ",check_tcost=" << tcost << " sec"
-         << ",allreduce_tcost=" << tsum_allreduce << " sec"
-         << ",between_chpt=" << tbet_chkpt << " sec\n";
-      this->TrackerPrint(ss.str());
-    }
-    tsum_allreduce = 0.0;
-  }
-
-  virtual void LazyCheckPoint(const Serializable *global_model) {
-    this->Verify(MockKey(rank, version_number, seq_counter, num_trial), "LazyCheckPoint");
-    AllreduceRobust::LazyCheckPoint(global_model);
-  }
-
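The mock key parsed in SetParam above has the format rank,version,seqno,ntrial. A hedged usage sketch (the engine variable stands for an AllreduceMock instance; how it is obtained and restarted is outside this file):

    // kill rank 0 the first time (ntrial 0) it reaches version 1, seqno 2:
    engine.SetParam("mock", "0,1,2,0");
    // after the restart, the driver passes rabit_num_trial=1 (or DMLC_NUM_ATTEMPT),
    // so the same MockKey no longer matches and execution proceeds past the fault:
    engine.SetParam("rabit_num_trial", "1");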
- protected:
-  // force checkpointing to local
-  int force_local;
-  // whether to report statistics
-  int report_stats;
-  // accumulated time spent in allreduce
-  double tsum_allreduce;
-  // time of the last checkpoint
-  double time_checkpoint;
-
- private:
-  struct DummySerializer : public Serializable {
-    virtual void Load(Stream *fi) {
-    }
-    virtual void Save(Stream *fo) const {
-    }
-  };
-  struct ComboSerializer : public Serializable {
-    Serializable *lhs;
-    Serializable *rhs;
-    const Serializable *c_lhs;
-    const Serializable *c_rhs;
-    ComboSerializer(Serializable *lhs, Serializable *rhs)
-        : lhs(lhs), rhs(rhs), c_lhs(lhs), c_rhs(rhs) {
-    }
-    ComboSerializer(const Serializable *lhs, const Serializable *rhs)
-        : lhs(NULL), rhs(NULL), c_lhs(lhs), c_rhs(rhs) {
-    }
-    virtual void Load(Stream *fi) {
-      if (lhs != NULL) lhs->Load(fi);
-      if (rhs != NULL) rhs->Load(fi);
-    }
-    virtual void Save(Stream *fo) const {
-      if (c_lhs != NULL) c_lhs->Save(fo);
-      if (c_rhs != NULL) c_rhs->Save(fo);
-    }
-  };
-  // key to identify the mock stage
-  struct MockKey {
-    int rank;
-    int version;
-    int seqno;
-    int ntrial;
-    MockKey(void) {}
-    MockKey(int rank, int version, int seqno, int ntrial)
-        : rank(rank), version(version), seqno(seqno), ntrial(ntrial) {}
-    inline bool operator==(const MockKey &b) const {
-      return rank == b.rank &&
-             version == b.version &&
-             seqno == b.seqno &&
-             ntrial == b.ntrial;
-    }
-    inline bool operator<(const MockKey &b) const {
-      if (rank != b.rank) return rank < b.rank;
-      if (version != b.version) return version < b.version;
-      if (seqno != b.seqno) return seqno < b.seqno;
-      return ntrial < b.ntrial;
-    }
-  };
-  // number of failure trials
-  int num_trial;
-  // records all mock actions
-  std::map<MockKey, int> mock_map;
-  // used to inject failures at the mocked call points
-  inline void Verify(const MockKey &key, const char *name) {
-    if (mock_map.count(key) != 0) {
-      num_trial += 1;
-      fprintf(stderr, "[%d]@@@Hit Mock Error:%s\n", rank, name);
-      exit(-2);
-    }
-  }
-};
-}  // namespace engine
-}  // namespace rabit
-#endif  // RABIT_ALLREDUCE_MOCK_H_
diff --git a/subtree/rabit/src/allreduce_robust-inl.h b/subtree/rabit/src/allreduce_robust-inl.h
deleted file mode 100644
index d3cbc0033..000000000
--- a/subtree/rabit/src/allreduce_robust-inl.h
+++ /dev/null
@@ -1,169 +0,0 @@
-/*!
- * Copyright (c) 2014 by Contributors
- * \file allreduce_robust-inl.h
- * \brief implementation of inline template functions in AllreduceRobust
- *
- * \author Tianqi Chen
- */
-#ifndef RABIT_ALLREDUCE_ROBUST_INL_H_
-#define RABIT_ALLREDUCE_ROBUST_INL_H_
-#include <vector>
-
-namespace rabit {
-namespace engine {
-/*!
- * \brief run the message-passing algorithm on the allreduce tree;
- *        the result is the edge messages stored in p_edge_in and p_edge_out
- * \param node_value the value associated with the current node
- * \param p_edge_in used to store the input message from each of the edges
- * \param p_edge_out used to store the output message for each of the edges
- * \param func a function that defines the message-passing rule;
- *        parameters of func:
- *          - node_value: same as node_value in the main function
- *          - edge_in: the array of input messages from each edge;
- *                     this includes the output edge, which should be excluded
- *          - out_index: the index of the output edge; the function should
- *                       exclude the output edge when computing the message-passing value
- *        return of func:
- *          the function returns the output message based on the input messages and node_value
- *
- * \tparam EdgeType type of the edge message, must be a simple struct
- * \tparam NodeType type of the node value
- */
-template<typename NodeType, typename EdgeType>
-inline AllreduceRobust::ReturnType
-AllreduceRobust::MsgPassing(const NodeType &node_value,
-                            std::vector<EdgeType> *p_edge_in,
-                            std::vector<EdgeType> *p_edge_out,
-                            EdgeType (*func)
-                            (const NodeType &node_value,
-                             const std::vector<EdgeType> &edge_in,
-                             size_t out_index)) {
-  RefLinkVector &links = tree_links;
-  if (links.size() == 0) return kSuccess;
-  // number of links
-  const int nlink = static_cast<int>(links.size());
-  // initialize the pointers
-  for (int i = 0; i < nlink; ++i) {
-    links[i].ResetSize();
-  }
-  std::vector<EdgeType> &edge_in = *p_edge_in;
-  std::vector<EdgeType> &edge_out = *p_edge_out;
-  edge_in.resize(nlink);
-  edge_out.resize(nlink);
-  // stages in the process
-  // 0: recv messages from children
-  // 1: send message to parent
-  // 2: recv message from parent
-  // 3: send messages to children
-  int stage = 0;
-  // if there are no children, no need to wait: directly start passing the message
-  if (nlink == static_cast<int>(parent_index != -1)) {
-    utils::Assert(parent_index == 0, "parent must be 0");
-    edge_out[parent_index] = func(node_value, edge_in, parent_index);
-    stage = 1;
-  }
-  // while we have not passed the messages out
-  while (true) {
-    // for a node with no parent, directly do stage 3
-    if (parent_index == -1) {
-      utils::Assert(stage != 2 && stage != 1, "invalid stage id");
-    }
-    // select helper
-    utils::SelectHelper selecter;
-    bool done = (stage == 3);
-    for (int i = 0; i < nlink; ++i) {
-      selecter.WatchException(links[i].sock);
-      switch (stage) {
-        case 0:
-          if (i != parent_index && links[i].size_read != sizeof(EdgeType)) {
-            selecter.WatchRead(links[i].sock);
-          }
-          break;
-        case 1:
-          if (i == parent_index) {
-            selecter.WatchWrite(links[i].sock);
-          }
-          break;
-        case 2:
-          if (i == parent_index) {
-            selecter.WatchRead(links[i].sock);
-          }
-          break;
-        case 3:
-          if (i != parent_index && links[i].size_write != sizeof(EdgeType)) {
-            selecter.WatchWrite(links[i].sock);
-            done = false;
-          }
-          break;
-        default: utils::Error("invalid stage");
-      }
-    }
-    // all stages finished; the messages have been written out
-    if (done) break;
-    selecter.Select();
-    // exception handling
-    for (int i = 0; i < nlink; ++i) {
-      // received an OOB message from some link
-      if (selecter.CheckExcept(links[i].sock)) {
-        return ReportError(&links[i], kGetExcept);
-      }
-    }
-    if (stage == 0) {
-      bool finished = true;
-      // read data from children
-      for (int i = 0; i < nlink; ++i) {
-        if (i != parent_index) {
-          if (selecter.CheckRead(links[i].sock)) {
-            ReturnType ret = links[i].ReadToArray(&edge_in[i], sizeof(EdgeType));
-            if (ret != kSuccess) return ReportError(&links[i], ret);
-          }
-          if (links[i].size_read != sizeof(EdgeType)) finished = false;
-        }
-      }
-      // if there is no parent, jump to stage 3; otherwise do stage 1
-      if (finished) {
-        if (parent_index != -1) {
-          edge_out[parent_index] = func(node_value, edge_in, parent_index);
-          stage = 1;
-        } else {
-          for (int i = 0; i < nlink; ++i) {
-            edge_out[i] = func(node_value, edge_in, i);
-          }
-          stage = 3;
-        }
-      }
-    }
-    if (stage == 1) {
-      const int pid = this->parent_index;
-      utils::Assert(pid != -1, "MsgPassing invalid stage");
-      ReturnType ret = links[pid].WriteFromArray(&edge_out[pid], sizeof(EdgeType));
-      if (ret != kSuccess) return ReportError(&links[pid], ret);
-      if (links[pid].size_write == sizeof(EdgeType)) stage = 2;
-    }
-    if (stage == 2) {
-      const int pid = this->parent_index;
-      utils::Assert(pid != -1, "MsgPassing invalid stage");
-      ReturnType ret = links[pid].ReadToArray(&edge_in[pid], sizeof(EdgeType));
-      if (ret != kSuccess) return ReportError(&links[pid], ret);
-      if (links[pid].size_read == sizeof(EdgeType)) {
-        for (int i = 0; i < nlink; ++i) {
-          if (i != pid) edge_out[i] = func(node_value, edge_in, i);
-        }
-        stage = 3;
-      }
-    }
-    if (stage == 3) {
-      for (int i = 0; i < nlink; ++i) {
-        if (i != parent_index && links[i].size_write != sizeof(EdgeType)) {
-          ReturnType ret = links[i].WriteFromArray(&edge_out[i], sizeof(EdgeType));
-          if (ret != kSuccess) return ReportError(&links[i], ret);
-        }
-      }
-    }
-  }
-  return kSuccess;
-}
-}  // namespace engine
-}  // namespace rabit
-#endif  // RABIT_ALLREDUCE_ROBUST_INL_H_
diff --git a/subtree/rabit/src/allreduce_robust.cc b/subtree/rabit/src/allreduce_robust.cc
deleted file mode 100644
index 175751842..000000000
--- a/subtree/rabit/src/allreduce_robust.cc
+++ /dev/null
@@ -1,1183 +0,0 @@
-/*!
- * Copyright (c) 2014 by Contributors
- * \file allreduce_robust.cc
- * \brief Robust implementation of Allreduce
- *
- * \author Tianqi Chen, Ignacio Cano, Tianyi Zhou
- */
-#define _CRT_SECURE_NO_WARNINGS
-#define _CRT_SECURE_NO_DEPRECATE
-#define NOMINMAX
-#include <limits>
-#include <utility>
-#include "../include/rabit/io.h"
-#include "../include/rabit/utils.h"
-#include "../include/rabit/engine.h"
-#include "../include/rabit/rabit-inl.h"
-#include "./allreduce_robust.h"
-
-namespace rabit {
-namespace engine {
-AllreduceRobust::AllreduceRobust(void) {
-  num_local_replica = 0;
-  num_global_replica = 5;
-  default_local_replica = 2;
-  seq_counter = 0;
-  local_chkpt_version = 0;
-  result_buffer_round = 1;
-  global_lazycheck = NULL;
-  use_local_model = -1;
-  recover_counter = 0;
-  env_vars.push_back("rabit_global_replica");
-  env_vars.push_back("rabit_local_replica");
-}
-void AllreduceRobust::Init(void) {
-  AllreduceBase::Init();
-  result_buffer_round = std::max(world_size / num_global_replica, 1);
-}
-/*! \brief shutdown the engine */
-void AllreduceRobust::Shutdown(void) {
-  // need to sync the exec before we shut down: do a pseudo checkpoint
-  // execute the checkpoint; note: when a checkpoint exists, load will not happen
-  utils::Assert(RecoverExec(NULL, 0, ActionSummary::kCheckPoint, ActionSummary::kSpecialOp),
-                "Shutdown: check point must return true");
-  // reset the result buffer
-  resbuf.Clear(); seq_counter = 0;
-  // execute the check-ack step; load happens here
-  utils::Assert(RecoverExec(NULL, 0, ActionSummary::kCheckAck, ActionSummary::kSpecialOp),
-                "Shutdown: check ack must return true");
-  AllreduceBase::Shutdown();
-}
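The MsgPassing template defined in allreduce_robust-inl.h above expects a pure rule function; a toy example of a conforming rule (hypothetical, for illustration only):

    #include <algorithm>
    #include <vector>

    // toy rule: propagate the maximum value seen anywhere in the tree;
    // the message sent on edge out_index must exclude what came in on that edge
    inline int MaxRule(const int &node_value,
                       const std::vector<int> &edge_in,
                       size_t out_index) {
      int m = node_value;
      for (size_t i = 0; i < edge_in.size(); ++i) {
        if (i != out_index) m = std::max(m, edge_in[i]);
      }
      return m;
    }
    // hypothetical call site inside an AllreduceRobust method:
    //   std::vector<int> in, out;
    //   ReturnType ret = MsgPassing(my_value, &in, &out, MaxRule);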
-/*!
- * \brief set parameters of the engine
- * \param name parameter name
- * \param val parameter value
- */
-void AllreduceRobust::SetParam(const char *name, const char *val) {
-  AllreduceBase::SetParam(name, val);
-  if (!strcmp(name, "rabit_global_replica")) num_global_replica = atoi(val);
-  if (!strcmp(name, "rabit_local_replica")) {
-    num_local_replica = atoi(val);
-  }
-}
-/*!
- * \brief perform in-place allreduce on sendrecvbuf;
- *        this function is NOT thread-safe
- * \param sendrecvbuf_ buffer used for both sending and receiving data
- * \param type_nbytes the number of bytes in the unit type
- * \param count number of elements to be reduced
- * \param reducer reduce function
- * \param prepare_fun lazy preprocessing function; prepare_fun(prepare_arg)
- *        is called before performing the Allreduce, to initialize the data in sendrecvbuf_.
- *        If the result of the Allreduce can be recovered directly, prepare_fun is NOT called
- * \param prepare_arg argument passed to the lazy preprocessing function
- */
-void AllreduceRobust::Allreduce(void *sendrecvbuf_,
-                                size_t type_nbytes,
-                                size_t count,
-                                ReduceFunction reducer,
-                                PreprocFunction prepare_fun,
-                                void *prepare_arg) {
-  // skip the action in single-node mode
-  if (world_size == 1) {
-    if (prepare_fun != NULL) prepare_fun(prepare_arg);
-    return;
-  }
-  bool recovered = RecoverExec(sendrecvbuf_, type_nbytes * count, 0, seq_counter);
-  // now we are free to remove the last result, if any
-  if (resbuf.LastSeqNo() != -1 &&
-      (resbuf.LastSeqNo() % result_buffer_round != rank % result_buffer_round)) {
-    resbuf.DropLast();
-  }
-  if (!recovered && prepare_fun != NULL) prepare_fun(prepare_arg);
-  void *temp = resbuf.AllocTemp(type_nbytes, count);
-  while (true) {
-    if (recovered) {
-      std::memcpy(temp, sendrecvbuf_, type_nbytes * count); break;
-    } else {
-      std::memcpy(temp, sendrecvbuf_, type_nbytes * count);
-      if (CheckAndRecover(TryAllreduce(temp, type_nbytes, count, reducer))) {
-        std::memcpy(sendrecvbuf_, temp, type_nbytes * count); break;
-      } else {
-        recovered = RecoverExec(sendrecvbuf_, type_nbytes * count, 0, seq_counter);
-      }
-    }
-  }
-  resbuf.PushTemp(seq_counter, type_nbytes, count);
-  seq_counter += 1;
-}
-/*!
- * \brief broadcast data from root to all nodes
- * \param sendrecvbuf_ buffer used for both sending and receiving data
- * \param total_size the size of the data to be broadcast
- * \param root the id of the root worker that broadcasts the data
- */
-void AllreduceRobust::Broadcast(void *sendrecvbuf_, size_t total_size, int root) {
-  // skip the action in single-node mode
-  if (world_size == 1) return;
-  bool recovered = RecoverExec(sendrecvbuf_, total_size, 0, seq_counter);
-  // now we are free to remove the last result, if any
-  if (resbuf.LastSeqNo() != -1 &&
-      (resbuf.LastSeqNo() % result_buffer_round != rank % result_buffer_round)) {
-    resbuf.DropLast();
-  }
-  void *temp = resbuf.AllocTemp(1, total_size);
-  while (true) {
-    if (recovered) {
-      std::memcpy(temp, sendrecvbuf_, total_size); break;
-    } else {
-      if (CheckAndRecover(TryBroadcast(sendrecvbuf_, total_size, root))) {
-        std::memcpy(temp, sendrecvbuf_, total_size); break;
-      } else {
-        recovered = RecoverExec(sendrecvbuf_, total_size, 0, seq_counter);
-      }
-    }
-  }
-  resbuf.PushTemp(seq_counter, 1, total_size);
-  seq_counter += 1;
-}
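The result-buffer rotation used in Allreduce and Broadcast above can be illustrated with numbers; a standalone sketch using the default from the constructor (the world size is hypothetical):

    #include <algorithm>
    #include <cstdio>

    int main() {
      int world_size = 32, num_global_replica = 5;  // 5 is the constructor default
      int round = std::max(world_size / num_global_replica, 1);  // result_buffer_round = 6
      // a node of rank r keeps the result of call seqno iff seqno % round == r % round,
      // so each Allreduce result survives on roughly num_global_replica nodes
      for (int seqno = 0; seqno < 3; ++seqno)
        std::printf("seqno %d is kept by ranks r with r %% %d == %d\n",
                    seqno, round, seqno % round);
      return 0;
    }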
-/*!
- * \brief load the latest checkpoint
- * \param global_model pointer to the globally shared model/state;
- *        when calling this function, the caller needs to guarantee that global_model
- *        is the same on all nodes
- * \param local_model pointer to the local model that is specific to the current node/rank;
- *        this can be NULL when no local model is needed
- *
- * \return the version number of the checkpoint loaded;
- *         if the returned version == 0, no model has been CheckPointed yet,
- *         the model is not touched, and the user should do the necessary initialization
- *
- * Common usage example:
- *    int iter = rabit::LoadCheckPoint(&model);
- *    if (iter == 0) model.InitParameters();
- *    for (i = iter; i < max_iter; ++i) {
- *      // do many things, including allreduce
- *      rabit::CheckPoint(model);
- *    }
- *
- * \sa CheckPoint, VersionNumber
- */
-int AllreduceRobust::LoadCheckPoint(Serializable *global_model,
-                                    Serializable *local_model) {
-  // skip the action in single-node mode
-  if (world_size == 1) return 0;
-  this->LocalModelCheck(local_model != NULL);
-  if (num_local_replica == 0) {
-    utils::Check(local_model == NULL,
-                 "need to set rabit_local_replica larger than 1 to checkpoint local_model");
-  }
-  // check whether we succeeded
-  if (RecoverExec(NULL, 0, ActionSummary::kLoadCheck, ActionSummary::kSpecialOp)) {
-    int nlocal = std::max(static_cast<int>(local_rptr[local_chkpt_version].size()) - 1, 0);
-    if (local_model != NULL) {
-      if (nlocal == num_local_replica + 1) {
-        // load the local model
-        utils::MemoryFixSizeBuffer fs(BeginPtr(local_chkpt[local_chkpt_version]),
-                                      local_rptr[local_chkpt_version][1]);
-        local_model->Load(&fs);
-      } else {
-        utils::Assert(nlocal == 0, "[%d] local model inconsistent, nlocal=%d", rank, nlocal);
-      }
-    }
-    // reset the result buffer
-    resbuf.Clear(); seq_counter = 0;
-    // load from the buffer
-    utils::MemoryBufferStream fs(&global_checkpoint);
-    if (global_checkpoint.length() == 0) {
-      version_number = 0;
-    } else {
-      utils::Assert(fs.Read(&version_number, sizeof(version_number)) != 0,
-                    "read in version number");
-      global_model->Load(&fs);
-      utils::Assert(local_model == NULL || nlocal == num_local_replica + 1,
-                    "local model inconsistent, nlocal=%d", nlocal);
-    }
-    // run another phase of check-ack, since we recovered from data
-    utils::Assert(RecoverExec(NULL, 0, ActionSummary::kCheckAck, ActionSummary::kSpecialOp),
-                  "check ack must return true");
-    return version_number;
-  } else {
-    // reset the result buffer
-    resbuf.Clear(); seq_counter = 0; version_number = 0;
-    // nothing loaded, a fresh start: everyone initializes their own model
-    return version_number;
-  }
-}
-/*!
- * \brief internal consistency check function:
- *        ensures the user always calls CheckPoint/LoadCheckPoint
- *        either with or without a local model, but never mixed;
- *        this function sets the appropriate settings
- *        in the first call to LoadCheckPoint/CheckPoint
- *
- * \param with_local whether the user calls CheckPoint with a local model
- */
-void AllreduceRobust::LocalModelCheck(bool with_local) {
-  if (use_local_model == -1) {
-    if (with_local) {
-      use_local_model = 1;
-      if (num_local_replica == 0) {
-        num_local_replica = default_local_replica;
-      }
-    } else {
-      use_local_model = 0;
-      num_local_replica = 0;
-    }
-  } else {
-    utils::Check(use_local_model == static_cast<int>(with_local),
-                 "Can only call CheckPoint/LoadCheckPoint always with "
-                 "or without local_model, but not a mixed case");
-  }
-}
-/*!
- * \brief internal implementation of checkpointing; supports both the lazy and the normal way
- *
- * \param global_model pointer to the globally shared model/state;
- *        when calling this function, the caller needs to guarantee that global_model
- *        is the same on all nodes
- * \param local_model pointer to the local model that is specific to the current node/rank;
- *        this can be NULL when no local state is needed
- * \param lazy_checkpt whether the action is a lazy checkpoint
- *
- * \sa CheckPoint, LazyCheckPoint
- */
-void AllreduceRobust::CheckPoint_(const Serializable *global_model,
-                                  const Serializable *local_model,
-                                  bool lazy_checkpt) {
-  // never do checkpointing in single-machine mode
-  if (world_size == 1) {
-    version_number += 1; return;
-  }
-  this->LocalModelCheck(local_model != NULL);
-  if (num_local_replica == 0) {
-    utils::Check(local_model == NULL,
-                 "need to set rabit_local_replica larger than 1 to checkpoint local_model");
-  }
-  if (num_local_replica != 0) {
-    while (true) {
-      if (RecoverExec(NULL, 0, 0, ActionSummary::kLocalCheckPoint)) break;
-      // save the model into the new version's slot
-      int new_version = !local_chkpt_version;
-      local_chkpt[new_version].clear();
-      utils::MemoryBufferStream fs(&local_chkpt[new_version]);
-      if (local_model != NULL) {
-        local_model->Save(&fs);
-      }
-      local_rptr[new_version].clear();
-      local_rptr[new_version].push_back(0);
-      local_rptr[new_version].push_back(local_chkpt[new_version].length());
-      if (CheckAndRecover(TryCheckinLocalState(&local_rptr[new_version],
-                                               &local_chkpt[new_version]))) break;
-    }
-    // run the ack phase; the result can be true or false
-    RecoverExec(NULL, 0, 0, ActionSummary::kLocalCheckAck);
-    // switch the pointer to the new version
-    local_chkpt_version = !local_chkpt_version;
-  }
-  // execute the checkpoint; note: when a checkpoint exists, load will not happen
-  utils::Assert(RecoverExec(NULL, 0, ActionSummary::kCheckPoint, ActionSummary::kSpecialOp),
-                "check point must return true");
-  // this is the critical region where we will change all the stored models:
-  // increase the version number
-  version_number += 1;
-  // save the model
-  if (lazy_checkpt) {
-    global_lazycheck = global_model;
-  } else {
-    global_checkpoint.resize(0);
-    utils::MemoryBufferStream fs(&global_checkpoint);
-    fs.Write(&version_number, sizeof(version_number));
-    global_model->Save(&fs);
-    global_lazycheck = NULL;
-  }
-  // reset the result buffer
-  resbuf.Clear(); seq_counter = 0;
-  // execute the check-ack step; load happens here
-  utils::Assert(RecoverExec(NULL, 0, ActionSummary::kCheckAck, ActionSummary::kSpecialOp),
-                "check ack must return true");
-}
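The global checkpoint blob written by CheckPoint_ has a simple layout that LoadCheckPoint (above) and TryLoadCheckPoint (below) both rely on; a sketch of reading it back (global_model stands for a hypothetical Serializable pointer):

    // layout: [ int version_number ][ bytes produced by global_model->Save() ]
    utils::MemoryBufferStream fs(&global_checkpoint);
    int version_number = 0;
    if (global_checkpoint.length() != 0) {
      fs.Read(&version_number, sizeof(version_number));
      global_model->Load(&fs);   // restores the model saved at that version
    }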
-/*!
- * \brief reset all the existing links by sending an out-of-band message marker;
- *        after this function finishes, all messages received and sent before on all live links are discarded;
- *        this allows us to get a fresh start after an error has happened
- *
- * \return this function can return kSuccess or kSockError;
- *         when kSockError is returned, it simply means there are bad sockets among the links,
- *         and some link recovery procedure is needed
- */
-AllreduceRobust::ReturnType AllreduceRobust::TryResetLinks(void) {
-  // number of links
-  const int nlink = static_cast<int>(all_links.size());
-  for (int i = 0; i < nlink; ++i) {
-    all_links[i].InitBuffer(sizeof(int), 1 << 10, reduce_buffer_size);
-    all_links[i].ResetSize();
-  }
-  // read and discard data from all channels until we pass the mark
-  while (true) {
-    for (int i = 0; i < nlink; ++i) {
-      if (all_links[i].sock.BadSocket()) continue;
-      if (all_links[i].size_write == 0) {
-        char sig = kOOBReset;
-        ssize_t len = all_links[i].sock.Send(&sig, sizeof(sig), MSG_OOB);
-        // errors will be filtered in the next loop
-        if (len == sizeof(sig)) all_links[i].size_write = 1;
-      }
-      if (all_links[i].size_write == 1) {
-        char sig = kResetMark;
-        ssize_t len = all_links[i].sock.Send(&sig, sizeof(sig));
-        if (len == sizeof(sig)) all_links[i].size_write = 2;
-      }
-    }
-    utils::SelectHelper rsel;
-    bool finished = true;
-    for (int i = 0; i < nlink; ++i) {
-      if (all_links[i].size_write != 2 && !all_links[i].sock.BadSocket()) {
-        rsel.WatchWrite(all_links[i].sock); finished = false;
-      }
-    }
-    if (finished) break;
-    // wait until the channels are writable
-    rsel.Select();
-  }
-  for (int i = 0; i < nlink; ++i) {
-    if (!all_links[i].sock.BadSocket()) {
-      utils::SelectHelper::WaitExcept(all_links[i].sock);
-    }
-  }
-  while (true) {
-    utils::SelectHelper rsel;
-    bool finished = true;
-    for (int i = 0; i < nlink; ++i) {
-      if (all_links[i].size_read == 0 && !all_links[i].sock.BadSocket()) {
-        rsel.WatchRead(all_links[i].sock); finished = false;
-      }
-    }
-    if (finished) break;
-    rsel.Select();
-    for (int i = 0; i < nlink; ++i) {
-      if (all_links[i].sock.BadSocket()) continue;
-      if (all_links[i].size_read == 0) {
-        int atmark = all_links[i].sock.AtMark();
-        if (atmark < 0) {
-          utils::Assert(all_links[i].sock.BadSocket(), "must already have gone bad");
-        } else if (atmark > 0) {
-          all_links[i].size_read = 1;
-        } else {
-          // not at the mark: read and discard the data
-          ssize_t len = all_links[i].sock.Recv(all_links[i].buffer_head, all_links[i].buffer_size);
-          if (all_links[i].sock.AtMark()) all_links[i].size_read = 1;
-          // zero length: the remote closed the connection, close the socket
-          if (len == 0) all_links[i].sock.Close();
-        }
-      }
-    }
-  }
-  // start synchronization; use blocking I/O to avoid select
-  for (int i = 0; i < nlink; ++i) {
-    if (!all_links[i].sock.BadSocket()) {
-      char oob_mark;
-      all_links[i].sock.SetNonBlock(false);
-      ssize_t len = all_links[i].sock.Recv(&oob_mark, sizeof(oob_mark), MSG_WAITALL);
-      if (len == 0) {
-        all_links[i].sock.Close(); continue;
-      } else if (len > 0) {
-        utils::Assert(oob_mark == kResetMark, "wrong oob msg");
-        utils::Assert(all_links[i].sock.AtMark() != 1, "should already have read past the mark");
-      } else {
-        utils::Assert(errno != EAGAIN && errno != EWOULDBLOCK, "BUG");
-      }
-      // send out the ack
-      char ack = kResetAck;
-      while (true) {
-        len = all_links[i].sock.Send(&ack, sizeof(ack));
-        if (len == sizeof(ack)) break;
-        if (len == -1) {
-          if (errno != EAGAIN && errno != EWOULDBLOCK) break;
-        }
-      }
-    }
-  }
-  // wait for all acks
-  for (int i = 0; i < nlink; ++i) {
-    if (!all_links[i].sock.BadSocket()) {
-      char ack;
-      ssize_t len = all_links[i].sock.Recv(&ack, sizeof(ack), MSG_WAITALL);
-      if (len == 0) {
-        all_links[i].sock.Close(); continue;
-      } else if (len > 0) {
-        utils::Assert(ack == kResetAck, "wrong Ack MSG");
-      } else {
-        utils::Assert(errno != EAGAIN && errno != EWOULDBLOCK, "BUG");
-      }
-      // set the socket back to non-blocking mode
-      all_links[i].sock.SetNonBlock(true);
-    }
-  }
-  for (int i = 0; i < nlink; ++i) {
-    if (all_links[i].sock.BadSocket()) return kSockError;
-  }
-  return kSuccess;
-}
-/*!
- * \brief if err_type indicates an error,
- *        recover the links according to the error type reported;
- *        if there is no error, return true
- * \param err_type the type of error happening in the system
- * \return true if err_type is kSuccess, false otherwise
- */
-bool AllreduceRobust::CheckAndRecover(ReturnType err_type) {
-  if (err_type == kSuccess) return true;
-  utils::Assert(err_link != NULL, "must know the error source");
-  recover_counter += 1;
-  {
-    // simple way: shut down all the links
-    for (size_t i = 0; i < all_links.size(); ++i) {
-      if (!all_links[i].sock.BadSocket()) all_links[i].sock.Close();
-    }
-    ReConnectLinks("recover");
-    return false;
-  }
-  // this was the old way;
-  // TryResetLinks still causes possible errors, so it is not used
-  while (err_type != kSuccess) {
-    switch (err_type.value) {
-      case kGetExcept: err_type = TryResetLinks(); break;
-      case kSockError: {
-        TryResetLinks();
-        ReConnectLinks();
-        err_type = kSuccess;
-        break;
-      }
-      default: utils::Assert(false, "RecoverLinks: cannot reach here");
-    }
-  }
-  return false;
-}
-/*!
- * \brief message-passing function, used to decide the
- *        shortest distance to a possible source of the data
- * \param node_value a pair of have_data and size:
- *        have_data is whether the current node has the data,
- *        size gives the size of the data if the current node is kHaveData
- * \param dist_in the shortest distance to any data source in each direction
- * \param out_index the edge index of the output link
- * \return the shortest-distance result for the out edge specified by out_index
- */
-inline std::pair<int, size_t>
-ShortestDist(const std::pair<bool, size_t> &node_value,
-             const std::vector< std::pair<int, size_t> > &dist_in,
-             size_t out_index) {
-  if (node_value.first) {
-    return std::make_pair(1, node_value.second);
-  }
-  size_t size = 0;
-  int res = std::numeric_limits<int>::max();
-  for (size_t i = 0; i < dist_in.size(); ++i) {
-    if (i == out_index) continue;
-    if (dist_in[i].first == std::numeric_limits<int>::max()) continue;
-    // add one hop
-    if (dist_in[i].first + 1 < res) {
-      res = dist_in[i].first + 1;
-      size = dist_in[i].second;
-    }
-  }
-  return std::make_pair(res, size);
-}
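ShortestDist can be exercised standalone; the sketch below duplicates the function so the example compiles on its own and feeds it one concrete node (all values hypothetical):

    #include <cstdio>
    #include <limits>
    #include <utility>
    #include <vector>

    // copy of ShortestDist as defined above, for a self-contained example
    inline std::pair<int, size_t>
    ShortestDist(const std::pair<bool, size_t> &node_value,
                 const std::vector< std::pair<int, size_t> > &dist_in,
                 size_t out_index) {
      if (node_value.first) return std::make_pair(1, node_value.second);
      size_t size = 0;
      int res = std::numeric_limits<int>::max();
      for (size_t i = 0; i < dist_in.size(); ++i) {
        if (i == out_index) continue;
        if (dist_in[i].first == std::numeric_limits<int>::max()) continue;
        if (dist_in[i].first + 1 < res) {
          res = dist_in[i].first + 1;
          size = dist_in[i].second;
        }
      }
      return std::make_pair(res, size);
    }

    int main() {
      // a node without data; edge 0 reports a source at distance 2 holding
      // 1024 bytes, edge 1 reports no known source
      std::vector< std::pair<int, size_t> > dist_in;
      dist_in.push_back(std::make_pair(2, static_cast<size_t>(1024)));
      dist_in.push_back(std::make_pair(std::numeric_limits<int>::max(),
                                       static_cast<size_t>(0)));
      std::pair<int, size_t> out =
          ShortestDist(std::make_pair(false, static_cast<size_t>(0)), dist_in, 1);
      std::printf("distance %d, size %zu\n", out.first, out.second);  // prints 3, 1024
      return 0;
    }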
-/*!
- * \brief message-passing function, used to decide the
- *        data requests on each edge: whether data needs to be requested from a certain edge
- * \param node_value a pair of request_data and best_link:
- *        request_data stores whether the current node needs to request data,
- *        best_link gives the best edge index from which to fetch the data
- * \param req_in the data requests from the incoming edges
- * \param out_index the edge index of the output link
- * \return the request for the output edge
- */
-inline char DataRequest(const std::pair<bool, int> &node_value,
-                        const std::vector<char> &req_in,
-                        size_t out_index) {
-  // whether the current node needs to request data
-  bool request_data = node_value.first;
-  // which edge index is the best link to request data from;
-  // can be -1, which means the current node already contains the data
-  const int best_link = node_value.second;
-  if (static_cast<int>(out_index) == best_link) {
-    if (request_data) return 1;
-    for (size_t i = 0; i < req_in.size(); ++i) {
-      if (i == out_index) continue;
-      if (req_in[i] != 0) return 1;
-    }
-  }
-  return 0;
-}
-/*!
- * \brief try to decide the routing of the recovery message passing
- * \param role the current role of the node
- * \param p_size used to store the size of the message;
- *        for a node in state kHaveData, this size must be set correctly before calling the function;
- *        for others, this serves as an output parameter
- * \param p_recvlink used to store the link the current node should receive data from, if necessary;
- *        this can be -1, which means the current node already has the data
- * \param p_req_in used to store the resulting vector, indicating which links we should send data to
- *
- * \return this function can return kSuccess, kSockError or kGetExcept, see ReturnType for details
- * \sa ReturnType
- */
-AllreduceRobust::ReturnType
-AllreduceRobust::TryDecideRouting(AllreduceRobust::RecoverType role,
-                                  size_t *p_size,
-                                  int *p_recvlink,
-                                  std::vector<bool> *p_req_in) {
-  int best_link = -2;
-  {
-    // get the shortest distance to the request point
-    std::vector< std::pair<int, size_t> > dist_in, dist_out;
-    ReturnType succ = MsgPassing(std::make_pair(role == kHaveData, *p_size),
-                                 &dist_in, &dist_out, ShortestDist);
-    if (succ != kSuccess) return succ;
-    if (role != kHaveData) {
-      for (size_t i = 0; i < dist_in.size(); ++i) {
-        if (dist_in[i].first != std::numeric_limits<int>::max()) {
-          utils::Check(best_link == -2 || *p_size == dist_in[i].second,
-                       "[%d] Allreduce size inconsistent, distin=%lu, size=%lu, reporting=%lu\n",
-                       rank, dist_in[i].first, *p_size, dist_in[i].second);
-          if (best_link == -2 || dist_in[i].first < dist_in[best_link].first) {
-            best_link = static_cast<int>(i);
-            *p_size = dist_in[i].second;
-          }
-        }
-      }
-      utils::Check(best_link != -2, "Too many nodes went down and we cannot recover..");
-    } else {
-      best_link = -1;
-    }
-  }
-  // get the node requests
-  std::vector<char> req_in, req_out;
-  ReturnType succ = MsgPassing(std::make_pair(role == kRequestData, best_link),
-                               &req_in, &req_out, DataRequest);
-  if (succ != kSuccess) return succ;
-  // set p_req_in
-  p_req_in->resize(req_in.size());
-  for (size_t i = 0; i < req_in.size(); ++i) {
-    // set p_req_in
-    (*p_req_in)[i] = (req_in[i] != 0);
-    if (req_out[i] != 0) {
-      utils::Assert(req_in[i] == 0, "cannot send and receive a request on the same link");
-      utils::Assert(static_cast<int>(i) == best_link, "request result inconsistent");
-    }
-  }
-  *p_recvlink = best_link;
-  return kSuccess;
-}
-/*!
- * \brief try to finish the data recovery request;
- *        this function is used together with TryDecideRouting
- * \param role the current role of the node
- * \param sendrecvbuf_ the buffer to store the data to be sent/received:
- *        - if the role is kHaveData, this stores the data to be sent
- *        - if the role is kRequestData, this is the buffer to store the result
- *        - if the role is kPassData, this will not be used, and can be NULL
- * \param size the size of the data, obtained from TryDecideRouting
- * \param recv_link the link index to receive data on, if necessary, obtained from TryDecideRouting
- * \param req_in the requests of each link to send data, obtained from TryDecideRouting
- *
- * \return this function can return kSuccess, kSockError or kGetExcept, see ReturnType for details
- * \sa ReturnType, TryDecideRouting
- */
-AllreduceRobust::ReturnType
-AllreduceRobust::TryRecoverData(RecoverType role,
-                                void *sendrecvbuf_,
-                                size_t size,
-                                int recv_link,
-                                const std::vector<bool> &req_in) {
-  RefLinkVector &links = tree_links;
-  // no need to run recovery for zero-size messages
-  if (links.size() == 0 || size == 0) return kSuccess;
-  utils::Assert(req_in.size() == links.size(), "TryRecoverData");
-  const int nlink = static_cast<int>(links.size());
-  {
-    bool req_data = role == kRequestData;
-    for (int i = 0; i < nlink; ++i) {
-      if (req_in[i]) {
-        utils::Assert(i != recv_link, "TryDecideRouting");
-        req_data = true;
-      }
-    }
-    // if we neither provide nor receive data, directly exit
-    if (!req_data) return kSuccess;
-  }
-  utils::Assert(recv_link >= 0 || role == kHaveData, "recv_link must be active");
-  if (role == kPassData) {
-    links[recv_link].InitBuffer(1, size, reduce_buffer_size);
-  }
-  for (int i = 0; i < nlink; ++i) {
-    links[i].ResetSize();
-  }
-  while (true) {
-    bool finished = true;
-    utils::SelectHelper selecter;
-    for (int i = 0; i < nlink; ++i) {
-      if (i == recv_link && links[i].size_read != size) {
-        selecter.WatchRead(links[i].sock);
-        finished = false;
-      }
-      if (req_in[i] && links[i].size_write != size) {
-        if (role == kHaveData ||
-            (links[recv_link].size_read != links[i].size_write)) {
-          selecter.WatchWrite(links[i].sock);
-        }
-        finished = false;
-      }
-      selecter.WatchException(links[i].sock);
-    }
-    if (finished) break;
-    selecter.Select();
-    // exception handling
-    for (int i = 0; i < nlink; ++i) {
-      if (selecter.CheckExcept(links[i].sock)) {
-        return ReportError(&links[i], kGetExcept);
-      }
-    }
-    if (role == kRequestData) {
-      const int pid = recv_link;
-      if (selecter.CheckRead(links[pid].sock)) {
-        ReturnType ret = links[pid].ReadToArray(sendrecvbuf_, size);
-        if (ret != kSuccess) {
-          return ReportError(&links[pid], ret);
-        }
-      }
-      for (int i = 0; i < nlink; ++i) {
-        if (req_in[i] && links[i].size_write != links[pid].size_read) {
-          ReturnType ret = links[i].WriteFromArray(sendrecvbuf_, links[pid].size_read);
-          if (ret != kSuccess) {
-            return ReportError(&links[i], ret);
-          }
-        }
-      }
-    }
-    if (role == kHaveData) {
-      for (int i = 0; i < nlink; ++i) {
-        if (req_in[i] && links[i].size_write != size) {
-          ReturnType ret = links[i].WriteFromArray(sendrecvbuf_, size);
-          if (ret != kSuccess) {
-            return ReportError(&links[i], ret);
-          }
-        }
-      }
-    }
-    if (role == kPassData) {
-      const int pid = recv_link;
-      const size_t buffer_size = links[pid].buffer_size;
-      if (selecter.CheckRead(links[pid].sock)) {
-        size_t min_write = size;
-        for (int i = 0; i < nlink; ++i) {
-          if (req_in[i]) min_write = std::min(links[i].size_write, min_write);
-        }
-        utils::Assert(min_write <= links[pid].size_read, "boundary check");
check"); - ReturnType ret = links[pid].ReadToRingBuffer(min_write, size); - if (ret != kSuccess) { - return ReportError(&links[pid], ret); - } - } - for (int i = 0; i < nlink; ++i) { - if (req_in[i] && links[pid].size_read != links[i].size_write) { - size_t start = links[i].size_write % buffer_size; - // send out data from ring buffer - size_t nwrite = std::min(buffer_size - start, links[pid].size_read - links[i].size_write); - ssize_t len = links[i].sock.Send(links[pid].buffer_head + start, nwrite); - if (len != -1) { - links[i].size_write += len; - } else { - ReturnType ret = Errno2Return(); - if (ret != kSuccess) return ReportError(&links[i], ret); - } - } - } - } - } - return kSuccess; -} -/*! - * \brief try to load check point - * - * This is a collaborative function called by all nodes - * only the nodes with requester set to true really needs to load the check point - * other nodes acts as collaborative roles to complete this request - * - * \param requester whether current node is the requester - * \return this function can return kSuccess/kSockError/kGetExcept, see ReturnType for details - * \sa ReturnType - */ -AllreduceRobust::ReturnType AllreduceRobust::TryLoadCheckPoint(bool requester) { - // check in local data - RecoverType role = requester ? kRequestData : kHaveData; - ReturnType succ; - if (num_local_replica != 0) { - if (requester) { - // clear existing history, if any, before load - local_rptr[local_chkpt_version].clear(); - local_chkpt[local_chkpt_version].clear(); - } - // recover local checkpoint - succ = TryRecoverLocalState(&local_rptr[local_chkpt_version], - &local_chkpt[local_chkpt_version]); - if (succ != kSuccess) return succ; - int nlocal = std::max(static_cast(local_rptr[local_chkpt_version].size()) - 1, 0); - // check if everyone is OK - unsigned state = 0; - if (nlocal == num_local_replica + 1) { - // complete recovery - state = 1; - } else if (nlocal == 0) { - // get nothing - state = 2; - } else { - // partially complete state - state = 4; - } - succ = TryAllreduce(&state, sizeof(state), 1, op::Reducer); - if (succ != kSuccess) return succ; - utils::Check(state == 1 || state == 2, - "LoadCheckPoint: too many nodes fails, cannot recover local state"); - } - // do call save model if the checkpoint was lazy - if (role == kHaveData && global_lazycheck != NULL) { - global_checkpoint.resize(0); - utils::MemoryBufferStream fs(&global_checkpoint); - fs.Write(&version_number, sizeof(version_number)); - global_lazycheck->Save(&fs); - global_lazycheck = NULL; - } - // recover global checkpoint - size_t size = this->global_checkpoint.length(); - int recv_link; - std::vector req_in; - succ = TryDecideRouting(role, &size, &recv_link, &req_in); - if (succ != kSuccess) return succ; - if (role == kRequestData) { - global_checkpoint.resize(size); - } - if (size == 0) return kSuccess; - return TryRecoverData(role, BeginPtr(global_checkpoint), size, recv_link, req_in); -} -/*! 
-/*!
- * \brief try to get the result of the operation specified by seqno
- *
- *        This is a collaborative function called by all nodes;
- *        only the nodes with requester set to true really need to get the result,
- *        the other nodes act in collaborative roles to complete the request
- *
- * \param sendrecvbuf the buffer to store the result; this parameter is only used when the current node is the requester
- * \param size the total size of the buffer; this parameter is only used when the current node is the requester
- * \param seqno sequence number of the operation; this is the unique index of the operation in the current iteration
- * \param requester whether the current node is the requester
- * \return this function can return kSuccess, kSockError or kGetExcept, see ReturnType for details
- * \sa ReturnType
- */
-AllreduceRobust::ReturnType
-AllreduceRobust::TryGetResult(void *sendrecvbuf, size_t size, int seqno, bool requester) {
-  // if the minimum sequence number requested is the local checkpoint ack,
-  // all nodes have finished the local checkpoint, directly return
-  if (seqno == ActionSummary::kLocalCheckAck) return kSuccess;
-  if (seqno == ActionSummary::kLocalCheckPoint) {
-    // new version of the local model
-    int new_version = !local_chkpt_version;
-    int nlocal = std::max(static_cast<int>(local_rptr[new_version].size()) - 1, 0);
-    // if we get to this place, the state must have already been set up once
-    utils::Assert(nlocal == 1 || nlocal == num_local_replica + 1,
-                  "TryGetResult::Checkpoint");
-    return TryRecoverLocalState(&local_rptr[new_version], &local_chkpt[new_version]);
-  }
-  // handle normal data recovery
-  RecoverType role;
-  if (!requester) {
-    sendrecvbuf = resbuf.Query(seqno, &size);
-    role = sendrecvbuf != NULL ? kHaveData : kPassData;
-  } else {
-    role = kRequestData;
-  }
-  int recv_link;
-  std::vector<bool> req_in;
-  // size of the data
-  size_t data_size = size;
-  ReturnType succ = TryDecideRouting(role, &data_size, &recv_link, &req_in);
-  if (succ != kSuccess) return succ;
-  utils::Check(data_size != 0, "zero size check point is not allowed");
-  if (role == kRequestData || role == kHaveData) {
-    utils::Check(data_size == size,
-                 "Allreduce: the recovered data size does not match the specification of the function call.\n"
-                 "Please check whether the calling sequence of the recovered program is the "
-                 "same as the original one in the current VersionNumber");
-  }
-  return TryRecoverData(role, sendrecvbuf, data_size, recv_link, req_in);
-}
-/*!
- * \brief try to run a recovery execution for the requested action described by flag and seqno;
- *        the function will keep blocking to run possible recovery operations before the specified action,
- *        until the requested result is received by the recovering procedure,
- *        or the function discovers that the requested action has not yet been executed and returns false
- *
- * \param buf the buffer to store the result
- * \param size the total size of the buffer
- * \param flag flag information about the action, \sa ActionSummary
- * \param seqno sequence number of the action; if it is a special action with the flag set,
- *        seqno needs to be set to ActionSummary::kSpecialOp
- *
- * \return this function returns true or false:
- *         - true means buf was already set to the result by the recovering procedure;
- *           the action is complete and no further action is needed
- *         - false means this is the latest action that has not yet been executed; the action still needs to run
- */
-bool AllreduceRobust::RecoverExec(void *buf, size_t size, int flag, int seqno) {
-  if (flag != 0) {
-    utils::Assert(seqno == ActionSummary::kSpecialOp, "must only set seqno for normal operations");
-  }
-  // request
-  ActionSummary req(flag, seqno);
-  while (true) {
-    this->ReportStatus();
-    // action
-    ActionSummary act = req;
-    // get the reduced action
-    if (!CheckAndRecover(TryAllreduce(&act, sizeof(act), 1, ActionSummary::Reducer))) continue;
-    if (act.check_ack()) {
-      if (act.check_point()) {
-        // if we also have check_point, do the checkpoint first
-        utils::Assert(!act.diff_seq(),
-                      "check ack & check pt cannot occur together with normal ops");
-        // if we requested the checkpoint, we are free to go
-        if (req.check_point()) return true;
-      } else if (act.load_check()) {
-        // if there are only check_ack and load_check, do load_check
-        if (!CheckAndRecover(TryLoadCheckPoint(req.load_check()))) continue;
-        // if we requested the load check, the mission is complete
-        if (req.load_check()) return true;
-      } else {
-        // there is no checkpoint and no load check: execute the check ack
-        if (req.check_ack()) return true;
-      }
-      // if execution gets to this point,
-      // the requested action has not been completed;
-      // try the next round
-    } else {
-      if (act.check_point()) {
-        if (act.diff_seq()) {
-          utils::Assert(act.min_seqno() != ActionSummary::kSpecialOp, "min seq bug");
-          bool requester = req.min_seqno() == act.min_seqno();
-          if (!CheckAndRecover(TryGetResult(buf, size, act.min_seqno(), requester))) continue;
-          if (requester) return true;
-        } else {
-          // no difference in seqno, which means we are free to checkpoint
-          if (req.check_point()) return true;
-        }
-      } else {
-        // no checkpoint
-        if (act.load_check()) {
-          // all the nodes called load_check, this is an incomplete action
-          if (!act.diff_seq()) return false;
-          // load check has higher priority, so do load_check
-          if (!CheckAndRecover(TryLoadCheckPoint(req.load_check()))) continue;
-          // if we requested the load check, the mission is complete
-          if (req.load_check()) return true;
-        } else {
-          // no special flags: no checkpoint, check ack or load_check
-          utils::Assert(act.min_seqno() != ActionSummary::kSpecialOp, "min seq bug");
-          if (act.diff_seq()) {
-            bool requester = req.min_seqno() == act.min_seqno();
-            if (!CheckAndRecover(TryGetResult(buf, size, act.min_seqno(), requester))) continue;
-            if (requester) return true;
-          } else {
-            // all the requests are the same:
-            // this is the most recent command that is yet to be executed
-            return false;
-          }
-        }
-      }
-      // something is still incomplete, try the next round
-    }
-  }
-  utils::Assert(false, "RecoverExec: should not reach here");
-  return true;
-}
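The consensus step at the top of RecoverExec allreduces an ActionSummary (defined in allreduce_robust.h, which is not part of this hunk). Conceptually, its reducer behaves roughly like the hedged sketch below: flags are OR-ed, the minimum seqno wins, and a disagreement bit feeds act.diff_seq():

    // hedged sketch only: the real ActionSummary packs these fields differently
    struct Act {
      int flags;       // kCheckPoint / kCheckAck / kLoadCheck bits
      int min_seqno;   // smallest pending sequence number across nodes
      bool diff_seq;   // true when nodes disagree on min_seqno
    };
    inline void ReduceAct(Act *dst, const Act &src) {
      dst->flags |= src.flags;   // any node's request becomes visible to everyone
      dst->diff_seq = dst->diff_seq || src.diff_seq ||
                      (dst->min_seqno != src.min_seqno);
      if (src.min_seqno < dst->min_seqno) dst->min_seqno = src.min_seqno;
    }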
-/*!
- * \brief try to recover the local state, making each local state the result of itself
- *        plus the replicated states of the previous num_local_replica hops in the ring
- *
- *        The input parameters must contain the valid local states available on the current node;
- *        this function tries its best to "complete" the missing parts of local_rptr and local_chkpt.
- *        If there is sufficient information in the ring, when the function returns, local_chkpt will
- *        contain num_local_replica + 1 checkpoints (including the chkpt of this node);
- *        if there is not sufficient information in the ring, the number of checkpoints
- *        will be less than the specified value
- *
- * \param p_local_rptr pointer to the segment pointers in the states array
- * \param p_local_chkpt pointer to the storage of the local checkpoints
- * \return this function can return kSuccess, kSockError or kGetExcept, see ReturnType for details
- * \sa ReturnType
- */
-AllreduceRobust::ReturnType
-AllreduceRobust::TryRecoverLocalState(std::vector<size_t> *p_local_rptr,
-                                      std::string *p_local_chkpt) {
-  // if there is no local replica, we can do nothing
-  if (num_local_replica == 0) return kSuccess;
-  std::vector<size_t> &rptr = *p_local_rptr;
-  std::string &chkpt = *p_local_chkpt;
-  if (rptr.size() == 0) {
-    rptr.push_back(0);
-    utils::Assert(chkpt.length() == 0, "local chkpt space inconsistent");
-  }
-  const int n = num_local_replica;
-  {
-    // backward pass: passing state in the backward direction of the ring
-    const int nlocal = static_cast<int>(rptr.size() - 1);
-    utils::Assert(nlocal <= n + 1, "invalid local replica");
-    std::vector<int> msg_back(n + 1);
-    msg_back[0] = nlocal;
-    // backward passing the request one hop
-    ReturnType succ;
-    succ = RingPassing(BeginPtr(msg_back),
-                       1 * sizeof(int), (n + 1) * sizeof(int),
-                       0 * sizeof(int), n * sizeof(int),
-                       ring_next, ring_prev);
-    if (succ != kSuccess) return succ;
-    int msg_forward[2];
-    msg_forward[0] = nlocal;
-    succ = RingPassing(msg_forward,
-                       1 * sizeof(int), 2 * sizeof(int),
-                       0 * sizeof(int), 1 * sizeof(int),
-                       ring_prev, ring_next);
-    if (succ != kSuccess) return succ;
-    // calculate the number of segments we can read from the next link
-    int nread_end = nlocal;
-    for (int i = 1; i <= n; ++i) {
-      nread_end = std::max(nread_end, msg_back[i] - i);
-    }
-    // gives the size of the forward write
-    int nwrite_start = std::min(msg_forward[1] + 1, nread_end);
-    // get the size of each segment
-    std::vector<size_t> sizes(nread_end);
-    for (int i = 0; i < nlocal; ++i) {
-      sizes[i] = rptr[i + 1] - rptr[i];
-    }
-    // pass the sizes through the link
-    succ = RingPassing(BeginPtr(sizes),
-                       nlocal * sizeof(size_t),
-                       nread_end * sizeof(size_t),
-                       nwrite_start * sizeof(size_t),
-                       nread_end * sizeof(size_t),
-                       ring_next, ring_prev);
-    if (succ != kSuccess) return succ;
-    // update rptr
-    rptr.resize(nread_end + 1);
-    for (int i = nlocal; i < nread_end; ++i) {
-      rptr[i + 1] = rptr[i] + sizes[i];
-    }
-    chkpt.resize(rptr.back());
-    // pass the data through the link
-    succ = RingPassing(BeginPtr(chkpt), rptr[nlocal], rptr[nread_end],
-                       rptr[nwrite_start], rptr[nread_end],
-                       ring_next, ring_prev);
-    if (succ != kSuccess) {
-      rptr.resize(nlocal + 1); chkpt.resize(rptr.back()); return succ;
-    }
-  }
-  {
-    // forward pass: passing state in the forward direction of the ring
-    const int nlocal = static_cast<int>(rptr.size() - 1);
-    utils::Assert(nlocal <= n + 1, "invalid local replica");
-    std::vector<int> msg_forward(n + 1);
-    msg_forward[0] = nlocal;
-    // forward passing the request one hop
-    ReturnType succ;
- succ = RingPassing(BeginPtr(msg_forward), - 1 * sizeof(int), (n+1) * sizeof(int), - 0 * sizeof(int), n * sizeof(int), - ring_prev, ring_next); - if (succ != kSuccess) return succ; - int msg_back[2]; - msg_back[0] = nlocal; - succ = RingPassing(msg_back, - 1 * sizeof(int), 2 * sizeof(int), - 0 * sizeof(int), 1 * sizeof(int), - ring_next, ring_prev); - if (succ != kSuccess) return succ; - // calculate the number of things we can read from next link - int nread_end = nlocal, nwrite_end = 1; - // have to have itself in order to get other data from prev link - if (nlocal != 0) { - for (int i = 1; i <= n; ++i) { - if (msg_forward[i] == 0) break; - nread_end = std::max(nread_end, i + 1); - nwrite_end = i + 1; - } - if (nwrite_end > n) nwrite_end = n; - } else { - nread_end = 0; nwrite_end = 0; - } - // gives the size of forward - int nwrite_start = std::min(msg_back[1] - 1, nwrite_end); - // next node miss the state of itself, cannot recover - if (nwrite_start < 0) nwrite_start = nwrite_end = 0; - // get the size of each segments - std::vector sizes(nread_end); - for (int i = 0; i < nlocal; ++i) { - sizes[i] = rptr[i + 1] - rptr[i]; - } - // pass size through the link, check consistency - succ = RingPassing(BeginPtr(sizes), - nlocal * sizeof(size_t), - nread_end * sizeof(size_t), - nwrite_start * sizeof(size_t), - nwrite_end * sizeof(size_t), - ring_prev, ring_next); - if (succ != kSuccess) return succ; - // update rptr - rptr.resize(nread_end + 1); - for (int i = nlocal; i < nread_end; ++i) { - rptr[i + 1] = rptr[i] + sizes[i]; - } - chkpt.resize(rptr.back()); - // pass data through the link - succ = RingPassing(BeginPtr(chkpt), rptr[nlocal], rptr[nread_end], - rptr[nwrite_start], rptr[nwrite_end], - ring_prev, ring_next); - if (succ != kSuccess) { - rptr.resize(nlocal + 1); chkpt.resize(rptr.back()); return succ; - } - } - return kSuccess; -} -/*! 
- * \brief try to checkpoint local state, this function is called in the normal execution phase
- *  of a checkpoint that contains local state;
- *  the input state must contain exactly one saved state (the local state of the current node),
- *  after completion, this function will get the local state from the previous num_local_replica nodes and put them
- *  into local_chkpt and local_rptr
- *
- * It is also OK to call TryRecoverLocalState instead;
- * TryRecoverLocalState makes fewer assumptions about the input, and requires more communication
- *
- * \param p_local_rptr the pointer to the segment pointers in the states array
- * \param p_local_chkpt the pointer to the storage of local check points
- * \return this function can return kSuccess/kSockError/kGetExcept, see ReturnType for details
- * \sa ReturnType, TryRecoverLocalState
- */
-AllreduceRobust::ReturnType
-AllreduceRobust::TryCheckinLocalState(std::vector<size_t> *p_local_rptr,
-                                      std::string *p_local_chkpt) {
-  // if there is no local replica, we can do nothing
-  if (num_local_replica == 0) return kSuccess;
-  std::vector<size_t> &rptr = *p_local_rptr;
-  std::string &chkpt = *p_local_chkpt;
-  utils::Assert(rptr.size() == 2,
-                "TryCheckinLocalState must have exactly 1 state");
-  const int n = num_local_replica;
-  std::vector<size_t> sizes(n + 1);
-  sizes[0] = rptr[1] - rptr[0];
-  ReturnType succ;
-  // pass sizes through the link
-  succ = RingPassing(BeginPtr(sizes),
-                     1 * sizeof(size_t),
-                     (n + 1) * sizeof(size_t),
-                     0 * sizeof(size_t),
-                     n * sizeof(size_t),
-                     ring_prev, ring_next);
-  if (succ != kSuccess) return succ;
-  // update rptr
-  rptr.resize(n + 2);
-  for (int i = 1; i <= n; ++i) {
-    rptr[i + 1] = rptr[i] + sizes[i];
-  }
-  chkpt.resize(rptr.back());
-  // pass data through the link
-  succ = RingPassing(BeginPtr(chkpt),
-                     rptr[1], rptr[n + 1],
-                     rptr[0], rptr[n],
-                     ring_prev, ring_next);
-  if (succ != kSuccess) {
-    rptr.resize(2); chkpt.resize(rptr.back()); return succ;
-  }
-  return kSuccess;
-}
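
Worth noting is the storage convention this function relies on: local_chkpt is a single flat string and local_rptr holds segment offsets in CSR style, so the state replicated from the node k hops back in the ring occupies chkpt[rptr[k]:rptr[k+1]]. A minimal standalone sketch of that layout (the two example states here are made up):

    #include <cassert>
    #include <string>
    #include <vector>

    int main() {
      // flat byte storage plus segment offsets, mirroring local_chkpt/local_rptr
      std::vector<size_t> rptr(1, 0);
      std::string chkpt;
      // append two states: our own, then one replica from the previous hop
      const std::string states[] = {"AAAA", "BB"};
      for (const std::string &state : states) {
        chkpt += state;
        rptr.push_back(chkpt.length());
      }
      // segment k lives at chkpt[rptr[k]:rptr[k+1]]
      assert(chkpt.substr(rptr[1], rptr[2] - rptr[1]) == "BB");
      return 0;
    }

-/*!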
- * \brief perform a ring passing to receive data from prev link, and sent data to next link - * this allows data to stream over a ring structure - * sendrecvbuf[0:read_ptr] are already provided by current node - * current node will recv sendrecvbuf[read_ptr:read_end] from prev link - * current node will send sendrecvbuf[write_ptr:write_end] to next link - * write_ptr will wait till the data is readed before sending the data - * this function requires read_end >= write_end - * - * \param sendrecvbuf_ the place to hold the incoming and outgoing data - * \param read_ptr the initial read pointer - * \param read_end the ending position to read - * \param write_ptr the initial write pointer - * \param write_end the ending position to write - * \param read_link pointer to link to previous position in ring - * \param write_link pointer to link of next position in ring - */ -AllreduceRobust::ReturnType -AllreduceRobust::RingPassing(void *sendrecvbuf_, - size_t read_ptr, - size_t read_end, - size_t write_ptr, - size_t write_end, - LinkRecord *read_link, - LinkRecord *write_link) { - if (read_link == NULL || write_link == NULL || read_end == 0) return kSuccess; - utils::Assert(write_end <= read_end, - "RingPassing: boundary check1"); - utils::Assert(read_ptr <= read_end, "RingPassing: boundary check2"); - utils::Assert(write_ptr <= write_end, "RingPassing: boundary check3"); - // take reference - LinkRecord &prev = *read_link, &next = *write_link; - // send recv buffer - char *buf = reinterpret_cast(sendrecvbuf_); - while (true) { - bool finished = true; - utils::SelectHelper selecter; - if (read_ptr != read_end) { - selecter.WatchRead(prev.sock); - finished = false; - } - if (write_ptr < read_ptr && write_ptr != write_end) { - selecter.WatchWrite(next.sock); - finished = false; - } - selecter.WatchException(prev.sock); - selecter.WatchException(next.sock); - if (finished) break; - selecter.Select(); - if (selecter.CheckExcept(prev.sock)) return ReportError(&prev, kGetExcept); - if (selecter.CheckExcept(next.sock)) return ReportError(&next, kGetExcept); - if (read_ptr != read_end && selecter.CheckRead(prev.sock)) { - ssize_t len = prev.sock.Recv(buf + read_ptr, read_end - read_ptr); - if (len == 0) { - prev.sock.Close(); return ReportError(&prev, kRecvZeroLen); - } - if (len != -1) { - read_ptr += static_cast(len); - } else { - ReturnType ret = Errno2Return(); - if (ret != kSuccess) return ReportError(&prev, ret); - } - } - if (write_ptr != write_end && write_ptr < read_ptr) { - size_t nsend = std::min(write_end - write_ptr, read_ptr - write_ptr); - ssize_t len = next.sock.Send(buf + write_ptr, nsend); - if (len != -1) { - write_ptr += static_cast(len); - } else { - ReturnType ret = Errno2Return(); - if (ret != kSuccess) return ReportError(&prev, ret); - } - } - } - return kSuccess; -} -} // namespace engine -} // namespace rabit - diff --git a/subtree/rabit/src/allreduce_robust.h b/subtree/rabit/src/allreduce_robust.h deleted file mode 100644 index caf2e57af..000000000 --- a/subtree/rabit/src/allreduce_robust.h +++ /dev/null @@ -1,553 +0,0 @@ -/*! - * Copyright (c) 2014 by Contributors - * \file allreduce_robust.h - * \brief Robust implementation of Allreduce - * using TCP non-block socket and tree-shape reduction. 
- * - * This implementation considers the failure of nodes - * - * \author Tianqi Chen, Ignacio Cano, Tianyi Zhou - */ -#ifndef RABIT_ALLREDUCE_ROBUST_H_ -#define RABIT_ALLREDUCE_ROBUST_H_ -#include -#include -#include -#include "../include/rabit/engine.h" -#include "./allreduce_base.h" - -namespace rabit { -namespace engine { -/*! \brief implementation of fault tolerant all reduce engine */ -class AllreduceRobust : public AllreduceBase { - public: - AllreduceRobust(void); - virtual ~AllreduceRobust(void) {} - // initialize the manager - virtual void Init(void); - /*! \brief shutdown the engine */ - virtual void Shutdown(void); - /*! - * \brief set parameters to the engine - * \param name parameter name - * \param val parameter value - */ - virtual void SetParam(const char *name, const char *val); - /*! - * \brief perform in-place allreduce, on sendrecvbuf - * this function is NOT thread-safe - * \param sendrecvbuf_ buffer for both sending and recving data - * \param type_nbytes the unit number of bytes the type have - * \param count number of elements to be reduced - * \param reducer reduce function - * \param prepare_func Lazy preprocessing function, lazy prepare_fun(prepare_arg) - * will be called by the function before performing Allreduce, to intialize the data in sendrecvbuf_. - * If the result of Allreduce can be recovered directly, then prepare_func will NOT be called - * \param prepare_arg argument used to passed into the lazy preprocessing function - */ - virtual void Allreduce(void *sendrecvbuf_, - size_t type_nbytes, - size_t count, - ReduceFunction reducer, - PreprocFunction prepare_fun = NULL, - void *prepare_arg = NULL); - /*! - * \brief broadcast data from root to all nodes - * \param sendrecvbuf_ buffer for both sending and recving data - * \param size the size of the data to be broadcasted - * \param root the root worker id to broadcast the data - */ - virtual void Broadcast(void *sendrecvbuf_, size_t total_size, int root); - /*! - * \brief load latest check point - * \param global_model pointer to the globally shared model/state - * when calling this function, the caller need to gauranttees that global_model - * is the same in all nodes - * \param local_model pointer to local model, that is specific to current node/rank - * this can be NULL when no local model is needed - * - * \return the version number of check point loaded - * if returned version == 0, this means no model has been CheckPointed - * the p_model is not touched, user should do necessary initialization by themselves - * - * Common usage example: - * int iter = rabit::LoadCheckPoint(&model); - * if (iter == 0) model.InitParameters(); - * for (i = iter; i < max_iter; ++i) { - * do many things, include allreduce - * rabit::CheckPoint(model); - * } - * - * \sa CheckPoint, VersionNumber - */ - virtual int LoadCheckPoint(Serializable *global_model, - Serializable *local_model = NULL); - /*! - * \brief checkpoint the model, meaning we finished a stage of execution - * every time we call check point, there is a version number which will increase by one - * - * \param global_model pointer to the globally shared model/state - * when calling this function, the caller need to gauranttees that global_model - * is the same in all nodes - * \param local_model pointer to local model, that is specific to current node/rank - * this can be NULL when no local state is needed - * - * NOTE: local_model requires explicit replication of the model for fault-tolerance, which will - * bring replication cost in CheckPoint function. 
global_model does not need explicit replication;
- *       so checkpoint with only the global_model if possible
- *
- * \sa LoadCheckPoint, VersionNumber
- */
-virtual void CheckPoint(const Serializable *global_model,
-                        const Serializable *local_model = NULL) {
-  this->CheckPoint_(global_model, local_model, false);
-}
-/*!
- * \brief This function can be used to replace CheckPoint for global_model only,
- *  when certain conditions are met (see the detailed explanation below).
- *
- *  This is a "lazy" checkpoint: only the pointer to global_model is
- *  remembered and no memory copy is taken. To use this function, the user MUST ensure that
- *  global_model remains unchanged until the last call of Allreduce/Broadcast in the current version finishes.
- *  In other words, global_model can be changed only between the last call of
- *  Allreduce/Broadcast and LazyCheckPoint in the current version.
- *
- *  For example, suppose the calling sequence is:
- *  LazyCheckPoint, code1, Allreduce, code2, Broadcast, code3, LazyCheckPoint
- *
- *  If the user only changes global_model in code3, then LazyCheckPoint can be used to
- *  improve the efficiency of the program.
- * \param global_model pointer to the globally shared model/state
- *        when calling this function, the caller needs to guarantee that global_model
- *        is the same in all nodes
- * \sa LoadCheckPoint, CheckPoint, VersionNumber
- */
-virtual void LazyCheckPoint(const Serializable *global_model) {
-  this->CheckPoint_(global_model, NULL, true);
-}
-/*!
- * \brief explicitly re-initialize everything before calling LoadCheckPoint;
- *  call this function when IEngine throws an exception out,
- *  this function is only used for testing purposes
- */
-virtual void InitAfterException(void) {
-  // simple way: shut down all links
-  for (size_t i = 0; i < all_links.size(); ++i) {
-    if (!all_links[i].sock.BadSocket()) all_links[i].sock.Close();
-  }
-  ReConnectLinks("recover");
-}
-
- protected:
-  // constant one-byte out-of-band message to indicate an error happened,
-  // and mark for channel cleanup
-  static const char kOOBReset = 95;
-  // mark for channel cleanup, sent after the OOB signal
-  static const char kResetMark = 97;
-  // mark for channel cleanup
-  static const char kResetAck = 97;
-  /*! \brief type of roles each node can play during recovery */
-  enum RecoverType {
-    /*! \brief current node has data */
-    kHaveData = 0,
-    /*! \brief current node requests data */
-    kRequestData = 1,
-    /*! \brief current node only helps to pass data around */
-    kPassData = 2
-  };
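
A timeline makes the LazyCheckPoint constraint concrete: after the last Allreduce/Broadcast of a version the model may be mutated and then lazily checkpointed, because on failure the engine replays collectives of the current version from the pointer it kept. A sketch of the legal pattern (MyModel, UpdateModel, and the gradient buffer are hypothetical stand-ins, not part of this header):

    #include <rabit.h>
    #include <vector>

    // Sketch only: MyModel is some rabit::Serializable, UpdateModel is hypothetical.
    int main(int argc, char *argv[]) {
      rabit::Init(argc, argv);
      MyModel model;
      std::vector<float> grad(128, 0.0f);
      int iter = rabit::LoadCheckPoint(&model);
      for (int v = iter; v < 100; ++v) {
        rabit::Allreduce<rabit::op::Sum>(&grad[0], grad.size());  // collectives first
        UpdateModel(&model, grad);       // mutate only after the last collective
        rabit::LazyCheckPoint(&model);   // no copy: the engine keeps the pointer
      }
      rabit::Finalize();
      return 0;
    }

- /*!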
- * \brief summary of the actions proposed in all nodes;
- *  this data structure is used to make a consensus decision
- *  about the next action to take in recovery mode
- */
-struct ActionSummary {
-  // maximum allowed sequence id
-  static const int kSpecialOp = (1 << 26);
-  // special sequence number for local state checkpoint
-  static const int kLocalCheckPoint = (1 << 26) - 2;
-  // special sequence number for local state checkpoint ack signal
-  static const int kLocalCheckAck = (1 << 26) - 1;
-  //---------------------------------------------
-  // The following are bit masks of the flags used in seqcode
-  //----------------------------------------------
-  // some node wants to load a checkpoint
-  static const int kLoadCheck = 1;
-  // some node wants to do a checkpoint
-  static const int kCheckPoint = 2;
-  // checkpoint Ack; we use a two-phase message in checkpointing,
-  // this is the second phase of checkpointing
-  static const int kCheckAck = 4;
-  // the nodes proposed different sequence numbers,
-  // which means we want to run recovery of the lower-sequence
-  // action instead of normal execution
-  static const int kDiffSeq = 8;
-  // constructor
-  ActionSummary(void) {}
-  // constructor of action
-  explicit ActionSummary(int flag, int minseqno = kSpecialOp) {
-    seqcode = (minseqno << 4) | flag;
-  }
-  // minimum sequence number among all operations
-  inline int min_seqno(void) const {
-    return seqcode >> 4;
-  }
-  // whether the operation set contains a load_check
-  inline bool load_check(void) const {
-    return (seqcode & kLoadCheck) != 0;
-  }
-  // whether the operation set contains a check point
-  inline bool check_point(void) const {
-    return (seqcode & kCheckPoint) != 0;
-  }
-  // whether the operation set contains a check ack
-  inline bool check_ack(void) const {
-    return (seqcode & kCheckAck) != 0;
-  }
-  // whether the operation set contains different sequence numbers
-  inline bool diff_seq(void) const {
-    return (seqcode & kDiffSeq) != 0;
-  }
-  // returns the operation flags of the result
-  inline int flag(void) const {
-    return seqcode & 15;
-  }
-  // reducer for Allreduce, gets the resulting ActionSummary from all nodes
-  inline static void Reducer(const void *src_, void *dst_,
-                             int len, const MPI::Datatype &dtype) {
-    const ActionSummary *src = (const ActionSummary*)src_;
-    ActionSummary *dst = reinterpret_cast<ActionSummary*>(dst_);
-    for (int i = 0; i < len; ++i) {
-      int src_seqno = src[i].min_seqno();
-      int dst_seqno = dst[i].min_seqno();
-      int flag = src[i].flag() | dst[i].flag();
-      if (src_seqno == dst_seqno) {
-        dst[i] = ActionSummary(flag, src_seqno);
-      } else {
-        dst[i] = ActionSummary(flag | kDiffSeq,
-                               std::min(src_seqno, dst_seqno));
-      }
-    }
-  }
-
- private:
-  // internal sequence code
-  int seqcode;
-};
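
The consensus protocol above packs each node's proposal into one int: the low 4 bits carry the action flags and the remaining bits carry the minimum sequence number, so a single Allreduce with the custom Reducer combines every proposal. A small standalone check of the packing arithmetic (inputs invented for illustration):

    #include <cassert>

    int main() {
      const int kCheckPoint = 2, kDiffSeq = 8;
      // pack: sequence number in the high bits, flags in the low 4 bits
      int seqno = 42, flag = kCheckPoint;
      int seqcode = (seqno << 4) | flag;
      // unpack, as min_seqno()/flag() do
      assert((seqcode >> 4) == 42);
      assert((seqcode & 15) == kCheckPoint);
      // combining two proposals with different seqno sets kDiffSeq, as in Reducer
      int other_seqno = 40;
      int combined = ((other_seqno < seqno ? other_seqno : seqno) << 4)
                     | flag | kDiffSeq;
      assert((combined >> 4) == 40 && (combined & kDiffSeq));
      return 0;
    }

- /*!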
\brief data structure to remember result of Bcast and Allreduce calls */ - class ResultBuffer { - public: - // constructor - ResultBuffer(void) { - this->Clear(); - } - // clear the existing record - inline void Clear(void) { - seqno_.clear(); size_.clear(); - rptr_.clear(); rptr_.push_back(0); - data_.clear(); - } - // allocate temporal space - inline void *AllocTemp(size_t type_nbytes, size_t count) { - size_t size = type_nbytes * count; - size_t nhop = (size + sizeof(uint64_t) - 1) / sizeof(uint64_t); - utils::Assert(nhop != 0, "cannot allocate 0 size memory"); - data_.resize(rptr_.back() + nhop); - return BeginPtr(data_) + rptr_.back(); - } - // push the result in temp to the - inline void PushTemp(int seqid, size_t type_nbytes, size_t count) { - size_t size = type_nbytes * count; - size_t nhop = (size + sizeof(uint64_t) - 1) / sizeof(uint64_t); - if (seqno_.size() != 0) { - utils::Assert(seqno_.back() < seqid, "PushTemp seqid inconsistent"); - } - seqno_.push_back(seqid); - rptr_.push_back(rptr_.back() + nhop); - size_.push_back(size); - utils::Assert(data_.size() == rptr_.back(), "PushTemp inconsistent"); - } - // return the stored result of seqid, if any - inline void* Query(int seqid, size_t *p_size) { - size_t idx = std::lower_bound(seqno_.begin(), - seqno_.end(), seqid) - seqno_.begin(); - if (idx == seqno_.size() || seqno_[idx] != seqid) return NULL; - *p_size = size_[idx]; - return BeginPtr(data_) + rptr_[idx]; - } - // drop last stored result - inline void DropLast(void) { - utils::Assert(seqno_.size() != 0, "there is nothing to be dropped"); - seqno_.pop_back(); - rptr_.pop_back(); - size_.pop_back(); - data_.resize(rptr_.back()); - } - // the sequence number of last stored result - inline int LastSeqNo(void) const { - if (seqno_.size() == 0) return -1; - return seqno_.back(); - } - - private: - // sequence number of each - std::vector seqno_; - // pointer to the positions - std::vector rptr_; - // actual size of each buffer - std::vector size_; - // content of the buffer - std::vector data_; - }; - /*! - * \brief internal consistency check function, - * use check to ensure user always call CheckPoint/LoadCheckPoint - * with or without local but not both, this function will set the approperiate settings - * in the first call of LoadCheckPoint/CheckPoint - * - * \param with_local whether the user calls CheckPoint with local model - */ - void LocalModelCheck(bool with_local); - /*! - * \brief internal implementation of checkpoint, support both lazy and normal way - * - * \param global_model pointer to the globally shared model/state - * when calling this function, the caller need to gauranttees that global_model - * is the same in all nodes - * \param local_model pointer to local model, that is specific to current node/rank - * this can be NULL when no local state is needed - * \param lazy_checkpt whether the action is lazy checkpoint - * - * \sa CheckPoint, LazyCheckPoint - */ - void CheckPoint_(const Serializable *global_model, - const Serializable *local_model, - bool lazy_checkpt); - /*! 
- * \brief reset all the existing links by sending an Out-of-Band message marker;
- *  after this function finishes, all the messages received and sent
- *  before on all live links are discarded,
- *  this allows us to get a fresh start after an error has happened
- *
- *  TODO(tqchen): this function is not yet used by the engine;
- *  a simple reset-link and reconnect strategy is used instead
- *
- * \return this function can return kSuccess or kSockError
- *         when kSockError is returned, it simply means there are bad sockets in the links,
- *         and some link recovery procedure is needed
- */
-ReturnType TryResetLinks(void);
-/*!
- * \brief if err_type indicates an error,
- *  recover the links according to the error type reported;
- *  if there is no error, return true
- * \param err_type the type of error happening in the system
- * \return true if err_type is kSuccess, false otherwise
- */
-bool CheckAndRecover(ReturnType err_type);
-/*!
- * \brief try to run recovery execution for the requested action described by flag and seqno;
- *  the function will keep blocking to run possible recovery operations before the specified action,
- *  until the requested result is received by a recovering procedure,
- *  or the function discovers that the requested action has not yet been executed, and returns false
- *
- * \param buf the buffer to store the result
- * \param size the total size of the buffer
- * \param flag flag information about the action \sa ActionSummary
- * \param seqno sequence number of the action; if it is a special action with flag set,
- *        seqno needs to be set to ActionSummary::kSpecialOp
- *
- * \return this function returns true or false
- *  - true means buf is already set to the result by the recovering procedure,
- *    the action is complete, and no further action is needed
- *  - false means this is the latest action that has not yet been executed, so the caller needs to execute it
- */
-bool RecoverExec(void *buf, size_t size, int flag,
-                 int seqno = ActionSummary::kSpecialOp);
-/*!
- * \brief try to load the checkpoint
- *
- *  This is a collaborative function called by all nodes;
- *  only the nodes with requester set to true really need to load the checkpoint,
- *  other nodes act in collaborative roles to complete this request
- *
- * \param requester whether the current node is the requester
- * \return this function can return kSuccess/kSockError/kGetExcept, see ReturnType for details
- * \sa ReturnType
- */
-ReturnType TryLoadCheckPoint(bool requester);
-/*!
- * \brief try to get the result of the operation specified by seqno
- *
- *  This is a collaborative function called by all nodes;
- *  only the nodes with requester set to true really need to get the result,
- *  other nodes act in collaborative roles to complete this request
- *
- * \param buf the buffer to store the result, only used when the current node is the requester
- * \param size the total size of the buffer, only used when the current node is the requester
- * \param seqno sequence number of the operation, the unique index of an operation in the current iteration
- * \param requester whether the current node is the requester
- * \return this function can return kSuccess/kSockError/kGetExcept, see ReturnType for details
- * \sa ReturnType
- */
-ReturnType TryGetResult(void *buf, size_t size, int seqno, bool requester);
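
These Try* routines report failures through ReturnType instead of aborting, so the callers wrap them in a retry loop: CheckAndRecover repairs the links on error and the request is attempted again. A standalone mock of that idiom (the failing-twice behavior and names here only mirror the header; the real call sites live in allreduce_robust.cc):

    #include <cstdio>

    enum ReturnType { kSuccess, kSockError, kGetExcept };

    static int fail_budget = 2;
    ReturnType TryLoadCheckPoint(bool /*requester*/) {
      return fail_budget-- > 0 ? kSockError : kSuccess;  // fail twice, then succeed
    }
    bool CheckAndRecover(ReturnType err_type) {
      if (err_type == kSuccess) return true;
      std::printf("recovering links after error %d\n", (int)err_type);
      return false;  // caller must retry the request
    }

    int main() {
      while (!CheckAndRecover(TryLoadCheckPoint(true))) {}
      std::printf("checkpoint loaded\n");
      return 0;
    }

- /*!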
- * \brief try to decide the routing strategy for recovery
- * \param role the current role of the node
- * \param p_size used to store the size of the message; for a node in state kHaveData,
- *               this size must be set correctly before calling the function,
- *               for others it serves as an output parameter
- * \param p_recvlink used to store the link the current node should recv data from, if necessary;
- *        this can be -1, which means the current node already has the data
- * \param p_req_in used to store the resulting vector, indicating which links we should send the data to
- *
- * \return this function can return kSuccess/kSockError/kGetExcept, see ReturnType for details
- * \sa ReturnType, TryRecoverData
- */
-ReturnType TryDecideRouting(RecoverType role,
-                            size_t *p_size,
-                            int *p_recvlink,
-                            std::vector<bool> *p_req_in);
-/*!
- * \brief try to finish the data recovery request;
- *  this function is used together with TryDecideRouting
- * \param role the current role of the node
- * \param sendrecvbuf_ the buffer to store the data to be sent/received
- *  - if the role is kHaveData, this stores the data to be sent
- *  - if the role is kRequestData, this is the buffer to store the result
- *  - if the role is kPassData, this will not be used, and can be NULL
- * \param size the size of the data, obtained from TryDecideRouting
- * \param recv_link the link index to receive data from, if necessary, obtained from TryDecideRouting
- * \param req_in the request of each link to send data, obtained from TryDecideRouting
- *
- * \return this function can return kSuccess/kSockError/kGetExcept, see ReturnType for details
- * \sa ReturnType, TryDecideRouting
- */
-ReturnType TryRecoverData(RecoverType role,
-                          void *sendrecvbuf_,
-                          size_t size,
-                          int recv_link,
-                          const std::vector<bool> &req_in);
-/*!
- * \brief try to recover the local state, making each local state the result of itself
- *  plus replication of the states in the previous num_local_replica hops in the ring
- *
- * The input parameters must contain the valid local states available in the current node.
- * This function tries its best to "complete" the missing parts of local_rptr and local_chkpt.
- * If there is sufficient information in the ring, when the function returns, local_chkpt will
- * contain num_local_replica + 1 checkpoints (including the chkpt of this node).
- * If there is not sufficient information in the ring, then when this function returns, the number
- * of checkpoints will be less than the specified value.
- *
- * \param p_local_rptr the pointer to the segment pointers in the states array
- * \param p_local_chkpt the pointer to the storage of local check points
- * \return this function can return kSuccess/kSockError/kGetExcept, see ReturnType for details
- * \sa ReturnType
- */
-ReturnType TryRecoverLocalState(std::vector<size_t> *p_local_rptr,
-                                std::string *p_local_chkpt);
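
Recovery is thus a two-phase protocol: TryDecideRouting first agrees per node on who has the data, who needs it, and which links it flows over; TryRecoverData then streams the bytes along that routing. A compilable toy of the same shape, with the routing collapsed to a single invented decision:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Toy mirror of the TryDecideRouting/TryRecoverData split: phase one fills
    // size/recv_link/req_in, phase two moves the data. All values are invented.
    enum RecoverType { kHaveData, kRequestData, kPassData };

    void DecideRouting(RecoverType role, size_t *p_size,
                       int *p_recvlink, std::vector<bool> *p_req_in) {
      if (role != kHaveData) *p_size = 64;         // learn the size from the owner
      *p_recvlink = (role == kHaveData) ? -1 : 0;  // -1: we already have the data
      p_req_in->assign(2, role != kRequestData);   // which links ask us for data
    }

    int main() {
      size_t size = 0; int recv_link = -1; std::vector<bool> req_in;
      DecideRouting(kRequestData, &size, &recv_link, &req_in);
      std::printf("recv %zu bytes from link %d\n", size, recv_link);
      // ...phase two would now stream `size` bytes, as TryRecoverData does
      return 0;
    }

-/*!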
- * \brief try to checkpoint local state, this function is called in the normal execution phase
- *  of a checkpoint that contains local state;
- *  the input state must contain exactly one saved state (the local state of the current node),
- *  after completion, this function will get the local state from the previous num_local_replica nodes and put them
- *  into local_chkpt and local_rptr
- *
- *  It is also OK to call TryRecoverLocalState instead;
- *  TryRecoverLocalState makes fewer assumptions about the input, and requires more communication
- *
- * \param p_local_rptr the pointer to the segment pointers in the states array
- * \param p_local_chkpt the pointer to the storage of local check points
- * \return this function can return kSuccess/kSockError/kGetExcept, see ReturnType for details
- * \sa ReturnType, TryRecoverLocalState
- */
-ReturnType TryCheckinLocalState(std::vector<size_t> *p_local_rptr,
-                                std::string *p_local_chkpt);
-/*!
- * \brief perform a ring passing to receive data from the prev link, and send data to the next link;
- *  this allows data to stream over a ring structure
- *  sendrecvbuf[0:read_ptr] is already provided by the current node
- *  the current node will recv sendrecvbuf[read_ptr:read_end] from the prev link
- *  the current node will send sendrecvbuf[write_ptr:write_end] to the next link
- *  the writer will wait until the data has been read before sending it on
- *  this function requires read_end >= write_end
- *
- * \param sendrecvbuf_ the place to hold the incoming and outgoing data
- * \param read_ptr the initial read pointer
- * \param read_end the ending position to read
- * \param write_ptr the initial write pointer
- * \param write_end the ending position to write
- * \param read_link pointer to the link to the previous position in the ring
- * \param write_link pointer to the link to the next position in the ring
- */
-ReturnType RingPassing(void *sendrecvbuf_,
-                       size_t read_ptr,
-                       size_t read_end,
-                       size_t write_ptr,
-                       size_t write_end,
-                       LinkRecord *read_link,
-                       LinkRecord *write_link);
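
The four offsets describe one sliding window over a single buffer: bytes below read_ptr are already local, bytes in [read_ptr, read_end) arrive from the previous node, and the window [write_ptr, write_end) is forwarded to the next node, with write_ptr never allowed to pass read_ptr. A worked single-step illustration with invented figures:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    int main() {
      // buffer of 100 bytes: [0,40) already local, [40,100) still to be
      // received, [20,80) to be forwarded downstream (invented figures)
      size_t read_ptr = 40, read_end = 100;
      size_t write_ptr = 20, write_end = 80;
      assert(write_end <= read_end);  // the precondition RingPassing asserts
      // suppose 30 more bytes arrive from the prev link
      read_ptr += 30;
      // we may only forward what has been read: min of both remaining windows
      size_t nsend = std::min(write_end - write_ptr, read_ptr - write_ptr);
      assert(nsend == 50);            // [20,70) can now go to the next link
      write_ptr += nsend;
      return 0;
    }

-/*!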
- * \brief run message passing algorithm on the allreduce tree - * the result is edge message stored in p_edge_in and p_edge_out - * \param node_value the value associated with current node - * \param p_edge_in used to store input message from each of the edge - * \param p_edge_out used to store output message from each of the edge - * \param func a function that defines the message passing rule - * Parameters of func: - * - node_value same as node_value in the main function - * - edge_in the array of input messages from each edge, - * this includes the output edge, which should be excluded - * - out_index array the index of output edge, the function should - * exclude the output edge when compute the message passing value - * Return of func: - * the function returns the output message based on the input message and node_value - * - * \tparam EdgeType type of edge message, must be simple struct - * \tparam NodeType type of node value - */ - template - inline ReturnType MsgPassing(const NodeType &node_value, - std::vector *p_edge_in, - std::vector *p_edge_out, - EdgeType(*func) - (const NodeType &node_value, - const std::vector &edge_in, - size_t out_index)); - //---- recovery data structure ---- - // the round of result buffer, used to mode the result - int result_buffer_round; - // result buffer of all reduce - ResultBuffer resbuf; - // last check point global model - std::string global_checkpoint; - // lazy checkpoint of global model - const Serializable *global_lazycheck; - // number of replica for local state/model - int num_local_replica; - // number of default local replica - int default_local_replica; - // flag to decide whether local model is used, -1: unknown, 0: no, 1:yes - int use_local_model; - // number of replica for global state/model - int num_global_replica; - // number of times recovery happens - int recover_counter; - // --- recovery data structure for local checkpoint - // there is two version of the data structure, - // at one time one version is valid and another is used as temp memory - // pointer to memory position in the local model - // local model is stored in CSR format(like a sparse matrices) - // local_model[rptr[0]:rptr[1]] stores the model of current node - // local_model[rptr[k]:rptr[k+1]] stores the model of node in previous k hops - std::vector local_rptr[2]; - // storage for local model replicas - std::string local_chkpt[2]; - // version of local checkpoint can be 1 or 0 - int local_chkpt_version; -}; -} // namespace engine -} // namespace rabit -// implementation of inline template function -#include "./allreduce_robust-inl.h" -#endif // RABIT_ALLREDUCE_ROBUST_H_ diff --git a/subtree/rabit/src/engine.cc b/subtree/rabit/src/engine.cc deleted file mode 100644 index 0f4770fe2..000000000 --- a/subtree/rabit/src/engine.cc +++ /dev/null @@ -1,84 +0,0 @@ -/*! - * Copyright (c) 2014 by Contributors - * \file engine.cc - * \brief this file governs which implementation of engine we are actually using - * provides an singleton of engine interface - * - * \author Tianqi Chen, Ignacio Cano, Tianyi Zhou - */ -#define _CRT_SECURE_NO_WARNINGS -#define _CRT_SECURE_NO_DEPRECATE -#define NOMINMAX - -#include "../include/rabit/engine.h" -#include "./allreduce_base.h" -#include "./allreduce_robust.h" - -namespace rabit { -namespace engine { -// singleton sync manager -#ifndef RABIT_USE_BASE -#ifndef RABIT_USE_MOCK -AllreduceRobust manager; -#else -AllreduceMock manager; -#endif -#else -AllreduceBase manager; -#endif - -/*! 
\brief intiialize the synchronization module */ -void Init(int argc, char *argv[]) { - for (int i = 1; i < argc; ++i) { - char name[256], val[256]; - if (sscanf(argv[i], "%[^=]=%s", name, val) == 2) { - manager.SetParam(name, val); - } - } - manager.Init(); -} - -/*! \brief finalize syncrhonization module */ -void Finalize(void) { - manager.Shutdown(); -} -/*! \brief singleton method to get engine */ -IEngine *GetEngine(void) { - return &manager; -} -// perform in-place allreduce, on sendrecvbuf -void Allreduce_(void *sendrecvbuf, - size_t type_nbytes, - size_t count, - IEngine::ReduceFunction red, - mpi::DataType dtype, - mpi::OpType op, - IEngine::PreprocFunction prepare_fun, - void *prepare_arg) { - GetEngine()->Allreduce(sendrecvbuf, type_nbytes, count, - red, prepare_fun, prepare_arg); -} - -// code for reduce handle -ReduceHandle::ReduceHandle(void) - : handle_(NULL), redfunc_(NULL), htype_(NULL) { -} -ReduceHandle::~ReduceHandle(void) {} - -int ReduceHandle::TypeSize(const MPI::Datatype &dtype) { - return static_cast(dtype.type_size); -} -void ReduceHandle::Init(IEngine::ReduceFunction redfunc, size_t type_nbytes) { - utils::Assert(redfunc_ == NULL, "cannot initialize reduce handle twice"); - redfunc_ = redfunc; -} -void ReduceHandle::Allreduce(void *sendrecvbuf, - size_t type_nbytes, size_t count, - IEngine::PreprocFunction prepare_fun, - void *prepare_arg) { - utils::Assert(redfunc_ != NULL, "must intialize handle to call AllReduce"); - GetEngine()->Allreduce(sendrecvbuf, type_nbytes, count, - redfunc_, prepare_fun, prepare_arg); -} -} // namespace engine -} // namespace rabit diff --git a/subtree/rabit/src/engine_base.cc b/subtree/rabit/src/engine_base.cc deleted file mode 100644 index 62739536f..000000000 --- a/subtree/rabit/src/engine_base.cc +++ /dev/null @@ -1,15 +0,0 @@ -/*! - * Copyright (c) 2014 by Contributors - * \file engine_mock.cc - * \brief this is an engine implementation that will - * insert failures in certain call point, to test if the engine is robust to failure - * \author Tianqi Chen - */ -// define use MOCK, os we will use mock Manager -#define _CRT_SECURE_NO_WARNINGS -#define _CRT_SECURE_NO_DEPRECATE -#define NOMINMAX -// switch engine to AllreduceMock -#define RABIT_USE_BASE -#include "./engine.cc" - diff --git a/subtree/rabit/src/engine_empty.cc b/subtree/rabit/src/engine_empty.cc deleted file mode 100644 index 5fc16d9f4..000000000 --- a/subtree/rabit/src/engine_empty.cc +++ /dev/null @@ -1,118 +0,0 @@ -/*! - * Copyright (c) 2014 by Contributors - * \file engine_empty.cc - * \brief this file provides a dummy implementation of engine that does nothing - * this file provides a way to fall back to single node program without causing too many dependencies - * This is usually NOT needed, use engine_mpi or engine for real distributed version - * \author Tianqi Chen - */ -#define _CRT_SECURE_NO_WARNINGS -#define _CRT_SECURE_NO_DEPRECATE -#define NOMINMAX - -#include "../include/rabit/engine.h" - -namespace rabit { -namespace engine { -/*! 
\brief EmptyEngine */ -class EmptyEngine : public IEngine { - public: - EmptyEngine(void) { - version_number = 0; - } - virtual void Allreduce(void *sendrecvbuf_, - size_t type_nbytes, - size_t count, - ReduceFunction reducer, - PreprocFunction prepare_fun, - void *prepare_arg) { - utils::Error("EmptyEngine:: Allreduce is not supported,"\ - "use Allreduce_ instead"); - } - virtual void Broadcast(void *sendrecvbuf_, size_t size, int root) { - } - virtual void InitAfterException(void) { - utils::Error("EmptyEngine is not fault tolerant"); - } - virtual int LoadCheckPoint(Serializable *global_model, - Serializable *local_model = NULL) { - return 0; - } - virtual void CheckPoint(const Serializable *global_model, - const Serializable *local_model = NULL) { - version_number += 1; - } - virtual void LazyCheckPoint(const Serializable *global_model) { - version_number += 1; - } - virtual int VersionNumber(void) const { - return version_number; - } - /*! \brief get rank of current node */ - virtual int GetRank(void) const { - return 0; - } - /*! \brief get total number of */ - virtual int GetWorldSize(void) const { - return 1; - } - /*! \brief whether it is distributed */ - virtual bool IsDistributed(void) const { - return false; - } - /*! \brief get the host name of current node */ - virtual std::string GetHost(void) const { - return std::string(""); - } - virtual void TrackerPrint(const std::string &msg) { - // simply print information into the tracker - utils::Printf("%s", msg.c_str()); - } - - private: - int version_number; -}; - -// singleton sync manager -EmptyEngine manager; - -/*! \brief intiialize the synchronization module */ -void Init(int argc, char *argv[]) { -} -/*! \brief finalize syncrhonization module */ -void Finalize(void) { -} - -/*! \brief singleton method to get engine */ -IEngine *GetEngine(void) { - return &manager; -} -// perform in-place allreduce, on sendrecvbuf -void Allreduce_(void *sendrecvbuf, - size_t type_nbytes, - size_t count, - IEngine::ReduceFunction red, - mpi::DataType dtype, - mpi::OpType op, - IEngine::PreprocFunction prepare_fun, - void *prepare_arg) { - if (prepare_fun != NULL) prepare_fun(prepare_arg); -} - -// code for reduce handle -ReduceHandle::ReduceHandle(void) : handle_(NULL), htype_(NULL) { -} -ReduceHandle::~ReduceHandle(void) {} - -int ReduceHandle::TypeSize(const MPI::Datatype &dtype) { - return 0; -} -void ReduceHandle::Init(IEngine::ReduceFunction redfunc, size_t type_nbytes) {} -void ReduceHandle::Allreduce(void *sendrecvbuf, - size_t type_nbytes, size_t count, - IEngine::PreprocFunction prepare_fun, - void *prepare_arg) { - if (prepare_fun != NULL) prepare_fun(prepare_arg); -} -} // namespace engine -} // namespace rabit diff --git a/subtree/rabit/src/engine_mock.cc b/subtree/rabit/src/engine_mock.cc deleted file mode 100644 index 24415a1d5..000000000 --- a/subtree/rabit/src/engine_mock.cc +++ /dev/null @@ -1,16 +0,0 @@ -/*! 
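
engine_empty.cc above exists so that code written against the rabit API can link into a plain single-process binary: rank is always 0, world size is 1, and Allreduce_ only runs the prepare function. In practice a program such as the following behaves like ordinary sequential code when built against the empty engine (a sketch; the exact build flags depend on the Makefile):

    #include <rabit.h>
    #include <cstdio>

    int main(int argc, char *argv[]) {
      rabit::Init(argc, argv);
      float data[4] = {1, 2, 3, 4};
      // with the empty engine this is a no-op and data is unchanged;
      // with a real engine it sums the array across all workers
      rabit::Allreduce<rabit::op::Sum>(data, 4);
      std::printf("rank=%d world=%d\n", rabit::GetRank(), rabit::GetWorldSize());
      rabit::Finalize();
      return 0;
    }

-/*!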
- * Copyright (c) 2014 by Contributors - * \file engine_mock.cc - * \brief this is an engine implementation that will - * insert failures in certain call point, to test if the engine is robust to failure - * \author Tianqi Chen - */ -// define use MOCK, os we will use mock Manager -#define _CRT_SECURE_NO_WARNINGS -#define _CRT_SECURE_NO_DEPRECATE -#define NOMINMAX -// switch engine to AllreduceMock -#define RABIT_USE_MOCK -#include "./allreduce_mock.h" -#include "./engine.cc" - diff --git a/subtree/rabit/src/engine_mpi.cc b/subtree/rabit/src/engine_mpi.cc deleted file mode 100644 index 11e55335b..000000000 --- a/subtree/rabit/src/engine_mpi.cc +++ /dev/null @@ -1,211 +0,0 @@ -/*! - * Copyright (c) 2014 by Contributors - * \file engine_mpi.cc - * \brief this file gives an implementation of engine interface using MPI, - * this will allow rabit program to run with MPI, but do not comes with fault tolerant - * - * \author Tianqi Chen - */ -#define _CRT_SECURE_NO_WARNINGS -#define _CRT_SECURE_NO_DEPRECATE -#define NOMINMAX -#include -#include -#include "../include/rabit/engine.h" -#include "../include/rabit/utils.h" - -namespace rabit { -namespace engine { -/*! \brief implementation of engine using MPI */ -class MPIEngine : public IEngine { - public: - MPIEngine(void) { - version_number = 0; - } - virtual void Allreduce(void *sendrecvbuf_, - size_t type_nbytes, - size_t count, - ReduceFunction reducer, - PreprocFunction prepare_fun, - void *prepare_arg) { - utils::Error("MPIEngine:: Allreduce is not supported,"\ - "use Allreduce_ instead"); - } - virtual void Broadcast(void *sendrecvbuf_, size_t size, int root) { - MPI::COMM_WORLD.Bcast(sendrecvbuf_, size, MPI::CHAR, root); - } - virtual void InitAfterException(void) { - utils::Error("MPI is not fault tolerant"); - } - virtual int LoadCheckPoint(Serializable *global_model, - Serializable *local_model = NULL) { - return 0; - } - virtual void CheckPoint(const Serializable *global_model, - const Serializable *local_model = NULL) { - version_number += 1; - } - virtual void LazyCheckPoint(const Serializable *global_model) { - version_number += 1; - } - virtual int VersionNumber(void) const { - return version_number; - } - /*! \brief get rank of current node */ - virtual int GetRank(void) const { - return MPI::COMM_WORLD.Get_rank(); - } - /*! \brief get total number of */ - virtual int GetWorldSize(void) const { - return MPI::COMM_WORLD.Get_size(); - } - /*! \brief whether it is distributed */ - virtual bool IsDistributed(void) const { - return true; - } - /*! \brief get the host name of current node */ - virtual std::string GetHost(void) const { - int len; - char name[MPI_MAX_PROCESSOR_NAME]; - MPI::Get_processor_name(name, len); - name[len] = '\0'; - return std::string(name); - } - virtual void TrackerPrint(const std::string &msg) { - // simply print information into the tracker - if (GetRank() == 0) { - utils::Printf("%s", msg.c_str()); - } - } - - private: - int version_number; -}; - -// singleton sync manager -MPIEngine manager; - -/*! \brief intiialize the synchronization module */ -void Init(int argc, char *argv[]) { - MPI::Init(argc, argv); -} -/*! \brief finalize syncrhonization module */ -void Finalize(void) { - MPI::Finalize(); -} - -/*! 
\brief singleton method to get engine */ -IEngine *GetEngine(void) { - return &manager; -} -// transform enum to MPI data type -inline MPI::Datatype GetType(mpi::DataType dtype) { - using namespace mpi; - switch (dtype) { - case kChar: return MPI::CHAR; - case kUChar: return MPI::BYTE; - case kInt: return MPI::INT; - case kUInt: return MPI::UNSIGNED; - case kLong: return MPI::LONG; - case kULong: return MPI::UNSIGNED_LONG; - case kFloat: return MPI::FLOAT; - case kDouble: return MPI::DOUBLE; - case kLongLong: return MPI::LONG_LONG; - case kULongLong: return MPI::UNSIGNED_LONG_LONG; - } - utils::Error("unknown mpi::DataType"); - return MPI::CHAR; -} -// transform enum to MPI OP -inline MPI::Op GetOp(mpi::OpType otype) { - using namespace mpi; - switch (otype) { - case kMax: return MPI::MAX; - case kMin: return MPI::MIN; - case kSum: return MPI::SUM; - case kBitwiseOR: return MPI::BOR; - } - utils::Error("unknown mpi::OpType"); - return MPI::MAX; -} -// perform in-place allreduce, on sendrecvbuf -void Allreduce_(void *sendrecvbuf, - size_t type_nbytes, - size_t count, - IEngine::ReduceFunction red, - mpi::DataType dtype, - mpi::OpType op, - IEngine::PreprocFunction prepare_fun, - void *prepare_arg) { - if (prepare_fun != NULL) prepare_fun(prepare_arg); - MPI::COMM_WORLD.Allreduce(MPI_IN_PLACE, sendrecvbuf, - count, GetType(dtype), GetOp(op)); -} - -// code for reduce handle -ReduceHandle::ReduceHandle(void) - : handle_(NULL), redfunc_(NULL), htype_(NULL) { -} -ReduceHandle::~ReduceHandle(void) { - if (handle_ != NULL) { - MPI::Op *op = reinterpret_cast(handle_); - op->Free(); - delete op; - } - if (htype_ != NULL) { - MPI::Datatype *dtype = reinterpret_cast(htype_); - dtype->Free(); - delete dtype; - } -} -int ReduceHandle::TypeSize(const MPI::Datatype &dtype) { - return dtype.Get_size(); -} -void ReduceHandle::Init(IEngine::ReduceFunction redfunc, size_t type_nbytes) { - utils::Assert(handle_ == NULL, "cannot initialize reduce handle twice"); - if (type_nbytes != 0) { - MPI::Datatype *dtype = new MPI::Datatype(); - if (type_nbytes % 8 == 0) { - *dtype = MPI::LONG.Create_contiguous(type_nbytes / sizeof(long)); // NOLINT(*) - } else if (type_nbytes % 4 == 0) { - *dtype = MPI::INT.Create_contiguous(type_nbytes / sizeof(int)); - } else { - *dtype = MPI::CHAR.Create_contiguous(type_nbytes); - } - dtype->Commit(); - created_type_nbytes_ = type_nbytes; - htype_ = dtype; - } - MPI::Op *op = new MPI::Op(); - MPI::User_function *pf = redfunc; - op->Init(pf, true); - handle_ = op; -} -void ReduceHandle::Allreduce(void *sendrecvbuf, - size_t type_nbytes, size_t count, - IEngine::PreprocFunction prepare_fun, - void *prepare_arg) { - utils::Assert(handle_ != NULL, "must intialize handle to call AllReduce"); - MPI::Op *op = reinterpret_cast(handle_); - MPI::Datatype *dtype = reinterpret_cast(htype_); - if (created_type_nbytes_ != type_nbytes || dtype == NULL) { - if (dtype == NULL) { - dtype = new MPI::Datatype(); - } else { - dtype->Free(); - } - if (type_nbytes % 8 == 0) { - *dtype = MPI::LONG.Create_contiguous(type_nbytes / sizeof(long)); // NOLINT(*) - } else if (type_nbytes % 4 == 0) { - *dtype = MPI::INT.Create_contiguous(type_nbytes / sizeof(int)); - } else { - *dtype = MPI::CHAR.Create_contiguous(type_nbytes); - } - dtype->Commit(); - created_type_nbytes_ = type_nbytes; - } - if (prepare_fun != NULL) prepare_fun(prepare_arg); - MPI::COMM_WORLD.Allreduce(MPI_IN_PLACE, sendrecvbuf, count, *dtype, *op); -} -} // namespace engine -} // namespace rabit diff --git a/subtree/rabit/src/socket.h 
b/subtree/rabit/src/socket.h deleted file mode 100644 index 6df7a7b78..000000000 --- a/subtree/rabit/src/socket.h +++ /dev/null @@ -1,523 +0,0 @@ -/*! - * Copyright (c) 2014 by Contributors - * \file socket.h - * \brief this file aims to provide a wrapper of sockets - * \author Tianqi Chen - */ -#ifndef RABIT_SOCKET_H_ -#define RABIT_SOCKET_H_ -#if defined(_WIN32) -#include -#include -#ifdef _MSC_VER -#pragma comment(lib, "Ws2_32.lib") -#endif -#else -#include -#include -#include -#include -#include -#include -#include -#include -#include -#endif -#include -#include -#include "../include/rabit/utils.h" - -#if defined(_WIN32) -typedef int ssize_t; -typedef int sock_size_t; -#else -typedef int SOCKET; -typedef size_t sock_size_t; -const int INVALID_SOCKET = -1; -#endif - -namespace rabit { -namespace utils { -/*! \brief data structure for network address */ -struct SockAddr { - sockaddr_in addr; - // constructor - SockAddr(void) {} - SockAddr(const char *url, int port) { - this->Set(url, port); - } - inline static std::string GetHostName(void) { - std::string buf; buf.resize(256); - utils::Check(gethostname(&buf[0], 256) != -1, "fail to get host name"); - return std::string(buf.c_str()); - } - /*! - * \brief set the address - * \param url the url of the address - * \param port the port of address - */ - inline void Set(const char *host, int port) { - hostent *hp = gethostbyname(host); - Check(hp != NULL, "cannot obtain address of %s", host); - memset(&addr, 0, sizeof(addr)); - addr.sin_family = AF_INET; - addr.sin_port = htons(port); - memcpy(&addr.sin_addr, hp->h_addr_list[0], hp->h_length); - } - /*! \brief return port of the address*/ - inline int port(void) const { - return ntohs(addr.sin_port); - } - /*! \return a string representation of the address */ - inline std::string AddrStr(void) const { - std::string buf; buf.resize(256); -#ifdef _WIN32 - const char *s = inet_ntop(AF_INET, (PVOID)&addr.sin_addr, - &buf[0], buf.length()); -#else - const char *s = inet_ntop(AF_INET, &addr.sin_addr, - &buf[0], buf.length()); -#endif - Assert(s != NULL, "cannot decode address"); - return std::string(s); - } -}; - -/*! - * \brief base class containing common operations of TCP and UDP sockets - */ -class Socket { - public: - /*! \brief the file descriptor of socket */ - SOCKET sockfd; - // default conversion to int - inline operator SOCKET() const { - return sockfd; - } - /*! - * \return last error of socket operation - */ - inline static int GetLastError(void) { -#ifdef _WIN32 - return WSAGetLastError(); -#else - return errno; -#endif - } - /*! \return whether last error was would block */ - inline static bool LastErrorWouldBlock(void) { - int errsv = GetLastError(); -#ifdef _WIN32 - return errsv == WSAEWOULDBLOCK; -#else - return errsv == EAGAIN || errsv == EWOULDBLOCK; -#endif - } - /*! - * \brief start up the socket module - * call this before using the sockets - */ - inline static void Startup(void) { -#ifdef _WIN32 - WSADATA wsa_data; - if (WSAStartup(MAKEWORD(2, 2), &wsa_data) == -1) { - Socket::Error("Startup"); - } - if (LOBYTE(wsa_data.wVersion) != 2 || HIBYTE(wsa_data.wVersion) != 2) { - WSACleanup(); - utils::Error("Could not find a usable version of Winsock.dll\n"); - } -#endif - } - /*! - * \brief shutdown the socket module after use, all sockets need to be closed - */ - inline static void Finalize(void) { -#ifdef _WIN32 - WSACleanup(); -#endif - } - /*! 
- * \brief set this socket to use non-blocking mode - * \param non_block whether set it to be non-block, if it is false - * it will set it back to block mode - */ - inline void SetNonBlock(bool non_block) { -#ifdef _WIN32 - u_long mode = non_block ? 1 : 0; - if (ioctlsocket(sockfd, FIONBIO, &mode) != NO_ERROR) { - Socket::Error("SetNonBlock"); - } -#else - int flag = fcntl(sockfd, F_GETFL, 0); - if (flag == -1) { - Socket::Error("SetNonBlock-1"); - } - if (non_block) { - flag |= O_NONBLOCK; - } else { - flag &= ~O_NONBLOCK; - } - if (fcntl(sockfd, F_SETFL, flag) == -1) { - Socket::Error("SetNonBlock-2"); - } -#endif - } - /*! - * \brief bind the socket to an address - * \param addr - */ - inline void Bind(const SockAddr &addr) { - if (bind(sockfd, reinterpret_cast(&addr.addr), - sizeof(addr.addr)) == -1) { - Socket::Error("Bind"); - } - } - /*! - * \brief try bind the socket to host, from start_port to end_port - * \param start_port starting port number to try - * \param end_port ending port number to try - * \return the port successfully bind to, return -1 if failed to bind any port - */ - inline int TryBindHost(int start_port, int end_port) { - // TODO(tqchen) add prefix check - for (int port = start_port; port < end_port; ++port) { - SockAddr addr("0.0.0.0", port); - if (bind(sockfd, reinterpret_cast(&addr.addr), - sizeof(addr.addr)) == 0) { - return port; - } -#if defined(_WIN32) - if (WSAGetLastError() != WSAEADDRINUSE) { - Socket::Error("TryBindHost"); - } -#else - if (errno != EADDRINUSE) { - Socket::Error("TryBindHost"); - } -#endif - } - - return -1; - } - /*! \brief get last error code if any */ - inline int GetSockError(void) const { - int error = 0; - socklen_t len = sizeof(error); - if (getsockopt(sockfd, SOL_SOCKET, SO_ERROR, reinterpret_cast(&error), &len) != 0) { - Error("GetSockError"); - } - return error; - } - /*! \brief check if anything bad happens */ - inline bool BadSocket(void) const { - if (IsClosed()) return true; - int err = GetSockError(); - if (err == EBADF || err == EINTR) return true; - return false; - } - /*! \brief check if socket is already closed */ - inline bool IsClosed(void) const { - return sockfd == INVALID_SOCKET; - } - /*! \brief close the socket */ - inline void Close(void) { - if (sockfd != INVALID_SOCKET) { -#ifdef _WIN32 - closesocket(sockfd); -#else - close(sockfd); -#endif - sockfd = INVALID_SOCKET; - } else { - Error("Socket::Close double close the socket or close without create"); - } - } - // report an socket error - inline static void Error(const char *msg) { - int errsv = GetLastError(); -#ifdef _WIN32 - utils::Error("Socket %s Error:WSAError-code=%d", msg, errsv); -#else - utils::Error("Socket %s Error:%s", msg, strerror(errsv)); -#endif - } - - protected: - explicit Socket(SOCKET sockfd) : sockfd(sockfd) { - } -}; - -/*! - * \brief a wrapper of TCP socket that hopefully be cross platform - */ -class TCPSocket : public Socket{ - public: - // constructor - TCPSocket(void) : Socket(INVALID_SOCKET) { - } - explicit TCPSocket(SOCKET sockfd) : Socket(sockfd) { - } - /*! - * \brief enable/disable TCP keepalive - * \param keepalive whether to set the keep alive option on - */ - inline void SetKeepAlive(bool keepalive) { - int opt = static_cast(keepalive); - if (setsockopt(sockfd, SOL_SOCKET, SO_KEEPALIVE, - reinterpret_cast(&opt), sizeof(opt)) < 0) { - Socket::Error("SetKeepAlive"); - } - } - /*! 
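
TryBindHost above gives the tracker side a simple port-scanning bind: it walks [start_port, end_port), returns the first port that binds, treats an address-in-use error as "keep scanning", and fails hard on anything else. A usage sketch against this header (it uses TCPSocket::Create/Listen, declared just below, and assumes socket.h is on the include path):

    #include "socket.h"  // rabit::utils::TCPSocket, from this header
    #include <cstdio>

    int main() {
      rabit::utils::Socket::Startup();          // needed on Windows, harmless elsewhere
      rabit::utils::TCPSocket sock;
      sock.Create();
      int port = sock.TryBindHost(9090, 9200);  // first free port in range, or -1
      if (port == -1) {
        std::printf("no free port in range\n");
      } else {
        std::printf("listening on %d\n", port);
        sock.Listen();
      }
      sock.Close();
      rabit::utils::Socket::Finalize();
      return 0;
    }

- /*!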
- * \brief create the socket, call this before using socket - * \param af domain - */ - inline void Create(int af = PF_INET) { - sockfd = socket(PF_INET, SOCK_STREAM, 0); - if (sockfd == INVALID_SOCKET) { - Socket::Error("Create"); - } - } - /*! - * \brief perform listen of the socket - * \param backlog backlog parameter - */ - inline void Listen(int backlog = 16) { - listen(sockfd, backlog); - } - /*! \brief get a new connection */ - TCPSocket Accept(void) { - SOCKET newfd = accept(sockfd, NULL, NULL); - if (newfd == INVALID_SOCKET) { - Socket::Error("Accept"); - } - return TCPSocket(newfd); - } - /*! - * \brief decide whether the socket is at OOB mark - * \return 1 if at mark, 0 if not, -1 if an error occured - */ - inline int AtMark(void) const { -#ifdef _WIN32 - unsigned long atmark; // NOLINT(*) - if (ioctlsocket(sockfd, SIOCATMARK, &atmark) != NO_ERROR) return -1; -#else - int atmark; - if (ioctl(sockfd, SIOCATMARK, &atmark) == -1) return -1; -#endif - return static_cast(atmark); - } - /*! - * \brief connect to an address - * \param addr the address to connect to - * \return whether connect is successful - */ - inline bool Connect(const SockAddr &addr) { - return connect(sockfd, reinterpret_cast(&addr.addr), - sizeof(addr.addr)) == 0; - } - /*! - * \brief send data using the socket - * \param buf the pointer to the buffer - * \param len the size of the buffer - * \param flags extra flags - * \return size of data actually sent - * return -1 if error occurs - */ - inline ssize_t Send(const void *buf_, size_t len, int flag = 0) { - const char *buf = reinterpret_cast(buf_); - return send(sockfd, buf, static_cast(len), flag); - } - /*! - * \brief receive data using the socket - * \param buf_ the pointer to the buffer - * \param len the size of the buffer - * \param flags extra flags - * \return size of data actually received - * return -1 if error occurs - */ - inline ssize_t Recv(void *buf_, size_t len, int flags = 0) { - char *buf = reinterpret_cast(buf_); - return recv(sockfd, buf, static_cast(len), flags); - } - /*! - * \brief peform block write that will attempt to send all data out - * can still return smaller than request when error occurs - * \param buf the pointer to the buffer - * \param len the size of the buffer - * \return size of data actually sent - */ - inline size_t SendAll(const void *buf_, size_t len) { - const char *buf = reinterpret_cast(buf_); - size_t ndone = 0; - while (ndone < len) { - ssize_t ret = send(sockfd, buf, static_cast(len - ndone), 0); - if (ret == -1) { - if (LastErrorWouldBlock()) return ndone; - Socket::Error("SendAll"); - } - buf += ret; - ndone += ret; - } - return ndone; - } - /*! - * \brief peforma block read that will attempt to read all data - * can still return smaller than request when error occurs - * \param buf_ the buffer pointer - * \param len length of data to recv - * \return size of data actually sent - */ - inline size_t RecvAll(void *buf_, size_t len) { - char *buf = reinterpret_cast(buf_); - size_t ndone = 0; - while (ndone < len) { - ssize_t ret = recv(sockfd, buf, - static_cast(len - ndone), MSG_WAITALL); - if (ret == -1) { - if (LastErrorWouldBlock()) return ndone; - Socket::Error("RecvAll"); - } - if (ret == 0) return ndone; - buf += ret; - ndone += ret; - } - return ndone; - } - /*! 
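
SendAll and RecvAll paper over short reads and writes: they loop until len bytes have moved and only return early on a would-block condition (or EOF, for RecvAll). On a blocking socket a short count therefore means "connection problem", not "partial write". A fragment illustrating that contract (assuming `sock` is a connected, blocking rabit::utils::TCPSocket):

    // Sketch, assuming `sock` is a connected, blocking rabit::utils::TCPSocket.
    const char msg[] = "hello";
    if (sock.SendAll(msg, sizeof(msg)) != sizeof(msg)) {
      rabit::utils::Error("peer went away mid-send");
    }
    char buf[sizeof(msg)];
    if (sock.RecvAll(buf, sizeof(buf)) != sizeof(buf)) {
      rabit::utils::Error("connection closed before the full message arrived");
    }

- /*!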
- * \brief send a string over network - * \param str the string to be sent - */ - inline void SendStr(const std::string &str) { - int len = static_cast(str.length()); - utils::Assert(this->SendAll(&len, sizeof(len)) == sizeof(len), - "error during send SendStr"); - if (len != 0) { - utils::Assert(this->SendAll(str.c_str(), str.length()) == str.length(), - "error during send SendStr"); - } - } - /*! - * \brief recv a string from network - * \param out_str the string to receive - */ - inline void RecvStr(std::string *out_str) { - int len; - utils::Assert(this->RecvAll(&len, sizeof(len)) == sizeof(len), - "error during send RecvStr"); - out_str->resize(len); - if (len != 0) { - utils::Assert(this->RecvAll(&(*out_str)[0], len) == out_str->length(), - "error during send SendStr"); - } - } -}; - -/*! \brief helper data structure to perform select */ -struct SelectHelper { - public: - SelectHelper(void) { - FD_ZERO(&read_set); - FD_ZERO(&write_set); - FD_ZERO(&except_set); - maxfd = 0; - } - /*! - * \brief add file descriptor to watch for read - * \param fd file descriptor to be watched - */ - inline void WatchRead(SOCKET fd) { - FD_SET(fd, &read_set); - if (fd > maxfd) maxfd = fd; - } - /*! - * \brief add file descriptor to watch for write - * \param fd file descriptor to be watched - */ - inline void WatchWrite(SOCKET fd) { - FD_SET(fd, &write_set); - if (fd > maxfd) maxfd = fd; - } - /*! - * \brief add file descriptor to watch for exception - * \param fd file descriptor to be watched - */ - inline void WatchException(SOCKET fd) { - FD_SET(fd, &except_set); - if (fd > maxfd) maxfd = fd; - } - /*! - * \brief Check if the descriptor is ready for read - * \param fd file descriptor to check status - */ - inline bool CheckRead(SOCKET fd) const { - return FD_ISSET(fd, &read_set) != 0; - } - /*! - * \brief Check if the descriptor is ready for write - * \param fd file descriptor to check status - */ - inline bool CheckWrite(SOCKET fd) const { - return FD_ISSET(fd, &write_set) != 0; - } - /*! - * \brief Check if the descriptor has any exception - * \param fd file descriptor to check status - */ - inline bool CheckExcept(SOCKET fd) const { - return FD_ISSET(fd, &except_set) != 0; - } - /*! - * \brief wait for exception event on a single descriptor - * \param fd the file descriptor to wait the event for - * \param timeout the timeout counter, can be 0, which means wait until the event happen - * \return 1 if success, 0 if timeout, and -1 if error occurs - */ - inline static int WaitExcept(SOCKET fd, long timeout = 0) { // NOLINT(*) - fd_set wait_set; - FD_ZERO(&wait_set); - FD_SET(fd, &wait_set); - return Select_(static_cast(fd + 1), - NULL, NULL, &wait_set, timeout); - } - /*! 
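
SelectHelper wraps the fd_set bookkeeping for select(): register descriptors with WatchRead/WatchWrite/WatchException, call Select, then query readiness with the Check* methods. A sketch of a read-wait with a 100 ms timeout (assuming `sock` is an open rabit::utils::TCPSocket):

    // Sketch, assuming `sock` is an open rabit::utils::TCPSocket.
    rabit::utils::SelectHelper selecter;
    selecter.WatchRead(sock);
    selecter.WatchException(sock);
    int nready = selecter.Select(100);   // timeout in milliseconds; 0 blocks
    if (nready > 0 && selecter.CheckRead(sock)) {
      char byte;
      sock.Recv(&byte, 1);               // data is ready, Recv will not block
    }

- /*!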
- * \brief peform select on the set defined - * \param select_read whether to watch for read event - * \param select_write whether to watch for write event - * \param select_except whether to watch for exception event - * \param timeout specify timeout in micro-seconds(ms) if equals 0, means select will always block - * \return number of active descriptors selected, - * return -1 if error occurs - */ - inline int Select(long timeout = 0) { // NOLINT(*) - int ret = Select_(static_cast(maxfd + 1), - &read_set, &write_set, &except_set, timeout); - if (ret == -1) { - Socket::Error("Select"); - } - return ret; - } - - private: - inline static int Select_(int maxfd, fd_set *rfds, - fd_set *wfds, fd_set *efds, long timeout) { // NOLINT(*) -#if !defined(_WIN32) - utils::Assert(maxfd < FD_SETSIZE, "maxdf must be smaller than FDSETSIZE"); -#endif - if (timeout == 0) { - return select(maxfd, rfds, wfds, efds, NULL); - } else { - timeval tm; - tm.tv_usec = (timeout % 1000) * 1000; - tm.tv_sec = timeout / 1000; - return select(maxfd, rfds, wfds, efds, &tm); - } - } - - SOCKET maxfd; - fd_set read_set, write_set, except_set; -}; -} // namespace utils -} // namespace rabit -#endif // RABIT_SOCKET_H_ diff --git a/subtree/rabit/test/.gitignore b/subtree/rabit/test/.gitignore deleted file mode 100644 index eb87d8f26..000000000 --- a/subtree/rabit/test/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*.mpi -test_* -*_test -*_recover diff --git a/subtree/rabit/test/Makefile b/subtree/rabit/test/Makefile deleted file mode 100644 index 62e4e17f0..000000000 --- a/subtree/rabit/test/Makefile +++ /dev/null @@ -1,41 +0,0 @@ -export CC = gcc -export CXX = g++ -export MPICXX = mpicxx -export LDFLAGS= -L../lib -pthread -lm -lrt -export CFLAGS = -Wall -O3 -msse2 -Wno-unknown-pragmas -fPIC -I../include -std=c++0x - -# specify tensor path -BIN = speed_test model_recover local_recover lazy_recover -OBJ = $(RABIT_OBJ) speed_test.o model_recover.o local_recover.o lazy_recover.o -MPIBIN = speed_test.mpi -.PHONY: clean all lib mpi - -all: $(BIN) $(MPIBIN) -lib: - cd ..;make;cd - -mpi: - cd ..;make mpi;cd - -# programs -speed_test.o: speed_test.cc ../include/*.h lib mpi -model_recover.o: model_recover.cc ../include/*.h lib -local_recover.o: local_recover.cc ../include/*.h lib -lazy_recover.o: lazy_recover.cc ../include/*.h lib - -# we can link against MPI version to get use MPI -speed_test: speed_test.o $(RABIT_OBJ) -speed_test.mpi: speed_test.o $(MPIOBJ) -model_recover: model_recover.o $(RABIT_OBJ) -local_recover: local_recover.o $(RABIT_OBJ) -lazy_recover: lazy_recover.o $(RABIT_OBJ) - -$(BIN) : - $(CXX) $(CFLAGS) -o $@ $(filter %.cpp %.o %.c %.cc, $^) -lrabit_mock $(LDFLAGS) - -$(OBJ) : - $(CXX) -c $(CFLAGS) -o $@ $(firstword $(filter %.cpp %.c %.cc, $^) ) - -$(MPIBIN) : - $(MPICXX) $(CFLAGS) -o $@ $(filter %.cpp %.o %.c %.cc, $^) $(LDFLAGS) -lrabit_mpi - -clean: - $(RM) $(OBJ) $(BIN) $(MPIBIN) $(MPIOBJ) *~ ../src/*~ diff --git a/subtree/rabit/test/README.md b/subtree/rabit/test/README.md deleted file mode 100644 index fb68112bf..000000000 --- a/subtree/rabit/test/README.md +++ /dev/null @@ -1,18 +0,0 @@ -Testcases of Rabit -==== -This folder contains internal testcases to test correctness and efficiency of rabit API - -The example running scripts for testcases are given by test.mk -* type ```make -f test.mk testcasename``` to run certain testcase - - -Helper Scripts -==== -* test.mk contains Makefile documentation of all testcases -* keepalive.sh helper bash to restart a program when it dies abnormally - -List of Programs -==== 
diff --git a/subtree/rabit/test/.gitignore b/subtree/rabit/test/.gitignore deleted file mode 100644 index eb87d8f26..000000000 --- a/subtree/rabit/test/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*.mpi -test_* -*_test -*_recover diff --git a/subtree/rabit/test/Makefile b/subtree/rabit/test/Makefile deleted file mode 100644 index 62e4e17f0..000000000 --- a/subtree/rabit/test/Makefile +++ /dev/null @@ -1,41 +0,0 @@ -export CC = gcc -export CXX = g++ -export MPICXX = mpicxx -export LDFLAGS= -L../lib -pthread -lm -lrt -export CFLAGS = -Wall -O3 -msse2 -Wno-unknown-pragmas -fPIC -I../include -std=c++0x - -# test programs -BIN = speed_test model_recover local_recover lazy_recover -OBJ = $(RABIT_OBJ) speed_test.o model_recover.o local_recover.o lazy_recover.o -MPIBIN = speed_test.mpi -.PHONY: clean all lib mpi - -all: $(BIN) $(MPIBIN) -lib: - cd ..;make;cd - -mpi: - cd ..;make mpi;cd - -# programs -speed_test.o: speed_test.cc ../include/*.h lib mpi -model_recover.o: model_recover.cc ../include/*.h lib -local_recover.o: local_recover.cc ../include/*.h lib -lazy_recover.o: lazy_recover.cc ../include/*.h lib - -# we can link against the MPI version to use MPI -speed_test: speed_test.o $(RABIT_OBJ) -speed_test.mpi: speed_test.o $(MPIOBJ) -model_recover: model_recover.o $(RABIT_OBJ) -local_recover: local_recover.o $(RABIT_OBJ) -lazy_recover: lazy_recover.o $(RABIT_OBJ) - -$(BIN) : - $(CXX) $(CFLAGS) -o $@ $(filter %.cpp %.o %.c %.cc, $^) -lrabit_mock $(LDFLAGS) - -$(OBJ) : - $(CXX) -c $(CFLAGS) -o $@ $(firstword $(filter %.cpp %.c %.cc, $^) ) - -$(MPIBIN) : - $(MPICXX) $(CFLAGS) -o $@ $(filter %.cpp %.o %.c %.cc, $^) $(LDFLAGS) -lrabit_mpi - -clean: - $(RM) $(OBJ) $(BIN) $(MPIBIN) $(MPIOBJ) *~ ../src/*~ diff --git a/subtree/rabit/test/README.md b/subtree/rabit/test/README.md deleted file mode 100644 index fb68112bf..000000000 --- a/subtree/rabit/test/README.md +++ /dev/null @@ -1,18 +0,0 @@ -Testcases of Rabit -==== -This folder contains internal testcases to test the correctness and efficiency of the rabit API - -Example commands for running the testcases are given in test.mk -* type ```make -f test.mk testcasename``` to run a given testcase - - -Helper Scripts -==== -* test.mk contains Makefile documentation of all testcases -* keepalive.sh: helper bash script that restarts a program when it dies abnormally - -List of Programs -==== -* speed_test: test the running speed of the rabit API -* test_local_recover: test recovery of local state when errors happen -* test_model_recover: test recovery of global state when errors happen diff --git a/subtree/rabit/test/lazy_recover.cc b/subtree/rabit/test/lazy_recover.cc deleted file mode 100644 index 610a20664..000000000 --- a/subtree/rabit/test/lazy_recover.cc +++ /dev/null @@ -1,126 +0,0 @@ -// this is a test case to test whether rabit can recover model when -// facing an exception -#include <rabit.h> -#include <rabit/utils.h> -#include <cstdio> -#include <cstdlib> -#include <cmath> -using namespace rabit; - -// dummy model -class Model : public rabit::Serializable { - public: - // model data - std::vector<float> data; - // load from stream - virtual void Load(rabit::Stream *fi) { - fi->Read(&data); - } - /*! \brief save the model to the stream */ - virtual void Save(rabit::Stream *fo) const { - fo->Write(data); - } - virtual void InitModel(size_t n) { - data.clear(); - data.resize(n, 1.0f); - } -}; - -inline void TestMax(Model *model, int ntrial, int iter) { - int rank = rabit::GetRank(); - int nproc = rabit::GetWorldSize(); - const int z = iter + 111; - - std::vector<float> ndata(model->data.size()); - for (size_t i = 0; i < ndata.size(); ++i) { - ndata[i] = (i * (rank+1)) % z + model->data[i]; - } - rabit::Allreduce<op::Max>(&ndata[0], ndata.size()); - - for (size_t i = 0; i < ndata.size(); ++i) { - float rmax = (i * 1) % z + model->data[i]; - for (int r = 0; r < nproc; ++r) { - rmax = std::max(rmax, (float)((i * (r+1)) % z) + model->data[i]); - } - utils::Check(rmax == ndata[i], "[%d] TestMax check failure, i=%lu, rmax=%f, ndata=%f", rank, i, rmax, ndata[i]); - } -} - -inline void TestSum(Model *model, int ntrial, int iter) { - int rank = rabit::GetRank(); - int nproc = rabit::GetWorldSize(); - const int z = 131 + iter; - - std::vector<float> ndata(model->data.size()); - for (size_t i = 0; i < ndata.size(); ++i) { - ndata[i] = (i * (rank+1)) % z + model->data[i]; - } - Allreduce<op::Sum>(&ndata[0], ndata.size()); - - for (size_t i = 0; i < ndata.size(); ++i) { - float rsum = model->data[i] * nproc; - for (int r = 0; r < nproc; ++r) { - rsum += (float)((i * (r+1)) % z); - } - utils::Check(fabsf(rsum - ndata[i]) < 1e-5 , - "[%d] TestSum check failure, local=%g, allreduce=%g", rank, rsum, ndata[i]); - } - model->data = ndata; -} - -inline void TestBcast(size_t n, int root, int ntrial, int iter) { - int rank = rabit::GetRank(); - std::string s; s.resize(n); - for (size_t i = 0; i < n; ++i) { - s[i] = char(i % 126 + 1); - } - std::string res; - if (root == rank) { - res = s; - rabit::Broadcast(&res, root); - } else { - rabit::Broadcast(&res, root); - } - utils::Check(res == s, "[%d] TestBcast fail", rank); -} - -int main(int argc, char *argv[]) { - if (argc < 3) { - printf("Usage: <ndata> <config>\n"); - return 0; - } - int n = atoi(argv[1]); - rabit::Init(argc, argv); - int rank = rabit::GetRank(); - int nproc = rabit::GetWorldSize(); - std::string name = rabit::GetProcessorName(); - Model model; - srand(0); - int ntrial = 0; - for (int i = 1; i < argc; ++i) { - int nt; - if (sscanf(argv[i], "rabit_num_trial=%d", &nt) == 1) ntrial = nt; - } - int iter = rabit::LoadCheckPoint(&model); - if (iter == 0) { - model.InitModel(n); - printf("[%d] reload-trial=%d, init iter=%d\n", rank, ntrial, iter); - } else { - printf("[%d] reload-trial=%d, restart at iter=%d\n", rank, ntrial, iter); - } - for (int r = iter; r < 3; ++r) { - TestMax(&model, ntrial, r); - printf("[%d] !!!TestMax pass, iter=%d\n", rank, r); - int step = std::max(nproc / 3, 1); - for (int i = 0; i < nproc; i += step) { - TestBcast(n, i, ntrial, r); - } - printf("[%d] !!!TestBcast pass, iter=%d\n", rank, r); - TestSum(&model, ntrial, r); - printf("[%d] !!!TestSum pass, iter=%d\n", rank, r); - rabit::LazyCheckPoint(&model); - printf("[%d] !!!CheckPoint pass, iter=%d\n", rank, r); - } - rabit::Finalize(); - return 0; -}
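lazy_recover.cc drives rabit's standard recovery loop: LoadCheckPoint returns the round to resume from (0 on a fresh start), each round runs its collectives, and a checkpoint is taken at the end. LazyCheckPoint records only a reference to the model, which is safe as long as the model is not mutated between checkpoints. The Python wrapper later in this patch exposes the same loop; a minimal sketch against that API, assuming the global-only form of load_checkpoint returns a (version, model) pair and that checkpoint accepts just the global model:

```python
import numpy as np
import rabit

rabit.init()
version, model = rabit.load_checkpoint()  # (0, None) on a fresh start
if version == 0:
    model = np.ones(100, dtype=np.float32)
for r in xrange(version, 3):
    # a failed peer replays from the last checkpointed version
    model = rabit.allreduce(model, rabit.MAX)
    rabit.checkpoint(model)
rabit.finalize()
```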
diff --git a/subtree/rabit/test/local_recover.cc b/subtree/rabit/test/local_recover.cc deleted file mode 100644 index 5162d5a2d..000000000 --- a/subtree/rabit/test/local_recover.cc +++ /dev/null @@ -1,138 +0,0 @@ -// this is a test case to test whether rabit can recover model when -// facing an exception -#include <rabit.h> -#include <rabit/utils.h> -#include <cstdio> -#include <cstdlib> -#include <cmath> - -using namespace rabit; - -// dummy model -class Model : public rabit::Serializable { - public: - // model data - std::vector<float> data; - // load from stream - virtual void Load(rabit::Stream *fi) { - fi->Read(&data); - } - /*! \brief save the model to the stream */ - virtual void Save(rabit::Stream *fo) const { - fo->Write(data); - } - virtual void InitModel(size_t n, float v) { - data.clear(); - data.resize(n, v); - } -}; - -inline void TestMax(Model *model, Model *local, int ntrial, int iter) { - int rank = rabit::GetRank(); - int nproc = rabit::GetWorldSize(); - const int z = iter + 111; - std::vector<float> ndata(model->data.size()); - rabit::Allreduce<op::Max>(&ndata[0], ndata.size(), - [&]() { - // use lambda expression to prepare the data - for (size_t i = 0; i < ndata.size(); ++i) { - ndata[i] = (i * (rank+1)) % z + local->data[i]; - } - }); - - for (size_t i = 0; i < ndata.size(); ++i) { - float rmax = (i * 1) % z + model->data[i]; - for (int r = 0; r < nproc; ++r) { - rmax = std::max(rmax, (float)((i * (r+1)) % z) + model->data[i] + r); - } - utils::Check(rmax == ndata[i], "[%d] TestMax check failure", rank); - } - model->data = ndata; - local->data = ndata; - for (size_t i = 0; i < ndata.size(); ++i) { - local->data[i] = ndata[i] + rank; - } -} - -inline void TestSum(Model *model, Model *local, int ntrial, int iter) { - int rank = rabit::GetRank(); - int nproc = rabit::GetWorldSize(); - const int z = 131 + iter; - - std::vector<float> ndata(model->data.size()); - for (size_t i = 0; i < ndata.size(); ++i) { - ndata[i] = (i * (rank+1)) % z + local->data[i]; - } - Allreduce<op::Sum>(&ndata[0], ndata.size()); - - for (size_t i = 0; i < ndata.size(); ++i) { - float rsum = 0.0f; - for (int r = 0; r < nproc; ++r) { - rsum += (float)((i * (r+1)) % z) + model->data[i] + r; - } - utils::Check(fabsf(rsum - ndata[i]) < 1e-5 , - "[%d] TestSum check failure, local=%g, allreduce=%g", rank, rsum, ndata[i]); - } - model->data = ndata; - for (size_t i = 0; i < ndata.size(); ++i) { - local->data[i] = ndata[i] + rank; - } -} - -inline void TestBcast(size_t n, int root, int ntrial, int iter) { - int rank = rabit::GetRank(); - std::string s; s.resize(n); - for (size_t i = 0; i < n; ++i) { - s[i] = char(i % 126 + 1); - } - std::string res; - if (root == rank) { - res = s; - rabit::Broadcast(&res, root); - } else { - rabit::Broadcast(&res, root); - } - utils::Check(res == s, "[%d] TestBcast fail", rank); -} - -int main(int argc, char *argv[]) { - if (argc < 3) { - printf("Usage: <ndata> <config>\n"); - return 0; - } - int n = atoi(argv[1]); - rabit::Init(argc, argv); - int rank = rabit::GetRank(); - int nproc = rabit::GetWorldSize(); - std::string name = rabit::GetProcessorName(); - Model model, local; - srand(0); - int ntrial = 0; - for (int i = 1; i < argc; ++i) { - int nt; - if (sscanf(argv[i], "repeat=%d", &nt) == 1) ntrial = nt; - } - int iter = rabit::LoadCheckPoint(&model, &local); - if (iter == 0) { - model.InitModel(n, 1.0f); - local.InitModel(n, 1.0f + rank); - printf("[%d] reload-trial=%d, init iter=%d\n", rank, ntrial, iter); - } else { - printf("[%d] reload-trial=%d, restart at iter=%d\n", rank, ntrial, iter); - } - for (int r = iter; r < 3; ++r) { - TestMax(&model, &local, ntrial, r); - printf("[%d] !!!TestMax pass, iter=%d\n", rank, r); - int step = std::max(nproc / 3, 1); - for (int i = 0; i < nproc; i += step) { - TestBcast(n, i, ntrial, r); - } - printf("[%d] !!!TestBcast pass, iter=%d\n", rank, r); - TestSum(&model, &local, ntrial, r); - printf("[%d] !!!TestSum pass, iter=%d\n", rank, r); - rabit::CheckPoint(&model, &local); - printf("[%d] !!!CheckPoint pass, iter=%d\n", rank, r); - } - rabit::Finalize(); - return 0; -}
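The lambda handed to Allreduce above is rabit's lazy-preparation hook: it fills the input buffer only when the result cannot be recovered from a cached round during failure replay, so expensive setup work is skipped on replayed calls. The Python wrapper exposes the same hook as the prepare_fun argument of allreduce; a small sketch:

```python
import numpy as np
import rabit

buf = np.zeros(100, dtype=np.float32)

def prepare(data):
    # only invoked when the allreduce result cannot be recovered directly,
    # mirroring the C++ lambda above
    data[:] = np.arange(data.size, dtype=np.float32) % 7

res = rabit.allreduce(buf, rabit.SUM, prepare_fun=prepare)
```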
diff --git a/subtree/rabit/test/local_recover.py b/subtree/rabit/test/local_recover.py deleted file mode 100755 index e35bd3177..000000000 --- a/subtree/rabit/test/local_recover.py +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/python -import rabit -import numpy as np - -rabit.init(lib='mock') -rank = rabit.get_rank() -n = 10 -nround = 3 -data = np.ones(n) * rank - -version, model, local = rabit.load_checkpoint(True) -if version == 0: - model = np.zeros(n) - local = np.ones(n) -else: - print '[%d] restart from version %d' % (rank, version) - -for i in xrange(version, nround): - res = rabit.allreduce(data + model + local, rabit.SUM) - print '[%d] iter=%d: %s' % (rank, i, str(res)) - model = res - local[:] = i - rabit.checkpoint(model, local) - -rabit.finalize()
diff --git a/subtree/rabit/test/model_recover.cc b/subtree/rabit/test/model_recover.cc deleted file mode 100644 index f833ef295..000000000 --- a/subtree/rabit/test/model_recover.cc +++ /dev/null @@ -1,127 +0,0 @@ -// this is a test case to test whether rabit can recover model when -// facing an exception -#include <rabit.h> -#include <rabit/utils.h> -#include <cstdio> -#include <cstdlib> -#include <cmath> -using namespace rabit; - -// dummy model -class Model : public rabit::Serializable { - public: - // model data - std::vector<float> data; - // load from stream - virtual void Load(rabit::Stream *fi) { - fi->Read(&data); - } - /*! \brief save the model to the stream */ - virtual void Save(rabit::Stream *fo) const { - fo->Write(data); - } - virtual void InitModel(size_t n) { - data.clear(); - data.resize(n, 1.0f); - } -}; - -inline void TestMax(Model *model, int ntrial, int iter) { - int rank = rabit::GetRank(); - int nproc = rabit::GetWorldSize(); - const int z = iter + 111; - - std::vector<float> ndata(model->data.size()); - for (size_t i = 0; i < ndata.size(); ++i) { - ndata[i] = (i * (rank+1)) % z + model->data[i]; - } - rabit::Allreduce<op::Max>(&ndata[0], ndata.size()); - - for (size_t i = 0; i < ndata.size(); ++i) { - float rmax = (i * 1) % z + model->data[i]; - for (int r = 0; r < nproc; ++r) { - rmax = std::max(rmax, (float)((i * (r+1)) % z) + model->data[i]); - } - utils::Check(rmax == ndata[i], "[%d] TestMax check failure, i=%lu, rmax=%f, ndata=%f", rank, i, rmax, ndata[i]); - } - model->data = ndata; -} - -inline void TestSum(Model *model, int ntrial, int iter) { - int rank = rabit::GetRank(); - int nproc = rabit::GetWorldSize(); - const int z = 131 + iter; - - std::vector<float> ndata(model->data.size()); - for (size_t i = 0; i < ndata.size(); ++i) { - ndata[i] = (i * (rank+1)) % z + model->data[i]; - } - Allreduce<op::Sum>(&ndata[0], ndata.size()); - - for (size_t i = 0; i < ndata.size(); ++i) { - float rsum = model->data[i] * nproc; - for (int r = 0; r < nproc; ++r) { - rsum += (float)((i * (r+1)) % z); - } - utils::Check(fabsf(rsum - ndata[i]) < 1e-5 , - "[%d] TestSum check failure, local=%g, allreduce=%g", rank, rsum, ndata[i]); - } - model->data = ndata; -} - -inline void TestBcast(size_t n, int root, int ntrial, int iter) { - int rank = rabit::GetRank(); - std::string s; s.resize(n); - for (size_t i = 0; i < n; ++i) { - s[i] = char(i % 126 + 1); - } - std::string res; - if (root == rank) { - res = s; - rabit::Broadcast(&res, root); - } else { - rabit::Broadcast(&res, root); - } - utils::Check(res == s, "[%d] TestBcast fail", rank); -} - -int main(int argc, char *argv[]) { - if (argc < 3) { - printf("Usage: <ndata> <config>\n"); - return 0; - } - int n = atoi(argv[1]); - rabit::Init(argc, argv); - int rank = rabit::GetRank(); - int nproc = rabit::GetWorldSize(); - std::string name = rabit::GetProcessorName(); - Model model; - srand(0); - int ntrial = 0; - for (int i = 1; i < argc; ++i) { - int nt; - if (sscanf(argv[i], "rabit_num_trial=%d", &nt) == 1) ntrial = nt; - } - int iter = rabit::LoadCheckPoint(&model); - if (iter == 0) { - model.InitModel(n); - printf("[%d] reload-trial=%d, init iter=%d\n", rank, ntrial, iter); - } else { - printf("[%d] reload-trial=%d, restart at iter=%d\n", rank, ntrial, iter); - } - for (int r = iter; r < 3; ++r) { - TestMax(&model, ntrial, r); - printf("[%d] !!!TestMax pass, iter=%d\n", rank, r); - int step = std::max(nproc / 3, 1); - for (int i = 0; i < nproc; i += step) { - TestBcast(n, i, ntrial, r); - } - printf("[%d] !!!TestBcast pass, iter=%d\n", rank, r); - TestSum(&model, ntrial, r); - printf("[%d] !!!TestSum pass, iter=%d\n", rank, r); - rabit::CheckPoint(&model); - printf("[%d] !!!CheckPoint pass, iter=%d\n", rank, r); - } - rabit::Finalize(); - return 0; -}
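These tests learn how many times they were restarted from rabit_num_trial: the demo launcher below relaunches any worker that exits with code 254 and carries the retry count along, as an environment variable and, with some launchers, as a rabit_num_trial=k command-line argument. A worker-side sketch that accepts both spellings (the exact plumbing depends on the launcher):

```python
import os
import sys

ntrial = int(os.environ.get('rabit_num_trial', 0))
# some launchers pass environment settings as extra argv entries instead
for arg in sys.argv[1:]:
    if arg.startswith('rabit_num_trial='):
        ntrial = int(arg.split('=', 1)[1])
print '[trial %d] starting up' % ntrial
```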
diff --git a/subtree/rabit/test/speed_runner.py b/subtree/rabit/test/speed_runner.py deleted file mode 100644 index 1644bfe99..000000000 --- a/subtree/rabit/test/speed_runner.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -import argparse -import sys - -def main(): - parser = argparse.ArgumentParser(description='Run the rabit and MPI speed tests over several data sizes and machine counts') - parser.add_argument('-ho', '--host_dir', required=True) - parser.add_argument('-s', '--submit_script', required=True) - parser.add_argument('-rex', '--rabit_exec', required=True) - parser.add_argument('-mpi', '--mpi_exec', required=True) - args = parser.parse_args() - - ndata = [10**4, 10**5, 10**6, 10**7] - nrepeat = [10**4, 10**3, 10**2, 10] - - machines = [2,4,8,16,31] - - executables = [args.rabit_exec, args.mpi_exec] - - for executable in executables: - sys.stderr.write('Executable %s\n' % executable) - sys.stderr.flush() - for i, data in enumerate(ndata): - for machine in machines: - host_file = os.path.join(args.host_dir, 'hosts%d' % machine) - cmd = 'python %s %d %s %s %d %d' % (args.submit_script, machine, host_file, executable, data, nrepeat[i]) - sys.stderr.write('data=%d, repeat=%d, machine=%d\n' % (data, nrepeat[i], machine)) - sys.stderr.flush() - os.system(cmd) - sys.stderr.write('\n') - sys.stderr.flush() - -if __name__ == "__main__": - main() diff --git a/subtree/rabit/test/speed_test.cc b/subtree/rabit/test/speed_test.cc deleted file mode 100644 index 68891bd31..000000000 --- a/subtree/rabit/test/speed_test.cc +++ /dev/null @@ -1,100 +0,0 @@ -// This program is used to test the speed of rabit API -#include <rabit.h> -#include <rabit/utils.h> -#include <rabit/timer.h> -#include <cstdio> -#include <cstdlib> -#include <cstring> -#include <cmath> - -using namespace rabit; - -double max_tdiff, sum_tdiff, bcast_tdiff, tot_tdiff; - -inline void TestMax(size_t n) { - int rank = rabit::GetRank(); - std::vector<float> ndata(n); - for (size_t i = 0; i < ndata.size(); ++i) { - ndata[i] = (i * (rank+1)) % 111; - } - double tstart = utils::GetTime(); - rabit::Allreduce<op::Max>(&ndata[0], ndata.size()); - max_tdiff += utils::GetTime() - tstart; -} - -inline void TestSum(size_t n) { - int rank = rabit::GetRank(); - const int z = 131; - std::vector<float> ndata(n); - for (size_t i = 0; i < ndata.size(); ++i) { - ndata[i] = (i * (rank+1)) % z; - } - double tstart = utils::GetTime(); - rabit::Allreduce<op::Sum>(&ndata[0], ndata.size()); - sum_tdiff += utils::GetTime() - tstart; -} - -inline void TestBcast(size_t n, int root) { - int rank = rabit::GetRank(); - std::string s; s.resize(n); - for (size_t i = 0; i < n; ++i) { - s[i] = char(i % 126 + 1); - } - std::string res; - res.resize(n); - if (root == rank) { - res = s; - } - double tstart = utils::GetTime(); - rabit::Broadcast(&res[0], res.length(), root); - bcast_tdiff += utils::GetTime() - tstart; -} - -inline void PrintStats(const char *name, double tdiff, int n, int nrep, size_t size) { - int nproc = rabit::GetWorldSize(); - double tsum = tdiff; - rabit::Allreduce<op::Sum>(&tsum, 1); - double tavg = tsum / nproc; - double tsqr = tdiff - tavg; - tsqr *= tsqr; - rabit::Allreduce<op::Sum>(&tsqr, 1); - double tstd = sqrt(tsqr / nproc); - if (rabit::GetRank() == 0) { - rabit::TrackerPrintf("%s: mean=%g, std=%g sec\n", name, tavg, tstd); - double ndata = n; - ndata *= nrep * size; - if (n != 0) { - rabit::TrackerPrintf("%s-speed: %g MB/sec\n", name, (ndata / tavg) / 1024 / 1024 ); - } - } -} - -int main(int argc, char *argv[]) { - if (argc < 3) { - printf("Usage: <ndata> <nrepeat>\n"); - return 0; - } - srand(0); - int n = atoi(argv[1]); - int nrep = atoi(argv[2]); - utils::Check(nrep >= 1, "need to at least repeat running once"); - rabit::Init(argc, argv); - //int rank = rabit::GetRank(); - int nproc = rabit::GetWorldSize(); - std::string name = rabit::GetProcessorName(); - max_tdiff = sum_tdiff = bcast_tdiff = 0; - double tstart = utils::GetTime(); - for (int i = 0; i < nrep; ++i) { - TestMax(n); - TestSum(n); - TestBcast(n, rand() % nproc); - } - tot_tdiff = utils::GetTime() - tstart; - // use allreduce to get the sum and std of time - PrintStats("max_tdiff", max_tdiff, n, nrep, sizeof(float)); - PrintStats("sum_tdiff", sum_tdiff, n, nrep, sizeof(float)); - PrintStats("bcast_tdiff", bcast_tdiff, n, nrep, sizeof(char)); - PrintStats("tot_tdiff", tot_tdiff, 0, nrep, sizeof(float)); - rabit::Finalize(); - return 0; -}
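PrintStats derives the cross-worker mean and standard deviation of a timing with two sum-allreduces: first average the per-worker values, then average the squared deviations. The same two-pass computation expressed with the Python wrapper's API:

```python
import numpy as np
import rabit

def mean_std(t):
    # t: this worker's scalar timing; every worker gets the same result back
    nproc = rabit.get_world_size()
    total = rabit.allreduce(np.array([t]), rabit.SUM)[0]
    mean = total / nproc
    sq = rabit.allreduce(np.array([(t - mean) ** 2]), rabit.SUM)[0]
    return mean, np.sqrt(sq / nproc)
```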
PrintStats("sum_tdiff", sum_tdiff, n, nrep, sizeof(float)); - PrintStats("bcast_tdiff", bcast_tdiff, n, nrep, sizeof(char)); - PrintStats("tot_tdiff", tot_tdiff, 0, nrep, sizeof(float)); - rabit::Finalize(); - return 0; -} diff --git a/subtree/rabit/test/test.mk b/subtree/rabit/test/test.mk deleted file mode 100644 index 282a82bc4..000000000 --- a/subtree/rabit/test/test.mk +++ /dev/null @@ -1,29 +0,0 @@ -# this is a makefile used to show testcases of rabit -.PHONY: all - -all: model_recover_10_10k model_recover_10_10k_die_same - -# this experiment test recovery with actually process exit, use keepalive to keep program alive -model_recover_10_10k: - ../tracker/rabit_demo.py -n 10 model_recover 10000 mock=0,0,1,0 mock=1,1,1,0 - -model_recover_10_10k_die_same: - ../tracker/rabit_demo.py -n 10 model_recover 10000 mock=0,0,1,0 mock=1,1,1,0 mock=0,1,1,0 mock=4,1,1,0 mock=9,1,1,0 - -model_recover_10_10k_die_hard: - ../tracker/rabit_demo.py -n 10 model_recover 10000 mock=0,0,1,0 mock=1,1,1,0 mock=1,1,1,1 mock=0,1,1,0 mock=4,1,1,0 mock=9,1,1,0 mock=8,1,2,0 mock=4,1,3,0 - -local_recover_10_10k: - ../tracker/rabit_demo.py -n 10 local_recover 10000 mock=0,0,1,0 mock=1,1,1,0 mock=0,1,1,0 mock=4,1,1,0 mock=9,1,1,0 mock=1,1,1,1 - -pylocal_recover_10_10k: - ../tracker/rabit_demo.py -n 10 ./local_recover.py 10000 mock=0,0,1,0 mock=1,1,1,0 mock=0,1,1,0 mock=4,1,1,0 mock=9,1,1,0 mock=1,1,1,1 - -lazy_recover_10_10k_die_hard: - ../tracker/rabit_demo.py -n 10 lazy_recover 10000 mock=0,0,1,0 mock=1,1,1,0 mock=1,1,1,1 mock=0,1,1,0 mock=4,1,1,0 mock=9,1,1,0 mock=8,1,2,0 mock=4,1,3,0 - -lazy_recover_10_10k_die_same: - ../tracker/rabit_demo.py -n 10 lazy_recover 10000 mock=0,0,1,0 mock=1,1,1,0 mock=0,1,1,0 mock=4,1,1,0 mock=9,1,1,0 - -ringallreduce_10_10k: - ../tracker/rabit_demo.py -v 1 -n 10 model_recover 100 rabit_reduce_ring_mincount=10 diff --git a/subtree/rabit/tracker/README.md b/subtree/rabit/tracker/README.md deleted file mode 100644 index 23d14b079..000000000 --- a/subtree/rabit/tracker/README.md +++ /dev/null @@ -1,12 +0,0 @@ -Trackers -===== -This folder contains tracker scripts that can be used to submit yarn jobs to different platforms, -the example guidelines are in the script themselfs - -***Supported Platforms*** -* Local demo: [rabit_demo.py](rabit_demo.py) -* MPI: [rabit_mpi.py](rabit_mpi.py) -* Yarn (Hadoop): [rabit_yarn.py](rabit_yarn.py) - - It is also possible to submit via hadoop streaming with rabit_hadoop_streaming.py - - However, it is higly recommended to use rabit_yarn.py because this will allocate resources more precisely and fits machine learning scenarios -* Sun Grid engine: [rabit_sge.py](rabit_sge.py) diff --git a/subtree/rabit/tracker/rabit_demo.py b/subtree/rabit/tracker/rabit_demo.py deleted file mode 100755 index 6008e0efc..000000000 --- a/subtree/rabit/tracker/rabit_demo.py +++ /dev/null @@ -1,96 +0,0 @@ -#!/usr/bin/env python -""" -This is the demo submission script of rabit for submitting jobs in local machine -""" -import argparse -import sys -import os -import subprocess -from threading import Thread -import rabit_tracker as tracker -if os.name == 'nt': - WRAPPER_PATH = os.path.dirname(__file__) + '\\..\\wrapper' -else: - WRAPPER_PATH = os.path.dirname(__file__) + '/../wrapper' - -parser = argparse.ArgumentParser(description='Rabit script to submit rabit job locally using python subprocess') -parser.add_argument('-n', '--nworker', required=True, type=int, - help = 'number of worker proccess to be launched') -parser.add_argument('-v', '--verbose', default=0, choices=[0, 1], 
diff --git a/subtree/rabit/tracker/README.md b/subtree/rabit/tracker/README.md deleted file mode 100644 index 23d14b079..000000000 --- a/subtree/rabit/tracker/README.md +++ /dev/null @@ -1,12 +0,0 @@ -Trackers -===== -This folder contains tracker scripts that can be used to submit rabit jobs to different platforms; -example guidelines are in the scripts themselves - -***Supported Platforms*** -* Local demo: [rabit_demo.py](rabit_demo.py) -* MPI: [rabit_mpi.py](rabit_mpi.py) -* Yarn (Hadoop): [rabit_yarn.py](rabit_yarn.py) - - It is also possible to submit via hadoop streaming with rabit_hadoop_streaming.py - - However, it is highly recommended to use rabit_yarn.py because it allocates resources more precisely and fits machine learning scenarios better -* Sun Grid engine: [rabit_sge.py](rabit_sge.py) diff --git a/subtree/rabit/tracker/rabit_demo.py b/subtree/rabit/tracker/rabit_demo.py deleted file mode 100755 index 6008e0efc..000000000 --- a/subtree/rabit/tracker/rabit_demo.py +++ /dev/null @@ -1,96 +0,0 @@ -#!/usr/bin/env python -""" -This is the demo submission script of rabit for submitting jobs on the local machine -""" -import argparse -import sys -import os -import subprocess -from threading import Thread -import rabit_tracker as tracker -if os.name == 'nt': - WRAPPER_PATH = os.path.dirname(__file__) + '\\..\\wrapper' -else: - WRAPPER_PATH = os.path.dirname(__file__) + '/../wrapper' - -parser = argparse.ArgumentParser(description='Rabit script to submit rabit jobs locally using python subprocess') -parser.add_argument('-n', '--nworker', required=True, type=int, - help = 'number of worker processes to be launched') -parser.add_argument('-v', '--verbose', default=0, choices=[0, 1], type=int, - help = 'print more messages into the console') -parser.add_argument('command', nargs='+', - help = 'command for rabit program') -args = parser.parse_args() - -# bash script for keepalive -# use it so that python does not need to communicate with the subprocess -echo="echo %s rabit_num_trial=$nrep;" -keepalive = """ -nrep=0 -rc=254 -while [ $rc -eq 254 ]; -do - export rabit_num_trial=$nrep - %s - %s - rc=$?; - nrep=$((nrep+1)); -done -""" - -def exec_cmd(cmd, taskid, worker_env): - if cmd[0].find('/') == -1 and os.path.exists(cmd[0]) and os.name != 'nt': - cmd[0] = './' + cmd[0] - cmd = ' '.join(cmd) - env = os.environ.copy() - for k, v in worker_env.items(): - env[k] = str(v) - env['rabit_task_id'] = str(taskid) - env['PYTHONPATH'] = WRAPPER_PATH - - ntrial = 0 - while True: - if os.name == 'nt': - env['rabit_num_trial'] = str(ntrial) - ret = subprocess.call(cmd, shell=True, env = env) - if ret == 254: - ntrial += 1 - continue - else: - if args.verbose != 0: - bash = keepalive % (echo % cmd, cmd) - else: - bash = keepalive % ('', cmd) - ret = subprocess.call(bash, shell=True, executable='bash', env = env) - if ret == 0: - if args.verbose != 0: - print 'Thread %d exit with 0' % taskid - return - else: - if os.name == 'nt': - sys.exit(-1) - else: - raise Exception('Get nonzero return code=%d' % ret) -# -# Note: this submit script is only used for demo purpose -# submission script using python multi-threading -# -def mthread_submit(nslave, worker_args, worker_envs): - """ - customized submit script that submits nslave jobs; each must contain args as parameter. - Note this can be a lambda function containing additional parameters in input. - Parameters - nslave number of slave processes to start up - args arguments to launch each job - this usually includes the parameters of master_uri and parameters passed into submit - """ - procs = {} - for i in range(nslave): - procs[i] = Thread(target = exec_cmd, args = (args.command + worker_args, i, worker_envs)) - procs[i].daemon = True - procs[i].start() - for i in range(nslave): - procs[i].join() - -# call submit, with nslave, the commands to run each job and submit function
tracker.submit(args.nworker, [], fun_submit = mthread_submit, verbose = args.verbose)
diff --git a/subtree/rabit/tracker/rabit_hadoop_streaming.py b/subtree/rabit/tracker/rabit_hadoop_streaming.py deleted file mode 100755 index 22b534d79..000000000 --- a/subtree/rabit/tracker/rabit_hadoop_streaming.py +++ /dev/null @@ -1,165 +0,0 @@ -#!/usr/bin/env python -""" -Deprecated - -This is a script to submit rabit jobs using hadoop streaming. -It will submit the rabit processes as mappers of MapReduce. - -This script is deprecated; it is highly recommended to use rabit_yarn.py instead -""" -import argparse -import sys -import os -import time -import subprocess -import warnings -import rabit_tracker as tracker - -WRAPPER_PATH = os.path.dirname(__file__) + '/../wrapper' - -#!!! Set path to hadoop and hadoop streaming jar here -hadoop_binary = 'hadoop' -hadoop_streaming_jar = None - -# code -hadoop_home = os.getenv('HADOOP_HOME') -if hadoop_home != None: - if hadoop_binary == None: - hadoop_binary = hadoop_home + '/bin/hadoop' - assert os.path.exists(hadoop_binary), "HADOOP_HOME does not contain the hadoop binary" - if hadoop_streaming_jar == None: - hadoop_streaming_jar = hadoop_home + '/lib/hadoop-streaming.jar' - assert os.path.exists(hadoop_streaming_jar), "HADOOP_HOME does not contain the hadoop streaming jar" - -if hadoop_binary == None or hadoop_streaming_jar == None: - warnings.warn('Warning: Cannot auto-detect path to hadoop or hadoop-streaming jar\n'\ - '\tneed to set them via arguments -hs and -hb\n'\ - '\tTo enable auto-detection, you can set environment variable HADOOP_HOME'\ - ', or modify the defaults at the top of rabit_hadoop_streaming.py', stacklevel = 2) - -parser = argparse.ArgumentParser(description='Rabit script to submit rabit jobs using Hadoop Streaming. '\ - 'It is highly recommended to use rabit_yarn.py instead') -parser.add_argument('-n', '--nworker', required=True, type=int, - help = 'number of worker processes to be launched') -parser.add_argument('-hip', '--host_ip', default='auto', type=str, - help = 'host IP address if cannot be automatically guessed, specify the IP of submission machine') -parser.add_argument('-i', '--input', required=True, - help = 'input path in HDFS') -parser.add_argument('-o', '--output', required=True, - help = 'output path in HDFS') -parser.add_argument('-v', '--verbose', default=0, choices=[0, 1], type=int, - help = 'print more messages into the console') -parser.add_argument('-ac', '--auto_file_cache', default=1, choices=[0, 1], type=int, - help = 'whether automatically cache the files in the command to hadoop localfile, this is on by default') -parser.add_argument('-f', '--files', default = [], action='append', - help = 'the cached file list in mapreduce,'\ - ' the submission script will automatically cache all the files which appears in command'\ - ' This will also cause rewritten of all the file names in the command to current path,'\ - ' for example `../../kmeans ../kmeans.conf` will be rewritten to `./kmeans kmeans.conf`'\ - ' because the two files are cached to running folder.'\ - ' You may need this option to cache additional files.'\ - ' You can also use it to manually cache files when auto_file_cache is off') -parser.add_argument('--jobname', default='auto', help = 'customize jobname in tracker') -parser.add_argument('--timeout', default=600000000, type=int, - help = 'timeout (in milliseconds) of each mapper job, automatically set to a very long time,'\ - 'normally you do not need to set this ') -parser.add_argument('--vcores', default = -1, type=int, - help = 'number of vcores to request in each mapper, set it if each rabit job is multi-threaded')
-parser.add_argument('-mem', '--memory_mb', default=-1, type=int, - help = 'maximum memory used by the process. Guide: set it large (near mapred.cluster.max.map.memory.mb) '\ - 'if you are running multi-threaded rabit, '\ - 'so that each node can occupy all the mapper slots in a machine for maximum performance') -if hadoop_binary == None: - parser.add_argument('-hb', '--hadoop_binary', required = True, - help="path to hadoop binary file") -else: - parser.add_argument('-hb', '--hadoop_binary', default = hadoop_binary, - help="path to hadoop binary file") - -if hadoop_streaming_jar == None: - parser.add_argument('-hs', '--hadoop_streaming_jar', required = True, - help='path to hadoop streaming jar file') -else: - parser.add_argument('-hs', '--hadoop_streaming_jar', default = hadoop_streaming_jar, - help='path to hadoop streaming jar file') -parser.add_argument('command', nargs='+', - help = 'command for rabit program') -args = parser.parse_args() - -if args.jobname == 'auto': - args.jobname = ('Rabit[nworker=%d]:' % args.nworker) + args.command[0].split('/')[-1]; - -# detect hadoop version -(out, err) = subprocess.Popen('%s version' % args.hadoop_binary, shell = True, stdout=subprocess.PIPE).communicate() -out = out.split('\n')[0].split() -assert out[0] == 'Hadoop', 'cannot parse hadoop version string' -hadoop_version = out[1].split('.') -use_yarn = int(hadoop_version[0]) >= 2 -if use_yarn: - warnings.warn('It is highly recommended to use rabit_yarn.py to submit jobs to yarn instead', stacklevel = 2) - -print 'Current Hadoop Version is %s' % out[1] - -def hadoop_streaming(nworker, worker_args, worker_envs, use_yarn): - worker_envs['CLASSPATH'] = '`$HADOOP_HOME/bin/hadoop classpath --glob` ' - worker_envs['LD_LIBRARY_PATH'] = '{LD_LIBRARY_PATH}:$HADOOP_HDFS_HOME/lib/native:$JAVA_HOME/jre/lib/amd64/server' - fset = set() - if args.auto_file_cache: - for i in range(len(args.command)): - f = args.command[i] - if os.path.exists(f): - fset.add(f) - if i == 0: - args.command[i] = './' + args.command[i].split('/')[-1] - else: - args.command[i] = args.command[i].split('/')[-1] - if args.command[0].endswith('.py'): - flst = [WRAPPER_PATH + '/rabit.py', - WRAPPER_PATH + '/librabit_wrapper.so', - WRAPPER_PATH + '/librabit_wrapper_mock.so'] - for f in flst: - if os.path.exists(f): - fset.add(f) - kmap = {} - kmap['env'] = 'mapred.child.env' - # setup keymaps - if use_yarn: - kmap['nworker'] = 'mapreduce.job.maps' - kmap['jobname'] = 'mapreduce.job.name' - kmap['nthread'] = 'mapreduce.map.cpu.vcores' - kmap['timeout'] = 'mapreduce.task.timeout' - kmap['memory_mb'] = 'mapreduce.map.memory.mb' - else: - kmap['nworker'] = 'mapred.map.tasks' - kmap['jobname'] = 'mapred.job.name' - kmap['nthread'] = None - kmap['timeout'] = 'mapred.task.timeout' - kmap['memory_mb'] = 'mapred.job.map.memory.mb' - cmd = '%s jar %s' % (args.hadoop_binary, args.hadoop_streaming_jar) - cmd += ' -D%s=%d' % (kmap['nworker'], nworker) - cmd += ' -D%s=%s' % (kmap['jobname'], args.jobname) - envstr = ','.join('%s=%s' % (k, str(v)) for k, v in worker_envs.items()) - cmd += ' -D%s=\"%s\"' % (kmap['env'], envstr) - if args.vcores != -1: - if kmap['nthread'] is None: - warnings.warn('nthread can only be set in Yarn(Hadoop version greater than 2.0),'\ - 'it is recommended to use Yarn to submit rabit jobs', stacklevel = 2) - else: - cmd += ' -D%s=%d' % (kmap['nthread'], args.vcores) - cmd += ' -D%s=%d' % (kmap['timeout'], args.timeout) - if args.memory_mb != -1: - cmd += ' -D%s=%d' % (kmap['memory_mb'], args.memory_mb) - - cmd += ' -input %s -output %s' % (args.input, args.output) - cmd += ' -mapper \"%s\" -reducer \"/bin/cat\" ' % (' '.join(args.command + worker_args)) - if args.files != None: - for flst in args.files: - for f in flst.split('#'): - fset.add(f) - for f in fset: - cmd += ' -file %s' % f - print cmd - subprocess.check_call(cmd, shell = True)
-fun_submit = lambda nworker, worker_args, worker_envs: hadoop_streaming(nworker, worker_args, worker_envs, int(hadoop_version[0]) >= 2) -tracker.submit(args.nworker, [], fun_submit = fun_submit, verbose = args.verbose, hostIP = args.host_ip) diff --git a/subtree/rabit/tracker/rabit_mpi.py b/subtree/rabit/tracker/rabit_mpi.py deleted file mode 100755 index f62696050..000000000 --- a/subtree/rabit/tracker/rabit_mpi.py +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env python -""" -Submission script to submit rabit jobs using MPI -""" -import argparse -import sys -import os -import subprocess -import rabit_tracker as tracker - -parser = argparse.ArgumentParser(description='Rabit script to submit rabit job using MPI') -parser.add_argument('-n', '--nworker', required=True, type=int, - help = 'number of worker processes to be launched') -parser.add_argument('-v', '--verbose', default=0, choices=[0, 1], type=int, - help = 'print more messages into the console') -parser.add_argument('-H', '--hostfile', type=str, - help = 'the hostfile of the mpi cluster') -parser.add_argument('command', nargs='+', - help = 'command for rabit program') -args = parser.parse_args() -# -# submission script using MPI -# -def mpi_submit(nslave, worker_args, worker_envs): - """ - customized submit script that submits nslave jobs; each must contain args as parameter. - Note this can be a lambda function containing additional parameters in input. - Parameters - nslave number of slave processes to start up - args arguments to launch each job - this usually includes the parameters of master_uri and parameters passed into submit - """ - worker_args += ['%s=%s' % (k, str(v)) for k, v in worker_envs.items()] - if args.hostfile is None: - cmd = ' '.join(['mpirun -n %d' % (nslave)] + args.command + worker_args) - else: - cmd = ' '.join(['mpirun -n %d --hostfile %s' % (nslave, args.hostfile)] + args.command + worker_args) - print cmd - subprocess.check_call(cmd, shell = True) - -# call submit, with nslave, the commands to run each job and submit function -tracker.submit(args.nworker, [], fun_submit = mpi_submit, verbose = args.verbose) diff --git a/subtree/rabit/tracker/rabit_sge.py b/subtree/rabit/tracker/rabit_sge.py deleted file mode 100755 index 3026a4fcb..000000000 --- a/subtree/rabit/tracker/rabit_sge.py +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env python -""" -Submit rabit jobs to Sun Grid Engine -""" -import argparse -import sys -import os -import subprocess -import rabit_tracker as tracker - -parser = argparse.ArgumentParser(description='Rabit script to submit rabit jobs to Sun Grid Engine') -parser.add_argument('-n', '--nworker', required=True, type=int, - help = 'number of worker processes to be launched') -parser.add_argument('-q', '--queue', default='default', type=str, - help = 'the queue we want to submit the job to') -parser.add_argument('-hip', '--host_ip', default='auto', type=str, - help = 'host IP address if cannot be automatically guessed, specify the IP of submission machine') -parser.add_argument('--vcores', default = 1, type=int, - help = 'number of vcores to request in each mapper, set it if each rabit job is multi-threaded') -parser.add_argument('--jobname', default='auto', help = 'customize jobname in tracker') -parser.add_argument('--logdir', default='auto', help = 'customize the directory to place the logs')
-parser.add_argument('-v', '--verbose', default=0, choices=[0, 1], type=int, - help = 'print more messages into the console') -parser.add_argument('command', nargs='+', - help = 'command for rabit program') -args = parser.parse_args() - -if args.jobname == 'auto': - args.jobname = ('rabit%d.' % args.nworker) + args.command[0].split('/')[-1]; -if args.logdir == 'auto': - args.logdir = args.jobname + '.log' - -if os.path.exists(args.logdir): - if not os.path.isdir(args.logdir): - raise RuntimeError('specified logdir %s is a file instead of directory' % args.logdir) -else: - os.mkdir(args.logdir) - -runscript = '%s/runrabit.sh' % args.logdir -fo = open(runscript, 'w') -fo.write('source ~/.bashrc\n') -fo.write('\"$@\"\n') -fo.close() -# -# submission script using SGE -# -def sge_submit(nslave, worker_args, worker_envs): - """ - customized submit script that submits nslave jobs; each must contain args as parameter. - Note this can be a lambda function containing additional parameters in input. - Parameters - nslave number of slave processes to start up - args arguments to launch each job - this usually includes the parameters of master_uri and parameters passed into submit - """ - env_arg = ','.join(['%s=\"%s\"' % (k, str(v)) for k, v in worker_envs.items()]) - cmd = 'qsub -cwd -t 1-%d -S /bin/bash' % nslave - if args.queue != 'default': - cmd += ' -q %s' % args.queue - cmd += ' -N %s ' % args.jobname - cmd += ' -e %s -o %s' % (args.logdir, args.logdir) - cmd += ' -pe orte %d' % (args.vcores) - cmd += ' -v %s,PATH=${PATH}:.' % env_arg - cmd += ' %s %s' % (runscript, ' '.join(args.command + worker_args)) - print cmd - subprocess.check_call(cmd, shell = True) - print 'Waiting for the jobs to get up...' - -# call submit, with nslave, the commands to run each job and submit function -tracker.submit(args.nworker, [], fun_submit = sge_submit, verbose = args.verbose)
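Every launcher in this folder implements the same contract: a fun_submit(nslave, worker_args, worker_envs) callback that starts nslave copies of the user command with the tracker's environment, handed to rabit_tracker.submit, which blocks until all workers report shutdown. A custom launcher is therefore only a few lines; the ssh loop and host naming below are purely illustrative:

```python
import subprocess
import rabit_tracker as tracker

def ssh_submit(nslave, worker_args, worker_envs):
    # worker_envs carries rabit_tracker_uri/rabit_tracker_port;
    # like rabit_mpi.py, pass them along as key=value arguments
    envs = ['%s=%s' % (k, v) for k, v in worker_envs.items()]
    for i in range(nslave):
        host = 'node%03d' % i  # hypothetical host naming scheme
        cmd = ' '.join(['ssh', host, './my_program'] + worker_args + envs)
        subprocess.Popen(cmd, shell=True)

tracker.submit(4, [], fun_submit=ssh_submit, verbose=0)
```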
diff --git a/subtree/rabit/tracker/rabit_tracker.py b/subtree/rabit/tracker/rabit_tracker.py deleted file mode 100644 index d8e6ae84d..000000000 --- a/subtree/rabit/tracker/rabit_tracker.py +++ /dev/null @@ -1,317 +0,0 @@ -""" -Tracker script for rabit -Implements the tracker control protocol - - start rabit jobs - - help nodes to establish links with each other - -Tianqi Chen -""" - -import sys -import os -import socket -import struct -import subprocess -import random -import time -from threading import Thread - -""" -Extension of socket to handle recv and send of special data -""" -class ExSocket: - def __init__(self, sock): - self.sock = sock - def recvall(self, nbytes): - res = [] - sock = self.sock - nread = 0 - while nread < nbytes: - chunk = self.sock.recv(min(nbytes - nread, 1024)) - nread += len(chunk) - res.append(chunk) - return ''.join(res) - def recvint(self): - return struct.unpack('@i', self.recvall(4))[0] - def sendint(self, n): - self.sock.sendall(struct.pack('@i', n)) - def sendstr(self, s): - self.sendint(len(s)) - self.sock.sendall(s) - def recvstr(self): - slen = self.recvint() - return self.recvall(slen) - -# magic number used to verify existence of data -kMagic = 0xff99 - -class SlaveEntry: - def __init__(self, sock, s_addr): - slave = ExSocket(sock) - self.sock = slave - self.host = socket.gethostbyname(s_addr[0]) - magic = slave.recvint() - assert magic == kMagic, 'invalid magic number=%d from %s' % (magic, self.host) - slave.sendint(kMagic) - self.rank = slave.recvint() - self.world_size = slave.recvint() - self.jobid = slave.recvstr() - self.cmd = slave.recvstr() - - def decide_rank(self, job_map): - if self.rank >= 0: - return self.rank - if self.jobid != 'NULL' and self.jobid in job_map: - return job_map[self.jobid] - return -1 - - def assign_rank(self, rank, wait_conn, tree_map, parent_map, ring_map): - self.rank = rank - nnset = set(tree_map[rank]) - rprev, rnext = ring_map[rank] - self.sock.sendint(rank) - # send parent rank - self.sock.sendint(parent_map[rank]) - # send world size - self.sock.sendint(len(tree_map)) - self.sock.sendint(len(nnset)) - # send the ranks of the tree neighbors - for r in nnset: - self.sock.sendint(r) - # send prev link - if rprev != -1 and rprev != rank: - nnset.add(rprev) - self.sock.sendint(rprev) - else: - self.sock.sendint(-1) - # send next link - if rnext != -1 and rnext != rank: - nnset.add(rnext) - self.sock.sendint(rnext) - else: - self.sock.sendint(-1) - while True: - ngood = self.sock.recvint() - goodset = set([]) - for i in xrange(ngood): - goodset.add(self.sock.recvint()) - assert goodset.issubset(nnset) - badset = nnset - goodset - conset = [] - for r in badset: - if r in wait_conn: - conset.append(r) - self.sock.sendint(len(conset)) - self.sock.sendint(len(badset) - len(conset)) - for r in conset: - self.sock.sendstr(wait_conn[r].host) - self.sock.sendint(wait_conn[r].port) - self.sock.sendint(r) - nerr = self.sock.recvint() - if nerr != 0: - continue - self.port = self.sock.recvint() - rmset = [] - # all connections were successfully set up - for r in conset: - wait_conn[r].wait_accept -= 1 - if wait_conn[r].wait_accept == 0: - rmset.append(r) - for r in rmset: - wait_conn.pop(r, None) - self.wait_accept = len(badset) - len(conset) - return rmset
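SlaveEntry.__init__ above fixes the wire order of a worker's greeting: the magic number (echoed back by the tracker), then rank, world size, job id, and a command string, all in the ExSocket framing. The worker-side mirror of that handshake, sketched with placeholder connection parameters:

```python
import socket
import struct

kMagic = 0xff99

def greet_tracker(uri='127.0.0.1', port=9091, rank=-1, world_size=-1,
                  jobid='NULL', cmd='start'):
    sock = socket.create_connection((uri, port))
    sock.sendall(struct.pack('@i', kMagic))
    magic, = struct.unpack('@i', sock.recv(4))
    assert magic == kMagic, 'tracker handshake failed'
    for v in (rank, world_size):      # -1 lets the tracker assign a rank
        sock.sendall(struct.pack('@i', v))
    for s in (jobid, cmd):            # length-prefixed strings
        sock.sendall(struct.pack('@i', len(s)) + s)
    return sock
```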
-class Tracker: - def __init__(self, port = 9091, port_end = 9999, verbose = True, hostIP = 'auto'): - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - for port in range(port, port_end): - try: - sock.bind(('', port)) - self.port = port - break - except socket.error: - continue - sock.listen(128) - self.sock = sock - self.verbose = verbose - if hostIP == 'auto': - hostIP = 'ip' - self.hostIP = hostIP - self.log_print('start listen on %s:%d' % (socket.gethostname(), self.port), 1) - def __del__(self): - self.sock.close() - def slave_envs(self): - """ - get environment variables for slaves - can be passed in as args or envs - """ - if self.hostIP == 'dns': - host = socket.gethostname() - elif self.hostIP == 'ip': - host = socket.gethostbyname(socket.getfqdn()) - else: - host = self.hostIP - return {'rabit_tracker_uri': host, - 'rabit_tracker_port': self.port} - def get_neighbor(self, rank, nslave): - rank = rank + 1 - ret = [] - if rank > 1: - ret.append(rank / 2 - 1) - if rank * 2 - 1 < nslave: - ret.append(rank * 2 - 1) - if rank * 2 < nslave: - ret.append(rank * 2) - return ret - def get_tree(self, nslave): - tree_map = {} - parent_map = {} - for r in range(nslave): - tree_map[r] = self.get_neighbor(r, nslave) - parent_map[r] = (r + 1) / 2 - 1 - return tree_map, parent_map - def find_share_ring(self, tree_map, parent_map, r): - """ - get a ring structure that tends to share nodes with the tree - return a list starting from r - """ - nset = set(tree_map[r]) - cset = nset - set([parent_map[r]]) - if len(cset) == 0: - return [r] - rlst = [r] - cnt = 0 - for v in cset: - vlst = self.find_share_ring(tree_map, parent_map, v) - cnt += 1 - if cnt == len(cset): - vlst.reverse() - rlst += vlst - return rlst - - def get_ring(self, tree_map, parent_map): - """ - get a ring connection used to recover local data - """ - assert parent_map[0] == -1 - rlst = self.find_share_ring(tree_map, parent_map, 0) - assert len(rlst) == len(tree_map) - ring_map = {} - nslave = len(tree_map) - for r in range(nslave): - rprev = (r + nslave - 1) % nslave - rnext = (r + 1) % nslave - ring_map[rlst[r]] = (rlst[rprev], rlst[rnext]) - return ring_map - - def get_link_map(self, nslave): - """ - get the link map; this is a bit hacky and calls for a better algorithm - to place similar nodes together - """ - tree_map, parent_map = self.get_tree(nslave) - ring_map = self.get_ring(tree_map, parent_map) - rmap = {0 : 0} - k = 0 - for i in range(nslave - 1): - k = ring_map[k][1] - rmap[k] = i + 1 - - ring_map_ = {} - tree_map_ = {} - parent_map_ ={} - for k, v in ring_map.items(): - ring_map_[rmap[k]] = (rmap[v[0]], rmap[v[1]]) - for k, v in tree_map.items(): - tree_map_[rmap[k]] = [rmap[x] for x in v] - for k, v in parent_map.items(): - if k != 0: - parent_map_[rmap[k]] = rmap[v] - else: - parent_map_[rmap[k]] = -1 - return tree_map_, parent_map_, ring_map_ - - def handle_print(self,slave, msg): - sys.stdout.write(msg) - - def log_print(self, msg, level): - if level == 1: - if self.verbose: - sys.stderr.write(msg + '\n') - else: - sys.stderr.write(msg + '\n') - - def accept_slaves(self, nslave): - # set of nodes that have finished the job - shutdown = {} - # set of nodes that are waiting for connections - wait_conn = {} - # maps job id to rank - job_map = {} - # list of workers that are pending to be assigned a rank - pending = [] - # lazy initialize tree_map - tree_map = None - - while len(shutdown) != nslave: - fd, s_addr = self.sock.accept() - s = SlaveEntry(fd, s_addr) - if s.cmd == 'print': - msg = s.sock.recvstr() - self.handle_print(s, msg) - continue - if s.cmd == 'shutdown': - assert s.rank >= 0 and s.rank not in shutdown - assert s.rank not in wait_conn - shutdown[s.rank] = s - self.log_print('Receive %s signal from %d' % (s.cmd, s.rank), 1) - continue - assert s.cmd == 'start' or s.cmd == 'recover' - # lazily initialize the slaves - if tree_map == None: - assert s.cmd == 'start' - if s.world_size > 0: - nslave = s.world_size - tree_map, parent_map, ring_map = self.get_link_map(nslave) - # set of nodes that are pending to get up - todo_nodes = range(nslave) - else: - assert s.world_size == -1 or s.world_size == nslave - if s.cmd == 'recover': - assert s.rank >= 0 - - rank = s.decide_rank(job_map) - # batch assignment of ranks - if rank == -1: - assert len(todo_nodes) != 0 - pending.append(s) - if len(pending) == len(todo_nodes): - pending.sort(key = lambda x : x.host) - for s in pending: - rank = todo_nodes.pop(0) - if s.jobid != 'NULL': - job_map[s.jobid] = rank - s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map) - if s.wait_accept > 0: - wait_conn[rank] = s - self.log_print('Receive %s signal from %s; assign rank %d' % (s.cmd, s.host, s.rank), 1) - if len(todo_nodes) == 0: - self.log_print('@tracker All of %d nodes getting started' % nslave, 2) - self.start_time = time.time() - else: - s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map) - self.log_print('Receive %s signal from %d' % (s.cmd, s.rank), 1) - if s.wait_accept > 0: - wait_conn[rank] = s - self.log_print('@tracker All nodes finished the job', 2) - self.end_time = time.time() - self.log_print('@tracker %s secs between node start and job finish' % str(self.end_time - self.start_time), 2) - -def submit(nslave, args, fun_submit, verbose, hostIP = 'auto'): - master = Tracker(verbose = verbose, hostIP = hostIP) - submit_thread = Thread(target = fun_submit, args = (nslave, args, 
master.slave_envs())) - submit_thread.daemon = True - submit_thread.start() - master.accept_slaves(nslave) - submit_thread.join() diff --git a/subtree/rabit/tracker/rabit_yarn.py b/subtree/rabit/tracker/rabit_yarn.py deleted file mode 100755 index 56b9d1e71..000000000 --- a/subtree/rabit/tracker/rabit_yarn.py +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/env python -""" -This is a script to submit rabit job via Yarn -rabit will run as a Yarn application -""" -import argparse -import sys -import os -import time -import subprocess -import warnings -import rabit_tracker as tracker - -WRAPPER_PATH = os.path.dirname(__file__) + '/../wrapper' -YARN_JAR_PATH = os.path.dirname(__file__) + '/../yarn/rabit-yarn.jar' -YARN_BOOT_PY = os.path.dirname(__file__) + '/../yarn/run_hdfs_prog.py' - -if not os.path.exists(YARN_JAR_PATH): - warnings.warn("cannot find \"%s\", I will try to run build" % YARN_JAR_PATH) - cmd = 'cd %s;./build.sh' % (os.path.dirname(__file__) + '/../yarn/') - print cmd - subprocess.check_call(cmd, shell = True, env = os.environ) - assert os.path.exists(YARN_JAR_PATH), "failed to build rabit-yarn.jar, try it manually" - -hadoop_binary = None -# code -hadoop_home = os.getenv('HADOOP_HOME') - -if hadoop_home != None: - if hadoop_binary == None: - hadoop_binary = hadoop_home + '/bin/hadoop' - assert os.path.exists(hadoop_binary), "HADOOP_HOME does not contain the hadoop binary" - - -parser = argparse.ArgumentParser(description='Rabit script to submit rabit jobs to Yarn.') -parser.add_argument('-n', '--nworker', required=True, type=int, - help = 'number of worker proccess to be launched') -parser.add_argument('-hip', '--host_ip', default='auto', type=str, - help = 'host IP address if cannot be automatically guessed, specify the IP of submission machine') -parser.add_argument('-v', '--verbose', default=0, choices=[0, 1], type=int, - help = 'print more messages into the console') -parser.add_argument('-q', '--queue', default='default', type=str, - help = 'the queue we want to submit the job to') -parser.add_argument('-ac', '--auto_file_cache', default=1, choices=[0, 1], type=int, - help = 'whether automatically cache the files in the command to hadoop localfile, this is on by default') -parser.add_argument('-f', '--files', default = [], action='append', - help = 'the cached file list in mapreduce,'\ - ' the submission script will automatically cache all the files which appears in command'\ - ' This will also cause rewritten of all the file names in the command to current path,'\ - ' for example `../../kmeans ../kmeans.conf` will be rewritten to `./kmeans kmeans.conf`'\ - ' because the two files are cached to running folder.'\ - ' You may need this option to cache additional files.'\ - ' You can also use it to manually cache files when auto_file_cache is off') -parser.add_argument('--jobname', default='auto', help = 'customize jobname in tracker') -parser.add_argument('--tempdir', default='/tmp', help = 'temporary directory in HDFS that can be used to store intermediate results') -parser.add_argument('--vcores', default = 1, type=int, - help = 'number of vcpores to request in each mapper, set it if each rabit job is multi-threaded') -parser.add_argument('-mem', '--memory_mb', default=1024, type=int, - help = 'maximum memory used by the process. 
Guide: set it large (near mapred.cluster.max.map.memory.mb) '\ - 'if you are running multi-threaded rabit, '\ - 'so that each node can occupy all the mapper slots in a machine for maximum performance') -parser.add_argument('--libhdfs-opts', default='-Xmx128m', type=str, - help = 'setting to be passed to libhdfs') -parser.add_argument('--name-node', default='default', type=str, - help = 'the namenode address of hdfs, libhdfs should connect to, normally leave it as default') - -parser.add_argument('command', nargs='+', - help = 'command for rabit program') -args = parser.parse_args() - -if args.jobname == 'auto': - args.jobname = ('Rabit[nworker=%d]:' % args.nworker) + args.command[0].split('/')[-1]; - -if hadoop_binary == None: - parser.add_argument('-hb', '--hadoop_binary', required = True, - help="path to hadoop binary file") -else: - parser.add_argument('-hb', '--hadoop_binary', default = hadoop_binary, - help="path to hadoop binary file") - -args = parser.parse_args() - -if args.jobname == 'auto': - args.jobname = ('Rabit[nworker=%d]:' % args.nworker) + args.command[0].split('/')[-1]; - -# detect hadoop version -(out, err) = subprocess.Popen('%s version' % args.hadoop_binary, shell = True, stdout=subprocess.PIPE).communicate() -out = out.split('\n')[0].split() -assert out[0] == 'Hadoop', 'cannot parse hadoop version string' -hadoop_version = out[1].split('.') - -(classpath, err) = subprocess.Popen('%s classpath --glob' % args.hadoop_binary, shell = True, stdout=subprocess.PIPE).communicate() - -if int(hadoop_version[0]) < 2: - print 'Current Hadoop Version is %s, rabit_yarn needs Yarn (Hadoop >= 2.0)' % out[1] - -def submit_yarn(nworker, worker_args, worker_env): - fset = set([YARN_JAR_PATH, YARN_BOOT_PY]) - if args.auto_file_cache != 0: - for i in range(len(args.command)): - f = args.command[i] - if os.path.exists(f): - fset.add(f) - if i == 0: - args.command[i] = './' + args.command[i].split('/')[-1] - else: - args.command[i] = './' + args.command[i].split('/')[-1] - if args.command[0].endswith('.py'): - flst = [WRAPPER_PATH + '/rabit.py', - WRAPPER_PATH + '/librabit_wrapper.so', - WRAPPER_PATH + '/librabit_wrapper_mock.so'] - for f in flst: - if os.path.exists(f): - fset.add(f) - - cmd = 'java -cp `%s classpath`:%s org.apache.hadoop.yarn.rabit.Client ' % (args.hadoop_binary, YARN_JAR_PATH) - env = os.environ.copy() - for k, v in worker_env.items(): - env[k] = str(v) - env['rabit_cpu_vcores'] = str(args.vcores) - env['rabit_memory_mb'] = str(args.memory_mb) - env['rabit_world_size'] = str(args.nworker) - env['rabit_hdfs_opts'] = str(args.libhdfs_opts) - env['rabit_hdfs_namenode'] = str(args.name_node) - - if args.files != None: - for flst in args.files: - for f in flst.split('#'): - fset.add(f) - for f in fset: - cmd += ' -file %s' % f - cmd += ' -jobname %s ' % args.jobname - cmd += ' -tempdir %s ' % args.tempdir - cmd += ' -queue %s ' % args.queue - cmd += (' '.join(['./run_hdfs_prog.py'] + args.command + worker_args)) - if args.verbose != 0: - print cmd - subprocess.check_call(cmd, shell = True, env = env) - -tracker.submit(args.nworker, [], fun_submit = submit_yarn, verbose = args.verbose, hostIP = args.host_ip) diff --git a/subtree/rabit/windows/.gitignore b/subtree/rabit/windows/.gitignore deleted file mode 100644 index 3bc83e45f..000000000 --- a/subtree/rabit/windows/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -*.suo -*.exp -*sdf -*.exe -ipch -x64 -*.filters -Release -*.user diff --git a/subtree/rabit/windows/README.md b/subtree/rabit/windows/README.md deleted file mode 100644 index 
9bdeb7988..000000000 --- a/subtree/rabit/windows/README.md +++ /dev/null @@ -1,12 +0,0 @@ -The solution has been created with Visual Studio Express 2010. -Make sure to compile the Release version - -Build -==== -* Build the project ```rabit``` , this will give you ```rabit.lib``` in ```x64\Release``` - -Build Your code with rabit -==== -* Add include to the dependency path of your project -* Add ```rabit.lib``` to the linker dependency -* The project basic is an example to show you how to build rabit with basic.cc diff --git a/subtree/rabit/windows/basic/basic.vcxproj b/subtree/rabit/windows/basic/basic.vcxproj deleted file mode 100644 index 109c405ef..000000000 --- a/subtree/rabit/windows/basic/basic.vcxproj +++ /dev/null @@ -1,118 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {A6A95246-EB0A-46BA-9471-5939CB6B0006} - basic - - - - Application - true - MultiByte - - - Application - true - MultiByte - - - Application - false - true - MultiByte - - - Application - false - true - MultiByte - - - - - - - - - - - - - - - - - - - - - Level3 - Disabled - - - true - - - - - Level3 - Disabled - - - true - - - - - Level3 - MaxSpeed - true - true - - - true - true - true - - - - - Level3 - MaxSpeed - true - true - ..\..\include - MultiThreaded - - - true - true - true - $(OutDir)\rabit.lib;%(AdditionalDependencies) - - - - - - - - - \ No newline at end of file diff --git a/subtree/rabit/windows/rabit.sln b/subtree/rabit/windows/rabit.sln deleted file mode 100644 index bf61256d6..000000000 --- a/subtree/rabit/windows/rabit.sln +++ /dev/null @@ -1,50 +0,0 @@ - -Microsoft Visual Studio Solution File, Format Version 11.00 -# Visual Studio 2010 -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "rabit", "rabit\rabit.vcxproj", "{D7B77D06-4F5F-4BD7-B81E-7CC8EBBE684F}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "basic", "basic\basic.vcxproj", "{A6A95246-EB0A-46BA-9471-5939CB6B0006}" - ProjectSection(ProjectDependencies) = postProject - {D7B77D06-4F5F-4BD7-B81E-7CC8EBBE684F} = {D7B77D06-4F5F-4BD7-B81E-7CC8EBBE684F} - EndProjectSection -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "rabit_wrapper", "rabit_wrapper\rabit_wrapper.vcxproj", "{2F89A7C5-CA4F-4D77-A728-6702D9F33F9F}" - ProjectSection(ProjectDependencies) = postProject - {D7B77D06-4F5F-4BD7-B81E-7CC8EBBE684F} = {D7B77D06-4F5F-4BD7-B81E-7CC8EBBE684F} - EndProjectSection -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|Win32 = Debug|Win32 - Debug|x64 = Debug|x64 - Release|Win32 = Release|Win32 - Release|x64 = Release|x64 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {D7B77D06-4F5F-4BD7-B81E-7CC8EBBE684F}.Debug|Win32.ActiveCfg = Debug|Win32 - {D7B77D06-4F5F-4BD7-B81E-7CC8EBBE684F}.Debug|Win32.Build.0 = Debug|Win32 - {D7B77D06-4F5F-4BD7-B81E-7CC8EBBE684F}.Debug|x64.ActiveCfg = Debug|x64 - {D7B77D06-4F5F-4BD7-B81E-7CC8EBBE684F}.Debug|x64.Build.0 = Debug|x64 - {D7B77D06-4F5F-4BD7-B81E-7CC8EBBE684F}.Release|Win32.ActiveCfg = Release|Win32 - {D7B77D06-4F5F-4BD7-B81E-7CC8EBBE684F}.Release|Win32.Build.0 = Release|Win32 - {D7B77D06-4F5F-4BD7-B81E-7CC8EBBE684F}.Release|x64.ActiveCfg = Release|x64 - {D7B77D06-4F5F-4BD7-B81E-7CC8EBBE684F}.Release|x64.Build.0 = Release|x64 - {A6A95246-EB0A-46BA-9471-5939CB6B0006}.Debug|Win32.ActiveCfg = Debug|Win32 - {A6A95246-EB0A-46BA-9471-5939CB6B0006}.Debug|Win32.Build.0 = Debug|Win32 - {A6A95246-EB0A-46BA-9471-5939CB6B0006}.Debug|x64.ActiveCfg = Debug|Win32 
- {A6A95246-EB0A-46BA-9471-5939CB6B0006}.Release|Win32.ActiveCfg = Release|Win32 - {A6A95246-EB0A-46BA-9471-5939CB6B0006}.Release|Win32.Build.0 = Release|Win32 - {A6A95246-EB0A-46BA-9471-5939CB6B0006}.Release|x64.ActiveCfg = Release|x64 - {A6A95246-EB0A-46BA-9471-5939CB6B0006}.Release|x64.Build.0 = Release|x64 - {2F89A7C5-CA4F-4D77-A728-6702D9F33F9F}.Debug|Win32.ActiveCfg = Debug|Win32 - {2F89A7C5-CA4F-4D77-A728-6702D9F33F9F}.Debug|Win32.Build.0 = Debug|Win32 - {2F89A7C5-CA4F-4D77-A728-6702D9F33F9F}.Debug|x64.ActiveCfg = Debug|Win32 - {2F89A7C5-CA4F-4D77-A728-6702D9F33F9F}.Release|Win32.ActiveCfg = Release|Win32 - {2F89A7C5-CA4F-4D77-A728-6702D9F33F9F}.Release|Win32.Build.0 = Release|Win32 - {2F89A7C5-CA4F-4D77-A728-6702D9F33F9F}.Release|x64.ActiveCfg = Release|x64 - {2F89A7C5-CA4F-4D77-A728-6702D9F33F9F}.Release|x64.Build.0 = Release|x64 - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection -EndGlobal diff --git a/subtree/rabit/windows/rabit/rabit.vcxproj b/subtree/rabit/windows/rabit/rabit.vcxproj deleted file mode 100644 index c670484d2..000000000 --- a/subtree/rabit/windows/rabit/rabit.vcxproj +++ /dev/null @@ -1,133 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {D7B77D06-4F5F-4BD7-B81E-7CC8EBBE684F} - rabit - - - - StaticLibrary - true - MultiByte - - - StaticLibrary - true - MultiByte - - - StaticLibrary - false - true - MultiByte - - - StaticLibrary - false - true - MultiByte - - - - - - - - - - - - - - - - - - - - - Level3 - Disabled - - - true - - - - - Level3 - Disabled - - - true - - - - - Level3 - MaxSpeed - true - true - MultiThreaded - - - true - true - true - - - - - Level3 - MaxSpeed - true - true - ..\..\include;%(AdditionalIncludeDirectories) - MultiThreaded - - - true - true - true - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/subtree/rabit/windows/rabit_wrapper/rabit_wrapper.vcxproj b/subtree/rabit/windows/rabit_wrapper/rabit_wrapper.vcxproj deleted file mode 100644 index 73eb5abb4..000000000 --- a/subtree/rabit/windows/rabit_wrapper/rabit_wrapper.vcxproj +++ /dev/null @@ -1,121 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {2F89A7C5-CA4F-4D77-A728-6702D9F33F9F} - rabit_wrapper - - - - Application - true - MultiByte - - - Application - true - MultiByte - - - DynamicLibrary - false - true - MultiByte - - - DynamicLibrary - false - true - MultiByte - - - - - - - - - - - - - - - - - - - - - Level3 - Disabled - - - true - - - - - Level3 - Disabled - - - true - - - - - Level3 - MaxSpeed - true - true - - - true - true - true - ..\..\x64\Release\rabit.lib;%(AdditionalDependencies) - - - - - Level3 - MaxSpeed - true - true - ..\..\include - - - true - true - true - $(OutDir)\rabit.lib;%(AdditionalDependencies) - - - - - - - - - - - - \ No newline at end of file diff --git a/subtree/rabit/wrapper/rabit.py b/subtree/rabit/wrapper/rabit.py deleted file mode 100644 index 91ce3e6ae..000000000 --- a/subtree/rabit/wrapper/rabit.py +++ /dev/null @@ -1,327 +0,0 @@ -""" -Reliable Allreduce and Broadcast Library. 
-
-Author: Tianqi Chen
-"""
-# pylint: disable=unused-argument,invalid-name,global-statement,dangerous-default-value,
-import cPickle as pickle
-import ctypes
-import os
-import sys
-import warnings
-import numpy as np
-
-# version information about the doc
-__version__ = '1.0'
-
-if os.name == 'nt':
-    WRAPPER_PATH = os.path.dirname(__file__) + '\\..\\windows\\x64\\Release\\rabit_wrapper%s.dll'
-else:
-    WRAPPER_PATH = os.path.dirname(__file__) + '/librabit_wrapper%s.so'
-
-_LIB = None
-
-# load the rabit library
-def _loadlib(lib='standard'):
-    """Load rabit library."""
-    global _LIB
-    if _LIB is not None:
-        warnings.warn('rabit.init call was ignored because rabit has'
-                      ' already been initialized', stacklevel=2)
-        return
-    if lib == 'standard':
-        _LIB = ctypes.cdll.LoadLibrary(WRAPPER_PATH % '')
-    elif lib == 'mock':
-        _LIB = ctypes.cdll.LoadLibrary(WRAPPER_PATH % '_mock')
-    elif lib == 'mpi':
-        _LIB = ctypes.cdll.LoadLibrary(WRAPPER_PATH % '_mpi')
-    else:
-        raise Exception('unknown rabit lib %s, can be standard, mock, mpi' % lib)
-    _LIB.RabitGetRank.restype = ctypes.c_int
-    _LIB.RabitGetWorldSize.restype = ctypes.c_int
-    _LIB.RabitVersionNumber.restype = ctypes.c_int
-
-def _unloadlib():
-    """Unload rabit library."""
-    global _LIB
-    del _LIB
-    _LIB = None
-
-# reduction operators
-MAX = 0
-MIN = 1
-SUM = 2
-BITOR = 3
-
-def init(args=None, lib='standard'):
-    """Initialize the rabit module; call this once before using anything.
-
-    Parameters
-    ----------
-    args: list of str, optional
-        The list of arguments used to initialize rabit;
-        usually you need to pass in sys.argv.
-        Defaults to sys.argv when it is None.
-    lib: {'standard', 'mock', 'mpi'}
-        Type of library we want to load.
-    """
-    if args is None:
-        args = sys.argv
-    _loadlib(lib)
-    arr = (ctypes.c_char_p * len(args))()
-    arr[:] = args
-    _LIB.RabitInit(len(args), arr)
-
-def finalize():
-    """Finalize the rabit engine.
-
-    Call this function after you have finished all jobs.
-    """
-    _LIB.RabitFinalize()
-    _unloadlib()
-
-def get_rank():
-    """Get rank of current process.
-
-    Returns
-    -------
-    rank : int
-        Rank of current process.
-    """
-    ret = _LIB.RabitGetRank()
-    return ret
-
-def get_world_size():
-    """Get the total number of workers.
-
-    Returns
    -------
-    n : int
-        Total number of processes.
-    """
-    ret = _LIB.RabitGetWorldSize()
-    return ret
-
-def tracker_print(msg):
-    """Print message to the tracker.
-
-    This function can be used to communicate progress information
-    to the tracker.
-
-    Parameters
-    ----------
-    msg : str
-        The message to be printed to the tracker.
-    """
-    if not isinstance(msg, str):
-        msg = str(msg)
-    _LIB.RabitTrackerPrint(ctypes.c_char_p(msg))
-
-def get_processor_name():
-    """Get the processor name.
-
-    Returns
-    -------
-    name : str
-        the name of the processor (host)
-    """
-    mxlen = 256
-    length = ctypes.c_ulong()
-    buf = ctypes.create_string_buffer(mxlen)
-    _LIB.RabitGetProcessorName(buf, ctypes.byref(length), mxlen)
-    return buf.value
-
-def broadcast(data, root):
-    """Broadcast object from one node to all other nodes.
-
-    Parameters
-    ----------
-    data : any type that can be pickled
-        Input data; if current rank does not equal root, this can be None
-    root : int
-        Rank of the node to broadcast data from.
-
-    Returns
-    -------
-    object : same type as data on the root node
-        the result of broadcast.
- """ - rank = get_rank() - length = ctypes.c_ulong() - if root == rank: - assert data is not None, 'need to pass in data when broadcasting' - s = pickle.dumps(data, protocol=pickle.HIGHEST_PROTOCOL) - length.value = len(s) - # run first broadcast - _LIB.RabitBroadcast(ctypes.byref(length), - ctypes.sizeof(ctypes.c_ulong), root) - if root != rank: - dptr = (ctypes.c_char * length.value)() - # run second - _LIB.RabitBroadcast(ctypes.cast(dptr, ctypes.c_void_p), - length.value, root) - data = pickle.loads(dptr.raw) - del dptr - else: - _LIB.RabitBroadcast(ctypes.cast(ctypes.c_char_p(s), ctypes.c_void_p), - length.value, root) - del s - return data - -# enumeration of dtypes -DTYPE_ENUM__ = { - np.dtype('int8') : 0, - np.dtype('uint8') : 1, - np.dtype('int32') : 2, - np.dtype('uint32') : 3, - np.dtype('int64') : 4, - np.dtype('uint64') : 5, - np.dtype('float32') : 6, - np.dtype('float64') : 7 -} - -def allreduce(data, op, prepare_fun=None): - """Perform allreduce, return the result. - - Parameters - ---------- - data: numpy array - Input data. - op: int - Reduction operators, can be MIN, MAX, SUM, BITOR - prepare_fun: function - Lazy preprocessing function, if it is not None, prepare_fun(data) - will be called by the function before performing allreduce, to intialize the data - If the result of Allreduce can be recovered directly, - then prepare_fun will NOT be called - - Returns - ------- - result : array_like - The result of allreduce, have same shape as data - - Notes - ----- - This function is not thread-safe. - """ - if not isinstance(data, np.ndarray): - raise Exception('allreduce only takes in numpy.ndarray') - buf = data.ravel() - if buf.base is data.base: - buf = buf.copy() - if buf.dtype not in DTYPE_ENUM__: - raise Exception('data type %s not supported' % str(buf.dtype)) - if prepare_fun is None: - _LIB.RabitAllreduce(buf.ctypes.data_as(ctypes.c_void_p), - buf.size, DTYPE_ENUM__[buf.dtype], - op, None, None) - else: - func_ptr = ctypes.CFUNCTYPE(None, ctypes.c_void_p) - def pfunc(args): - """prepare function.""" - prepare_fun(data) - _LIB.RabitAllreduce(buf.ctypes.data_as(ctypes.c_void_p), - buf.size, DTYPE_ENUM__[buf.dtype], - op, func_ptr(pfunc), None) - return buf - - -def _load_model(ptr, length): - """ - Internal function used by the module, - unpickle a model from a buffer specified by ptr, length - Arguments: - ptr: ctypes.POINTER(ctypes._char) - pointer to the memory region of buffer - length: int - the length of buffer - """ - data = (ctypes.c_char * length).from_address(ctypes.addressof(ptr.contents)) - return pickle.loads(data.raw) - -def load_checkpoint(with_local=False): - """Load latest check point. 
-
-    Parameters
-    ----------
-    with_local: bool, optional
-        whether the checkpoint contains the local model
-
-    Returns
-    -------
-    tuple : tuple
-        if with_local: return (version, global_model, local_model)
-        else return (version, global_model)
-        if the returned version == 0, no model has been checkpointed yet
-        and the returned global_model, local_model will be None
-    """
-    gptr = ctypes.POINTER(ctypes.c_char)()
-    global_len = ctypes.c_ulong()
-    if with_local:
-        lptr = ctypes.POINTER(ctypes.c_char)()
-        local_len = ctypes.c_ulong()
-        version = _LIB.RabitLoadCheckPoint(
-            ctypes.byref(gptr),
-            ctypes.byref(global_len),
-            ctypes.byref(lptr),
-            ctypes.byref(local_len))
-        if version == 0:
-            return (version, None, None)
-        return (version,
-                _load_model(gptr, global_len.value),
-                _load_model(lptr, local_len.value))
-    else:
-        version = _LIB.RabitLoadCheckPoint(
-            ctypes.byref(gptr),
-            ctypes.byref(global_len),
-            None, None)
-        if version == 0:
-            return (version, None)
-        return (version,
-                _load_model(gptr, global_len.value))
-
-def checkpoint(global_model, local_model=None):
-    """Checkpoint the model.
-
-    This means we have finished a stage of execution.
-    Every time we call checkpoint, there is a version number which increases by one.
-
-    Parameters
-    ----------
-    global_model: any type that can be pickled
-        globally shared model/state when calling this function;
-        the caller needs to guarantee that global_model is the same on all nodes
-
-    local_model: any type that can be pickled
-        Local model that is specific to the current node/rank.
-        This can be None when no local state is needed.
-
-    Notes
-    -----
-    local_model requires explicit replication of the model for fault-tolerance,
-    which brings replication cost to the checkpoint function,
-    while global_model does not need explicit replication.
-    It is recommended to use global_model if possible.
-    """
-    sglobal = pickle.dumps(global_model)
-    if local_model is None:
-        _LIB.RabitCheckPoint(sglobal, len(sglobal), None, 0)
-        del sglobal
-    else:
-        slocal = pickle.dumps(local_model)
-        _LIB.RabitCheckPoint(sglobal, len(sglobal), slocal, len(slocal))
-        del slocal
-        del sglobal
-
-def version_number():
-    """Returns the version number of the currently stored model.
-
-    This means how many calls to CheckPoint we have made so far.
- - Returns - ------- - version : int - Version number of currently stored model - """ - ret = _LIB.RabitVersionNumber() - return ret diff --git a/subtree/rabit/wrapper/rabit_wrapper.cc b/subtree/rabit/wrapper/rabit_wrapper.cc deleted file mode 100644 index 7025b3ffe..000000000 --- a/subtree/rabit/wrapper/rabit_wrapper.cc +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright by Contributors -// implementations in ctypes -#define _CRT_SECURE_NO_WARNINGS -#define _CRT_SECURE_NO_DEPRECATE - -#include -#include -#include "../include/rabit.h" -#include "./rabit_wrapper.h" -namespace rabit { -namespace wrapper { -// helper use to avoid BitOR operator -template -struct FHelper { - inline static void - Allreduce(DType *senrecvbuf_, - size_t count, - void (*prepare_fun)(void *arg), - void *prepare_arg) { - rabit::Allreduce(senrecvbuf_, count, - prepare_fun, prepare_arg); - } -}; -template -struct FHelper { - inline static void - Allreduce(DType *senrecvbuf_, - size_t count, - void (*prepare_fun)(void *arg), - void *prepare_arg) { - utils::Error("DataType does not support bitwise or operation"); - } -}; -template -inline void Allreduce_(void *sendrecvbuf_, - size_t count, - engine::mpi::DataType enum_dtype, - void (*prepare_fun)(void *arg), - void *prepare_arg) { - using namespace engine::mpi; - switch (enum_dtype) { - case kChar: - rabit::Allreduce - (static_cast(sendrecvbuf_), - count, prepare_fun, prepare_arg); - return; - case kUChar: - rabit::Allreduce - (static_cast(sendrecvbuf_), - count, prepare_fun, prepare_arg); - return; - case kInt: - rabit::Allreduce - (static_cast(sendrecvbuf_), - count, prepare_fun, prepare_arg); - return; - case kUInt: - rabit::Allreduce - (static_cast(sendrecvbuf_), - count, prepare_fun, prepare_arg); - return; - case kLong: - rabit::Allreduce - (static_cast(sendrecvbuf_), // NOLINT(*) - count, prepare_fun, prepare_arg); - return; - case kULong: - rabit::Allreduce - (static_cast(sendrecvbuf_), // NOLINT(*) - count, prepare_fun, prepare_arg); - return; - case kFloat: - FHelper::Allreduce - (static_cast(sendrecvbuf_), - count, prepare_fun, prepare_arg); - return; - case kDouble: - FHelper::Allreduce - (static_cast(sendrecvbuf_), - count, prepare_fun, prepare_arg); - return; - default: utils::Error("unknown data_type"); - } -} -inline void Allreduce(void *sendrecvbuf, - size_t count, - engine::mpi::DataType enum_dtype, - engine::mpi::OpType enum_op, - void (*prepare_fun)(void *arg), - void *prepare_arg) { - using namespace engine::mpi; - switch (enum_op) { - case kMax: - Allreduce_ - (sendrecvbuf, - count, enum_dtype, - prepare_fun, prepare_arg); - return; - case kMin: - Allreduce_ - (sendrecvbuf, - count, enum_dtype, - prepare_fun, prepare_arg); - return; - case kSum: - Allreduce_ - (sendrecvbuf, - count, enum_dtype, - prepare_fun, prepare_arg); - return; - case kBitwiseOR: - Allreduce_ - (sendrecvbuf, - count, enum_dtype, - prepare_fun, prepare_arg); - return; - default: utils::Error("unknown enum_op"); - } -} -// temporal memory for global and local model -std::string global_buffer, local_buffer; -// wrapper for serialization -struct ReadWrapper : public Serializable { - std::string *p_str; - explicit ReadWrapper(std::string *p_str) - : p_str(p_str) {} - virtual void Load(Stream *fi) { - uint64_t sz; - utils::Assert(fi->Read(&sz, sizeof(sz)) != 0, - "Read pickle string"); - p_str->resize(sz); - if (sz != 0) { - utils::Assert(fi->Read(&(*p_str)[0], sizeof(char) * sz) != 0, - "Read pickle string"); - } - } - virtual void Save(Stream *fo) const { - utils::Error("not 
implemented"); - } -}; -struct WriteWrapper : public Serializable { - const char *data; - size_t length; - explicit WriteWrapper(const char *data, - size_t length) - : data(data), length(length) { - } - virtual void Load(Stream *fi) { - utils::Error("not implemented"); - } - virtual void Save(Stream *fo) const { - uint64_t sz = static_cast(length); - fo->Write(&sz, sizeof(sz)); - fo->Write(data, length * sizeof(char)); - } -}; -} // namespace wrapper -} // namespace rabit -extern "C" { - void RabitInit(int argc, char *argv[]) { - rabit::Init(argc, argv); - } - void RabitFinalize(void) { - rabit::Finalize(); - } - int RabitGetRank(void) { - return rabit::GetRank(); - } - int RabitGetWorldSize(void) { - return rabit::GetWorldSize(); - } - void RabitTrackerPrint(const char *msg) { - std::string m(msg); - rabit::TrackerPrint(m); - } - void RabitGetProcessorName(char *out_name, - rbt_ulong *out_len, - rbt_ulong max_len) { - std::string s = rabit::GetProcessorName(); - if (s.length() > max_len) { - s.resize(max_len - 1); - } - strcpy(out_name, s.c_str()); // NOLINT(*) - *out_len = static_cast(s.length()); - } - void RabitBroadcast(void *sendrecv_data, - rbt_ulong size, int root) { - rabit::Broadcast(sendrecv_data, size, root); - } - void RabitAllreduce(void *sendrecvbuf, - size_t count, - int enum_dtype, - int enum_op, - void (*prepare_fun)(void *arg), - void *prepare_arg) { - rabit::wrapper::Allreduce - (sendrecvbuf, count, - static_cast(enum_dtype), - static_cast(enum_op), - prepare_fun, prepare_arg); - } - int RabitLoadCheckPoint(char **out_global_model, - rbt_ulong *out_global_len, - char **out_local_model, - rbt_ulong *out_local_len) { - using rabit::BeginPtr; - using namespace rabit::wrapper; - ReadWrapper sg(&global_buffer); - ReadWrapper sl(&local_buffer); - int version; - if (out_local_model == NULL) { - version = rabit::LoadCheckPoint(&sg, NULL); - *out_global_model = BeginPtr(global_buffer); - *out_global_len = static_cast(global_buffer.length()); - } else { - version = rabit::LoadCheckPoint(&sg, &sl); - *out_global_model = BeginPtr(global_buffer); - *out_global_len = static_cast(global_buffer.length()); - *out_local_model = BeginPtr(local_buffer); - *out_local_len = static_cast(local_buffer.length()); - } - return version; - } - void RabitCheckPoint(const char *global_model, - rbt_ulong global_len, - const char *local_model, - rbt_ulong local_len) { - using namespace rabit::wrapper; - WriteWrapper sg(global_model, global_len); - WriteWrapper sl(local_model, local_len); - if (local_model == NULL) { - rabit::CheckPoint(&sg, NULL); - } else { - rabit::CheckPoint(&sg, &sl); - } - } - int RabitVersionNumber(void) { - return rabit::VersionNumber(); - } -} diff --git a/subtree/rabit/wrapper/rabit_wrapper.h b/subtree/rabit/wrapper/rabit_wrapper.h deleted file mode 100644 index d00a31fda..000000000 --- a/subtree/rabit/wrapper/rabit_wrapper.h +++ /dev/null @@ -1,126 +0,0 @@ -/*! - * Copyright by Contributors - * \file rabit_wrapper.h - * \author Tianqi Chen - * \brief a C style wrapper of rabit - * can be used to create wrapper of other languages - */ -#ifndef RABIT_WRAPPER_H_ -#define RABIT_WRAPPER_H_ -#ifdef _MSC_VER -#define RABIT_DLL __declspec(dllexport) -#else -#define RABIT_DLL -#endif -// manually define unsign long -typedef unsigned long rbt_ulong; // NOLINT(*) - -#ifdef __cplusplus -extern "C" { -#endif -/*! 
- * \brief intialize the rabit module, call this once before using anything - * \param argc number of arguments in argv - * \param argv the array of input arguments - */ - RABIT_DLL void RabitInit(int argc, char *argv[]); - /*! - * \brief finalize the rabit engine, call this function after you finished all jobs - */ - RABIT_DLL void RabitFinalize(void); - /*! \brief get rank of current process */ - RABIT_DLL int RabitGetRank(void); - /*! \brief get total number of process */ - RABIT_DLL int RabitGetWorldSize(void); - /*! - * \brief print the msg to the tracker, - * this function can be used to communicate the information of the progress to - * the user who monitors the tracker - * \param msg the message to be printed - */ - RABIT_DLL void RabitTrackerPrint(const char *msg); - /*! - * \brief get name of processor - * \param out_name hold output string - * \param out_len hold length of output string - * \param max_len maximum buffer length of input - */ - RABIT_DLL void RabitGetProcessorName(char *out_name, - rbt_ulong *out_len, - rbt_ulong max_len); - /*! - * \brief broadcast an memory region to all others from root - * - * Example: int a = 1; Broadcast(&a, sizeof(a), root); - * \param sendrecv_data the pointer to send or recive buffer, - * \param size the size of the data - * \param root the root of process - */ - RABIT_DLL void RabitBroadcast(void *sendrecv_data, - rbt_ulong size, int root); - /*! - * \brief perform in-place allreduce, on sendrecvbuf - * this function is NOT thread-safe - * - * Example Usage: the following code gives sum of the result - * vector data(10); - * ... - * Allreduce(&data[0], data.size()); - * ... - * \param sendrecvbuf buffer for both sending and recving data - * \param count number of elements to be reduced - * \param enum_dtype the enumeration of data type, see rabit::engine::mpi::DataType in engine.h of rabit include - * \param enum_op the enumeration of operation type, see rabit::engine::mpi::OpType in engine.h of rabit - * \param prepare_fun Lazy preprocessing function, if it is not NULL, prepare_fun(prepare_arg) - * will be called by the function before performing Allreduce, to intialize the data in sendrecvbuf_. - * If the result of Allreduce can be recovered directly, then prepare_func will NOT be called - * \param prepare_arg argument used to passed into the lazy preprocessing function - */ - RABIT_DLL void RabitAllreduce(void *sendrecvbuf, - size_t count, - int enum_dtype, - int enum_op, - void (*prepare_fun)(void *arg), - void *prepare_arg); - - /*! - * \brief load latest check point - * \param out_global_model hold output of serialized global_model - * \param out_global_len the output length of serialized global model - * \param out_local_model hold output of serialized local_model, can be NULL - * \param out_local_len the output length of serialized local model, can be NULL - * - * \return the version number of check point loaded - * if returned version == 0, this means no model has been CheckPointed - * nothing will be touched - */ - RABIT_DLL int RabitLoadCheckPoint(char **out_global_model, - rbt_ulong *out_global_len, - char **out_local_model, - rbt_ulong *out_local_len); - /*! 
- * \brief checkpoint the model, meaning we have finished a stage of execution;
- *   every time we call CheckPoint, there is a version number which increases by one
- *
- * \param global_model holds the content of the serialized global_model
- * \param global_len the content length of the serialized global model
- * \param local_model holds the content of the serialized local_model, can be NULL
- * \param local_len the content length of the serialized local model, can be NULL
- *
- * NOTE: local_model requires explicit replication of the model for fault-tolerance, which will
- *   bring replication cost to the CheckPoint function. global_model does not need explicit
- *   replication, so only CheckPoint with a global_model if possible
- */
- RABIT_DLL void RabitCheckPoint(const char *global_model,
-                                rbt_ulong global_len,
-                                const char *local_model,
-                                rbt_ulong local_len);
- /*!
-  * \return version number of the currently stored model,
-  *   which means how many calls to CheckPoint we have made so far
-  */
- RABIT_DLL int RabitVersionNumber(void);
-#ifdef __cplusplus
-} // C
-#endif
-#endif  // RABIT_WRAPPER_H_
diff --git a/subtree/rabit/yarn/.gitignore b/subtree/rabit/yarn/.gitignore
deleted file mode 100644
index 1162c62ea..000000000
--- a/subtree/rabit/yarn/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-bin
-.classpath
-.project
-*.jar
diff --git a/subtree/rabit/yarn/README.md b/subtree/rabit/yarn/README.md
deleted file mode 100644
index a1f924fd9..000000000
--- a/subtree/rabit/yarn/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-rabit-yarn
-=====
-* This folder contains the application code that allows rabit to run on YARN.
-* You can use [../tracker/rabit_yarn.py](../tracker/rabit_yarn.py) to submit the job
-  - run ```./build.sh``` to build the jar before using the script
diff --git a/subtree/rabit/yarn/build.sh b/subtree/rabit/yarn/build.sh
deleted file mode 100755
index 8908cafdd..000000000
--- a/subtree/rabit/yarn/build.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-if [ ! -d bin ]; then
-    mkdir bin
-fi
-
-CPATH=`${HADOOP_HOME}/bin/hadoop classpath`
-javac -cp $CPATH -d bin src/org/apache/hadoop/yarn/rabit/*
-jar cf rabit-yarn.jar -C bin .
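
The C API declared in rabit_wrapper.h above is what wrapper/rabit.py binds to through ctypes. As a minimal sketch of how that deleted Python wrapper was driven (assuming the module is importable as `rabit`, its shared library is built, and the workers are launched through one of the tracker scripts such as tracker/rabit_demo.py):

```python
import numpy as np
import rabit  # the wrapper/rabit.py module removed in this patch

rabit.init()  # loads librabit_wrapper and calls RabitInit with sys.argv
rank = rabit.get_rank()
nworker = rabit.get_world_size()

# every worker contributes a vector filled with its rank; SUM-reduce them
data = np.full(3, rank, dtype='float32')
data = rabit.allreduce(data, rabit.SUM)

# rank 0 pickles and broadcasts an object; the other ranks pass None
msg = rabit.broadcast('hello from rank 0' if rank == 0 else None, root=0)

rabit.tracker_print('rank %d of %d: %s %s' % (rank, nworker, str(data), msg))
rabit.finalize()  # calls RabitFinalize and unloads the library
```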
diff --git a/subtree/rabit/yarn/run_hdfs_prog.py b/subtree/rabit/yarn/run_hdfs_prog.py
deleted file mode 100755
index d3962bfa6..000000000
--- a/subtree/rabit/yarn/run_hdfs_prog.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python
-"""
-This script helps set up the CLASSPATH environment for HDFS before running
-a program that links with libhdfs.
-"""
-import glob
-import sys
-import os
-import subprocess
-
-if len(sys.argv) < 2:
-    sys.exit('Usage: run_hdfs_prog.py command-to-run [arguments...]')
-
-hadoop_home = os.getenv('HADOOP_HOME')
-hdfs_home = os.getenv('HADOOP_HDFS_HOME')
-java_home = os.getenv('JAVA_HOME')
-if hadoop_home is None:
-    hadoop_home = os.getenv('HADOOP_PREFIX')
-assert hadoop_home is not None, 'need to set HADOOP_HOME'
-assert hdfs_home is not None, 'need to set HADOOP_HDFS_HOME'
-assert java_home is not None, 'need to set JAVA_HOME'
-
-(classpath, err) = subprocess.Popen('%s/bin/hadoop classpath' % hadoop_home,
-                                    stdout=subprocess.PIPE, shell=True,
-                                    env=os.environ).communicate()
-cpath = []
-for f in classpath.split(':'):
-    cpath += glob.glob(f)
-
-lpath = []
-lpath.append('%s/lib/native' % hdfs_home)
-lpath.append('%s/jre/lib/amd64/server' % java_home)
-
-env = os.environ.copy()
-env['CLASSPATH'] = '${CLASSPATH}:' + (':'.join(cpath))
-
-# setup hdfs options
-if 'rabit_hdfs_opts' in env:
-    env['LIBHDFS_OPTS'] = env['rabit_hdfs_opts']
-elif 'LIBHDFS_OPTS' not in env:
-    env['LIBHDFS_OPTS'] = '-Xmx128m'
-
-env['LD_LIBRARY_PATH'] = '${LD_LIBRARY_PATH}:' + (':'.join(lpath))
-ret = subprocess.call(args=sys.argv[1:], env=env)
-sys.exit(ret)
diff --git a/subtree/rabit/yarn/src/org/apache/hadoop/yarn/rabit/ApplicationMaster.java b/subtree/rabit/yarn/src/org/apache/hadoop/yarn/rabit/ApplicationMaster.java
deleted file mode 100644
index 47432aa26..000000000
--- a/subtree/rabit/yarn/src/org/apache/hadoop/yarn/rabit/ApplicationMaster.java
+++ /dev/null
@@ -1,570 +0,0 @@
-package org.apache.hadoop.yarn.rabit;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.List;
-import java.util.Map;
-import java.util.Queue;
-import java.util.Collection;
-import java.util.Collections;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.yarn.util.ConverterUtils;
-import org.apache.hadoop.yarn.util.Records;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.api.ApplicationConstants;
-import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
-import org.apache.hadoop.yarn.api.records.Container;
-import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
-import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
-import org.apache.hadoop.yarn.api.records.ContainerState;
-import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
-import org.apache.hadoop.yarn.api.records.LocalResource;
-import org.apache.hadoop.yarn.api.records.LocalResourceType;
-import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
-import org.apache.hadoop.yarn.api.records.Priority;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerStatus;
-import org.apache.hadoop.yarn.api.records.NodeReport;
-import
org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest; -import org.apache.hadoop.yarn.client.api.async.NMClientAsync; -import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync; -import org.apache.hadoop.security.Credentials; -import org.apache.hadoop.security.UserGroupInformation; - -/** - * application master for allocating resources of rabit client - * - * @author Tianqi Chen - */ -public class ApplicationMaster { - // logger - private static final Log LOG = LogFactory.getLog(ApplicationMaster.class); - // configuration - private Configuration conf = new YarnConfiguration(); - // hdfs handler - private FileSystem dfs; - - // number of cores allocated for each task - private int numVCores = 1; - // memory needed requested for the task - private int numMemoryMB = 10; - // priority of the app master - private int appPriority = 0; - // total number of tasks - private int numTasks = 1; - // maximum number of attempts to try in each task - private int maxNumAttempt = 3; - // command to launch - private String command = ""; - - // username - private String userName = ""; - // user credentials - private Credentials credentials = null; - // security tokens - private ByteBuffer securityTokens = null; - // application tracker hostname - private String appHostName = ""; - // tracker URL to do - private String appTrackerUrl = ""; - // tracker port - private int appTrackerPort = 0; - - // whether we start to abort the application, due to whatever fatal reasons - private boolean startAbort = false; - // worker resources - private Map workerResources = new java.util.HashMap(); - // record the aborting reason - private String abortDiagnosis = ""; - // resource manager - private AMRMClientAsync rmClient = null; - // node manager - private NMClientAsync nmClient = null; - - // list of tasks that pending for resources to be allocated - private final Queue pendingTasks = new java.util.LinkedList(); - // map containerId->task record of tasks that was running - private final Map runningTasks = new java.util.HashMap(); - // collection of tasks - private final Collection finishedTasks = new java.util.LinkedList(); - // collection of killed tasks - private final Collection killedTasks = new java.util.LinkedList(); - - public static void main(String[] args) throws Exception { - new ApplicationMaster().run(args); - } - - private ApplicationMaster() throws IOException { - dfs = FileSystem.get(conf); - userName = UserGroupInformation.getCurrentUser().getShortUserName(); - credentials = UserGroupInformation.getCurrentUser().getCredentials(); - DataOutputBuffer buffer = new DataOutputBuffer(); - this.credentials.writeTokenStorageToStream(buffer); - this.securityTokens = ByteBuffer.wrap(buffer.getData()); - } - /** - * get integer argument from environment variable - * - * @param name - * name of key - * @param required - * whether this is required - * @param defv - * default value - * @return the requested result - */ - private int getEnvInteger(String name, boolean required, int defv) - throws IOException { - String value = System.getenv(name); - if (value == null) { - if (required) { - throw new IOException("environment variable " + name - + " not set"); - } else { - return defv; - } - } - return Integer.valueOf(value); - } - - /** - * initialize from arguments and command lines - * - * @param args - */ - private void initArgs(String args[]) throws IOException { - LOG.info("Start AM as user=" + this.userName); - // get user name - userName = UserGroupInformation.getCurrentUser().getShortUserName(); - // 
cached maps - Map cacheFiles = new java.util.HashMap(); - for (int i = 0; i < args.length; ++i) { - if (args[i].equals("-file")) { - String[] arr = args[++i].split("#"); - Path path = new Path(arr[0]); - if (arr.length == 1) { - cacheFiles.put(path.getName(), path); - } else { - cacheFiles.put(arr[1], path); - } - } else { - this.command += args[i] + " "; - } - } - for (Map.Entry e : cacheFiles.entrySet()) { - LocalResource r = Records.newRecord(LocalResource.class); - FileStatus status = dfs.getFileStatus(e.getValue()); - r.setResource(ConverterUtils.getYarnUrlFromPath(e.getValue())); - r.setSize(status.getLen()); - r.setTimestamp(status.getModificationTime()); - r.setType(LocalResourceType.FILE); - r.setVisibility(LocalResourceVisibility.APPLICATION); - workerResources.put(e.getKey(), r); - } - numVCores = this.getEnvInteger("rabit_cpu_vcores", true, numVCores); - numMemoryMB = this.getEnvInteger("rabit_memory_mb", true, numMemoryMB); - numTasks = this.getEnvInteger("rabit_world_size", true, numTasks); - maxNumAttempt = this.getEnvInteger("rabit_max_attempt", false, - maxNumAttempt); - } - - /** - * called to start the application - */ - private void run(String args[]) throws Exception { - this.initArgs(args); - this.rmClient = AMRMClientAsync.createAMRMClientAsync(1000, - new RMCallbackHandler()); - this.nmClient = NMClientAsync - .createNMClientAsync(new NMCallbackHandler()); - this.rmClient.init(conf); - this.rmClient.start(); - this.nmClient.init(conf); - this.nmClient.start(); - RegisterApplicationMasterResponse response = this.rmClient - .registerApplicationMaster(this.appHostName, - this.appTrackerPort, this.appTrackerUrl); - - boolean success = false; - String diagnostics = ""; - try { - // list of tasks that waits to be submit - java.util.Collection tasks = new java.util.LinkedList(); - // add waiting tasks - for (int i = 0; i < this.numTasks; ++i) { - tasks.add(new TaskRecord(i)); - } - Resource maxResource = response.getMaximumResourceCapability(); - - if (maxResource.getMemory() < this.numMemoryMB) { - LOG.warn("[Rabit] memory requested exceed bound " - + maxResource.getMemory()); - this.numMemoryMB = maxResource.getMemory(); - } - if (maxResource.getVirtualCores() < this.numVCores) { - LOG.warn("[Rabit] memory requested exceed bound " - + maxResource.getVirtualCores()); - this.numVCores = maxResource.getVirtualCores(); - } - this.submitTasks(tasks); - LOG.info("[Rabit] ApplicationMaster started"); - while (!this.doneAllJobs()) { - try { - Thread.sleep(100); - } catch (InterruptedException e) { - } - } - assert (killedTasks.size() + finishedTasks.size() == numTasks); - success = finishedTasks.size() == numTasks; - LOG.info("Application completed. Stopping running containers"); - diagnostics = "Diagnostics." + ", num_tasks" + this.numTasks - + ", finished=" + this.finishedTasks.size() + ", failed=" - + this.killedTasks.size() + "\n" + this.abortDiagnosis; - nmClient.stop(); - LOG.info(diagnostics); - } catch (Exception e) { - diagnostics = e.toString(); - } - rmClient.unregisterApplicationMaster( - success ? 
FinalApplicationStatus.SUCCEEDED - : FinalApplicationStatus.FAILED, diagnostics, - appTrackerUrl); - if (!success) - throw new Exception("Application not successful"); - } - - /** - * check if the job finishes - * - * @return whether we finished all the jobs - */ - private synchronized boolean doneAllJobs() { - return pendingTasks.size() == 0 && runningTasks.size() == 0; - } - - /** - * submit tasks to request containers for the tasks - * - * @param tasks - * a collection of tasks we want to ask container for - */ - private synchronized void submitTasks(Collection tasks) { - for (TaskRecord r : tasks) { - Resource resource = Records.newRecord(Resource.class); - resource.setMemory(numMemoryMB); - resource.setVirtualCores(numVCores); - Priority priority = Records.newRecord(Priority.class); - priority.setPriority(this.appPriority); - r.containerRequest = new ContainerRequest(resource, null, null, - priority); - rmClient.addContainerRequest(r.containerRequest); - pendingTasks.add(r); - } - } - - /** - * launch the task on container - * - * @param container - * container to run the task - * @param task - * the task - */ - private void launchTask(Container container, TaskRecord task) { - task.container = container; - task.containerRequest = null; - ContainerLaunchContext ctx = Records - .newRecord(ContainerLaunchContext.class); - String cmd = - // use this to setup CLASSPATH correctly for libhdfs - this.command + " 1>" - + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout" - + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR - + "/stderr"; - ctx.setCommands(Collections.singletonList(cmd)); - ctx.setTokens(this.securityTokens); - LOG.info(workerResources); - ctx.setLocalResources(this.workerResources); - // setup environment variables - Map env = new java.util.HashMap(); - - // setup class path, this is kind of duplicated, ignoring - StringBuilder cpath = new StringBuilder("${CLASSPATH}:./*"); - for (String c : conf.getStrings( - YarnConfiguration.YARN_APPLICATION_CLASSPATH, - YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) { - String[] arrPath = c.split(":"); - for (String ps : arrPath) { - if (ps.endsWith("*.jar") || ps.endsWith("*")) { - ps = ps.substring(0, ps.lastIndexOf('*')); - String prefix = ps.substring(0, ps.lastIndexOf('/')); - if (ps.startsWith("$")) { - String[] arr =ps.split("/", 2); - if (arr.length != 2) continue; - try { - ps = System.getenv(arr[0].substring(1)) + '/' + arr[1]; - } catch (Exception e){ - continue; - } - } - File dir = new File(ps); - if (dir.isDirectory()) { - for (File f: dir.listFiles()) { - if (f.isFile() && f.getPath().endsWith(".jar")) { - cpath.append(":"); - cpath.append(prefix + '/' + f.getName()); - } - } - } - } else { - cpath.append(':'); - cpath.append(ps.trim()); - } - } - } - // already use hadoop command to get class path in worker, maybe a - // better solution in future - env.put("CLASSPATH", cpath.toString()); - //LOG.info("CLASSPATH =" + cpath.toString()); - // setup LD_LIBARY_PATH path for libhdfs - env.put("LD_LIBRARY_PATH", - "${LD_LIBRARY_PATH}:$HADOOP_HDFS_HOME/lib/native:$JAVA_HOME/jre/lib/amd64/server"); - env.put("PYTHONPATH", "${PYTHONPATH}:."); - // inherit all rabit variables - for (Map.Entry e : System.getenv().entrySet()) { - if (e.getKey().startsWith("rabit_")) { - env.put(e.getKey(), e.getValue()); - } - if (e.getKey() == "LIBHDFS_OPTS") { - env.put(e.getKey(), e.getValue()); - } - } - env.put("rabit_task_id", String.valueOf(task.taskId)); - env.put("rabit_num_trial", String.valueOf(task.attemptCounter)); - // 
ctx.setUser(userName); - ctx.setEnvironment(env); - synchronized (this) { - assert (!this.runningTasks.containsKey(container.getId())); - this.runningTasks.put(container.getId(), task); - this.nmClient.startContainerAsync(container, ctx); - } - } - - /** - * free the containers that have not yet been launched - * - * @param containers - */ - private synchronized void freeUnusedContainers( - Collection containers) { - } - - /** - * handle method for AMRMClientAsync.CallbackHandler container allocation - * - * @param containers - */ - private synchronized void onContainersAllocated(List containers) { - if (this.startAbort) { - this.freeUnusedContainers(containers); - return; - } - Collection freelist = new java.util.LinkedList(); - for (Container c : containers) { - TaskRecord task; - task = pendingTasks.poll(); - if (task == null) { - freelist.add(c); - continue; - } - this.launchTask(c, task); - } - this.freeUnusedContainers(freelist); - } - - /** - * start aborting the job - * - * @param msg - * the fatal message - */ - private synchronized void abortJob(String msg) { - if (!this.startAbort) - this.abortDiagnosis = msg; - this.startAbort = true; - for (TaskRecord r : this.runningTasks.values()) { - if (!r.abortRequested) { - nmClient.stopContainerAsync(r.container.getId(), - r.container.getNodeId()); - r.abortRequested = true; - } - } - this.killedTasks.addAll(this.pendingTasks); - for (TaskRecord r : this.pendingTasks) { - rmClient.removeContainerRequest(r.containerRequest); - } - this.pendingTasks.clear(); - LOG.info(msg); - } - - /** - * handle non fatal failures - * - * @param cid - */ - private synchronized void handleFailure(Collection failed) { - Collection tasks = new java.util.LinkedList(); - for (ContainerId cid : failed) { - TaskRecord r = runningTasks.remove(cid); - if (r == null) { - continue; - } - LOG.info("Task " - + r.taskId - + "failed on " - + r.container.getId() - + ". 
See LOG at : " - + String.format("http://%s/node/containerlogs/%s/" - + userName, r.container.getNodeHttpAddress(), - r.container.getId())); - r.attemptCounter += 1; - r.container = null; - tasks.add(r); - if (r.attemptCounter >= this.maxNumAttempt) { - this.abortJob("[Rabit] Task " + r.taskId + " failed more than " - + r.attemptCounter + "times"); - } - } - if (this.startAbort) { - this.killedTasks.addAll(tasks); - } else { - this.submitTasks(tasks); - } - } - - /** - * handle method for AMRMClientAsync.CallbackHandler container allocation - * - * @param status - * list of status - */ - private synchronized void onContainersCompleted(List status) { - Collection failed = new java.util.LinkedList(); - for (ContainerStatus s : status) { - assert (s.getState().equals(ContainerState.COMPLETE)); - int exstatus = s.getExitStatus(); - TaskRecord r = runningTasks.get(s.getContainerId()); - if (r == null) - continue; - if (exstatus == ContainerExitStatus.SUCCESS) { - finishedTasks.add(r); - runningTasks.remove(s.getContainerId()); - } else { - try { - if (exstatus == ContainerExitStatus.class.getField( - "KILLED_EXCEEDED_PMEM").getInt(null)) { - this.abortJob("[Rabit] Task " - + r.taskId - + " killed because of exceeding allocated physical memory"); - continue; - } - if (exstatus == ContainerExitStatus.class.getField( - "KILLED_EXCEEDED_VMEM").getInt(null)) { - this.abortJob("[Rabit] Task " - + r.taskId - + " killed because of exceeding allocated virtual memory"); - continue; - } - } catch (Exception e) { - } - LOG.info("[Rabit] Task " + r.taskId + " exited with status " - + exstatus + " Diagnostics:"+ s.getDiagnostics()); - failed.add(s.getContainerId()); - } - } - this.handleFailure(failed); - } - - /** - * callback handler for resource manager - */ - private class RMCallbackHandler implements AMRMClientAsync.CallbackHandler { - @Override - public float getProgress() { - return 1.0f - (float) (pendingTasks.size()) / numTasks; - } - - @Override - public void onContainersAllocated(List containers) { - ApplicationMaster.this.onContainersAllocated(containers); - } - - @Override - public void onContainersCompleted(List status) { - ApplicationMaster.this.onContainersCompleted(status); - } - - @Override - public void onError(Throwable ex) { - ApplicationMaster.this.abortJob("[Rabit] Resource manager Error " - + ex.toString()); - } - - @Override - public void onNodesUpdated(List nodereport) { - } - - @Override - public void onShutdownRequest() { - ApplicationMaster.this - .abortJob("[Rabit] Get shutdown request, start to shutdown..."); - } - } - - private class NMCallbackHandler implements NMClientAsync.CallbackHandler { - @Override - public void onContainerStarted(ContainerId cid, - Map services) { - LOG.debug("onContainerStarted Invoked"); - } - - @Override - public void onContainerStatusReceived(ContainerId cid, - ContainerStatus status) { - LOG.debug("onContainerStatusReceived Invoked"); - } - - @Override - public void onContainerStopped(ContainerId cid) { - LOG.debug("onContainerStopped Invoked"); - } - - @Override - public void onGetContainerStatusError(ContainerId cid, Throwable ex) { - LOG.debug("onGetContainerStatusError Invoked: " + ex.toString()); - ApplicationMaster.this - .handleFailure(Collections.singletonList(cid)); - } - - @Override - public void onStartContainerError(ContainerId cid, Throwable ex) { - LOG.debug("onStartContainerError Invoked: " + ex.toString()); - ApplicationMaster.this - .handleFailure(Collections.singletonList(cid)); - } - - @Override - public void 
onStopContainerError(ContainerId cid, Throwable ex) { - LOG.info("onStopContainerError Invoked: " + ex.toString()); - } - } -} diff --git a/subtree/rabit/yarn/src/org/apache/hadoop/yarn/rabit/Client.java b/subtree/rabit/yarn/src/org/apache/hadoop/yarn/rabit/Client.java deleted file mode 100644 index 9dbdc2619..000000000 --- a/subtree/rabit/yarn/src/org/apache/hadoop/yarn/rabit/Client.java +++ /dev/null @@ -1,269 +0,0 @@ -package org.apache.hadoop.yarn.rabit; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.Map; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.io.DataOutputBuffer; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.Credentials; -import org.apache.hadoop.yarn.api.ApplicationConstants; -import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.api.records.ApplicationReport; -import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; -import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; -import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; -import org.apache.hadoop.yarn.api.records.LocalResource; -import org.apache.hadoop.yarn.api.records.LocalResourceType; -import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; -import org.apache.hadoop.yarn.api.records.Resource; -import org.apache.hadoop.yarn.api.records.QueueInfo; -import org.apache.hadoop.yarn.api.records.YarnApplicationState; -import org.apache.hadoop.yarn.client.api.YarnClient; -import org.apache.hadoop.yarn.client.api.YarnClientApplication; -import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.util.ConverterUtils; -import org.apache.hadoop.yarn.util.Records; - -public class Client { - // logger - private static final Log LOG = LogFactory.getLog(Client.class); - // permission for temp file - private static final FsPermission permTemp = new FsPermission("777"); - // configuration - private YarnConfiguration conf = new YarnConfiguration(); - // hdfs handler - private FileSystem dfs; - // cached maps - private Map cacheFiles = new java.util.HashMap(); - // enviroment variable to setup cachefiles - private String cacheFileArg = ""; - // args to pass to application master - private String appArgs = ""; - // HDFS Path to store temporal result - private String tempdir = "/tmp"; - // user name - private String userName = ""; - // user credentials - private Credentials credentials = null; - // job name - private String jobName = ""; - // queue - private String queue = "default"; - /** - * constructor - * @throws IOException - */ - private Client() throws IOException { - conf.addResource(new Path(System.getenv("HADOOP_CONF_DIR") +"/core-site.xml")); - conf.addResource(new Path(System.getenv("HADOOP_CONF_DIR") +"/hdfs-site.xml")); - dfs = FileSystem.get(conf); - userName = UserGroupInformation.getCurrentUser().getShortUserName(); - credentials = UserGroupInformation.getCurrentUser().getCredentials(); - } - - /** - * setup security token given current user - * @return the ByeBuffer containing the security tokens - * @throws IOException - */ - private ByteBuffer setupTokens() throws IOException { - DataOutputBuffer buffer = new DataOutputBuffer(); - 
this.credentials.writeTokenStorageToStream(buffer); - return ByteBuffer.wrap(buffer.getData()); - } - - /** - * setup all the cached files - * - * @param fmaps - * the file maps - * @return the resource map - * @throws IOException - */ - private Map setupCacheFiles(ApplicationId appId) throws IOException { - // create temporary rabit directory - Path tmpPath = new Path(this.tempdir); - if (!dfs.exists(tmpPath)) { - dfs.mkdirs(tmpPath, permTemp); - LOG.info("HDFS temp directory do not exist, creating.. " + tmpPath); - } - tmpPath = new Path(tmpPath + "/temp-rabit-yarn-" + appId); - if (dfs.exists(tmpPath)) { - dfs.delete(tmpPath, true); - } - // create temporary directory - FileSystem.mkdirs(dfs, tmpPath, permTemp); - - StringBuilder cstr = new StringBuilder(); - Map rmap = new java.util.HashMap(); - for (Map.Entry e : cacheFiles.entrySet()) { - LocalResource r = Records.newRecord(LocalResource.class); - Path path = new Path(e.getValue()); - // copy local data to temporary folder in HDFS - if (!e.getValue().startsWith("hdfs://")) { - Path dst = new Path("hdfs://" + tmpPath + "/"+ path.getName()); - dfs.copyFromLocalFile(false, true, path, dst); - dfs.setPermission(dst, permTemp); - dfs.deleteOnExit(dst); - path = dst; - } - FileStatus status = dfs.getFileStatus(path); - r.setResource(ConverterUtils.getYarnUrlFromPath(path)); - r.setSize(status.getLen()); - r.setTimestamp(status.getModificationTime()); - r.setType(LocalResourceType.FILE); - r.setVisibility(LocalResourceVisibility.APPLICATION); - rmap.put(e.getKey(), r); - cstr.append(" -file \""); - cstr.append(path.toString()); - cstr.append('#'); - cstr.append(e.getKey()); - cstr.append("\""); - } - - dfs.deleteOnExit(tmpPath); - this.cacheFileArg = cstr.toString(); - return rmap; - } - - /** - * get the environment variables for container - * - * @return the env variable for child class - */ - private Map getEnvironment() { - // Setup environment variables - Map env = new java.util.HashMap(); - String cpath = "${CLASSPATH}:./*"; - for (String c : conf.getStrings( - YarnConfiguration.YARN_APPLICATION_CLASSPATH, - YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) { - cpath += ':'; - cpath += c.trim(); - } - env.put("CLASSPATH", cpath); - for (Map.Entry e : System.getenv().entrySet()) { - if (e.getKey().startsWith("rabit_")) { - env.put(e.getKey(), e.getValue()); - } - if (e.getKey() == "LIBHDFS_OPTS") { - env.put(e.getKey(), e.getValue()); - } - } - LOG.debug(env); - return env; - } - - /** - * initialize the settings - * - * @param args - */ - private void initArgs(String[] args) { - // directly pass all arguments except args0 - StringBuilder sargs = new StringBuilder(""); - for (int i = 0; i < args.length; ++i) { - if (args[i].equals("-file")) { - String[] arr = args[++i].split("#"); - if (arr.length == 1) { - cacheFiles.put(new Path(arr[0]).getName(), arr[0]); - } else { - cacheFiles.put(arr[1], arr[0]); - } - } else if(args[i].equals("-jobname")) { - this.jobName = args[++i]; - } else if(args[i].equals("-tempdir")) { - this.tempdir = args[++i]; - } else if(args[i].equals("-queue")) { - this.queue = args[++i]; - } else { - sargs.append(" "); - sargs.append(args[i]); - } - } - this.appArgs = sargs.toString(); - } - - private void run(String[] args) throws Exception { - if (args.length == 0) { - System.out.println("Usage: [options] [commands..]"); - System.out.println("options: [-file filename]"); - return; - } - this.initArgs(args); - // Create yarnClient - YarnClient yarnClient = YarnClient.createYarnClient(); - 
yarnClient.init(conf); - yarnClient.start(); - - // Create application via yarnClient - YarnClientApplication app = yarnClient.createApplication(); - - // Set up the container launch context for the application master - ContainerLaunchContext amContainer = Records - .newRecord(ContainerLaunchContext.class); - ApplicationSubmissionContext appContext = app - .getApplicationSubmissionContext(); - // Submit application - ApplicationId appId = appContext.getApplicationId(); - // setup security token - amContainer.setTokens(this.setupTokens()); - // setup cache-files and environment variables - amContainer.setLocalResources(this.setupCacheFiles(appId)); - amContainer.setEnvironment(this.getEnvironment()); - String cmd = "$JAVA_HOME/bin/java" - + " -Xmx900M" - + " org.apache.hadoop.yarn.rabit.ApplicationMaster" - + this.cacheFileArg + ' ' + this.appArgs + " 1>" - + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout" - + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"; - LOG.debug(cmd); - amContainer.setCommands(Collections.singletonList(cmd)); - - // Set up resource type requirements for ApplicationMaster - Resource capability = Records.newRecord(Resource.class); - capability.setMemory(1024); - capability.setVirtualCores(1); - LOG.info("jobname=" + this.jobName + ",username=" + this.userName); - - appContext.setApplicationName(jobName + ":RABIT-YARN"); - appContext.setAMContainerSpec(amContainer); - appContext.setResource(capability); - appContext.setQueue(queue); - //appContext.setUser(userName); - LOG.info("Submitting application " + appId); - yarnClient.submitApplication(appContext); - - ApplicationReport appReport = yarnClient.getApplicationReport(appId); - YarnApplicationState appState = appReport.getYarnApplicationState(); - while (appState != YarnApplicationState.FINISHED - && appState != YarnApplicationState.KILLED - && appState != YarnApplicationState.FAILED) { - Thread.sleep(100); - appReport = yarnClient.getApplicationReport(appId); - appState = appReport.getYarnApplicationState(); - } - - System.out.println("Application " + appId + " finished with" - + " state " + appState + " at " + appReport.getFinishTime()); - if (!appReport.getFinalApplicationStatus().equals( - FinalApplicationStatus.SUCCEEDED)) { - System.err.println(appReport.getDiagnostics()); - System.out.println("Available queues:"); - for (QueueInfo q : yarnClient.getAllQueues()) { - System.out.println(q.getQueueName()); - } - } - } - - public static void main(String[] args) throws Exception { - new Client().run(args); - } -} diff --git a/subtree/rabit/yarn/src/org/apache/hadoop/yarn/rabit/TaskRecord.java b/subtree/rabit/yarn/src/org/apache/hadoop/yarn/rabit/TaskRecord.java deleted file mode 100644 index c1b70d320..000000000 --- a/subtree/rabit/yarn/src/org/apache/hadoop/yarn/rabit/TaskRecord.java +++ /dev/null @@ -1,24 +0,0 @@ -package org.apache.hadoop.yarn.rabit; - -import org.apache.hadoop.yarn.api.records.Container; -import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest; - -/** - * data structure to hold the task information - */ -public class TaskRecord { - // task id of the task - public int taskId = 0; - // number of failed attempts to run the task - public int attemptCounter = 0; - // container request, can be null if task is already running - public ContainerRequest containerRequest = null; - // running container, can be null if the task is not launched - public Container container = null; - // whether we have requested abortion of this task - public boolean abortRequested = false; 
- - public TaskRecord(int taskId) { - this.taskId = taskId; - } -}
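
Taken together, RabitLoadCheckPoint/RabitCheckPoint in rabit_wrapper.h and the load_checkpoint/checkpoint wrappers in wrapper/rabit.py form the fault-tolerance loop that the ApplicationMaster above relies on when it relaunches a failed container: the restarted task reloads the last checkpoint instead of restarting from version 0. A minimal sketch of that recovery loop against the deleted Python API (the model shape and iteration count are illustrative only):

```python
import numpy as np
import rabit  # the wrapper/rabit.py module removed in this patch

rabit.init()
# version == 0 means nothing has been checkpointed yet;
# a relaunched worker instead resumes at the last completed stage
version, model = rabit.load_checkpoint()
if version == 0:
    model = np.zeros(4, dtype='float32')

for i in range(version, 10):
    # one stage of work: combine the per-worker updates
    update = np.ones(4, dtype='float32')
    model += rabit.allreduce(update, rabit.SUM)
    # increments the version and replicates model so peers can recover it
    rabit.checkpoint(model)

rabit.tracker_print('done, version=%d' % rabit.version_number())
rabit.finalize()
```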