From 80b0d06b7e3ba9802da762d07b80e60230015071 Mon Sep 17 00:00:00 2001 From: Boliang Chen Date: Sun, 11 Jan 2015 14:56:20 +0800 Subject: [PATCH] merge from tqchen --- tracker/rabit_hadoop.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tracker/rabit_hadoop.py b/tracker/rabit_hadoop.py index 135f13df5..7808f9143 100755 --- a/tracker/rabit_hadoop.py +++ b/tracker/rabit_hadoop.py @@ -12,8 +12,7 @@ import rabit_tracker as tracker #!!! Set path to hadoop and hadoop streaming jar here hadoop_binary = 'hadoop' -#hadoop_streaming_jar = None -hadoop_streaming_jar = '/home/likewise-open/APEXLAB/blchen/streaming.jar' +hadoop_streaming_jar = None # code hadoop_home = os.getenv('HADOOP_HOME') @@ -50,7 +49,7 @@ parser.add_argument('-f', '--files', default = [], action='append', ' You can also use it to manually cache files when auto_file_cache is off') parser.add_argument('--jobname', default='auto', help = 'customize jobname in tracker') parser.add_argument('--timeout', default=600000000, type=int, - help = 'timeout ((in milli seconds)) of each mapper job, automatically set to a very long time,'\ + help = 'timeout (in milli seconds) of each mapper job, automatically set to a very long time,'\ 'normally you do not need to set this ') parser.add_argument('-m', '--memory_mb', default=-1, type=int, help = 'maximum memory used by the process, Guide: set it large (near mapred.cluster.max.map.memory.mb)'\