From 697a01bfb4dcc8d747a5bca0c83dfed4206ef6e8 Mon Sep 17 00:00:00 2001
From: Boliang Chen
Date: Sat, 10 Jan 2015 10:54:12 +0800
Subject: [PATCH 1/9] har -> jar

---
 tracker/rabit_hadoop.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tracker/rabit_hadoop.py b/tracker/rabit_hadoop.py
index 59866d55a..bd0abc0c4 100755
--- a/tracker/rabit_hadoop.py
+++ b/tracker/rabit_hadoop.py
@@ -21,7 +21,7 @@ if hadoop_home != None:
     hadoop_binary = hadoop_home + '/bin/hadoop'
     assert os.path.exists(hadoop_binary), "HADDOP_HOME does not contain the hadoop binary"
     if hadoop_streaming_jar == None:
-        hadoop_streaming_jar = hadoop_home + '/lib/hadoop-streaming.har'
+        hadoop_streaming_jar = hadoop_home + '/lib/hadoop-streaming.jar'
     assert os.path.exists(hadoop_streaming_jar), "HADDOP_HOME does not contain the haddop streaming jar"
 
 if hadoop_binary == None or hadoop_streaming_jar == None:

From 7f5cb3aa0e64c88f05f10c26ae346e06b043280b Mon Sep 17 00:00:00 2001
From: Boliang Chen
Date: Sat, 10 Jan 2015 10:58:53 +0800
Subject: [PATCH 2/9] modify hs

---
 tracker/rabit_hadoop.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tracker/rabit_hadoop.py b/tracker/rabit_hadoop.py
index bd0abc0c4..01d242e31 100755
--- a/tracker/rabit_hadoop.py
+++ b/tracker/rabit_hadoop.py
@@ -25,7 +25,7 @@ if hadoop_home != None:
     assert os.path.exists(hadoop_streaming_jar), "HADDOP_HOME does not contain the haddop streaming jar"
 
 if hadoop_binary == None or hadoop_streaming_jar == None:
-    print 'Warning: Cannot auto-detect path to hadoop and streaming jar, need to set them via arguments -hs and -hb'
+    print 'Warning: Cannot auto-detect path to hadoop and hadoop-streaming jar, need to set them via arguments -hs and -hb'
     print '\tTo enable auto-detection, you can set enviroment variable HADOOP_HOME or modify rabit_hadoop.py line 14'
 
 parser = argparse.ArgumentParser(description='Rabit script to submit rabit jobs using Hadoop Streaming')
@@ -53,10 +53,10 @@ else:
                         help="path-to-hadoop binary folder")
 
 if hadoop_streaming_jar == None:
-    parser.add_argument('-jar', '--hadoop_streaming_jar', required = True,
+    parser.add_argument('-hs', '--hadoop_streaming_jar', required = True,
                         help='path-to hadoop streamimg jar file')
 else:
-    parser.add_argument('-jar', '--hadoop_streaming_jar', default = hadoop_streaming_jar,
+    parser.add_argument('-hs', '--hadoop_streaming_jar', default = hadoop_streaming_jar,
                         help='path-to hadoop streamimg jar file')
 
 parser.add_argument('command', nargs='+', help = 'command for rabit program')

From d986693fbd4c021bf582d931510c3a9523751738 Mon Sep 17 00:00:00 2001
From: Boliang Chen
Date: Sun, 11 Jan 2015 00:14:37 +0800
Subject: [PATCH 3/9] fix bugs

---
 tracker/rabit_hadoop.py | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/tracker/rabit_hadoop.py b/tracker/rabit_hadoop.py
index 01d242e31..30d21748f 100755
--- a/tracker/rabit_hadoop.py
+++ b/tracker/rabit_hadoop.py
@@ -12,7 +12,7 @@ import rabit_tracker as tracker
 
 #!!! Set path to hadoop and hadoop streaming jar here
 hadoop_binary = 'hadoop'
-hadoop_streaming_jar = None
+hadoop_streaming_jar = None 
 
 hadoop_home = os.getenv('HADOOP_HOME')
 
@@ -67,7 +67,7 @@ if args.jobname is None:
 
 def hadoop_streaming(nworker, worker_args):
     cmd = '%s jar %s -D mapred.map.tasks=%d' % (args.hadoop_binary, args.hadoop_streaming_jar, nworker)
-    cmd += ' -D mapred.job.name=%d' % (a)
+    cmd += ' -D mapred.job.name=%s' % (args.jobname)
     cmd += ' -input %s -output %s' % (args.input, args.output)
     cmd += ' -mapper \"%s\" -reducer \"/bin/cat\" ' % (' '.join(args.command + worker_args))
     fset = set()
@@ -75,9 +75,10 @@ def hadoop_streaming(nworker, worker_args):
         for f in args.command:
             if os.path.exists(f):
                 fset.add(f)
-    for flst in args.files:
-        for f in flst.split('#'):
-            fset.add(f)
+    if args.files != None:
+        for flst in args.files:
+            for f in flst.split('#'):
+                fset.add(f)
     for f in fset:
         cmd += ' -file %s' % f
     print cmd

From 6b30fb2bea6475a1f3cf205d8632031173eebd1c Mon Sep 17 00:00:00 2001
From: Boliang Chen
Date: Sun, 11 Jan 2015 00:16:05 +0800
Subject: [PATCH 4/9] remove blank

---
 tracker/rabit_hadoop.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tracker/rabit_hadoop.py b/tracker/rabit_hadoop.py
index 30d21748f..e9b53afbd 100755
--- a/tracker/rabit_hadoop.py
+++ b/tracker/rabit_hadoop.py
@@ -12,7 +12,7 @@ import rabit_tracker as tracker
 
 #!!! Set path to hadoop and hadoop streaming jar here
 hadoop_binary = 'hadoop'
-hadoop_streaming_jar = None 
+hadoop_streaming_jar = None
 
 hadoop_home = os.getenv('HADOOP_HOME')
 

From 76c15dffdef6b0dbf792d111096a913c3bbe283c Mon Sep 17 00:00:00 2001
From: tqchen
Date: Sat, 10 Jan 2015 09:58:10 -0800
Subject: [PATCH 5/9] update cache script

---
 tracker/rabit_hadoop.py | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/tracker/rabit_hadoop.py b/tracker/rabit_hadoop.py
index e9b53afbd..da9e161ce 100755
--- a/tracker/rabit_hadoop.py
+++ b/tracker/rabit_hadoop.py
@@ -41,7 +41,9 @@ parser.add_argument('-ac', '--auto_file_cache', default=1, choices=[0, 1], type=
                     help = 'whether automatically cache the files in the command to hadoop localfile, this is on by default')
 parser.add_argument('-f', '--files', nargs = '*',
                     help = 'the cached file list in mapreduce,'\
-                        ' the submission script will automatically cache all the files which appears in command.'\
+                        ' the submission script will automatically cache all the files which appears in command to local folder'\
+                        ' This will also cause rewritten of all the file names in the command to current path,'\
+                        ' for example `../../kmeans ../kmeans.conf` will be rewritten to ./kmeans kmeans.conf because the two files are cached to running folder.'\
                         ' You may need this option to cache additional files.'\
                         ' You can also use it to manually cache files when auto_file_cache is off')
 parser.add_argument('--jobname', help = 'customize jobname in tracker')
@@ -66,15 +68,20 @@ if args.jobname is None:
     args.jobname = ('Rabit(nworker=%d):' % args.nworker) + args.command[0].split('/')[-1];
 
 def hadoop_streaming(nworker, worker_args):
+    fset = set()
+    if args.auto_file_cache:
+        for i in range(len(args.command)):
+            f = args.command[i]
+            if os.path.exists(f):
+                fset.add(f)
+                if i == 0:
+                    args.command[i] = './' + args.command[i].split('/')[-1]
+                else:
+                    args.command[i] = args.command[i].split('/')[-1]
     cmd = '%s jar %s -D mapred.map.tasks=%d' % (args.hadoop_binary, args.hadoop_streaming_jar, nworker)
     cmd += ' -D mapred.job.name=%s' % (args.jobname)
     cmd += ' -input %s -output %s' % (args.input, args.output)
     cmd += ' -mapper \"%s\" -reducer \"/bin/cat\" ' % (' '.join(args.command + worker_args))
-    fset = set()
-    if args.auto_file_cache:
-        for f in args.command:
-            if os.path.exists(f):
-                fset.add(f)
     if args.files != None:
         for flst in args.files:
             for f in flst.split('#'):

From c2ab64afe3bf95a72ea579bc8505cb4481213955 Mon Sep 17 00:00:00 2001
From: tqchen
Date: Sat, 10 Jan 2015 10:01:31 -0800
Subject: [PATCH 6/9] fix comment

---
 tracker/rabit_hadoop.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/tracker/rabit_hadoop.py b/tracker/rabit_hadoop.py
index da9e161ce..aaed47cb0 100755
--- a/tracker/rabit_hadoop.py
+++ b/tracker/rabit_hadoop.py
@@ -43,7 +43,8 @@ parser.add_argument('-f', '--files', nargs = '*',
                     help = 'the cached file list in mapreduce,'\
                         ' the submission script will automatically cache all the files which appears in command to local folder'\
                         ' This will also cause rewritten of all the file names in the command to current path,'\
-                        ' for example `../../kmeans ../kmeans.conf` will be rewritten to ./kmeans kmeans.conf because the two files are cached to running folder.'\
+                        ' for example `../../kmeans ../kmeans.conf` will be rewritten to `./kmeans kmeans.conf`'\
+                        ' because the two files are cached to running folder.'\
                         ' You may need this option to cache additional files.'\
                         ' You can also use it to manually cache files when auto_file_cache is off')
 parser.add_argument('--jobname', help = 'customize jobname in tracker')

From 500a57697d72ac738abc073d67ae215b680f4ba3 Mon Sep 17 00:00:00 2001
From: tqchen
Date: Sat, 10 Jan 2015 17:45:53 -0800
Subject: [PATCH 7/9] chg script

---
 tracker/rabit_hadoop.py | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/tracker/rabit_hadoop.py b/tracker/rabit_hadoop.py
index aaed47cb0..f037387e4 100755
--- a/tracker/rabit_hadoop.py
+++ b/tracker/rabit_hadoop.py
@@ -48,6 +48,11 @@ parser.add_argument('-f', '--files', nargs = '*',
                         ' You may need this option to cache additional files.'\
                         ' You can also use it to manually cache files when auto_file_cache is off')
 parser.add_argument('--jobname', help = 'customize jobname in tracker')
+parser.add_argument('--timeout', default=600000000, type=int,
+                    help = 'timeout of each mapper job, automatically set to a very long time normally you donot need to set this ')
+parser.add_argument('-m', '--memory_mb', default=-1, type=int,
+                    help = 'maximum memory used by the process, Guide: set it large (near mapred.cluster.max.map.memory.mb) if you are running multi-threading rabit,'\
+                    'so that each node can occupy all the mapper slots in a machine for maximum performance')
 if hadoop_binary == None:
     parser.add_argument('-hb', '--hadoop_binary', required = True,
                         help="path-to-hadoop binary folder")
@@ -80,7 +85,9 @@ def hadoop_streaming(nworker, worker_args):
             else:
                 args.command[i] = args.command[i].split('/')[-1]
     cmd = '%s jar %s -D mapred.map.tasks=%d' % (args.hadoop_binary, args.hadoop_streaming_jar, nworker)
-    cmd += ' -D mapred.job.name=%s' % (args.jobname)
+    cmd += ' -Dmapred.job.name=%s' % (args.jobname)
+    cmd += ' -Dmapred.task.timeout=%d' % (args.timeout)
+    cmd += ' -Dmapred.job.map.memory.mb=%d' % (args.memory_mb)
     cmd += ' -input %s -output %s' % (args.input, args.output)
     cmd += ' -mapper \"%s\" -reducer \"/bin/cat\" ' % (' '.join(args.command + worker_args))
     if args.files != None:

From 43c129f431130fb8c5628cbc9565ca750f346d14 Mon Sep 17 00:00:00 2001
From: tqchen
Date: Sat, 10 Jan 2015 17:49:09 -0800
Subject: [PATCH 8/9] chg script

---
 tracker/rabit_hadoop.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/tracker/rabit_hadoop.py b/tracker/rabit_hadoop.py
index f037387e4..470455cb6 100755
--- a/tracker/rabit_hadoop.py
+++ b/tracker/rabit_hadoop.py
@@ -49,9 +49,11 @@ parser.add_argument('-f', '--files', nargs = '*',
                         ' You can also use it to manually cache files when auto_file_cache is off')
 parser.add_argument('--jobname', help = 'customize jobname in tracker')
 parser.add_argument('--timeout', default=600000000, type=int,
-                    help = 'timeout of each mapper job, automatically set to a very long time normally you donot need to set this ')
+                    help = 'timeout of each mapper job, automatically set to a very long time,'\
+                    'normally you do not need to set this ')
 parser.add_argument('-m', '--memory_mb', default=-1, type=int,
-                    help = 'maximum memory used by the process, Guide: set it large (near mapred.cluster.max.map.memory.mb) if you are running multi-threading rabit,'\
+                    help = 'maximum memory used by the process, Guide: set it large (near mapred.cluster.max.map.memory.mb)'\
+                    'if you are running multi-threading rabit,'\
                     'so that each node can occupy all the mapper slots in a machine for maximum performance')
 if hadoop_binary == None:
     parser.add_argument('-hb', '--hadoop_binary', required = True,

From 0100fdd18d67dbf365f10ee9623c97f24a566005 Mon Sep 17 00:00:00 2001
From: tqchen
Date: Sat, 10 Jan 2015 21:21:39 -0800
Subject: [PATCH 9/9] auto jobname

---
 tracker/rabit_hadoop.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tracker/rabit_hadoop.py b/tracker/rabit_hadoop.py
index 470455cb6..d0241dcb1 100755
--- a/tracker/rabit_hadoop.py
+++ b/tracker/rabit_hadoop.py
@@ -47,7 +47,7 @@ parser.add_argument('-f', '--files', nargs = '*',
                         ' because the two files are cached to running folder.'\
                         ' You may need this option to cache additional files.'\
                         ' You can also use it to manually cache files when auto_file_cache is off')
-parser.add_argument('--jobname', help = 'customize jobname in tracker')
+parser.add_argument('--jobname', default='auto', help = 'customize jobname in tracker')
 parser.add_argument('--timeout', default=600000000, type=int,
                     help = 'timeout of each mapper job, automatically set to a very long time,'\
                     'normally you do not need to set this ')
@@ -72,7 +72,7 @@ parser.add_argument('command', nargs='+', help = 'command for rabit program')
 
 args = parser.parse_args()
 
-if args.jobname is None:
+if args.jobname == 'auto':
     args.jobname = ('Rabit(nworker=%d):' % args.nworker) + args.command[0].split('/')[-1];
 
 def hadoop_streaming(nworker, worker_args):
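The heart of patch 5 is the auto_file_cache rewrite: each file that appears in the command and exists locally is collected for shipping to the workers via `-file`, and its path in the command is rewritten relative to the task's working directory. Below is that loop lifted out of `hadoop_streaming` so it can be run standalone; the two input paths are the illustrative ones from the `-f/--files` help text, and `command` is a stand-in for `args.command`.

```python
import os

# The path-rewriting loop added to hadoop_streaming in patch 5, extracted
# for standalone experimentation (Python 2, like the script itself).
command = ['../../kmeans', '../kmeans.conf']  # mimics args.command
fset = set()                                  # files to ship via -file
for i in range(len(command)):
    f = command[i]
    if os.path.exists(f):                     # only existing local files are cached
        fset.add(f)
        if i == 0:
            # the program itself gets an explicit ./ prefix so it is invocable
            command[i] = './' + command[i].split('/')[-1]
        else:
            command[i] = command[i].split('/')[-1]
# When both paths exist, command becomes ['./kmeans', 'kmeans.conf'],
# matching the example given in the -f/--files help string.
print command
print fset
```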
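For reference, here is a minimal sketch of the streaming command that `hadoop_streaming` assembles once patches 5, 7, and 9 are all applied. The assembly lines come from the diffs above; every concrete value (binary name, jar path, HDFS directories, worker count, jobname) is an illustrative placeholder for the corresponding `args.*` field, not a value taken from the patches.

```python
# Command assembly as it stands after patch 9 (Python 2, like the script).
# All literals below are placeholders standing in for argparse results.
nworker = 4                             # args.nworker
jobname = 'Rabit(nworker=4):kmeans'     # auto-derived jobname (patch 9)
timeout = 600000000                     # --timeout default (patch 7)
memory_mb = -1                          # --memory_mb default (patch 7)
command = ['./kmeans', 'kmeans.conf']   # after the auto_file_cache rewrite
worker_args = []                        # extra args supplied by the tracker

cmd = '%s jar %s -D mapred.map.tasks=%d' % ('hadoop', 'hadoop-streaming.jar', nworker)
cmd += ' -Dmapred.job.name=%s' % jobname
cmd += ' -Dmapred.task.timeout=%d' % timeout
cmd += ' -Dmapred.job.map.memory.mb=%d' % memory_mb
cmd += ' -input %s -output %s' % ('hdfs_input_dir', 'hdfs_output_dir')
cmd += ' -mapper "%s" -reducer "/bin/cat" ' % (' '.join(command + worker_args))
print cmd
```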