Merge branch 'master' of ssh://github.com/tqchen/rabit

Conflicts:
	tracker/rabit_hadoop.py
tqchen committed 2015-01-10 22:50:38 -08:00
commit ed264002a0


@@ -21,11 +21,11 @@ if hadoop_home != None:
     hadoop_binary = hadoop_home + '/bin/hadoop'
     assert os.path.exists(hadoop_binary), "HADOOP_HOME does not contain the hadoop binary"
     if hadoop_streaming_jar == None:
-        hadoop_streaming_jar = hadoop_home + '/lib/hadoop-streaming.har'
+        hadoop_streaming_jar = hadoop_home + '/lib/hadoop-streaming.jar'
         assert os.path.exists(hadoop_streaming_jar), "HADOOP_HOME does not contain the hadoop streaming jar"
 if hadoop_binary == None or hadoop_streaming_jar == None:
-    print 'Warning: Cannot auto-detect path to hadoop and streaming jar, need to set them via arguments -hs and -hb'
+    print 'Warning: Cannot auto-detect path to hadoop and hadoop-streaming jar, need to set them via arguments -hs and -hb'
     print '\tTo enable auto-detection, you can set environment variable HADOOP_HOME or modify rabit_hadoop.py line 14'
 parser = argparse.ArgumentParser(description='Rabit script to submit rabit jobs using Hadoop Streaming')
@@ -41,10 +41,20 @@ parser.add_argument('-ac', '--auto_file_cache', default=1, choices=[0, 1], type=
                     help = 'whether to automatically cache the files in the command to hadoop localfile, this is on by default')
 parser.add_argument('-f', '--files', default = [], action='append',
                     help = 'the cached file list in mapreduce,'\
-                        ' the submission script will automatically cache all the files which appears in command.'\
+                        ' the submission script will automatically cache all the files which appear in the command to the local folder.'\
+                        ' This will also cause all the file names in the command to be rewritten to the current path,'\
+                        ' for example `../../kmeans ../kmeans.conf` will be rewritten to `./kmeans kmeans.conf`'\
+                        ' because the two files are cached to the running folder.'\
                         ' You may need this option to cache additional files.'\
                         ' You can also use it to manually cache files when auto_file_cache is off')
-parser.add_argument('--jobname', help = 'customize jobname in tracker')
+parser.add_argument('--jobname', default='auto', help = 'customize jobname in tracker')
+parser.add_argument('--timeout', default=600000000, type=int,
+                    help = 'timeout of each mapper job, automatically set to a very long time;'\
+                        ' normally you do not need to set this')
+parser.add_argument('-m', '--memory_mb', default=-1, type=int,
+                    help = 'maximum memory used by the process. Guide: set it large (near mapred.cluster.max.map.memory.mb)'\
+                        ' if you are running multi-threaded rabit,'\
+                        ' so that each node can occupy all the mapper slots in a machine for maximum performance')
 if hadoop_binary == None:
     parser.add_argument('-hb', '--hadoop_binary', required = True,
                         help="path-to-hadoop binary folder")
@@ -53,31 +63,39 @@ else:
                         help="path-to-hadoop binary folder")
 if hadoop_streaming_jar == None:
-    parser.add_argument('-jar', '--hadoop_streaming_jar', required = True,
+    parser.add_argument('-hs', '--hadoop_streaming_jar', required = True,
                         help='path-to hadoop streaming jar file')
 else:
-    parser.add_argument('-jar', '--hadoop_streaming_jar', default = hadoop_streaming_jar,
+    parser.add_argument('-hs', '--hadoop_streaming_jar', default = hadoop_streaming_jar,
                         help='path-to hadoop streaming jar file')
 parser.add_argument('command', nargs='+',
                     help = 'command for rabit program')
 args = parser.parse_args()
-if args.jobname is None:
+if args.jobname == 'auto':
     args.jobname = ('Rabit[nworker=%d]:' % args.nworker) + args.command[0].split('/')[-1]
 def hadoop_streaming(nworker, worker_args):
-    cmd = '%s jar %s -D mapred.map.tasks=%d' % (args.hadoop_binary, args.hadoop_streaming_jar, nworker)
-    cmd += ' -D mapred.job.name=%d' % (a)
-    cmd += ' -input %s -output %s' % (args.input, args.output)
-    cmd += ' -mapper \"%s\" -reducer \"/bin/cat\" ' % (' '.join(args.command + worker_args))
     fset = set()
     if args.auto_file_cache:
-        for f in args.command:
+        for i in range(len(args.command)):
+            f = args.command[i]
             if os.path.exists(f):
                 fset.add(f)
-    for flst in args.files:
-        for f in flst.split('#'):
-            fset.add(f)
+                if i == 0:
+                    args.command[i] = './' + args.command[i].split('/')[-1]
+                else:
+                    args.command[i] = args.command[i].split('/')[-1]
+    cmd = '%s jar %s -D mapred.map.tasks=%d' % (args.hadoop_binary, args.hadoop_streaming_jar, nworker)
+    cmd += ' -Dmapred.job.name=%s' % (args.jobname)
+    cmd += ' -Dmapred.task.timeout=%d' % (args.timeout)
+    cmd += ' -Dmapred.job.map.memory.mb=%d' % (args.memory_mb)
+    cmd += ' -input %s -output %s' % (args.input, args.output)
+    cmd += ' -mapper \"%s\" -reducer \"/bin/cat\" ' % (' '.join(args.command + worker_args))
+    if args.files != None:
+        for flst in args.files:
+            for f in flst.split('#'):
+                fset.add(f)
     for f in fset:
         cmd += ' -file %s' % f
     print cmd
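
For illustration (not part of the commit), here is a minimal sketch of the streaming command the updated hadoop_streaming() would assemble for a hypothetical 4-worker job running the kmeans example mentioned in the help text. The hadoop paths, input/output directories, and worker_args values below are made-up placeholders; only the assembly logic mirrors the diff:

# Sketch only: hypothetical values standing in for args.* and the tracker-supplied worker_args.
hadoop_binary = '/opt/hadoop/bin/hadoop'                 # assumed HADOOP_HOME layout
hadoop_streaming_jar = '/opt/hadoop/lib/hadoop-streaming.jar'
nworker = 4
jobname = 'Rabit[nworker=4]:kmeans'                      # what the 'auto' default expands to for ../../kmeans
timeout = 600000000
memory_mb = -1
command = ['./kmeans', 'kmeans.conf']                    # '../../kmeans ../kmeans.conf' after auto_file_cache rewriting
worker_args = ['rabit_example_arg=0']                    # placeholder for the tracker-provided worker arguments

cmd = '%s jar %s -D mapred.map.tasks=%d' % (hadoop_binary, hadoop_streaming_jar, nworker)
cmd += ' -Dmapred.job.name=%s' % jobname
cmd += ' -Dmapred.task.timeout=%d' % timeout
cmd += ' -Dmapred.job.map.memory.mb=%d' % memory_mb
cmd += ' -input %s -output %s' % ('hdfs_input_dir', 'hdfs_output_dir')
cmd += ' -mapper "%s" -reducer "/bin/cat" ' % ' '.join(command + worker_args)
cmd += ' -file ../../kmeans -file ../kmeans.conf'        # the original paths collected in fset
print(cmd)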