Merge pull request #1 from tqchen/master

merge from tqchen
cblsjtu 2015-01-11 11:00:08 +08:00
commit c0f85c681e


@@ -41,10 +41,20 @@ parser.add_argument('-ac', '--auto_file_cache', default=1, choices=[0, 1], type=
                    help = 'whether to automatically cache the files in the command to hadoop local files; this is on by default')
parser.add_argument('-f', '--files', nargs = '*',
                    help = 'the cached file list in mapreduce;'\
                    ' the submission script will automatically cache all the files that appear in the command to the local folder.'\
                    ' This also causes all the file names in the command to be rewritten to the current path;'\
                    ' for example, `../../kmeans ../kmeans.conf` will be rewritten to `./kmeans kmeans.conf`'\
                    ' because the two files are cached to the running folder.'\
                    ' You may need this option to cache additional files.'\
                    ' You can also use it to manually cache files when auto_file_cache is off')
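# Illustrative invocation of the rewriting rule above (script name, flag
# spellings, and paths are hypothetical, shown only to make the rule concrete):
#   rabit_hadoop.py -n 4 -i <hdfs-in> -o <hdfs-out> -f extra.conf \
#       ../../kmeans ../kmeans.conf
# with auto_file_cache on, the mapper command becomes
#   ./kmeans kmeans.conf
# because both files are shipped to each task's working directory.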
parser.add_argument('--jobname', help = 'customize the job name shown in the tracker')
parser.add_argument('--timeout', default=600000000, type=int,
                    help = 'timeout of each mapper job, automatically set to a very long time;'\
                    ' normally you do not need to set this')
parser.add_argument('-m', '--memory_mb', default=-1, type=int,
                    help = 'maximum memory used by the process. Guide: set it large (near mapred.cluster.max.map.memory.mb)'\
                    ' if you are running multi-threaded rabit,'\
                    ' so that each node can occupy all the mapper slots in a machine for maximum performance')
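# A rough sizing sketch for the guide above (all numbers are assumptions,
# not read from any cluster): if a machine offers 8 mapper slots backed by
# 2048 MB each, requesting close to the full amount makes the scheduler
# dedicate the whole machine to one multi-threaded rabit worker.
#   slots_per_machine = 8                            # assumed
#   slot_memory_mb = 2048                            # assumed per-slot budget
#   memory_mb = slots_per_machine * slot_memory_mb   # value to pass via -m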
if hadoop_binary is None:
    parser.add_argument('-hb', '--hadoop_binary', required = True,
                        help = 'path to the hadoop binary folder')
@@ -66,15 +76,22 @@ if args.jobname is None:
    args.jobname = ('Rabit(nworker=%d):' % args.nworker) + args.command[0].split('/')[-1]
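    # e.g. with nworker=4 and command ['../../kmeans', '../kmeans.conf'],
    # the default jobname is 'Rabit(nworker=4):kmeans'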
def hadoop_streaming(nworker, worker_args):
    fset = set()
    if args.auto_file_cache:
        for i in range(len(args.command)):
            f = args.command[i]
            if os.path.exists(f):
                fset.add(f)
                if i == 0:
                    args.command[i] = './' + args.command[i].split('/')[-1]
                else:
                    args.command[i] = args.command[i].split('/')[-1]
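        # net effect: ['../../kmeans', '../kmeans.conf'] becomes
        # ['./kmeans', 'kmeans.conf'], and both original paths are kept in
        # fset so they can be cached with the job (see the --files help above)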
    cmd = '%s jar %s -D mapred.map.tasks=%d' % (args.hadoop_binary, args.hadoop_streaming_jar, nworker)
    cmd += ' -Dmapred.job.name=%s' % (args.jobname)
    cmd += ' -Dmapred.task.timeout=%d' % (args.timeout)
    cmd += ' -Dmapred.job.map.memory.mb=%d' % (args.memory_mb)
    cmd += ' -input %s -output %s' % (args.input, args.output)
    cmd += ' -mapper \"%s\" -reducer \"/bin/cat\" ' % (' '.join(args.command + worker_args))
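    # at this point cmd looks roughly like (all values hypothetical):
    #   /path/to/hadoop jar streaming.jar -D mapred.map.tasks=4
    #     -Dmapred.job.name=Rabit(nworker=4):kmeans -Dmapred.task.timeout=600000000
    #     -Dmapred.job.map.memory.mb=-1 -input <in> -output <out>
    #     -mapper "./kmeans kmeans.conf <worker_args>" -reducer "/bin/cat"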
    if args.files is not None:
        for flst in args.files:
            for f in flst.split('#'):