yarn script

This commit is contained in:
chenshuaihua 2015-01-12 00:09:00 +08:00
parent 26b5fdac40
commit b2dec95862

View File

@@ -54,6 +54,8 @@ parser.add_argument('-m', '--memory_mb', default=1024, type=int,
help = 'maximum memory used by the process, Guide: set it large (near mapreduce.jobtracker.maxmapmemory.mb).'\
'if you are running multi-threading rabit,'\
'so that each node can occupy all the mapper slots in a machine for maximum performance')
if hadoop_binary == None:
parser.add_argument('-hb', '--hadoop_binary', required = True,
help="path-to-hadoop binary folder")
@@ -74,13 +76,12 @@ args = parser.parse_args()
if args.jobname == 'auto':
args.jobname = ('Rabit[nworker=%d]:' % args.nworker) + args.command[0].split('/')[-1];
def hadoop_streaming(nworker, slave_args):
cmd = '%s jar %s -D mapreduce.job.maps=%d' % (args.hadoop_binary, args.hadoop_streaming_jar, nworker)
cmd += ' -D mapreduce.job.name' % (args.jobname)
cmd += ' -D mapreduce.job.name=%s' % (args.jobname)
cmd += ' -D mapreduce.map.cpu.vcores=%d' % (args.nthread)
cmd += ' -D mapreduce.task.timeout=%d' % (args.timeout)
cmd += ' -D mapreduce.map.memory.mb=%d' % (args.memory_mb)
cmd += ' -D mapreduce.map.cpu.vcores=%d' % (args.nthread)
cmd += ' -input %s -output %s' % (args.input, args.output)
cmd += ' -mapper \"%s\" -reducer \"/bin/cat\" ' % (' '.join(args.command + slave_args))
fset = set()