unify hadoop and yarn script

parent 03dca6d6b3
commit 78bfe867e6
tracker/rabit_hadoop.py

@@ -10,6 +10,7 @@ import time
 import subprocess
 import rabit_tracker as tracker

+
 #!!! Set path to hadoop and hadoop streaming jar here
 hadoop_binary = 'hadoop'
 hadoop_streaming_jar = None
@@ -28,9 +29,12 @@ if hadoop_binary == None or hadoop_streaming_jar == None:
     print 'Warning: Cannot auto-detect path to hadoop and hadoop-streaming jar, need to set them via arguments -hs and -hb'
     print '\tTo enable auto-detection, you can set enviroment variable HADOOP_HOME or modify rabit_hadoop.py line 14'

-parser = argparse.ArgumentParser(description='Rabit script to submit rabit jobs using Hadoop Streaming')
+parser = argparse.ArgumentParser(description='Rabit script to submit rabit jobs using Hadoop Streaming. '\
+                                 'This script supports both Hadoop 1.0 and Yarn (MRv2); Yarn is recommended.')
 parser.add_argument('-n', '--nworker', required=True, type=int,
                     help = 'number of worker proccess to be launched')
+parser.add_argument('-nt', '--nthread', default = -1, type=int,
+                    help = 'number of threads in each mapper to be launched; set it if each rabit job is multi-threaded')
 parser.add_argument('-i', '--input', required=True,
                     help = 'input path in HDFS')
 parser.add_argument('-o', '--output', required=True,
@@ -51,7 +55,7 @@ parser.add_argument('--jobname', default='auto', help = 'customize jobname in tr
 parser.add_argument('--timeout', default=600000000, type=int,
                     help = 'timeout (in million seconds) of each mapper job, automatically set to a very long time,'\
                     'normally you do not need to set this ')
-parser.add_argument('-m', '--memory_mb', default=-1, type=int,
+parser.add_argument('-mem', '--memory_mb', default=-1, type=int,
                     help = 'maximum memory used by the process, Guide: set it large (near mapred.cluster.max.map.memory.mb)'\
                     'if you are running multi-threading rabit,'\
                     'so that each node can occupy all the mapper slots in a machine for maximum performance')
@@ -75,7 +79,7 @@ args = parser.parse_args()
 if args.jobname == 'auto':
     args.jobname = ('Rabit[nworker=%d]:' % args.nworker) + args.command[0].split('/')[-1];

-def hadoop_streaming(nworker, worker_args):
+def hadoop_streaming(nworker, worker_args, yarn = False):
     fset = set()
     if args.auto_file_cache:
         for i in range(len(args.command)):
@@ -86,10 +90,30 @@ def hadoop_streaming(nworker, worker_args):
                 args.command[i] = './' + args.command[i].split('/')[-1]
             else:
                 args.command[i] = args.command[i].split('/')[-1]
-    cmd = '%s jar %s -D mapred.map.tasks=%d' % (args.hadoop_binary, args.hadoop_streaming_jar, nworker)
-    cmd += ' -Dmapred.job.name=%s' % (args.jobname)
-    cmd += ' -Dmapred.task.timeout=%d' % (args.timeout)
-    cmd += ' -Dmapred.job.map.memory.mb=%d' % (args.memory_mb)
+    # setup keymaps: Yarn (MRv2) and Hadoop 1.0 use different configuration keys
+    kmap = {}
+    if yarn:
+        kmap['nworker'] = 'mapreduce.job.maps'
+        kmap['jobname'] = 'mapreduce.job.name'
+        kmap['nthread'] = 'mapreduce.map.cpu.vcores'
+        kmap['timeout'] = 'mapreduce.task.timeout'
+        kmap['memory_mb'] = 'mapreduce.map.memory.mb'
+    else:
+        kmap['nworker'] = 'mapred.map.tasks'
+        kmap['jobname'] = 'mapred.job.name'
+        kmap['nthread'] = None
+        kmap['timeout'] = 'mapred.task.timeout'
+        kmap['memory_mb'] = 'mapred.job.map.memory.mb'
+    cmd = '%s jar %s' % (args.hadoop_binary, args.hadoop_streaming_jar)
+    cmd += ' -D%s=%d' % (kmap['nworker'], nworker)
+    cmd += ' -D%s=%s' % (kmap['jobname'], args.jobname)
+    if args.nthread != -1:
+        assert kmap['nthread'] is not None, "nthread can only be set on a Yarn cluster, it is highly recommended to use Yarn"
+        cmd += ' -D%s=%d' % (kmap['nthread'], args.nthread)
+    cmd += ' -D%s=%d' % (kmap['timeout'], args.timeout)
+    if args.memory_mb != -1:
+        cmd += ' -D%s=%d' % (kmap['memory_mb'], args.memory_mb)
+
     cmd += ' -input %s -output %s' % (args.input, args.output)
     cmd += ' -mapper \"%s\" -reducer \"/bin/cat\" ' % (' '.join(args.command + worker_args))
     if args.files != None:
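The kmap table above is the core of the unification: the same submission code emits Hadoop 1.0 'mapred.*' keys or Yarn (MRv2) 'mapreduce.*' keys depending on the yarn flag. A minimal standalone sketch of that idea, runnable on its own (build_d_options and its arguments are illustrative names, not something defined in the diff):

# Hypothetical sketch of the key-mapping approach used in hadoop_streaming above.
def build_d_options(nworker, jobname, timeout, yarn=False):
    # pick configuration key names for the target cluster generation
    if yarn:
        kmap = {'nworker': 'mapreduce.job.maps',
                'jobname': 'mapreduce.job.name',
                'timeout': 'mapreduce.task.timeout'}
    else:
        kmap = {'nworker': 'mapred.map.tasks',
                'jobname': 'mapred.job.name',
                'timeout': 'mapred.task.timeout'}
    # assemble the -D options passed to hadoop streaming
    opts = ' -D%s=%d' % (kmap['nworker'], nworker)
    opts += ' -D%s=%s' % (kmap['jobname'], jobname)
    opts += ' -D%s=%d' % (kmap['timeout'], timeout)
    return opts

print build_d_options(4, 'Rabit[nworker=4]:demo', 600000000, yarn=True)
# prints: -Dmapreduce.job.maps=4 -Dmapreduce.job.name=Rabit[nworker=4]:demo -Dmapreduce.task.timeout=600000000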
@@ -101,4 +125,6 @@ def hadoop_streaming(nworker, worker_args):
     print cmd
     subprocess.check_call(cmd, shell = True)

-tracker.submit(args.nworker, [], fun_submit = hadoop_streaming, verbose = args.verbose)
+if __name__ == '__main__':
+    fun_submit = lambda nworker, worker_args: hadoop_streaming(nworker, worker_args, False)
+    tracker.submit(args.nworker, [], fun_submit = fun_submit, verbose = args.verbose)
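A brief aside on the new entry point: the lambda simply fixes the yarn argument before handing the submitter to tracker.submit. functools.partial expresses the same binding; the snippet below is an equivalent formulation for illustration, not what the diff uses, and the hadoop_streaming stub is only there so the example runs on its own.

import functools

# Stand-in for hadoop_streaming(nworker, worker_args, yarn) from rabit_hadoop.py.
def hadoop_streaming(nworker, worker_args, yarn = False):
    print 'submit %d workers, yarn=%s, extra args=%s' % (nworker, yarn, worker_args)

# Equivalent to: lambda nworker, worker_args: hadoop_streaming(nworker, worker_args, False)
fun_submit = functools.partial(hadoop_streaming, yarn=False)
fun_submit(4, [])   # prints: submit 4 workers, yarn=False, extra args=[]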

tracker/rabit_yarn.py (100 lines changed, Normal file → Executable file)
@@ -1,100 +1,10 @@
 #!/usr/bin/python
 """
-This is a script to submit rabit job using hadoop streaming
+This is a script to submit rabit job using Yarn
 submit the rabit process as mappers of MapReduce
 """
-import argparse
-import sys
-import os
-import time
-import subprocess
-import rabit_tracker as tracker
+import rabit_hadoop

-#!!! Set path to hadoop and hadoop streaming jar here
-hadoop_binary = 'hadoop'
-hadoop_streaming_jar = None
-
-# code
-hadoop_home = os.getenv('HADOOP_HOME')
-if hadoop_home != None:
-    if hadoop_binary == None:
-        hadoop_binary = hadoop_home + '/bin/hadoop'
-        assert os.path.exists(hadoop_binary), "HADDOP_HOME does not contain the hadoop binary"
-    if hadoop_streaming_jar == None:
-        hadoop_streaming_jar = hadoop_home + '/lib/hadoop-streaming.jar'
-        assert os.path.exists(hadoop_streaming_jar), "HADDOP_HOME does not contain the haddop streaming jar"
-
-if hadoop_binary == None or hadoop_streaming_jar == None:
-    print 'Warning: Cannot auto-detect path to hadoop and streaming jar, need to set them via arguments -hs and -hb'
-    print '\tTo enable auto-detection, you can set enviroment variable HADOOP_HOME or modify rabit_hadoop.py line 14'
-
-parser = argparse.ArgumentParser(description='Rabit script to submit rabit jobs using Hadoop Streaming')
-parser.add_argument('-nw', '--nworker', required=True, type=int,
-                    help = 'number of worker proccess to be launched')
-parser.add_argument('-nt', '--nthread', required=True, type=int,
-                    help = 'number of thread of each mapper to be launched')
-parser.add_argument('-i', '--input', required=True,
-                    help = 'input path in HDFS')
-parser.add_argument('-o', '--output', required=True,
-                    help = 'output path in HDFS')
-parser.add_argument('-v', '--verbose', default=0, choices=[0, 1], type=int,
-                    help = 'print more messages into the console')
-parser.add_argument('-ac', '--auto_file_cache', default=1, choices=[0, 1], type=int,
-                    help = 'whether automatically cache the files in the command to hadoop localfile, this is on by default')
-parser.add_argument('-f', '--files', nargs = '*',
-                    help = 'the cached file list in mapreduce,'\
-                    ' the submission script will automatically cache all the files which appears in command.'\
-                    ' You may need this option to cache additional files.'\
-                    ' You can also use it to manually cache files when auto_file_cache is off')
-parser.add_argument('--jobname', default='auto', help = 'customize jobname in tracker')
-parser.add_argument('--timeout', default=600000000, type=int,
-                    help = 'timeout (in million seconds) of each mapper job, automatically set to a very long time,'\
-                    'normally you do not need to set this ')
-parser.add_argument('-m', '--memory_mb', default=1024, type=int,
-                    help = 'maximum memory used by the process, Guide: set it large (near mapreduce.jobtracker.maxmapmemory.mb).'\
-                    'if you are running multi-threading rabit,'\
-                    'so that each node can occupy all the mapper slots in a machine for maximum performance')
-
-
-if hadoop_binary == None:
-    parser.add_argument('-hb', '--hadoop_binary', required = True,
-                        help="path-to-hadoop binary folder")
-else:
-    parser.add_argument('-hb', '--hadoop_binary', default = hadoop_binary,
-                        help="path-to-hadoop binary folder")
-
-if hadoop_streaming_jar == None:
-    parser.add_argument('-jar', '--hadoop_streaming_jar', required = True,
-                        help='path-to hadoop streamimg jar file')
-else:
-    parser.add_argument('-jar', '--hadoop_streaming_jar', default = hadoop_streaming_jar,
-                        help='path-to hadoop streamimg jar file')
-parser.add_argument('command', nargs='+',
-                    help = 'command for rabit program')
-args = parser.parse_args()
-
-if args.jobname == 'auto':
-    args.jobname = ('Rabit[nworker=%d]:' % args.nworker) + args.command[0].split('/')[-1];
-
-def hadoop_streaming(nworker, slave_args):
-    cmd = '%s jar %s -D mapreduce.job.maps=%d' % (args.hadoop_binary, args.hadoop_streaming_jar, nworker)
-    cmd += ' -D mapreduce.job.name=%s' % (args.jobname)
-    cmd += ' -D mapreduce.map.cpu.vcores=%d' % (args.nthread)
-    cmd += ' -D mapreduce.task.timeout=%d' % (args.timeout)
-    cmd += ' -D mapreduce.map.memory.mb=%d' % (args.memory_mb)
-    cmd += ' -input %s -output %s' % (args.input, args.output)
-    cmd += ' -mapper \"%s\" -reducer \"/bin/cat\" ' % (' '.join(args.command + slave_args))
-    fset = set()
-    if args.auto_file_cache:
-        for f in args.command:
-            if os.path.exists(f):
-                fset.add(f)
-    for flst in args.files:
-        for f in flst.split('#'):
-            fset.add(f)
-    for f in fset:
-        cmd += ' -file %s' % f
-    print cmd
-    subprocess.check_call(cmd, shell = True)
-
-tracker.submit(args.nworker, [], fun_submit = hadoop_streaming, verbose = args.verbose)
+
+if __name__ == '__main__':
+    fun_submit = lambda nworker, worker_args: hadoop_streaming(nworker, worker_args, True)
+    tracker.submit(args.nworker, [], fun_submit = fun_submit, verbose = args.verbose)
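After this change rabit_yarn.py becomes a thin wrapper that reuses the submission logic in rabit_hadoop.py with yarn=True. The sketch below shows one way the wrapper could reach those names through the imported module; the qualified rabit_hadoop.* references are an assumption (the diff itself calls hadoop_streaming, tracker, and args unqualified), so treat this as an illustration rather than the committed code.

#!/usr/bin/python
"""
Sketch of a thin Yarn wrapper: rabit_hadoop parses the arguments and defines
hadoop_streaming; this assumes those module attributes are the intended
access path.
"""
import rabit_hadoop

if __name__ == '__main__':
    fun_submit = lambda nworker, worker_args: \
        rabit_hadoop.hadoop_streaming(nworker, worker_args, True)
    rabit_hadoop.tracker.submit(rabit_hadoop.args.nworker, [],
                                fun_submit = fun_submit,
                                verbose = rabit_hadoop.args.verbose)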