diff --git a/submit_job_hadoop.py b/submit_job_hadoop.py
new file mode 100755
index 000000000..af061360c
--- /dev/null
+++ b/submit_job_hadoop.py
@@ -0,0 +1,48 @@
+#!/usr/bin/python
+"""
+This is an example job submit script for hadoop streaming
+"""
+import argparse
+import sys
+import os
+import subprocess
+sys.path.append('./src/')
+from rabit_master import Master
+from threading import Thread
+
+
+def hadoop_streaming(nslaves, slave_args):
+    # quote the full mapper command line so hadoop streaming passes the
+    # extra arguments through to the mapper program
+    mapper_cmd = '%s stdin %d %d stdout %s' % (
+        args.mapper, args.nclusters, args.iterations, ' '.join(slave_args))
+    # mapred.map.tasks requests nslaves map tasks (a hint, not a guarantee)
+    cmd = '%s jar %s -D mapred.map.tasks=%d -input %s -output %s -mapper "%s" -reducer /bin/cat' % (
+        args.hadoop_binary, args.hadoop_streaming_jar, nslaves,
+        args.input, args.output, mapper_cmd)
+    print(cmd)
+    subprocess.check_call(cmd, shell=True)
+
+
+parser = argparse.ArgumentParser(description='Hadoop Streaming submission script')
+parser.add_argument('-s', '--nslaves', required=True, type=int)
+parser.add_argument('-hb', '--hadoop_binary', required=True)
+parser.add_argument('-hs', '--hadoop_streaming_jar', required=True)
+parser.add_argument('-i', '--input', required=True)
+parser.add_argument('-o', '--output', required=True)
+parser.add_argument('-m', '--mapper', required=True)
+#parser.add_argument('-r', '--reducer', required=False)
+parser.add_argument('-k', '--nclusters', required=True, type=int)
+parser.add_argument('-itr', '--iterations', required=True, type=int)
+args = parser.parse_args()
+
+master = Master()
+# mapred_job_id is only available after the hadoop streaming job has been
+# triggered; not sure yet how to obtain it here
+# os.environ["mapred_job_id"]
+slave_args = ['master_uri=%s' % 'TODO', 'master_port=%s' % 'TODO']
+# pass a tuple so the arguments match hadoop_streaming(nslaves, slave_args)
+submit_thread = Thread(target=hadoop_streaming, args=(args.nslaves, slave_args))
+submit_thread.start()
+master.accept_slaves(args.nslaves)
+submit_thread.join()
diff --git a/test/test.sh b/test/test.sh
index 78d267157..c323785dd 100755
--- a/test/test.sh
+++ b/test/test.sh
@@ -5,4 +5,4 @@
 then
     exit -1
 fi
-../submit_job_tcp.py $1 test_recover "${@:2}"
+../submit_job.py $1 test_recover "${@:2}"
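For reference, a minimal invocation sketch of the new submit script. The hadoop binary path, streaming jar path, HDFS locations, and mapper binary name below are hypothetical placeholders, and master_uri/master_port are still TODO inside the script itself:

    ./submit_job_hadoop.py -s 4 \
        -hb /usr/bin/hadoop \
        -hs /path/to/hadoop-streaming.jar \
        -i hdfs:///tmp/kmeans_in -o hdfs:///tmp/kmeans_out \
        -m kmeans -k 8 -itr 10

This starts the rabit Master, launches the streaming job on a background thread, then blocks in master.accept_slaves until the 4 workers have connected.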