modify default jobname

Boliang Chen 2015-01-11 14:52:48 +08:00
parent 363994f29d
commit 7fa23f2d2f


@@ -50,7 +50,7 @@ parser.add_argument('-f', '--files', nargs = '*',
' You can also use it to manually cache files when auto_file_cache is off')
parser.add_argument('--jobname', default='auto', help = 'customize jobname in tracker')
parser.add_argument('--timeout', default=600000000, type=int,
-    help = 'timeout of each mapper job, automatically set to a very long time,'\
+    help = 'timeout (in milliseconds) of each mapper job, automatically set to a very long time,'\
'normally you do not need to set this ')
parser.add_argument('-m', '--memory_mb', default=-1, type=int,
help = 'maximum memory used by the process, Guide: set it large (near mapred.cluster.max.map.memory.mb)'\
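For reference, a minimal sketch of the --timeout option touched in the hunk above; the milliseconds interpretation and the hour conversion are illustrative assumptions based on the updated help text, not part of this commit.

# Illustrative sketch of the --timeout argument from the hunk above.
# The unit conversion below is an assumption, not code from the script.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--timeout', default=600000000, type=int,
                    help='timeout (in milliseconds) of each mapper job')
args = parser.parse_args([])

print(args.timeout / 1000.0 / 3600.0)  # default is about 166 hours, i.e. "a very long time"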
@@ -74,7 +74,7 @@ parser.add_argument('command', nargs='+',
args = parser.parse_args()
if args.jobname == 'auto':
-    args.jobname = ('Rabit(nworker=%d):' % args.nworker) + args.command[0].split('/')[-1];
+    args.jobname = ('Rabit[nworker=%d]:' % args.nworker) + args.command[0].split('/')[-1];
def hadoop_streaming(nworker, worker_args):
fset = set()
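
To make the change concrete, here is a small self-contained sketch of the auto-jobname logic after this commit. The -n/--nworker flag and the sample command path are assumptions taken from the surrounding script, not part of the patch itself.

# Sketch of the default-jobname construction changed by this commit:
# square brackets now wrap the worker count instead of parentheses.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-n', '--nworker', default=2, type=int)  # assumed flag name
parser.add_argument('--jobname', default='auto')
parser.add_argument('command', nargs='+')
args = parser.parse_args(['-n', '4', '/path/to/my_program', 'arg1'])  # hypothetical command

if args.jobname == 'auto':
    # after this commit:  'Rabit[nworker=4]:my_program'
    # before this commit: 'Rabit(nworker=4):my_program'
    args.jobname = ('Rabit[nworker=%d]:' % args.nworker) + args.command[0].split('/')[-1]

print(args.jobname)

Running the sketch prints Rabit[nworker=4]:my_program, matching the new format shown in the second hunk.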