# Top-level dispatcher for the `loadl` helper: parse only the command and the
# jobfile path here; each subcommand parses the remaining argv itself.
parser = argparse.ArgumentParser(description='Helper script for running and managing loadleveller Monte Carlo jobs.', usage='''loadl <command> <jobfile> [<args>]
<jobfile> is a script file containing the parameters for the job of interest.
Possible commands and their shorthands are
delete, d delete all data related to a job
merge, m merges results of an unfinished job into an output file
run, r runs the job
status, s print job completion information''')
parser.add_argument('command')
parser.add_argument('jobfile')

# Consume only argv[1:3] (command + jobfile); the rest is left for the
# subcommand-specific parsers.
args = parser.parse_args(sys.argv[1:3])

jobdir = os.path.dirname(args.jobfile)
jobfile_name = os.path.basename(args.jobfile)

# Change into the job's directory FIRST: jobfile_name is only the basename, so
# constructing JobFile before chdir would fail whenever a directory prefix was
# given. (The original constructed the job before the chdir; the parallel run
# driver further down does chdir first.)
if jobdir != '':
    os.chdir(jobdir)

# NOTE(review): `jobfile` is presumably a project module providing JobFile;
# its import is not visible in this chunk -- confirm.
job = jobfile.JobFile(jobfile_name)
def run():
    """Run the job on a cluster or locally (the 'run'/'r' subcommand).

    NOTE(review): this chunk appears truncated -- only the option parsing
    setup of the body is visible here; the actual job-launch logic follows
    elsewhere.
    """
    import glob
    from loadleveller import clusterutils

    parser = argparse.ArgumentParser(description='run a loadleveller job on a cluster or locally')
    parser.add_argument('-s', '--single', action='store_true', help='Run in the single core scheduler mode')
    parser.add_argument('-f', '--force', action='store_true', help='Ignore warnings about possible job corruption')
    parser.add_argument('-r', '--restart', action='store_true', help='Delete all existing job data before starting.')
# --- 'delete' tool: wipe all simulation result data of a job ---------------
parser = argparse.ArgumentParser(description='This script deletes all the simulation result directories of the job specified by JOBFILE so that you can restart it from scratch. Use with caution.')
parser.add_argument('jobfile', metavar='JOBFILE', help='Configuration file containing all the job information. May be generated using ytaskmaker')
args = parser.parse_args()

# Load the YAML job description (only to validate it is readable; its
# contents are not used below in this chunk).
with open(args.jobfile, 'r') as f:
    jobfile = yaml.safe_load(f)

try:
    # adjust this if you adjust the task dir format in load-leveller
    shutil.rmtree('{}.data'.format(args.jobfile))
except FileNotFoundError:
    # NOTE(review): the except clause is missing from this chunk; assuming the
    # intent is to ignore an already-absent data directory -- confirm.
    pass
# --- 'run' driver: launch a loadleveller Monte Carlo job --------------------
parser = argparse.ArgumentParser(description='This helper program runs a loadleveller Monte Carlo program using a provided YAML-formatted jobfile. The jobfile contains information on how to run the job (what mc binary, mpi-parameters, ...) and a number of tasks with different simulation parameters each. When running on a cluster batch system, the batch script is generated using ygeneratebatchscript.')
parser.add_argument('jobfile', metavar='JOBFILE', help='Configuration file containing all the job information. May be generated using ytaskmaker')
parser.add_argument('-s', '--single', action='store_true', help='Run in the single core scheduler mode')
parser.add_argument('-m', '--merge', action='store_true', help='Merge the measurement data of all tasks')
parser.add_argument('--force', action='store_true', help='Ignore warnings about possible job corruption')
args = parser.parse_args()

# Work from the job's directory so all relative data paths resolve from there.
jobdir = os.path.dirname(args.jobfile)
jobfile_name = os.path.basename(args.jobfile)
if jobdir != '':
    os.chdir(jobdir)

# --single and --merge are mutually exclusive modes.
if args.single and args.merge:
    print('Error: cannot merge and run in single mode at the same time.')
    sys.exit(1)

# NOTE(review): `jobfile` is presumably a project module providing JobFile;
# its import is not visible in this chunk -- confirm.
job = jobfile.JobFile(jobfile_name)

# check age of the different files: refuse to resume from checkpoints that
# predate the binary or the jobfile, unless --force is given.
binary_modtime = os.stat(job.mc_binary).st_mtime
jobfile_modtime = os.stat(jobfile_name).st_mtime
try:
    # only check one of the output files for speed
    f = next(glob.iglob('{}.data/*/*.h5'.format(jobfile_name)))
    data_modtime = os.stat(f).st_mtime

    error = False
    # With --force, stale checkpoints only warn instead of aborting.
    label = 'Warning' if args.force else 'Error'
    if binary_modtime > data_modtime:
        print('{}: binary \'{}\' is newer than the checkpoint files.'.format(label, job.mc_binary))
        error = True
    if jobfile_modtime > data_modtime:
        print('{}: jobfile \'{}\' is newer than the checkpoint files.'.format(label, jobfile_name))
        error = True
    if not args.force and error:
        print('Use ydelete to start from a blank run or use \'--force\' to proceed if you are sure\nthe changes you made are compatible.')
        # NOTE(review): chunk ends here; presumably followed by sys.exit(1) --
        # confirm against the full file.
except StopIteration:
    # NOTE(review): the except clause is truncated in this chunk. next() on an
    # empty iglob raises StopIteration, i.e. no .h5 output exists yet, so
    # there is nothing to age-check -- assuming it is deliberately ignored.
    pass