# Settings
# --------
rename_archive = True
store_full_input = False
update_perf_log_repo = True
push_on_perf_log_repo = False
pull_3_repos = True
recompile = True
if machine == 'summit':
    compiler = 'pgi'
    architecture = 'gpu'

# List of tests to perform
# ------------------------
# Each test runs n_repeat times
n_repeat = 2
# test_list is machine-specific
test_list = get_test_list(n_repeat)

# Define directories
# ------------------
source_dir_base = os.environ['AUTOMATED_PERF_TESTS']
warpx_dir = source_dir_base + '/warpx/'
picsar_dir = source_dir_base + '/picsar/'
amrex_dir = source_dir_base + '/amrex/'
res_dir_base = os.environ['SCRATCH'] + '/performance_warpx/'
perf_logs_repo = source_dir_base + '/perf_logs/'

# Define dictionaries
# -------------------
compiler_name = {'intel': 'intel', 'gnu': 'gcc', 'pgi': 'pgi'}
module_Cname = {'cpu': 'haswell', 'knl': 'knl,quad,cache', 'gpu': ''}
csv_file = {'cori': 'cori_knl.csv', 'summit': 'summit.csv'}

cwd = os.getcwd() + '/'
bin_dir = cwd + 'Bin/'
bin_name = executable_name(compiler, architecture)
log_dir = cwd
perf_database_file = cwd + perf_database_file
day = time.strftime('%d')
month = time.strftime('%m')
year = time.strftime('%Y')

# Initialize tests
# ----------------
if args.mode == 'run':
    start_date = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
    # Set default options for compilation and execution
    config_command = get_config_command(compiler, architecture)
    # Create main result directory if it does not exist
    if not os.path.exists(res_dir_base):
        os.mkdir(res_dir_base)

# Recompile if requested
# ----------------------
if recompile == True:
    if pull_3_repos == True:
        git_repo = git.cmd.Git( picsar_dir )
        git_repo.pull()
        git_repo = git.cmd.Git( amrex_dir )
        git_repo.pull()
        git_repo = git.cmd.Git( warpx_dir )
        git_repo.pull()
    # Copy WarpX/GNUmakefile to current directory and recompile
    # ----------------------------------------------------------
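    # The copy-and-rebuild code is not part of this excerpt. A minimal,
    # hypothetical sketch of what that step could look like (the actual
    # make targets and flags are machine- and architecture-specific):
    #
    #     import shutil
    #     shutil.copy(warpx_dir + 'GNUmakefile', cwd)
    #     os.system('make realclean; make -j 8 > compile_output.txt')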
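# Note: each entry of test_list is a test_element instance. From the
# attributes used below, it carries at least input_file, n_cell,
# max_grid_size, blocking_factor, n_step, n_mpi_per_node and n_omp,
# plus a scale_n_cell(n_node) method that rescales the grid with the
# number of nodes.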
# Loop over the tests and run all simulations:
# One batch job submitted per n_node. Several
# tests run within the same batch job.
# --------------------------------------------
if args.mode == 'run':
    if os.path.exists( 'log_jobids_tmp.txt' ):
        os.remove( 'log_jobids_tmp.txt' )
    # Loop on n_node. One batch script per n_node
    for n_node in n_node_list:
        res_dir = res_dir_base
        res_dir += '_'.join([run_name, compiler, architecture, str(n_node)]) + '/'
        runtime_param_list = []
        # Deep copy as we change the attribute n_cell of
        # each instance of class test_element
        test_list_n_node = copy.deepcopy(test_list)
        job_time_min = time_min(len(test_list))
        batch_string = get_batch_string(test_list_n_node, job_time_min, module_Cname[architecture], n_node)
        # Loop on tests
        for count, current_run in enumerate(test_list_n_node):
            current_run.scale_n_cell(n_node)
            runtime_param_string  = ' amr.n_cell=' + ' '.join(str(i) for i in current_run.n_cell)
            runtime_param_string += ' amr.max_grid_size=' + str(current_run.max_grid_size)
            runtime_param_string += ' amr.blocking_factor=' + str(current_run.blocking_factor)
            runtime_param_string += ' max_step=' + str( current_run.n_step )
            # runtime_param_list.append( runtime_param_string )
            run_string = get_run_string(current_run, architecture, n_node, count, bin_name, runtime_param_string)
            batch_string += run_string
            batch_string += 'rm -rf plotfiles lab_frame_data diags\n'
        submit_job_command = get_submit_job_command()
        # Run the simulations.
        run_batch_nnode(test_list_n_node, res_dir, bin_name, config_command, batch_string, submit_job_command)
        os.chdir(cwd)
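# For illustration, a hypothetical test with n_cell=[128, 128, 128],
# max_grid_size=64, blocking_factor=32 and n_step=10 appends the string
#   ' amr.n_cell=128 128 128 amr.max_grid_size=64 amr.blocking_factor=32 max_step=10'
# to its run line; get_run_string presumably wraps this, together with the
# binary name and the MPI/OpenMP layout, into one launcher invocation inside
# the batch script.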
    # Submit batch job for analysis
    if os.path.exists( 'read_error.txt' ):
        os.remove( 'read_error.txt' )
    if os.path.exists( 'read_output.txt' ):
        os.remove( 'read_output.txt' )
    process_analysis(args.automated, cwd, compiler, architecture, args.n_node_list, start_date)
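    # process_analysis presumably submits a dependent analysis job that waits
    # for the job ids collected in log_jobids_tmp.txt before reading the
    # results; args.automated and start_date tag the resulting records.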
# Read the output file from each test and store timers in
# hdf5 file with pandas format
# -------------------------------------------------------
for n_node in n_node_list:
    print(n_node)
    if browse_output_files:
        res_dir = res_dir_base
        res_dir += '_'.join([run_name, compiler,
                             architecture, str(n_node)]) + '/'
        for count, current_run in enumerate(test_list):
            # Read performance data from the output file
            output_filename = 'out_' + '_'.join([current_run.input_file, str(n_node), str(current_run.n_mpi_per_node), str(current_run.n_omp), str(count)]) + '.txt'
            # Read data for all tests to put in an hdf5 database.
            # This is an hdf5 file containing ALL the simulation
            # parameters and results. Might be too large for a repo.
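            # The parsing-and-store code is not shown in this excerpt. A
            # minimal sketch of the step described above (read_timers is a
            # hypothetical parser; appending via pandas is one way to get
            # the "hdf5 file with pandas format"):
            #
            #     import pandas as pd
            #     timers = read_timers(output_filename)
            #     df = pd.DataFrame(timers, index=[start_date])
            #     df.to_hdf(perf_database_file, key='all_data',
            #               mode='a', format='table', append=True)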
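# Typical invocation (script name and flag spellings inferred from the
# args.* attributes used above; the argparse setup is outside this excerpt):
#
#     python run_automated.py --mode=run  --n_node_list='1,2,8' --automated
#     python run_automated.py --mode=read --n_node_list='1,2,8' --automated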