How to use the process_analysis function from the summit module

To help you get started, we’ve selected a few summit examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

Source: ECP-WarpX / WarpX — Tools/performance_tests/run_automated.py (view on GitHub)
# NOTE(review): fragment of a larger function from WarpX
# Tools/performance_tests/run_automated.py — the enclosing def and loop
# headers lie above this excerpt; the first line's indentation appears to
# have been stripped by extraction.
runtime_param_string += ' max_step=' + str( current_run.n_step )
            # runtime_param_list.append( runtime_param_string )
            # Accumulate the batch-script line for this run, followed by a
            # cleanup command that removes simulation output between runs.
            run_string = get_run_string(current_run, architecture, n_node, count, bin_name, runtime_param_string)
            batch_string += run_string
            batch_string += 'rm -rf plotfiles lab_frame_data diags\n'

        # Submit one batch job per node count with the accumulated commands.
        submit_job_command = get_submit_job_command()
        # Run the simulations.
        run_batch_nnode(test_list_n_node, res_dir, bin_name, config_command, batch_string, submit_job_command)
    os.chdir(cwd)
    # submit batch for analysis
    # Remove stale marker/log files before launching the analysis step
    # (presumably written by a previous analysis job — TODO confirm).
    if os.path.exists( 'read_error.txt' ):
        os.remove( 'read_error.txt' )
    if os.path.exists( 'read_output.txt' ):
        os.remove( 'read_output.txt' )
    process_analysis(args.automated, cwd, compiler, architecture, args.n_node_list, start_date)

# Post-processing: read the output file from each test and store timers in
# an hdf5 file with pandas format.
# NOTE(review): the inner loop body continues beyond this excerpt.
# -------------------------------------------------------
for n_node in n_node_list:
    print(n_node)
    if browse_output_files:
        # Results directory name encodes run name, compiler, architecture
        # and node count.
        res_dir = res_dir_base
        res_dir += '_'.join([run_name, compiler,\
                             architecture, str(n_node)]) + '/'
        for count, current_run in enumerate(test_list):
            # Read performance data from the output file.
            # The file name encodes input file, node count, MPI ranks per
            # node, OpenMP threads per rank, and the run's index.
            output_filename = 'out_' + '_'.join([current_run.input_file, str(n_node), str(current_run.n_mpi_per_node), str(current_run.n_omp), str(count)]) + '.txt'
            # Read data for all tests to put into an hdf5 database.
            # This is an hdf5 file containing ALL the simulation
            # parameters and results. Might be too large for a repo