# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def itest_scf(self, lp, fworker, fwp, tmpdir, benchmark_input_scf):
    """Benchmark a SCF run twice: once through fireworks, once through an abipy flow."""
    # Fireworks side: wrap the input, insert it into the LaunchPad, and fire it.
    workflow = InputFWWorkflow(benchmark_input_scf, task_type=ScfFWTask)
    original_fw_id = workflow.fw.fw_id
    id_map = workflow.add_to_db(lpad=lp)
    stored_fw_id = id_map[original_fw_id]

    rapidfire(lp, fworker, m_dir=str(tmpdir))

    finished_fw = lp.get_fw_by_id(stored_fw_id)
    assert finished_fw.state == "COMPLETED"

    # Abipy side: build an equivalent flow for the same input.
    flow = abilab.Flow(fwp.workdir, manager=fwp.manager)
    work = flow.register_task(benchmark_input_scf, task_class=abilab.ScfTask)
    flow.allocate()
    flow.build_and_pickle_dump()

    # Run t0, and check status
    first_task = work[0]
    first_task.start_and_wait()
    first_task.check_status()
def itest_scf_not_converged(self, lp, fworker, fwp, tmpdir, benchmark_input_scf):
    """Like itest_scf, but nstep=4 forces non-convergence so the task must restart."""
    starting_dir = os.getcwd()
    benchmark_input_scf.set_vars(nstep=4)

    workflow = InputFWWorkflow(benchmark_input_scf, task_type=ScfFWTask)
    original_fw_id = workflow.fw.fw_id
    stored_fw_id = workflow.add_to_db(lpad=lp)[original_fw_id]

    # Keep firing until nothing runnable is left (restart fireworks included).
    while lp.run_exists(fworker):
        rapidfire(lp, fworker, m_dir=str(tmpdir))

    stored_wf = lp.get_wf_by_fw_id(stored_fw_id)
    assert stored_wf.state == "COMPLETED"
    num_restarts_fw = stored_wf.fws[-1].tasks[0].restart_info.num_restarts

    # Build the flow
    flow = abilab.Flow(fwp.workdir, manager=fwp.manager)
    work = flow.register_task(benchmark_input_scf, task_class=abilab.ScfTask)
    flow.allocate()
    flow.build_and_pickle_dump()

    # go to the main dir (to have the abipy configuration files)
    os.chdir(starting_dir)
def testing_for_errors():
    """
    only for dev. bugfixing

    Scratch driver: runs ``run_num`` launches of a Skopt Gaussian-Process
    optimizer and of a dummy (random) optimizer, then compares the two
    result sets numerically and graphically.
    """
    def _run_optimizer(opt_name):
        # Enqueue and launch run_num iterations of one optimizer, collect its
        # stats, then wipe the optimization DB for the next run.
        launchpad.add_wf(workflow_creator(input_dict, opt_name))
        rapidfire(launchpad, FWorker(), nlaunches=run_num, sleep_time=0)
        best = manageDB.get_optima('f', min_or_max='max')[0]
        average = manageDB.get_avg('f')
        total = manageDB.get_param('f')
        manageDB.clean()
        return best, average, total

    # Run run_num iterations using Skopt Gaussian Processes
    gp_best, gp_average, gp_total = _run_optimizer('skopt_gp')

    # Run run_num iterations using a dummy optimizer (returns random)
    launchpad.defuse_wf(launchpad.get_fw_ids()[-1])
    dummy_best, dummy_average, dummy_total = _run_optimizer('dummy')

    # Compare the two optimizations graphically
    print('GP average: ', gp_average, '\n GP best: ', gp_best)
    print('Dummy average: ', dummy_average, '\n Dummy best: ', dummy_best)
    iterations = list(range(run_num))
    plt.plot(iterations, gp_total, 'go', iterations, dummy_total, 'ro')
    plt.show()
if __name__ == "__main__":
    # set up the LaunchPad and reset it
    launchpad = LaunchPad()
    # launchpad.reset('', require_password=False)

    # Three fireworks forming a diamond: C waits for both A and B.
    parent_a = Firework([TaskA()])
    parent_b = Firework([TaskB()])
    child_c = Firework([TaskC()], parents=[parent_a, parent_b])

    # assemble Workflow from FireWorks and their connections by id
    workflow = Workflow([parent_a, parent_b, child_c])

    # store workflow and launch it locally
    launchpad.add_wf(workflow)
    rapidfire(launchpad)
from fw_tutorials.dynamic_wf.printjob_task import PrintJobTask
if __name__ == "__main__":
    # set up the LaunchPad and reset it
    launchpad = LaunchPad()
    # launchpad.reset('', require_password=False)

    # create the Workflow that passes job info
    first_fw = Firework(
        [ScriptTask.from_str('echo "This is the first FireWork"')],
        spec={"_pass_job_info": True},
        fw_id=1,
    )
    second_fw = Firework([PrintJobTask()], parents=[first_fw], fw_id=2)

    # store workflow and launch it locally
    launchpad.add_wf(Workflow([first_fw, second_fw]))
    rapidfire(launchpad)
def await_results(self) -> bool:
    """Launch every ready firework on ``self.client`` back-to-back.

    Returns:
        bool: always True once rapidfire returns; no per-job status is inspected.
    """
    # Local import keeps the fireworks dependency out of module import time.
    from fireworks.core.rocket_launcher import rapidfire as _rapidfire
    _rapidfire(self.client, strm_lvl="CRITICAL")
    return True
# Reset the launchpad and optimization db for this example
# NOTE(review): destructive — wipes both the FireWorks DB and the optimization
# store; only appropriate for a throwaway example database.
launchpad.reset(password=None, require_password=False)
mc.reset(hard=True)
# Configure the optimization db with MissionControl
# wf_creator builds each new workflow; acq/predictor select the acquisition
# function and surrogate model; get_z supplies extra features to the predictor.
mc.configure(
wf_creator=wf_creator,
dimensions=x_dim,
acq="maximin",
predictor="GaussianProcessRegressor",
get_z=get_z,
)
# Run 30 workflows + optimization
# The initial guess [100, 45.0, "dolphin fin"] seeds the first workflow;
# each completed workflow submits the next suggested point.
launchpad.add_wf(wf_creator([100, 45.0, "dolphin fin"]))
rapidfire(launchpad, nlaunches=30)
# Examine and plot the optimization
plt = mc.plot(print_pareto=True)
plt.show()
# Build the LaunchPad and FWorker from files when given, defaults otherwise.
launchpad = (LaunchPad.from_file(args.launchpad_file) if args.launchpad_file
             else LaunchPad(strm_lvl=args.loglvl))
fworker = FWorker.from_file(args.fworker_file) if args.fworker_file else FWorker()

# prime addr lookups
_log = get_fw_logger("rlaunch", stream_level="INFO")
_log.info("Hostname/IP lookup (this will take a few seconds)")
get_my_host()
get_my_ip()

# Dispatch on the requested launch mode.
if args.command == 'rapidfire':
    rapidfire(launchpad, fworker=fworker, m_dir=None, nlaunches=args.nlaunches,
              max_loops=args.max_loops, sleep_time=args.sleep, strm_lvl=args.loglvl,
              timeout=args.timeout, local_redirect=args.local_redirect)
elif args.command == 'multi':
    total_node_list = None
    if args.nodefile:
        # A nodefile naming an environment variable is dereferenced first.
        if args.nodefile in os.environ:
            args.nodefile = os.environ[args.nodefile]
        with open(args.nodefile, 'r') as node_file:
            total_node_list = [line.strip() for line in node_file]
    launch_multiprocess(launchpad, fworker, args.loglvl, args.nlaunches, args.num_jobs,
                        args.sleep, total_node_list, args.ppn, timeout=args.timeout,
                        exclude_current_node=args.exclude_current_node,
                        local_redirect=args.local_redirect)
else:
    # Single-shot launch of one (or the next ready) firework.
    launch_rocket(launchpad, fworker, args.fw_id, args.loglvl, pdb_on_exception=args.pdb)
sub_nproc (int): number of processors of the sub job
timeout (int): # of seconds after which to stop the rapidfire process
local_redirect (bool): redirect standard input and output to local file
"""
# NOTE(review): this is the tail of a function whose signature is outside this
# chunk — it reads port, node_list, sub_nproc, running_ids_dict, sleep,
# nlaunches, fworker, loglvl, timeout and local_redirect. Presumably a
# per-process worker of a multi-job launcher; confirm against the full file.
ds = DataServer(address=('127.0.0.1', port), authkey=DS_PASSWORD)
ds.connect()
launchpad = ds.LaunchPad()
# Publish shared multiprocessing state on the FWData singleton for this process.
FWData().DATASERVER = ds
FWData().MULTIPROCESSING = True
FWData().NODE_LIST = node_list
FWData().SUB_NPROCS = sub_nproc
FWData().Running_IDs = running_ids_dict
# Fall back to the configured default sleep when none was supplied.
sleep_time = sleep if sleep else RAPIDFIRE_SLEEP_SECS
l_dir = launchpad.get_logdir() if launchpad else None
l_logger = get_fw_logger('rocket.launcher', l_dir=l_dir, stream_level=loglvl)
rapidfire(launchpad, fworker=fworker, m_dir=None, nlaunches=nlaunches,
max_loops=-1, sleep_time=sleep, strm_lvl=loglvl, timeout=timeout,
local_redirect=local_redirect)
# nlaunches == 0 means "run until the queue drains": keep resubmitting while
# any sibling sub job still has a live launch id.
while nlaunches == 0:
time.sleep(1.5) # wait for LaunchPad to be initialized
launch_ids = FWData().Running_IDs.values()
live_ids = list(set(launch_ids) - {None})
if len(live_ids) > 0:
# Some other sub jobs are still running
log_multi(l_logger, 'Sleeping for {} secs before resubmit sub job'.format(sleep_time))
time.sleep(sleep_time)
log_multi(l_logger, 'Resubmit sub job')
rapidfire(launchpad, fworker=fworker, m_dir=None, nlaunches=nlaunches,
max_loops=-1, sleep_time=sleep, strm_lvl=loglvl, timeout=timeout,
local_redirect=local_redirect)
else:
break