# Extraction artifact (vendor banner), not part of the program:
# "Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately."
# return FWAction(detours=new_fw)
else:
# Dynamically load the measurement class named in this task's spec
# ('measurement') and instantiate it with the calibration objects and
# any extra constructor kwargs from 'other_params'.
measure = load_class("mpinterfaces.measurement",
self['measurement'])(cal_objs, **self.get(
"other_params", {}))
# Optional override of the job launch command from the task spec.
job_cmd = None
if self.get("job_cmd", None) is not None:
job_cmd = self.get("job_cmd")
measure.setup()
measure.run(job_cmd=job_cmd)
# Serialize each calibration object, attaching the queue parameters so
# downstream fireworks can re-submit with the same queue settings.
cal_list = []
for cal in measure.cal_objs:
d = cal.as_dict()
d.update({'que_params': self.get('que_params')})
cal_list.append(d)
# Push the serialized calibration objects into the child fireworks' spec.
return FWAction(update_spec={'cal_objs': cal_list})
# Raman displacement run: VASP writer + custodian execution, using a
# positive fw_id offset so parent/child links can be wired up below.
fws.append(Firework([VaspWriterTask(), get_custodian_task(spec)], spec, name=get_slug(f + '--' + spec['task_type']), fw_id=100+raman_count))
priority = fw_spec['_priority']
# DB-insertion step runs even if the parent fizzled, on the DB queue.
spec = {'task_type': 'VASP db insertion', '_priority': priority, "_pass_job_info": True, '_allow_fizzled_parents': True, '_queueadapter': QA_DB}
fws.append(Firework([VaspToDBTask()], spec, name=get_slug(f + '--' + spec['task_type']), fw_id=100+raman_count+1))
# Chain: run firework -> its DB-insertion firework; -1 marks a leaf.
connections[100+raman_count] = [100+raman_count+1]
connections[100+raman_count+1] = -1
raman_count += 2
# Eigen-data computed earlier in this task (not visible in this fragment)
# is forwarded to children both via stored_data and a '_push' mod_spec.
passed_vars = [eigvals, eigvecs, norms]
spec= {'task_type': 'Setup Raman Verification Task', '_priority': priority, "_pass_job_info": True, '_allow_fizzled_parents': False, '_queueadapter': QA_CONTROL}
# NOTE(review): 'passed_vars' in the verification spec is reset to [] here;
# presumably the real values arrive via the '_push' below — confirm.
spec['passed_vars'] = []
fws.append(Firework([SetupRamanVerificationTask()], spec, name=get_slug(f + '--' + spec['task_type']), fw_id=-1))
wf.append(Workflow(fws, connections))
# Add the new workflow dynamically and hand eigendata to children.
return FWAction(additions=wf, stored_data={'passed_vars': passed_vars}, mod_spec=[{'_push': {'passed_vars': passed_vars}}])
# Collect subprocess results into the stored-data payload, honoring the
# task's store_stdout / store_stderr flags.
if self.store_stdout:
output['stdout'] = stdout
if self.store_stderr:
output['stderr'] = stderr
# Last return code is the headline result; all codes are kept for debugging.
output['returncode'] = returncodes[-1]
output['all_returncodes'] = returncodes
# Any nonzero code (sum != 0) triggers the configured failure policy:
# defuse children silently, or fizzle this firework by raising.
if self.defuse_bad_rc and sum(returncodes) != 0:
return FWAction(stored_data=output, defuse_children=True)
elif self.fizzle_bad_rc and sum(returncodes) != 0:
raise RuntimeError('ScriptTask fizzled! Return code: {}'.format(returncodes))
return FWAction(stored_data=output)
# Route the callable's outputs into the spec. With multiple output labels,
# each label must map to its own list of items to '_push'.
if len(olabels) > 1:
# NOTE(review): assert is stripped under -O; kept as-is (doc-only pass).
assert len(olabels) == len(outlist)
for olab, out in zip(olabels, outlist):
for item in out:
mod_spec.append({'_push': {olab: item}})
else:
# Single label: push every output item under that one label.
for out in outlist:
mod_spec.append({'_push': {olabels[0]: out}})
return FWAction(mod_spec=mod_spec)
else:
# Non-push mode: write each labeled output directly into the spec.
output_dict = {}
for olab, out in zip(olabels, outlist):
output_dict[olab] = out
return FWAction(update_spec=output_dict)
else:
# No outputs requested: empty action, nothing stored or modified.
return FWAction()
# DB insertions for deformation runs get boosted priority.
priority = fw_spec['_priority']*3
spec = {'task_type': 'VASP db insertion',
'_priority': priority,
'_allow_fizzled_parents': True,
'_queueadapter': QA_DB,
'elastic_constant':"deformed_structure",
'clean_task_doc':True,
# Record which deformation produced this run and the originating task,
# so elastic analysis can reassemble the full strain/stress set.
'deformation_matrix':d_struct_set.deformations[i].tolist(),
'original_task_id':fw_spec["task_id"]}
fws.append(Firework([VaspToDBTask(), AddElasticDataToDBTask()], spec,
name=get_slug(f + '--' + spec['task_type']),
fw_id=-998+i*10))
# Link each deformation run (-999+i*10, created earlier in this function)
# to its DB-insertion firework.
connections[-999+i*10] = [-998+i*10]
wf.append(Workflow(fws, connections))
# Attach the generated workflow(s) dynamically to the launchpad.
return FWAction(additions=wf)
# Run from the calculation folder; custodian operates on the CWD.
os.chdir(os.path.join(cwd, folder))
fw_env = fw_spec.get("_fw_env", {})
custodian_params = self.get("custodian_params", {})
# Get the scratch directory
if fw_env.get('scratch_root'):
# Expand env vars so worker-specific paths like $SCRATCH resolve.
custodian_params['scratch_dir'] = os.path.expandvars(
fw_env['scratch_root'])
# NOTE(review): hard-coded mpirun ranks (16) and VASP binary path — these
# are machine-specific; confirm they match the target cluster.
job = VaspJob(["mpirun", "-np", "16",
"/opt/vasp/5.2.12/openmpi_ib/bin/vasp"],
auto_npar=False, copy_magmom=True, suffix=".relax1")
# Custodian wraps the run with error handling and gzips outputs.
c = Custodian(jobs=[job], gzipped_output=True, **custodian_params)
output = c.run()
return FWAction(stored_data=output)
# Read the two optimization variables from the incoming spec.
x1 = fw_spec['input']['x1']
x2 = fw_spec['input']['x2']
# Run black box objective algorithm Branin-Hoo function
# f(x1, x2) = a(x2 - b*x1^2 + c*x1 - r)^2 + s(1 - t)cos(x1) + s
# with the standard Branin constants below.
pi = 3.14159
a = 1
b = 5.1/(4*(pi**2))
c = 5/pi
r = 6
s = 10
t = 1/(8*pi)
f = a*((x2 - b*(x1**2)+ c*x1 - r)**2) + s*(1-t)*math.cos(x1) + s
# Expose the objective value under spec['output']['f'] for the optimizer.
f_write = {'output': {'f':f}}
# Modify changes in spec
return FWAction(update_spec=f_write)
#f = self.fireworks.find_one_and_update({'fw_id': fw_id},
# {'$set':
# {'state': 'RUNNING',
# 'updated_on': datetime.datetime.utcnow()
# }
# })
#if f:
# Recompute the workflow state now that this firework changed.
self._refresh_wf(fw_id)
# could cause file size problems doing this before checking for FWAction
if 'checkpoint' in offline_data:
# Persist the checkpoint recorded by the offline run.
m_fw.touch_history(checkpoint=offline_data['checkpoint'])
self._update_fw(m_fw, touch_history=False)
if 'fwaction' in offline_data:
# The offline run completed: replay its FWAction and final state
# through the normal check-in path.
fwaction = FWAction.from_dict(offline_data['fwaction'])
state = offline_data['state']
# start here
m_fw = Firework.from_dict(
self.checkin_fw(m_fw.fw_id, fwaction, state, m_fw.launch_idx))
# Backdate the matching state-history entry to the offline
# completion time rather than the recovery time.
for s in m_fw.state_history:
if s['state'] == offline_data['state']:
s['created_on'] = reconstitute_dates(offline_data['completed_on'])
#self.launches.find_one_and_update({'launch_id': m_fw.launch_id},
# {'$set':
# {'state_history': m_launch.state_history}
# })
self._update_fw(m_fw, state=offline_data['state'], touch_history=False)
#self.offline_runs.update_one({"launch_id": launch_id},
# {"$set": {"completed": True}})
# update the updated_on
# Recovery failed / run abandoned: mark FIZZLED and write the updated
# document back to the offline JSON file.
d['state'] = 'FIZZLED'
d['completed_on'] = datetime.utcnow().isoformat()
with zopen(fpath, "wt") as f_out:
f_out.write(json.dumps(d, ensure_ascii=False))
return True
# read in a FWAction from a file, in case the task is not Python and cannot return
# it explicitly
if os.path.exists('FWAction.json'):
m_action = FWAction.from_file('FWAction.json')
elif os.path.exists('FWAction.yaml'):
m_action = FWAction.from_file('FWAction.yaml')
# Fall back to an empty (no-op) action when the task returned nothing.
if not m_action:
m_action = FWAction()
# update the global stored data with the data to store and update from this
# particular Task
all_stored_data.update(m_action.stored_data)
all_update_spec.update(m_action.update_spec)
all_mod_spec.extend(m_action.mod_spec)
# update spec for next task as well
my_spec.update(m_action.update_spec)
for mod in m_action.mod_spec:
apply_mod(mod, my_spec)
if lp:
l_logger.log(logging.INFO, "Task completed: %s " % t.fw_name)
# A task may short-circuit the remaining tasks in this firework.
if m_action.skip_remaining_tasks:
break