        else:
            # Allow at most num_active_gens active generator instances
            if gen_count >= gen_specs['user'].get('num_active_gens', gen_count+1):
                break

            # No gen instances in batch mode if workers still working
            still_working = ~H['returned']
            if alloc_specs['user'].get('batch_mode') and np.any(still_working):
                break

            # Give gen work
            gen_count += 1

            if 'in' in gen_specs and len(gen_specs['in']):
                gen_work(Work, i, gen_specs['in'], range(len(H)), persis_info[i])
            else:
                gen_work(Work, i, [], [], persis_info[i])

    return Work, persis_info
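
# Note: these fragments assume libEnsemble's allocation-function calling convention of
# this era: alloc_f(W, H, sim_specs, gen_specs, alloc_specs, persis_info) returning
# (Work, persis_info). The sketch below is a minimal, illustrative skeleton built from
# the same support helpers used throughout (avail_worker_ids, sim_work, gen_work) and
# the EVAL_GEN_TAG constant referenced later in these snippets; it is a simplified
# sketch under those assumptions, not one of the allocation functions excerpted here.
import numpy as np
from libensemble.alloc_funcs.support import avail_worker_ids, sim_work, gen_work
from libensemble.message_numbers import EVAL_GEN_TAG


def sketch_alloc(W, H, sim_specs, gen_specs, alloc_specs, persis_info):
    """Give each idle worker one ungiven point to simulate; otherwise start one gen."""
    Work = {}
    gen_count = np.sum(W['active'] == EVAL_GEN_TAG)   # workers currently running a gen
    task_avail = ~H['given']                          # local view; the manager updates H itself

    for i in avail_worker_ids(W):
        if np.any(task_avail):
            sim_id = np.nonzero(task_avail)[0][0]     # oldest point not yet given out
            sim_work(Work, i, sim_specs['in'], np.atleast_1d(sim_id), persis_info[i])
            task_avail[sim_id] = False
        elif gen_count == 0:
            gen_count += 1
            gen_work(Work, i, gen_specs['in'], range(len(H)), persis_info[i])

    return Work, persis_info
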
    Work = {}
    if 'next_to_give' not in persis_info:
        persis_info['next_to_give'] = 0

    # If any persistent worker's calculated values have returned, give them back.
    for i in avail_worker_ids(W, persistent=True):
        if persis_info.get('sample_done') or sum(H['returned']) >= gen_specs['user']['initial_sample_size']:
            # Don't give results back until the initial sample is complete
            persis_info['sample_done'] = True
            returned_but_not_given = np.logical_and(H['returned'], ~H['given_back'])
            if np.any(returned_but_not_given):
                inds_to_give = np.where(returned_but_not_given)[0]

                gen_work(Work, i, [n[0] for n in sim_specs['out']] + [n[0] for n in gen_specs['out']],
                         np.atleast_1d(inds_to_give), persis_info[i], persistent=True)

                H['given_back'][inds_to_give] = True

    for i in avail_worker_ids(W, persistent=False):
        if persis_info['next_to_give'] < len(H):
            # perform sim evaluations (if they exist in History).
            sim_work(Work, i, sim_specs['in'], np.atleast_1d(persis_info['next_to_give']), persis_info[i])
            persis_info['next_to_give'] += 1

        elif persis_info.get('gen_started') is None:
            # Finally, call a persistent generator as there is nothing else to do.
            persis_info['gen_started'] = True
            gen_work(Work, i, gen_specs['in'], [], persis_info[i],
                     persistent=True)
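
# For orientation: roughly what the sim_work/gen_work helpers used above construct.
# Each call adds one entry to Work, keyed by worker ID, telling the manager which
# History rows and fields to ship and whether the worker stays persistent. The field
# names below follow older libEnsemble releases and may differ between versions; this
# is a sketch for reading the snippets, not the library's definitive implementation.
from libensemble.message_numbers import EVAL_SIM_TAG


def sim_work_sketch(Work, i, H_fields, H_rows, persis_info, **libE_info):
    libE_info['H_rows'] = H_rows
    Work[i] = {'H_fields': H_fields,        # History fields to send to worker i
               'persis_info': persis_info,  # worker-specific state (e.g. rand_stream)
               'tag': EVAL_SIM_TAG,         # gen_work uses EVAL_GEN_TAG instead
               'libE_info': libE_info}      # carries H_rows and persistent=True/False
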
    task_avail = ~H['given']
    for i in avail_worker_ids(W, persistent=False):
        if np.any(task_avail):
            # perform sim evaluations (if any point hasn't been given).
            sim_subbatches = H['subbatch'][task_avail]
            sim_inds = (sim_subbatches == np.min(sim_subbatches))
            sim_ids_to_send = np.nonzero(task_avail)[0][sim_inds]
            sim_work(Work, i, sim_specs['in'], np.atleast_1d(sim_ids_to_send), [])
            task_avail[sim_ids_to_send] = False

        elif gen_count == 0:
            # Finally, generate points since there is nothing else to do.
            gen_count += 1
            gen_work(Work, i, gen_specs['in'], [], persis_info[i],
                     persistent=True)

    return Work, persis_info
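
# Small self-contained illustration (made-up data) of the subbatch selection above:
# among the not-yet-given points, only those in the lowest-numbered subbatch are sent
# out, so each subbatch finishes before the next one starts.
import numpy as np

H_demo = np.zeros(6, dtype=[('given', bool), ('subbatch', int)])
H_demo['subbatch'] = [0, 0, 1, 1, 2, 2]
H_demo['given'][:2] = True                                    # subbatch 0 already handed out

task_avail_demo = ~H_demo['given']                            # [F, F, T, T, T, T]
sim_subbatches_demo = H_demo['subbatch'][task_avail_demo]     # [1, 1, 2, 2]
sim_inds_demo = (sim_subbatches_demo == np.min(sim_subbatches_demo))
sim_ids_demo = np.nonzero(task_avail_demo)[0][sim_inds_demo]  # array([2, 3]): subbatch 1 only
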
                    break

                # Don't call APOSMM if there are runs going but none need advancing
                if len(persis_info[lw]['run_order']):
                    runs_needing_to_advance = np.zeros(len(persis_info[lw]['run_order']), dtype=bool)
                    for run, inds in enumerate(persis_info[lw]['run_order'].values()):
                        runs_needing_to_advance[run] = H['returned'][inds[-1]]

                    if not np.any(runs_needing_to_advance):
                        break

            persis_info['last_size'] = len(H)

            # Give gen work
            persis_info['total_gen_calls'] += 1
            gen_count += 1
            gen_work(Work, i, gen_specs['in'], range(len(H)), persis_info[lw])

            persis_info['last_worker'] = i

    return Work, persis_info
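
# Tiny illustration (made-up data) of the run_order check above: a local-opt run
# needs advancing only once its most recently generated point has returned.
import numpy as np

run_order_demo = {0: [0, 3], 1: [1, 4], 2: [2]}               # run id -> History rows, in order
returned_demo = np.array([True, True, True, False, True])     # stands in for H['returned']

needs_advance_demo = np.zeros(len(run_order_demo), dtype=bool)
for run, inds in enumerate(run_order_demo.values()):
    needs_advance_demo[run] = returned_demo[inds[-1]]
# -> [False, True, True]: run 0's last point (row 3) has not returned yet.
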
            # Perform sim evaluations from existing runs
            q_inds_logical = np.logical_and(task_avail, H['local_pt'])
            if not np.any(q_inds_logical):
                q_inds_logical = task_avail
            sim_ids_to_send = np.nonzero(q_inds_logical)[0][0]  # oldest point
            sim_work(Work, i, sim_specs['in'], np.atleast_1d(sim_ids_to_send), [])
            task_avail[sim_ids_to_send] = False

        elif (gen_count == 0
              and not np.any(np.logical_and(W['active'] == EVAL_GEN_TAG,
                                            W['persis_state'] == 0))):
            # Finally, generate points since there is nothing else to do
            gen_count += 1
            gen_work(Work, i, gen_specs['in'], [], persis_info[i])

    return Work, persis_info
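
# Illustration (made-up data) of the prioritization above: points belonging to local
# optimization runs (H['local_pt']) are simulated before remaining sample points,
# falling back to any available point when no local-opt point is waiting.
import numpy as np

task_avail_demo = np.array([True, True, True, False])
local_pt_demo = np.array([False, True, False, True])          # stands in for H['local_pt']

q_inds_demo = np.logical_and(task_avail_demo, local_pt_demo)
if not np.any(q_inds_demo):
    q_inds_demo = task_avail_demo
sim_id_demo = np.nonzero(q_inds_demo)[0][0]                   # -> 1, the oldest local-opt point
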
        # If > 1 persistent generator, assign the correct work to it
        inds_generated_by_i = (H['gen_worker'] == i)
        if np.all(H['returned'][inds_generated_by_i]):

            # Has sim_f completed everything from this persistent worker?
            # Then give back everything in the last batch
            batch_ids = H['batch'][inds_generated_by_i]
            last_batch_inds = (batch_ids == np.max(batch_ids))
            inds_to_send_back = np.where(np.logical_and(inds_generated_by_i,
                                                        last_batch_inds))[0]
            if H['batch'][-1] > 0:
                n = gen_specs['user']['subbatch_size']*gen_specs['user']['num_subbatches']
                k = H['batch'][-1]
                H['weight'][(n*(k-1)):(n*k)] = H['weight'][(n*k):(n*(k+1))]

            gen_work(Work, i, ['like'], np.atleast_1d(inds_to_send_back),
                     persis_info[i], persistent=True)
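
# Worked example (made-up sizes) of the weight shift above: with subbatch_size=2 and
# num_subbatches=3, each batch occupies n = 6 rows of H, so for batch k the slice
# n*(k-1):n*k is overwritten with the slice n*k:n*(k+1) (with batches laid out
# contiguously, these are the rows of batches k-1 and k).
n_demo = 2 * 3                                                # subbatch_size * num_subbatches
k_demo = 2                                                    # current batch id, H['batch'][-1]
prev_rows_demo = slice(n_demo * (k_demo - 1), n_demo * k_demo)   # rows 6:12
curr_rows_demo = slice(n_demo * k_demo, n_demo * (k_demo + 1))   # rows 12:18
# H['weight'][prev_rows_demo] = H['weight'][curr_rows_demo] is the assignment performed above.
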
            # Don't call APOSMM if there are runs going but none need advancing
            if len(persis_info[lw]['run_order']):
                runs_needing_to_advance = np.zeros(len(persis_info[lw]['run_order']), dtype=bool)
                for run, inds in enumerate(persis_info[lw]['run_order'].values()):
                    runs_needing_to_advance[run] = np.all(H['returned'][inds])

                if not np.any(runs_needing_to_advance):
                    break

            persis_info['last_size'] = len(H)

            # Give gen work
            persis_info['total_gen_calls'] += 1
            gen_count += 1
            i, idle_workers = idle_workers[0], idle_workers[1:]
            gen_work(Work, i, gen_specs['in'], range(len(H)), persis_info[lw])

            persis_info['last_worker'] = i

        elif gen_count >= gen_specs['user'].get('num_active_gens', gen_count+1):
            idle_workers = []

    return Work, persis_info
        if 'done' in persis_info[i]:
            H['num_active_runs'][persis_info[i]['run_order']] -= 1
        if 'x_opt' in persis_info[i]:
            opt_ind = np.all(H['x'] == persis_info[i]['x_opt'], axis=1)
            assert sum(opt_ind) == 1, "There must be just one optimum"
            H['local_min'][opt_ind] = True
            persis_info[i] = {'rand_stream': persis_info[i]['rand_stream']}
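
# Illustration (made-up data) of the optimum lookup above: np.all(..., axis=1) marks
# the single History row whose 'x' matches the reported x_opt exactly.
import numpy as np

x_demo = np.array([[0.0, 1.0],
                   [0.5, 0.5],
                   [1.0, 0.0]])                               # stands in for H['x']
x_opt_demo = np.array([0.5, 0.5])

opt_ind_demo = np.all(x_demo == x_opt_demo, axis=1)           # [False, True, False]
assert sum(opt_ind_demo) == 1                                 # exactly one matching row
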
    # If i is idle, but in persistent mode, and its calculated values have
    # returned, give them back to i. Otherwise, give nothing to i
    for i in avail_worker_ids(W, persistent=True):
        gen_inds = (H['gen_worker'] == i)
        if np.all(H['returned'][gen_inds]):
            last_time_pos = np.argmax(H['given_time'][gen_inds])
            last_ind = np.nonzero(gen_inds)[0][last_time_pos]
            gen_work(Work, i,
                     sim_specs['in'] + [n[0] for n in sim_specs['out']],
                     np.atleast_1d(last_ind), persis_info[i], persistent=True)

            persis_info[i]['run_order'].append(last_ind)
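
# Illustration (made-up data) of the lookup above: the most recently given point
# generated by worker i is found via argmax over H['given_time'] restricted to that
# worker's rows, then mapped back to a global History index.
import numpy as np

gen_worker_demo = np.array([2, 3, 2, 2])                      # stands in for H['gen_worker']
given_time_demo = np.array([10.0, 11.0, 12.5, 12.0])          # stands in for H['given_time']

gen_inds_demo = (gen_worker_demo == 2)                        # rows generated by worker 2
last_pos_demo = np.argmax(given_time_demo[gen_inds_demo])     # position among those rows
last_ind_demo = np.nonzero(gen_inds_demo)[0][last_pos_demo]   # -> 2, the global row index
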
    for i in avail_worker_ids(W, persistent=False):
        # Find candidates to start local opt runs if a sample has been evaluated
        if np.any(np.logical_and(~H['local_pt'], H['returned'])):
            n, _, _, _, r_k, mu, nu = initialize_APOSMM(H, gen_specs)
            update_history_dist(H, n, gen_specs['user'], c_flag=False)
            starting_inds = decide_where_to_start_localopt(H, r_k, mu, nu)
        else:
            starting_inds = []

        # Start persistent generator for local opt run unless it would use all workers
        if starting_inds and gen_count + 1 < len(W):
            # Start at the best possible starting point