from multiprocessing import Pool, cpu_count

def test():
    print('cpu_count() = %d\n' % cpu_count())
    #
    # Create pool
    #
    PROCESSES = 4
    print('Creating pool with %d processes\n' % PROCESSES)
    pool = Pool(PROCESSES)
    #
    # Tests: each task is a (function, args) tuple; mul and plus are
    # assumed to be picklable module-level helpers
    #
    TASKS = [(mul, (i, 7)) for i in range(10)] + \
            [(plus, (i, 8)) for i in range(10)]
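The snippet stops before the tasks are dispatched. A minimal standalone sketch of how (function, args) task tuples like these are usually consumed, with mul, plus, and calculate written out here as illustrative stand-ins:

import operator
from multiprocessing import Pool, cpu_count

def mul(a, b):
    return operator.mul(a, b)

def plus(a, b):
    return operator.add(a, b)

def calculate(func, args):
    # unpack one task tuple and run it in the worker process
    return func(*args)

if __name__ == '__main__':
    TASKS = [(mul, (i, 7)) for i in range(10)] + \
            [(plus, (i, 8)) for i in range(10)]
    with Pool(cpu_count()) as pool:
        results = pool.starmap(calculate, TASKS)
    print(results)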
def run_multiple_backtest(self, initial_portf, start_time,
                          end_time, policies,
                          loglevel=logging.WARNING, parallel=True):
    """Backtest multiple policies."""

    def _run_backtest(policy):
        return self.run_backtest(initial_portf, start_time, end_time,
                                 policy, loglevel=loglevel)

    # never spawn more workers than there are policies to run; the
    # dill-based `multiprocess` fork is what lets the closure
    # _run_backtest be pickled to the workers
    num_workers = min(multiprocess.cpu_count(), len(policies))
    if parallel:
        workers = multiprocess.Pool(num_workers)
        results = workers.map(_run_backtest, policies)
        workers.close()
        return results
    else:
        return list(map(_run_backtest, policies))
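Note that the pool above is close()d but never joined. A minimal sketch of the same branch in context-manager form (supported by both multiprocess and the stdlib), which tears the workers down deterministically on exit:

if parallel:
    with multiprocess.Pool(num_workers) as workers:
        results = workers.map(_run_backtest, policies)
    return results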
`fn` should take a `music21.stream.Score` and return a `[(FileName, [String]|None)]` where
each element represents an extracted univariate sequence of discrete tokens from the
score.
* `music21` is used to fetch the Bach chorales using the BWV numbering system
* Each chorale is processed using `fn`
* The output is written to `${SCRATCH_DIR}/${FileName}.{txt,utf}`
* `utf_to_txt.json` is a dictionary mapping UTF8 symbols to plain text
Existing files are overwritten because the vocabulary can change between runs.
"""
# used for encoding/decoding tokens to UTF8 symbols
plain_text_data = []
vocabulary = set()  # remember all unique (note, duration) tuples seen

# `mp` is the dill-based `multiprocess` fork, so the lambda can be pickled
p = mp.Pool(processes=mp.cpu_count())
processed_scores = p.map(
    lambda score: list(fn(score)),
    corpus.chorales.Iterator(numberingSystem='bwv', returnType='stream'))

for processed_score in processed_scores:
    for fname, pairs_text in processed_score:
        if pairs_text:
            plain_text_data.append((fname, pairs_text))
            vocabulary.update(set(pairs_text))

# construct vocab <=> UTF8 mapping
pairs_to_utf = dict(map(lambda x: (x[1], chr(x[0])), enumerate(vocabulary)))
utf_to_txt = {utf: txt for txt, utf in pairs_to_utf.items()}
utf_to_txt[START_DELIM] = 'START'
utf_to_txt[END_DELIM] = 'END'

# save outputs
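The excerpt ends at the save step. A hedged sketch of what writing the outputs described in the docstring might look like, assuming SCRATCH_DIR is defined; the exact serialization calls are assumptions, not the original code:

import json
import os

for fname, pairs_text in plain_text_data:
    # plain-text sequence, one token per line
    with open(os.path.join(SCRATCH_DIR, fname + '.txt'), 'w') as f:
        f.write('\n'.join(map(str, pairs_text)))
    # UTF8 sequence: each token replaced by its single-symbol encoding
    with open(os.path.join(SCRATCH_DIR, fname + '.utf'), 'w') as f:
        f.write(''.join(pairs_to_utf[pair] for pair in pairs_text))

with open(os.path.join(SCRATCH_DIR, 'utf_to_txt.json'), 'w') as f:
    json.dump(utf_to_txt, f)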
Parameters
----------
policies : list of StochProblemMS_Policy
num_runs : int
seed : int
outfile : string (name of output file)
ref_pol : string (name of reference policy)
"""
assert len(policies) > 0

from multiprocess import Pool, cpu_count, Process

if not num_procs:
    num_procs = cpu_count()
if not outfile:
    outfile = 'evaluation.csv'

# 'w' with newline='' is the Python 3 idiom for csv writers (was 'wb' in Python 2)
csvfile = open(outfile, 'w', newline='')
writer = csv.writer(csvfile)

np.random.seed(seed)
print('Evaluating policies with %d processes' % num_procs)

# Eval
self.policies = policies
self.samples = [self.sample_W(self.T-1) for j in range(num_sims)]
if num_procs > 1:
    pool = Pool(num_procs)
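The excerpt stops right after the pool is created. A standalone hedged sketch of the overall fan-out-then-write pattern it appears to be building toward; the evaluate helper, the policy names, and the CSV row layout are all hypothetical stand-ins:

from multiprocess import Pool, cpu_count
import csv

def evaluate(policy_name):
    # hypothetical scoring routine standing in for the real evaluation
    return policy_name, len(policy_name)

if __name__ == '__main__':
    policies = ['greedy', 'ce', 'cec']  # illustrative names only
    with Pool(min(cpu_count(), len(policies))) as pool:
        rows = pool.map(evaluate, policies)
    with open('evaluation.csv', 'w', newline='') as csvfile:
        csv.writer(csvfile).writerows(rows)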
def __init__(self,
             model,
             parameters,
             features=None,
             uncertainty_calculations=None,
             save_figures=True,
             output_dir_figures="figures/",
             figureformat=".png",
             save_data=True,
             output_dir_data="data/",
             verbose_level="info",
             verbose_filename=None,
             create_PCE_custom=None,
             CPUs=mp.cpu_count(),
             suppress_model_graphics=True,
             M=3,
             nr_pc_samples=None,
             nr_mc_samples=10**3,
             nr_pc_mc_samples=10**5,
             seed=None,
             allow_incomplete=False):
    if uncertainty_calculations is None:
        self._uncertainty_calculations = UncertaintyCalculations(
            model=model,
            parameters=parameters,
            features=features,
            CPUs=CPUs,
            suppress_model_graphics=suppress_model_graphics,
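The excerpt cuts off mid-call, but one detail of the signature above is worth knowing: CPUs=mp.cpu_count() is evaluated once, when the def statement runs. A small standalone sketch of the general Python idiom for resolving such a default at call time instead (the class here is illustrative, not part of the library):

import multiprocessing as mp

class Example:
    # a default like CPUs=mp.cpu_count() is frozen at definition time;
    # the None idiom below resolves it on each call instead
    def __init__(self, CPUs=None):
        if CPUs is None:
            CPUs = mp.cpu_count()
        self.CPUs = CPUs

print(Example().CPUs)  # core count of the current machine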
"""
def _solve_problem(problem):
    """Solve a problem and then return the optimal value, status,
    primal values, and dual values.
    """
    opt_value = problem.solve(solver=solver,
                              ignore_dcp=ignore_dcp,
                              warm_start=warm_start,
                              verbose=verbose,
                              parallel=False, **kwargs)
    status = problem.status
    primal_values = [var.value for var in problem.variables()]
    dual_values = [constr.dual_value for constr in problem.constraints]
    return SolveResult(opt_value, status, primal_values, dual_values)

# solve the separable subproblems concurrently, one worker per core
pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
solve_results = pool.map(_solve_problem, self._separable_problems)
pool.close()
pool.join()
statuses = {solve_result.status for solve_result in solve_results}
# Check if at least one subproblem is infeasible or inaccurate
for status in s.INF_OR_UNB:
    if status in statuses:
        self._handle_no_solution(status)
        break
else:
    for subproblem, solve_result in zip(self._separable_problems,
                                        solve_results):
        for var, primal_value in zip(subproblem.variables(),
                                     solve_result.primal_values):
            var.save_value(primal_value)
        # the dual loop mirrors the primal loop above
        for constr, dual_value in zip(subproblem.constraints,
                                      solve_result.dual_values):
            constr.save_value(dual_value)
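The for ... else above is easy to misread: the else branch runs only when the loop finishes without hitting break, i.e. only when no subproblem status was infeasible or unbounded. A minimal standalone sketch of that semantics:

statuses = {'optimal'}
for status in ('infeasible', 'unbounded'):
    if status in statuses:
        print('bad subproblem:', status)
        break
else:
    # runs only because the loop never hit `break`
    print('all subproblems usable; collect primal and dual values')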
def main():
    parser = command.options()
    # cpu_count() - 1 leaves one core free for the parent process
    parser.add_option('--cores', dest='cores', type=int, default=cpu_count()-1,
                      help='number of cores to run in parallel (default: %default)')
    (opts, args) = parser.parse_args()
    if not opts.dataset:
        parser.print_help()
        raise SystemExit
    print("reading %s..." % opts.dataset)
    (users, items) = dataset.read_users_and_items(opts.dataset, opts.sep, opts.skipfl)
    print("loaded %d users" % len(users))
    print("loaded %d items" % len(items))
    topitems = dataset.top_items(items)
    print("do not use these top items %s" % str(topitems))
# Set the inertia function
try:
    # a (start, final) pair: linearly interpolate the inertia from
    # w_sta at iteration 0 to w_fin at the last iteration
    w_sta, w_fin = tuple(self._w)
    self._update_w = lambda i: w_sta + (w_fin - w_sta) * (
        i / self._max_iter
    )  # noqa: E731
except TypeError:
    # a scalar w is used as a fixed inertia
    self._update_w = lambda i: self._w  # noqa: E731

self._n_jobs = kwargs.get("n_jobs", 1)
if self._n_jobs == -1:
    self._n_jobs = None  # Pool(None) sizes itself to every available core
elif self._n_jobs == -2:
    self._n_jobs = multiprocess.cpu_count() - 1  # leave one core free

self._seed = kwargs.get("seed", None)
random.seed(self._seed)
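For reference, the -1 to None translation works because a pool built with processes=None sizes itself to cpu_count(), a behavior multiprocess shares with the stdlib. A minimal standalone sketch:

import multiprocess

def square(x):
    return x * x

if __name__ == '__main__':
    # processes=None makes the pool use multiprocess.cpu_count() workers,
    # which is why n_jobs == -1 is mapped to None above
    with multiprocess.Pool(processes=None) as pool:
        print(pool.map(square, range(8)))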