print("Scoring Generation {}".format(self.generation))
# Score population
scores = self.score_all()
results = list(zip(scores, range(len(scores))))
results.sort(key=itemgetter(0), reverse=True)
# Report
if self.print_output:
print("Generation", self.generation, "| Best Score:", results[0][0], repr(self.population[results[0][
1]])) # prints best result
# Write the data
# Note: if using this for analysis, for reproducability it may be useful to
# pass type(opponent) for each of the opponents. This will allow verification of results post run
row = [self.generation, mean(scores), pstdev(scores), results[0][0],
repr(self.population[results[0][1]])]
self.outputer.write_row(row)
# Next Population
indices_to_keep = [p for (s, p) in results[0: self.bottleneck]]
self.subset_population(indices_to_keep)
# Add mutants of the best players
best_mutants = [p.copy() for p in self.population]
for p in best_mutants:
p.mutate()
self.population.append(p)
# Add random variants
random_params = [self.params_class(**self.params_kwargs)
for _ in range(self.bottleneck // 2)]
params_to_modify = [params.copy() for params in self.population]
# Needs: import pprint, statistics; `settings` and `self.sound_files` come from the surrounding project
for feature in self.feature_statistics:
    if settings.VERBOSE:
        print('Analyzing {} feature statistics'.format(feature))

    # Concatenate this feature's series across all analysed sound files
    series = []
    for sf in self.sound_files:
        series += sf.analysis['series'][feature]
    if len(series) == 0:
        continue

    self.feature_statistics[feature]['min'] = min(series)
    self.feature_statistics[feature]['max'] = max(series)
    self.feature_statistics[feature]['mean'] = statistics.mean(series)
    self.feature_statistics[feature]['standard_deviation'] = statistics.pstdev(series)

if settings.VERBOSE:
    pprint.pprint(self.feature_statistics)
return self.feature_statistics

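The loop above only needs the built-in min/max plus statistics.mean and statistics.pstdev on the concatenated series. A minimal, self-contained sketch of the per-feature summary it builds (the series values below are hypothetical):

import statistics

series = [0.12, 0.40, 0.35, 0.20]  # hypothetical concatenated feature values
feature_stats = {
    'min': min(series),
    'max': max(series),
    'mean': statistics.mean(series),
    'standard_deviation': statistics.pstdev(series),
}
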
import statistics


def add_stats(all_before, all_after, entity_names, colors):
    # Compute the aggregates on the raw data first, so the median and the
    # standard deviation are not skewed by the aggregate rows appended below.
    avg_before = statistics.mean(all_before)
    avg_after = statistics.mean(all_after)
    median_before = statistics.median(all_before)
    median_after = statistics.median(all_after)
    stdev_before = statistics.pstdev(all_before, avg_before)
    stdev_after = statistics.pstdev(all_after, avg_after)

    # Append the aggregates as extra entities so they plot alongside the data
    all_before.append(avg_before)
    all_after.append(avg_after)
    entity_names.append("(AVG)")
    colors.append("b")

    all_before.append(median_before)
    all_after.append(median_after)
    entity_names.append("(MEDIAN)")
    colors.append("y")

    all_before.append(stdev_before)
    all_after.append(stdev_after)
    entity_names.append("(STDEV)")
    colors.append("c")

# Needs: import os, statistics, click; `workpath`, `w`, `test`, `n_runs` and
# `upload_result` come from the enclosing scope
# Read per-run speeds (items/sec) collected by the benchmark run
with open(os.path.join(workpath, "Benchmark.txt")) as f:
    for line in f.readlines():
        w.append(float(line))

click.secho(
    "\nThe results of the benchmark are (all speeds in items/sec) : \n",
    bold=True)
click.secho(
    "\nTest = '{0}' Iterations = '{1}'\n".format(test, n_runs),
    bold=True)
click.secho(
    "\nMean : {0} Median : {1} Std Dev : {2}\n".format(
        statistics.mean(w),
        statistics.median(w),
        statistics.pstdev(w)),
    bold=True)

if upload_result:
    codespeedinfo.uploadresult(test, w)
os.remove(os.path.join(workpath, "Benchmark.txt"))

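The reporting step reduces to statistics.mean, statistics.median and statistics.pstdev over the list of per-run speeds. A stripped-down sketch without the file handling or click output; the speeds below are hypothetical:

import statistics

w = [1510.2, 1498.7, 1523.9, 1505.4]  # hypothetical items/sec per run
print("Mean : {0} Median : {1} Std Dev : {2}".format(
    statistics.mean(w), statistics.median(w), statistics.pstdev(w)))
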
def sharpRatio(
        _returns: DataStruct,
        _factor: int = 252,
        _risk_free: float = 0.0,
        _fund_index: str = 'fund'
) -> float:
    fund = _returns[_fund_index]
    # Period-over-period returns of the fund series, net of the risk-free rate
    tmp_list = [
        a / b - 1.0 - _risk_free
        for a, b in zip(fund[1:], fund[:-1])
    ]
    # Annualised Sharpe ratio: mean / population std dev, scaled by sqrt(_factor)
    return statistics.mean(tmp_list) / statistics.pstdev(tmp_list) * math.sqrt(_factor)

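sharpRatio derives per-period returns from consecutive fund values, subtracts a per-period risk-free rate, and annualises the mean/deviation ratio by sqrt(_factor). A self-contained sketch of the same calculation on a plain list (DataStruct belongs to the surrounding project; the values below are hypothetical):

import math
import statistics

fund = [100.0, 101.2, 100.8, 102.5, 103.1]  # hypothetical NAV series
returns = [a / b - 1.0 for a, b in zip(fund[1:], fund[:-1])]
sharpe = statistics.mean(returns) / statistics.pstdev(returns) * math.sqrt(252)
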
def _addOne(self, _data_struct: DataStruct):
    index_value = _data_struct.index()[0]
    price_value = _data_struct[self.use_key][0]
    if self.last_price is not None:
        # Buffer the bar-over-bar change rate and record mean / std of the
        # buffer as a Sharpe-like ratio
        chg_rate = price_value / self.last_price - 1
        self.buf.append(chg_rate)
        buf_std = statistics.pstdev(self.buf)
        if buf_std != 0:
            self.data.addDict({
                self.idx_key: index_value,
                self.ret_key: statistics.mean(self.buf) / buf_std,
            })
    self.last_price = price_value

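_addOne feeds a buffer of change rates and summarises it with statistics.mean and statistics.pstdev each time a new price arrives. A reduced sketch of the buffering pattern using a fixed-size deque; the window length and prices are hypothetical, and self.buf in the original may be a different container:

import statistics
from collections import deque

buf = deque(maxlen=30)  # rolling window of change rates
last_price = None
for price in [10.0, 10.1, 9.9, 10.3, 10.2]:  # hypothetical closes
    if last_price is not None:
        buf.append(price / last_price - 1)
        buf_std = statistics.pstdev(buf)
        if buf_std != 0:
            ratio = statistics.mean(buf) / buf_std
    last_price = price
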
table_data = []
file_data = {}
for d in data:
    # Keep only non-null datapoints, with their timestamps kept aligned
    d_plain = [i[0] for i in d['datapoints'] if i[0] is not None]
    d_timestamps = [i[1] for i in d['datapoints'] if i[0] is not None]
    d_duration = args.to_ts - args.from_ts
    d_len = len(d_plain)
    if d_len < 5:
        logging.warning('Very low number of datapoints returned for %s: %s' % (d['target'], d_len))
    if len(d_plain) > 0:
        d_min = min(d_plain)
        d_max = max(d_plain)
        d_mean = statistics.mean(d_plain)
        d_median = statistics.median(d_plain)
        # Time-weighted average via Simpson's rule over the sampled interval
        d_integral = scipy.integrate.simps(d_plain, d_timestamps) / d_duration
        d_pstdev = statistics.pstdev(d_plain)
        d_pvariance = statistics.pvariance(d_plain)
        d_hist = get_hist(d_plain)
    else:
        d_min = 0
        d_max = 0
        d_mean = 0
        d_median = 0
        d_integral = 0
        d_pstdev = 0
        d_pvariance = 0
        d_hist = {(0, 0): 0}
    table_row_data = [d_min, d_max, d_mean, d_median, d_integral, d_pstdev, d_pvariance, d_hist, d_duration, d_len]
    file_row = [d['target']] + table_row_data
    table_row = [d['target']] + reformat_number_list(table_row_data)
    table_data.append(table_row)
    file_data[d['target']] = {table_header[i]: file_row[i] for i in range(len(table_header))}

def pstdev(text):
    """
    Finds the population standard deviation of a space-separated list of numbers.

    Example::

        /pstdev 33 54 43 65 43 62
    """
    return format_output(statistics.pstdev(parse_numeric_list(text)))

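For the docstring example, the underlying call reduces to statistics.pstdev over the parsed numbers (format_output and parse_numeric_list are helpers from the surrounding bot, not shown here):

import statistics

statistics.pstdev([33, 54, 43, 65, 43, 62])  # ≈ 11.34, around the mean of 50
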
def bollingerBandsPDiff(self, series, n=20, k=2):
    if len(series) >= n:
        close = series[-1]
        period = series[-n:]  # most recent n closes
        sman = sum(period) / n
        periodStd = statistics.pstdev(period)
        upperBand = sman + periodStd * k
        lowerBand = sman - periodStd * k
        # Percent distance from the last close to each band, and the band
        # half-width relative to the SMA
        pDiffCloseUpperBand = ((upperBand - close) / close) * 100
        pDiffCloseLowerBand = ((lowerBand - close) / close) * 100
        pDiffSmaAbsBand = ((upperBand - sman) / sman) * 100
        return pDiffCloseUpperBand, pDiffCloseLowerBand, pDiffSmaAbsBand
    else:
        return None, None, None

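The band arithmetic itself needs only the window's simple moving average and its population standard deviation. A self-contained sketch on a hypothetical 20-bar window (n=20, k=2 as in the defaults above):

import statistics

period = [100 + (i % 5) for i in range(20)]  # hypothetical 20-bar window of closes
sma = sum(period) / len(period)
std = statistics.pstdev(period)
upper, lower = sma + 2 * std, sma - 2 * std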