result = \"[%s, %s]\" % (values, prediction)
print u\"%s\\t%s\" % (result, count)
for line in sys.stdin:
values, prediction = line.strip().split('\\t')
if previous is None:
previous = (values, prediction)
if values != previous[0]:
print_result(previous[0], previous[1], count)
previous = (values, prediction)
count = 0
count += 1
if count > 0:
print_result(previous[0], previous[1], count)
"""
out.write(utf8(output))
out.flush()
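# A minimal local check of the reducer template above (a sketch, not part of
# the library): it assumes the generated code has been written to a
# hypothetical "reducer.py" and that its input consists of tab-separated
# "<values>\t<prediction>" lines already sorted by key, as Hadoop streaming
# would deliver them.
import subprocess

sample = (u'["5.1, 3.5"]\tIris-setosa\n'
          u'["5.1, 3.5"]\tIris-setosa\n'
          u'["6.3, 2.9"]\tIris-virginica\n').encode("utf-8")
proc = subprocess.Popen(["python", "reducer.py"],
                        stdin=subprocess.PIPE, stdout=subprocess.PIPE)
reduced, _ = proc.communicate(sample)
print(reduced.decode("utf-8"))  # one "[values, prediction]\tcount" line per key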
out.write(utf8(TRIVIAL_MODEL))
if any("," in name and name.split(",")[2] in ["A", "M"] for \
name in model_names):
out.write(utf8(SEASONAL_CODE))
trends = [name.split(",")[1] for name in model_names if "," in name]
trends.extend([name for name in model_names if "," not in name])
trends = set(trends)
models_function = []
for trend in trends:
models_function.append("\"%s\": _%s_forecast" % (trend, trend))
out.write(utf8(SUBMODELS_CODE[trend]))
out.write(utf8(u"\n\nMODELS = \\\n"))
out.write(utf8("%s%s%s" % \
(u" {", u",\n ".join(models_function), u"}")))
out.write(utf8(FORECAST_FUNCTION))
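# For illustration only: each entry appended to models_function is a literal
# like '"A": _A_forecast', so the generated file ends with something of the
# form (the trend codes shown here are hypothetical):
#
#     MODELS = \
#         {"A": _A_forecast,
#          "N": _N_forecast}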
"""
if value is None:
return ""
impurity_literal = ""
if impurity is not None and impurity > 0:
impurity_literal = "; impurity: %.2f%%" % (round(impurity, 4))
objective_type = self.fields[tree.objective_id]['optype']
if objective_type == 'numeric':
return u" [Error: %s]" % value
else:
return u" [Confidence: %.2f%%%s]" % ((round(value, 4) * 100),
impurity_literal)
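# Illustration with hypothetical values: for a categorical objective,
# confidence_error(0.9234) returns u" [Confidence: 92.34%]" and
# confidence_error(0.9234, impurity=0.12) returns
# u" [Confidence: 92.34%; impurity: 0.12%]"; for a numeric objective,
# confidence_error(2.5) returns u" [Error: 2.5]".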
distribution = self.get_data_distribution()
out.write(utf8(u"Data distribution:\n"))
print_distribution(distribution, out=out)
out.write(utf8(u"\n\n"))
groups = self.group_prediction()
predictions = self.get_prediction_distribution(groups)
out.write(utf8(u"Predicted distribution:\n"))
print_distribution(predictions, out=out)
out.write(utf8(u"\n\n"))
if self.field_importance:
    out.write(utf8(u"Field importance:\n"))
    print_importance(self, out=out)
extract_common_path(groups)
self.fields[field_id]['name'],
value)))
connector = ", "
out.write(u"\n\n")
out.write(u"Distance distribution:\n\n")
for centroid in centroids_list:
centroid.print_statistics(out=out)
out.write(u"\n")
if len(self.centroids) > 1:
out.write(u"Intercentroid distance:\n\n")
centroids_list = (centroids_list[1:] if self.cluster_global else
centroids_list)
for centroid in centroids_list:
out.write(utf8(u"%sTo centroid: %s\n" % (INDENT,
centroid.name)))
for measure, result in self.centroids_distance(centroid):
out.write(u"%s%s: %s\n" % (INDENT * 2, measure, result))
out.write(u"\n")
out.write(utf8(u"\n\nRules summary:"))

for group in [x[0] for x in predictions]:
    details = groups[group]['details']
    path = Path(groups[group]['total'][0])
    data_per_group = groups[group]['total'][1] * 1.0 / tree.count
    pred_per_group = groups[group]['total'][2] * 1.0 / tree.count
    out.write(utf8(u"\n\n%s : (data %.2f%% / prediction %.2f%%) %s" %
                   (group,
                    round(data_per_group, 4) * 100,
                    round(pred_per_group, 4) * 100,
                    path.to_rules(self.fields, format=format))))
    if len(details) == 0:
        out.write(utf8(u"\n    The model will never predict this"
                       u" class\n"))
    elif len(details) == 1:
        subgroup = details[0]
        out.write(utf8(u"%s\n" % confidence_error(
            subgroup[2], impurity=subgroup[3])))
    else:
        out.write(utf8(u"\n"))
        for j in range(0, len(details)):
            subgroup = details[j]
            pred_per_sgroup = subgroup[1] * 1.0 / \
                groups[group]['total'][2]
            path = Path(subgroup[0])
            path_chain = path.to_rules(self.fields, format=format) if \
                path.predicates else "(root node)"
            out.write(utf8(u"    · %.2f%%: %s%s\n" %
                           (round(pred_per_sgroup, 4) * 100,
                            path_chain,
                            confidence_error(subgroup[2],
                                             impurity=subgroup[3]))))
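# Illustrative "Rules summary" block for one class (hypothetical model and
# percentages):
#
#     Iris-versicolor : (data 33.33% / prediction 32.67%) petal length > 2.45
#         · 96.97%: petal width <= 1.65 [Confidence: 92.44%]
#         · 3.03%: petal width > 1.65 [Confidence: 43.85%]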
def print_distribution(distribution, out=sys.stdout):
    """Prints distribution data

    """
    total = reduce(lambda x, y: x + y,
                   [group[1] for group in distribution])
    for group in distribution:
        out.write(utf8(
            u"    %s: %.2f%% (%d instance%s)\n" % (
                group[0],
                round(group[1] * 1.0 / total, 4) * 100,
                group[1],
                u"" if group[1] == 1 else u"s")))
out.write(utf8(u"\n\nRules summary:"))
for group in [x[0] for x in predictions]:
details = groups[group]['details']
path = Path(groups[group]['total'][0])
data_per_group = groups[group]['total'][1] * 1.0 / tree.count
pred_per_group = groups[group]['total'][2] * 1.0 / tree.count
out.write(utf8(u"\n\n"))
groups = self.group_prediction()
predictions = self.get_prediction_distribution(groups)
out.write(utf8(u"Predicted distribution:\n"))
print_distribution(predictions, out=out)
out.write(utf8(u"\n\n"))
if self.field_importance:
out.write(utf8(u"Field importance:\n"))
print_importance(self, out=out)
extract_common_path(groups)
out.write(utf8(u"\n\nRules summary:"))
for group in [x[0] for x in predictions]:
details = groups[group]['details']
path = Path(groups[group]['total'][0])
data_per_group = groups[group]['total'][1] * 1.0 / tree.count
pred_per_group = groups[group]['total'][2] * 1.0 / tree.count
out.write(utf8(u"\n\n%s : (data %.2f%% / prediction %.2f%%) %s" %
(group,
round(data_per_group, 4) * 100,
round(pred_per_group, 4) * 100,
path.to_rules(self.fields, format=format))))
if len(details) == 0:
out.write(utf8(u"\n The model will never predict this"
u" class\n"))
elif len(details) == 1: