def make_rst_table(data, header=None, indent=True):
"""Make rst table with :py:mod:`Texttable`.
Args:
data (list): table data (list of rows) to be printed
header (list): column header names
indent (bool): indent table for rst
Returns:
rst-formatted table
"""
if data is None:
return ""
else:
tab_tt = tt.Texttable()
tab_tt.set_precision(2)
if header is not None:
data[0] = header
w = [len(c) + 2 for c in data[0]]
for r in data:
for i in range(0, len(r)):
w[i] = max(w[i], len(r[i]) + 2)
tab_tt.add_rows(data)
tab_tt.set_cols_width(w)
tab_tt.set_cols_align("r" * len(data[0]))
if indent:
return _indent_texttable_for_rst(tab_tt)
else:
return tab_tt.draw()
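A minimal usage sketch for make_rst_table above, not part of the original source: it assumes the enclosing module imports texttable as tt, and it passes indent=False so the module's _indent_texttable_for_rst helper (not shown here) is not needed.
import texttable as tt  # assumed module-level import that make_rst_table relies on

# make_rst_table overwrites data[0] with the header when one is given, so the
# first row is only a placeholder; all cells are strings because the function
# measures column widths with len().
rows = [
    ["", ""],
    ["alice", "0.91"],
    ["bob", "0.87"],
]
print(make_rst_table(rows, header=["Name", "Score"], indent=False))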
tasks that are in one of the specified states. By default, all
task states are allowed. The `states` argument should be a
list or a set of `Run.State` values.
Optional third argument `only` further restricts the listing
to tasks that are instances of a subclass of `only`. By
default, there is no restriction and all tasks are listed. The
`only` argument can be a Python class or a tuple -- anything,
in fact, that you can pass as the second argument to the
`isinstance` function.
:param output: An output stream (file-like object)
:param states: List of states (`Run.State` items) to consider.
:param only: Root class (or tuple of root classes) of tasks to consider.
"""
table = Texttable(0) # max_width=0 => dynamically resize cells
table.set_deco(Texttable.HEADER) # also: .VLINES, .HLINES, .BORDER
table.header(['JobID', 'Job name', 'State', 'Info'])
#table.set_cols_width([10, 20, 10, 35])
table.set_cols_align(['l', 'l', 'l', 'l'])
table.add_rows([
(task.persistent_id, task.jobname,
task.execution.state, task.execution.info)
for task in self.session
if isinstance(task, only) and task.execution.in_state(*states)],
header=False)
# XXX: uses texttable's internal implementation detail
if len(table._rows) > 0:
output.write(table.draw())
output.write("\n")
def get_aggregation_summary_text(self, matches):
text = ''
if 'aggregation' in self.rule and 'summary_table_fields' in self.rule:
text = self.rule.get('summary_prefix', '')
summary_table_fields = self.rule['summary_table_fields']
if not isinstance(summary_table_fields, list):
summary_table_fields = [summary_table_fields]
# Include a count aggregation so that we can see at a glance how many of each aggregation_key were encountered
summary_table_fields_with_count = summary_table_fields + ['count']
text += "Aggregation resulted in the following data for summary_table_fields ==> {0}:\n\n".format(
summary_table_fields_with_count
)
text_table = Texttable(max_width=self.get_aggregation_summary_text__maximum_width())
text_table.header(summary_table_fields_with_count)
# Format all fields as 'text' to avoid long numbers being shown as scientific notation
text_table.set_cols_dtype(['t'] * len(summary_table_fields_with_count))
match_aggregation = {}
# Maintain an aggregate count for each unique key encountered in the aggregation period
for match in matches:
key_tuple = tuple([str(lookup_es_key(match, key)) for key in summary_table_fields])
if key_tuple not in match_aggregation:
match_aggregation[key_tuple] = 1
else:
match_aggregation[key_tuple] += 1
for keys, count in match_aggregation.items():
text_table.add_row(list(keys) + [count])
text += text_table.draw() + '\n\n'
text += self.rule.get('summary_suffix', '')
return text
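A standalone sketch of the column-dtype trick used above, with hypothetical field names: forcing every column to the 't' (text) dtype keeps texttable from applying its automatic numeric formatting, which is how the snippet avoids scientific notation for long numbers.
from texttable import Texttable

fields_with_count = ['username', 'src_ip', 'count']  # hypothetical summary_table_fields + count

table = Texttable(max_width=80)
table.header(fields_with_count)
table.set_cols_dtype(['t'] * len(fields_with_count))  # every column rendered as plain text
table.add_row(['alice', '10.0.0.1', 3])
table.add_row(['bob', '10.0.0.2', 1])
print(table.draw())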
def _tabular_str_(self):
# I want to make this PrettyProxy's __sql__, but that doesn't seem to work.
tt = texttable.Texttable()
tt.set_deco(texttable.Texttable.HEADER)
tt.header(self.keys())
for row in self:
tt.add_row(row)
return tt.draw()
)
# Test ShapeSim
cppaAdjTensor, extraData = getMetaPathAdjacencyTensorData(
graph, nodeIndex, ['conference', 'paper', 'paper', 'author']
)
extraData['fromNodes'] = extraData['toNodes']
extraData['fromNodesIndex'] = extraData['toNodesIndex']
shapeSimMostSimilar, similarityScores = findMostSimilarNodes(
cppaAdjTensor, 'Alice', extraData, method=getNumpyShapeSimScore, alpha=1.0
)
# Output similarity scores
for name, mostSimilar in [('NeighborSim', neighborSimMostSimilar), ('ShapeSim', shapeSimMostSimilar)]:
print('\n%s Most Similar to "%s":' % (name, 'Alice'))
mostSimilarTable = texttable.Texttable()
rows = [['Author', 'Score']]
rows += [[name, score] for name, score in mostSimilar]
mostSimilarTable.add_rows(rows)
print(mostSimilarTable.draw())
global dpdk_drivers
if not devices:
get_nic_details()
dpdk_drv = []
for d in devices.keys():
if devices[d].get("Driver_str") in dpdk_drivers:
dpdk_drv.append(d)
if get_macs:
for pci, info in get_info_from_trex(dpdk_drv).items():
if pci not in dpdk_drv: # sanity check, should not happen
print('Internal error while getting MACs of DPDK bound interfaces, unknown PCI: %s' % pci)
return
devices[pci].update(info)
table = texttable.Texttable(max_width=-1)
table.header(['ID', 'NUMA', 'PCI', 'MAC', 'Name', 'Driver', 'Linux IF', 'Active'])
for id, pci in enumerate(sorted(devices.keys())):
custom_row_added = False
d = devices[pci]
if is_napatech(d):
custom_row_added = add_table_entry_napatech(id, d, table)
if not custom_row_added:
table.add_row([id, d['NUMA'], d['Slot_str'], d.get('MAC', ''), d['Device_str'], d.get('Driver_str', ''), d['Interface'], d['Active']])
print(table.draw())
def list(self):
"""List the Drbd volumes and statuses"""
# Set permissions as having been checked, as listing VMs
# does not require permissions
self._get_registered_object('auth').set_permission_asserted()
# Create table and add headers
table = Texttable()
table.set_deco(Texttable.HEADER | Texttable.VLINES)
table.header(('Name', 'Type', 'Location', 'Nodes', 'Shared', 'Free Space'))
# Set column alignment and widths
table.set_cols_width((15, 5, 30, 70, 6, 9))
table.set_cols_align(('l', 'l', 'l', 'l', 'l', 'l'))
for storage_backend in self.get_all():
table.add_row((
storage_backend.name,
storage_backend.storage_type,
storage_backend.get_location(),
', '.join(storage_backend.nodes),
str(storage_backend.shared),
SizeConverter(storage_backend.get_free_space()).to_string()
))
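A self-contained sketch of the decoration and sizing calls used above, with a made-up row in place of the storage backend objects: HEADER | VLINES draws the header rule plus vertical column separators, and set_cols_width pins each column to a fixed width.
from texttable import Texttable

table = Texttable()
table.set_deco(Texttable.HEADER | Texttable.VLINES)
table.header(('Name', 'Type', 'Location', 'Nodes', 'Shared', 'Free Space'))
table.set_cols_width((15, 5, 30, 70, 6, 9))
table.set_cols_align(('l', 'l', 'l', 'l', 'l', 'l'))
table.add_row(('backend1', 'File', 'vg0', 'node1, node2', 'False', '120 GB'))
print(table.draw())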
class_recall = get_stat_string(result_dict[actual_class], "Recall")
class_f = get_stat_string(result_dict[actual_class], "F-measure")
if class_prec != 'N/A':
folds_with_class[actual_class] += 1
prec_sum_dict[actual_class] += float(class_prec[:-1])
recall_sum_dict[actual_class] += float(class_recall[:-1])
f_sum_dict[actual_class] += float(class_f[:-1])
result_table.add_row([actual_class] + conf_matrix[i] + [class_prec, class_recall, class_f])
print(result_table.draw(), file=output_file)
print("(row = reference; column = predicted)", file=output_file)
print("Accuracy = {:.1f}%\n".format(fold_score), file=output_file)
score_sum += fold_score
if num_folds > 1:
print("\nAverage:", file=output_file)
result_table = Texttable(max_width=0)
result_table.set_cols_align(["l", "r", "r", "r"])
result_table.add_rows([["Class", "Precision", "Recall", "F-measure"]], header=True)
for actual_class in classes:
if folds_with_class[actual_class]:
result_table.add_row([actual_class] + ["{:.1f}%".format(prec_sum_dict[actual_class] / folds_with_class[actual_class]),
"{:.1f}%".format(recall_sum_dict[actual_class] / folds_with_class[actual_class]),
"{:.1f}%".format(f_sum_dict[actual_class] / folds_with_class[actual_class])])
print(result_table.draw(), file=output_file)
print("Accuracy = {:.1f}%".format(score_sum / num_folds), file=output_file)
def show_rows(cursor, limit):
"""
Show rows generated by `cursor`.
:type cursor: sqlite3.Cursor
"""
tt = texttable.Texttable()
tt.set_deco(texttable.Texttable.HEADER)
rows = itertools.islice(cursor, limit) if limit >= 0 else cursor
try:
row = next(rows)
except StopIteration:
return
try:
tt.header(row.keys())
except AttributeError:
pass
tt.add_row(row)
tt.add_rows(rows, header=False)
print(tt.draw())
if limit >= 0:
try:
next(cursor)