Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# Sweep every (response_size, response_count) combination, collecting one
# result row per run from run_single_test(), then render the summary table.
rows = []
header = ['Operation', 'Response\nCount', 'RespSize\nBytes',
          'MaxObjCnt\nRequest',
          'operation\nCount',
          'Total execution\ntime (hh:mm:ss)',
          'inst/sec', 'runid']
for resp_size in response_sizes:
    for resp_count in response_counts:
        rows.extend(
            run_single_test(conn, runid, resp_count, resp_size, pull_sizes))
summary = (' Response results for pywbem version %s runid %s execution time %s'
           % (__version__, runid, format_timedelta(test_timer.elapsed_time())))
print(summary)
table = tabulate(rows, headers=header, tablefmt="simple")
print(table)
if verbose:
    # When verbose, also dump the raw per-operation statistics.
    rows = list(STATS_LIST)
    headers = ['Operation', 'Max Object\ncount', 'Op\nCount', 'inst count',
               'Operation\nTime']
    table = tabulate(rows, headers=headers)
    print(table)
def test_ansi_color_bold_and_fgcolor():
    "Regression: set ANSI color and bold face together (issue #65)"
    colored_five = "\x1b[1;31m5\x1b[1;m"
    table = [["1", "2", "3"], ["4", colored_five, "6"], ["7", "8", "9"]]
    result = tabulate(table, tablefmt="grid")
    separator = "+---+---+---+"
    expected = "\n".join(
        [
            separator,
            "| 1 | 2 | 3 |",
            separator,
            "| 4 | " + colored_five + " | 6 |",
            separator,
            "| 7 | 8 | 9 |",
            separator,
        ]
    )
    assert_equal(result, expected)
if row:
onear_rows.append(row)
# Sort over-ear rows by preference score (column 1), best first.
onear_rows = sorted(onear_rows, key=lambda row: float(row[1]), reverse=True)
onear_str = tabulate(onear_rows, headers=['Name', 'Score', 'STD (dB)', 'Slope'], tablefmt='orgtbl')
# Convert the orgtbl separators to GitHub-flavored markdown alignment syntax.
onear_str = onear_str.replace('+', '|').replace('|-', '|:')
inear_rows = []
# oratory1990 and Crinacle in-ear
files = list(glob(os.path.join(ROOT_DIR, 'results', 'oratory1990', 'harman_in-ear_2019v2', '*', '*.csv')))
files += list(glob(os.path.join(ROOT_DIR, 'results', 'crinacle', 'harman_in-ear_2019v2', '*', '*.csv')))
for fp in files:
    # ranking_row presumably returns a row of score fields, or a falsy
    # value when the file should be excluded — TODO confirm with its def.
    row = ranking_row(fp, harman_inear, 'inear')
    if row:
        inear_rows.append(row)
# Sort in-ear rows by preference score, best first, then render the table.
inear_str = sorted(inear_rows, key=lambda row: float(row[1]), reverse=True)
inear_str = tabulate(inear_str, headers=['Name', 'Score', 'STD (dB)', 'Slope', 'Average (dB)'], tablefmt='orgtbl')
inear_str = inear_str.replace('-+-', '-|-').replace('|-', '|:')
s = f'''# Headphone Ranking
Headphones ranked by Harman headphone listener preference scores.
Tables include the preference score (Score), standard deviation of the error (STD), slope of the logarithimc
regression fit of the error (Slope) for both headphone types and average of the absolute error (Average) for in-ear
headphones. STD tells how much the headphone deviates from neutral and slope tells if the headphone is warm (< 0) or
bright (> 0).
Keep in mind that these numbers are calculated with deviations from Harman targets. The linked results use different
levels of bass boost so the slope numbers here won't match the error curves you see in the linked results.
Over-ear table includes headphones measured by oratory1990. In-ear table includes headphones measured by oratory1990
and Crinacale. Measurements from other databases are not included because they are not compatible with measurements,
targets and preference scoring developed by Sean Olive et al.
def _print_roles_info(cls, nodes):
    """ Prints table with roles and number of nodes serving each specific role

    Args:
      nodes: a list of NodeStats
    """
    # Report number of nodes and roles running in the cluster
    roles_counter = Counter(chain(*[node.roles for node in nodes]))
    header = ("ROLE", "COUNT")
    # BUGFIX: Counter.iteritems() does not exist on Python 3; .items()
    # behaves the same on both Python 2 and 3.
    table = roles_counter.items()
    AppScaleLogger.log("\n" + tabulate(table, headers=header, tablefmt="plain"))
# Collect every record from the query result; `append` and `records` are
# bound outside this fragment — presumably append fills `records`; verify
# against the enclosing scope.
for pr in q.all():
    append(pr)
records = drop_empty(records)
if records:
    # records[0] is the header row; sort the data rows by column 5.
    prt_no_format(tabulate(sorted(records[1:], key=lambda x: x[5]), records[0]))
if args.stats:
    # NOTE(review): Python 2 print statement — this fragment targets py2.
    print '=== STATS ===='
    headers, rows = b.progress.stats()
    if rows:
        prt_no_format(tabulate(rows, headers))
def print_results(result, columns, output, output_format):
    """Print out the results of a query to the specified output and using the
    specified output format.

    Args:
        result: sequence of row sequences (the query result set).
        columns: column headers used by the 'friendly' table format.
        output: writable file-like object the report is written to.
        output_format: one of 'friendly', 'csv' or 'tsv'.

    Raises:
        click.UsageError: if output_format is not a recognized format.
    """
    if output_format == 'friendly':
        output.write(tabulate(result, columns))
        output.write('\n\n')
        output.write('Query returned {0} rows.\n\n'.format(len(result)))
    elif output_format == 'csv':
        # BUGFIX: the old ','.join produced corrupt CSV whenever a field
        # contained a comma, quote or newline; csv.writer escapes properly
        # and emits identical output for plain fields.
        import csv
        writer = csv.writer(output, lineterminator='\n')
        for row in result:
            writer.writerow([str(x).strip() for x in row])
    elif output_format == 'tsv':
        for row in result:
            output.write('\t'.join(map(lambda x: str(x).strip(), row)))
            output.write('\n')
    else:
        raise click.UsageError('Unknown output format!')
# Build the benchmark timing table; `funcs`, `final`, `SIZE` and `NUMBER`
# are defined earlier in this script.
func_list = list(funcs.keys())
table = pd.DataFrame(final)
# Order rows by mean runtime so the fastest configurations come first.
table = table.reindex(table.mean(1).sort_values().index)
order = np.log(table).mean().sort_values().index
table = table.T
table = table.reindex(order, axis=0)
table = table.reindex(func_list, axis=1)
# Normalize totals to microseconds per generated value.
table = 1000000 * table / (SIZE * NUMBER)
table.index.name = "Bit Gen"
print(table.to_csv(float_format="%0.1f"))
try:
    from tabulate import tabulate
    perf = table.applymap(lambda v: "{0:0.1f}".format(v))
    print(tabulate(perf, headers="keys", tablefmt="rst"))
except ImportError:
    # tabulate is optional; the CSV dump above is always printed.
    pass
table = table.T
# Performance relative to the NumPy bit generator, as a percentage.
rel = table.loc[:, ["NumPy"]].values @ np.ones((1, table.shape[1])) / table
rel.pop("NumPy")
rel = rel.T
# Overall column: geometric mean across all timed functions.
rel["Overall"] = np.exp(np.log(rel).mean(1))
rel *= 100
# BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin int is the documented replacement and is equivalent here.
rel = np.round(rel).astype(int)
rel.index.name = "Bit Gen"
print(rel.to_csv(float_format="%0d"))
# NOTE(review): this `try:` has no visible except/finally clause — the
# fragment appears truncated here; confirm against the full file.
try:
    from tabulate import tabulate
# NOTE(review): Python 2 print statements throughout this fragment.
# Summarize per-host socket/VM inventory collected earlier.
print tabulate(host_info, headers=["Host name", "CPU Socket count", "VM count"], tablefmt="psql"), "\n"
print 'retrieving the number of NSX logical switches ....',
# ls_state returns counts and detail lists for regular, universal and
# hardware-gateway-bound logical switches.
ls_count, ls_list, uls_count, uls_list, hwgwls_count, hwgwls_list = ls_state(client_session)
print 'Done'
if args.verbose:
    print tabulate(ls_list, headers=["Logical switch name", "Logical switch Id"], tablefmt="psql"), "\n"
    print tabulate(uls_list, headers=["Universal Logical switch name", "Logical switch Id"], tablefmt="psql"), "\n"
    print tabulate(hwgwls_list, headers=["Logical switches using Hardware Gateway bindings", "Logical switch Id"],
                   tablefmt="psql"), "\n"
print 'retrieving the number of NSX gateways (ESGs and DLRs) ....',
# edge_state returns counts and detail lists for edge gateways and DLRs.
esg_count, esg_list, dlr_count, dlr_list = edge_state(client_session)
print 'Done'
if args.verbose:
    print tabulate(esg_list, headers=["Edge service gw name", "Edge service gw Id"], tablefmt="psql"), "\n"
    print tabulate(dlr_list, headers=["Logical router name", "Logical router Id"], tablefmt="psql"), "\n"
# Per-edge feature flags; columns 2..7 hold 'true'/'false' strings for
# each feature, as consumed by the counters below.
edge_feature_list = esg_features_collect(client_session, esg_list)
if args.verbose:
    print tabulate(edge_feature_list,
                   headers=["Edge service gw name", "Edge service gw Id", "Loadbalancer",
                            "Firewall", "Routing", "IPSec", "L2VPN", "SSL-VPN"], tablefmt="psql"), "\n"
# Count how many edges have each feature enabled.
lb_esg = len([edge for edge in edge_feature_list if edge[2] == 'true'])
fw_esg = len([edge for edge in edge_feature_list if edge[3] == 'true'])
rt_esg = len([edge for edge in edge_feature_list if edge[4] == 'true'])
ipsec_esg = len([edge for edge in edge_feature_list if edge[5] == 'true'])
l2vpn_esg = len([edge for edge in edge_feature_list if edge[6] == 'true'])
sslvpn_esg = len([edge for edge in edge_feature_list if edge[7] == 'true'])
nsx_sockets, dfw_sockets = calculate_socket_usage(host_list, host_info)
# Gather CRM ACL usage counters for every (stage, bind point) combination.
data = []
for stage in ["INGRESS", "EGRESS"]:
    for bind_point in ["PORT", "LAG", "VLAN", "RIF", "SWITCH"]:
        # presumably returns a dict of counter-name -> value, or a falsy
        # value when the key is absent — verify against countersdb API.
        crm_stats = countersdb.get_all(countersdb.COUNTERS_DB, 'CRM:ACL_STATS:{0}:{1}'.format(stage, bind_point))
        if crm_stats:
            for res in ["acl_group", "acl_table"]:
                # One row per resource: used and available counts.
                data.append([
                    stage, bind_point, res,
                    crm_stats['crm_stats_' + res + "_used"],
                    crm_stats['crm_stats_' + res + "_available"]
                ])
# NOTE(review): Python 2 print statements; `header` is defined outside
# this fragment.
print '\n'
print tabulate(data, headers=header, tablefmt="simple", missingval="")
print '\n'
"LOADAVG", "ROLES"
)
# One row per visible node: IPs, init/loaded flags, CPU load, memory use,
# per-partition disk use, load averages and assigned roles.
table = [
    (n.public_ip, n.private_ip,
     "{}/{}".format("+" if n.is_initialized else "-",
                    "+" if n.is_loaded else "-"),
     "{:.1f}x{}".format(n.cpu.load, n.cpu.count),
     100.0 - n.memory.available_percent,
     " ".join('"{}" => {:.1f}'.format(p.mountpoint, p.used_percent) for p in n.disk.partitions),
     "{:.1f} {:.1f} {:.1f}".format(
         n.loadavg.last_1_min, n.loadavg.last_5_min, n.loadavg.last_15_min),
     " ".join(n.roles))
    for n in nodes
]
# Nodes that did not report stats get placeholder rows keyed by IP.
table += [("?", ip, "?", "?", "?", "?", "?", "?") for ip in invisible_nodes]
# `header` is defined outside this fragment — presumably the column titles
# ending in "LOADAVG", "ROLES"; verify against the enclosing function.
table_str = tabulate(table, header, tablefmt="plain", floatfmt=".1f")
AppScaleLogger.log(table_str)
AppScaleLogger.log("* I/L means 'Is node Initialized'/'Is node Loaded'")