if ((asksort == book['askp']) and
        (bidsort == book['bidp']) and
        (len(set(asksort)) == len(asksort)) and
        (len(set(bidsort)) == len(bidsort)) and
        (bidsort[0] < asksort[0])):
    msg += 'triplicate book'
    triplicate = 1
    tally['triple'] += 1
    break
else:
    msg += 'triplicate book error - '
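# A minimal, self-contained sketch of the same order book sanity check used
# above: asks sorted ascending, bids sorted descending, no duplicate price
# levels, and the best bid strictly below the best ask. The function name
# is_sane_book and the plain-dict book format are assumptions for
# illustration, not part of the original code.
def is_sane_book(book):
    asksort = sorted(book['askp'])
    bidsort = sorted(book['bidp'], reverse=True)
    return (asksort == book['askp']
            and bidsort == book['bidp']
            and len(set(asksort)) == len(asksort)
            and len(set(bidsort)) == len(bidsort)
            and bidsort[0] < asksort[0])

# Example: a crossed book (best bid >= best ask) fails the check.
print(is_sane_book({'askp': [10.2, 10.3], 'bidp': [10.1, 10.0]}))  # True
print(is_sane_book({'askp': [10.0, 10.3], 'bidp': [10.1, 10.0]}))  # False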
if triplicate == 0:
    # check last list and return best last price with message
    try:
        book = literal(mode([str(i) for i in book_list]))
        asksort = sorted(book['askp'])
        bidsort = sorted(book['bidp'], reverse=True)
        if 0:  # disabled debug checks
            if (asksort != book['askp']):
                print('asksort')
            if (bidsort != book['bidp']):
                print('bidsort')
            if (len(set(asksort)) != len(asksort)):
                print('askmatch')
            if (len(set(bidsort)) != len(bidsort)):
                print('bidmatch')
            if (bidsort[0] > asksort[0]):
                print('mismatched')
        if ((asksort == book['askp']) and
                (bidsort == book['bidp']) and
                (len(set(asksort)) == len(asksort)) and
import statistics

def mode(lst):
    return statistics.mode(lst)
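# Hedged note on the wrapper above: statistics.mode raises
# statistics.StatisticsError on empty input, and on Python < 3.8 it also
# raises when there is no unique mode; from Python 3.8 onward it returns the
# first mode encountered instead. A minimal guard for both cases
# (safe_mode is a hypothetical helper, not from the original code):
import statistics

def safe_mode(values, default=None):
    # Fall back to a default when no mode can be computed.
    try:
        return statistics.mode(values)
    except statistics.StatisticsError:
        return default

print(safe_mode([1, 2, 2, 3]))   # 2
print(safe_mode([], default=0))  # 0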
            asset_precision,
            currency_id,
            currency_precision,
        ) = rpc_lookup_asset_symbols(rpc, cache)
        # prepare for statistical mode of cache items
        asset_ids.append(asset_id)
        account_ids.append(account_id)
        currency_ids.append(currency_id)
        asset_precisions.append(asset_precision)
        currency_precisions.append(currency_precision)
        # mode of cache
        if len(asset_ids) > 4:
            try:
                cache["begin"] = int(time())
                cache["asset_id"] = mode(asset_ids)
                cache["account_id"] = mode(account_ids)
                cache["currency_id"] = mode(currency_ids)
                cache["asset_precision"] = mode(asset_precisions)
                cache["currency_precision"] = mode(currency_precisions)
                enableTrace(False)
                print_market(storage, cache)
                winnow(storage, "whitelist", node)
                break
            except BaseException:
                winnow(storage, "blacklist", node)
                continue
    except Exception as error:
        print(trace(error))
        continue
return storage, cache
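# The fragment above repeats a lookup against several nodes and only commits
# values to the cache once the statistical mode of more than four samples is
# available, so a single misbehaving node cannot poison the result. A
# self-contained sketch of that consensus pattern, with a hypothetical
# lookup_asset_id callable and node list (the real code uses
# rpc_lookup_asset_symbols and its own cache/winnow machinery):
import statistics

def consensus_asset_id(nodes, lookup_asset_id, min_samples=5):
    samples = []
    for node in nodes:
        try:
            samples.append(lookup_asset_id(node))
        except Exception:
            continue  # skip unreachable or faulty nodes
        if len(samples) >= min_samples:
            return statistics.mode(samples)  # majority value wins
    raise RuntimeError("not enough healthy nodes to reach consensus")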
account_id = (inquire(lookup_accounts))[0][1]
wwc()
ret = inquire(lookup_asset_symbols)
asset_id = ret[0]['id']
asset_precision = ret[0]['precision']
currency_id = ret[1]['id']
currency_precision = ret[1]['precision']
account_ids.append(account_id)
asset_ids.append(asset_id)
currency_ids.append(currency_id)
asset_precisions.append(asset_precision)
currency_precisions.append(currency_precision)
account_id = mode(account_ids)
asset_id = mode(asset_ids)
currency_id = mode(currency_ids)
asset_precision = mode(asset_precisions)
currency_precision = mode(currency_precisions)
websocket.enableTrace(False)
print_market()
model_type = model_fields.get("type", None)
if model_type:
    for filter_name in filter_field_names:
        field_value = model_fields.get(filter_name, None)
        if field_value:
            model_filter_fields.setdefault(model_type, {})
            model_filter_fields[model_type].setdefault(
                filter_name, []
            )
            model_filter_fields[model_type][filter_name].append(
                field_value)

for model_type, filter_fields in model_filter_fields.items():
    for filter_name, filter_values in filter_fields.items():
        try:
            mode = statistics.mode(filter_values)
        except statistics.StatisticsError as exception:
            if "no unique mode" in str(exception):
                mode = filter_values[0]
        model_filter_fields[model_type][filter_name] = mode

optimised_metric_names = ["ELBO", "ENRE", "KL_z"]
if common_comparison_fields.get("type") != "VAE(G)":
    optimised_metric_names.append("KL_y")
optimised_metric_symbols = {
    "ELBO": "$\\mathcal{L}$",
    "ENRE": "$\\log p(x|z)$",
    "KL_z": "KL$_z(q||p)$",
    "KL_y": "KL$_y(q||p)$",
}
supervised_clustering_metric_names = [
asset_balance = mode(asset_balance[-(l - 2):])
try:
    last = mode(last)
except:
    try:
        last = mode(last[-(l - 1):])
    except:
        last = mode(last[-(l - 2):])
try:
    bidp = (mode(bidp))
except:
    try:
        bidp = (mode(bidp[-(l - 1):]))
    except:
        bidp = (mode(bidp[-(l - 2):]))
try:
    askp = (mode(askp))
except:
    try:
        askp = (mode(askp[-(l - 1):]))
    except:
        askp = (mode(askp[-(l - 2):]))
try:
    bidv = (mode(bidv))
except:
    try:
        bidv = (mode(bidv[-(l - 1):]))
    except:
        bidv = (mode(bidv[-(l - 2):]))
try:
    askv = (mode(askv))
from statistics import StatisticsError, mean, median, mode, stdev


def append_data_statistics(meta_data):
    # get data statistics per character count over the audio lengths
    for char_cnt in meta_data:
        data = meta_data[char_cnt]["data"]
        audio_len_list = [d["audio_len"] for d in data]
        mean_audio_len = mean(audio_len_list)
        try:
            mode_audio_list = [round(d["audio_len"], 2) for d in data]
            mode_audio_len = mode(mode_audio_list)
        except StatisticsError:
            mode_audio_len = audio_len_list[0]
        median_audio_len = median(audio_len_list)
        try:
            std = stdev(d["audio_len"] for d in data)
        except StatisticsError:
            std = 0
        meta_data[char_cnt]["mean"] = mean_audio_len
        meta_data[char_cnt]["median"] = median_audio_len
        meta_data[char_cnt]["mode"] = mode_audio_len
        meta_data[char_cnt]["std"] = std
    return meta_data
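# Example usage of append_data_statistics with a tiny hand-made meta_data
# dict (the "data"/"audio_len" structure follows what the function reads;
# the keys and numbers are illustrative only):
sample_meta = {
    12: {"data": [{"audio_len": 1.21}, {"audio_len": 1.19}, {"audio_len": 1.21}]},
    30: {"data": [{"audio_len": 2.75}]},  # single clip: stdev falls back to 0
}
sample_meta = append_data_statistics(sample_meta)
print(sample_meta[12]["mean"], sample_meta[12]["mode"], sample_meta[12]["std"])
print(sample_meta[30]["std"])  # 0, because stdev needs at least two points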
    race_append(doc='EV_log.txt', text=msg)
    blacklist = ''
    blacklist += "\n\n" + 'last process' + str(time.ctime()) + str(n)
    race_append('blacklist.txt', blacklist)
    pass
# calculate relative range
rrange = (max(last_list) - min(last_list)) / mean(last_list)
# check last list and return best last price with message
msg = ''
if len(set(last_list)) == 1:
    last = last_list[-1]
    msg += 'common'
else:
    try:
        last = mode(last_list)
        msg += 'mode'
    except:
        last = median(last_list)
        msg += 'median'
print(str(last_list))
print(str(nodes_used))
# override median or mode with latest if less than 2% difference
if rrange < 0.02:
    last = last_list[-1]
    msg = 'latest (' + msg + ')'
else:
    # create blacklist.txt if relative range too wide
    print('')
    print(time.ctime(), str(last), str(rrange))
    print(str(last_list))
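# The fragment above picks a "best" last price from several node reports:
# use the common value when all nodes agree, else the mode, else the median,
# and override with the most recent report when the relative spread is under
# 2%. A self-contained sketch of that decision; aggregate_last_price is a
# hypothetical name, the original works on last_list inline:
from statistics import StatisticsError, mean, median, mode

def aggregate_last_price(last_list, max_relative_range=0.02):
    rrange = (max(last_list) - min(last_list)) / mean(last_list)
    if len(set(last_list)) == 1:
        last, msg = last_list[-1], 'common'
    else:
        try:
            last, msg = mode(last_list), 'mode'
        except StatisticsError:  # empty data; ties no longer raise on Python >= 3.8
            last, msg = median(last_list), 'median'
    if rrange < max_relative_range:
        # reports are tightly clustered; trust the most recent one
        last, msg = last_list[-1], 'latest (' + msg + ')'
    return last, msg

print(aggregate_last_price([10.00, 10.01, 10.01, 10.02]))  # (10.02, 'latest (mode)')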
def cycle_length_mode(self):
    try:
        return self._get_statistics_value(statistics.mode)
    except statistics.StatisticsError:
        return None
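# cycle_length_mode above passes statistics.mode as a callable into a shared
# helper, so the same wrapper can serve mean, median, and so on. A minimal
# sketch of that pattern with a hypothetical _get_statistics_value and
# CycleStats class (the real class and its data model are not shown in the
# snippet):
import statistics

class CycleStats:
    def __init__(self, cycle_lengths):
        self.cycle_lengths = cycle_lengths

    def _get_statistics_value(self, func):
        # Apply any statistics callable to the recorded cycle lengths.
        return func(self.cycle_lengths)

    def cycle_length_mode(self):
        try:
            return self._get_statistics_value(statistics.mode)
        except statistics.StatisticsError:
            return None  # no data (or no unique mode on Python < 3.8)

print(CycleStats([28, 29, 28, 30]).cycle_length_mode())  # 28
print(CycleStats([]).cycle_length_mode())                # None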