def test_top_and_bottom_with_groupby_and_mask(self, dtype, seed):
    permute = partial(permute_rows, seed)
    permuted_array = compose(permute, partial(array, dtype=int64_dtype))
    shape = (8, 8)

    # Shuffle the input rows to verify that we correctly pick out the top
    # values independently of order.
    factor_data = permute(arange(0, 64, dtype=dtype).reshape(shape))
    classifier_data = permuted_array([[0, 0, 1, 1, 2, 2, 0, 0],
                                      [0, 0, 1, 1, 2, 2, 0, 0],
                                      [0, 1, 2, 3, 0, 1, 2, 3],
                                      [0, 1, 2, 3, 0, 1, 2, 3],
                                      [0, 0, 0, 0, 1, 1, 1, 1],
                                      [0, 0, 0, 0, 1, 1, 1, 1],
                                      [0, 0, 0, 0, 0, 0, 0, 0],
                                      [0, 0, 0, 0, 0, 0, 0, 0]])
    f = self.f
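The composed `permuted_array` above builds an `int64` array and then shuffles its rows with a seeded RNG. A minimal, self-contained sketch of the same pattern (`permute_rows` here is a stand-in for zipline's test helper):

import numpy as np
from functools import partial
from toolz import compose

def permute_rows(seed, arr):
    # Stand-in: permute the elements of each row with a seeded RNG
    # so the result is reproducible across runs.
    rng = np.random.RandomState(seed)
    return np.apply_along_axis(rng.permutation, 1, arr)

permuted_array = compose(partial(permute_rows, 0), partial(np.array, dtype=np.int64))
permuted_array([[0, 1, 2], [3, 4, 5]])  # each row shuffled independently, dtype int64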
def compute_up(expr, data, **kwargs):
    leaf = expr._leaves()[0]
    chunk = symbol('chunk', DataShape(*(tuple(map(first, data.chunks)) +
                                        (leaf.dshape.measure,))))
    (chunk, chunk_expr), (agg, agg_expr) = split(expr._child, expr,
                                                 chunk=chunk)
    inds = tuple(range(ndim(leaf)))
    dtype = expr.dshape.measure.to_numpy_dtype()
    # First pass: run the chunk expression blockwise over the dask array.
    tmp = atop(
        curry(compute_it, chunk_expr, [chunk], **kwargs),
        inds,
        data,
        inds,
        dtype=dtype,
    )
    # Second pass: concatenate the per-chunk results along the reduced
    # axes, then apply the aggregation expression to the stitched array.
    return atop(
        compose(
            curry(compute_it, agg_expr, [agg], **kwargs),
            curry(_concatenate2, axes=expr.axis),
        ),
        tuple(i for i in inds if i not in expr.axis),
        tmp,
        inds,
        dtype=dtype,
    )
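The pattern here: `split` divides the expression into a per-chunk pass and an aggregation pass, and `compose` glues concatenation of the chunk results onto the front of the aggregation. A toy version of the same chunk/concatenate/aggregate idea with plain numpy (illustrative names, not blaze's API):

import numpy as np
from functools import partial
from toolz import compose

chunks = [np.arange(4), np.arange(4, 8)]        # blocked input
chunk_pass = partial(np.sum, keepdims=True)     # runs on each chunk
agg_pass = compose(np.sum, np.concatenate)      # stitch results, then reduce
agg_pass([chunk_pass(c) for c in chunks])       # 28 == np.arange(8).sum()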
"""Check if the type of each value is the same of the given type.
Parameters
----------
values : list or tuple
t : type
Returns
-------
tuple
"""
return (isinstance(x, t) for x in values)
any_of = toolz.compose(any, is_one_of)
all_of = toolz.compose(all, is_one_of)
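Since `is_one_of` returns a lazy generator, composing it with the `any`/`all` builtins gives short-circuiting type checks (usage assuming the definitions above and `import toolz`):

any_of([1, 'a', 2.0], str)   # True  -- at least one str present
all_of([1, 2, 3], int)       # True  -- every element is an int
all_of([1, 'a', 2], int)     # False -- fails at 'a'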
def promote_list(val: Union[V, List[V]]) -> List[V]:
    """Ensure that the value is a list.

    Parameters
    ----------
    val : list or object

    Returns
    -------
    list
    """
    if not isinstance(val, list):
        val = [val]
    return val
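Only a genuine `list` passes through unchanged; strings and tuples, although iterable, get wrapped rather than exploded:

promote_list([1, 2])   # [1, 2]   -- returned as-is
promote_list('abc')    # ['abc']  -- wrapped, not split into characters
promote_list((1, 2))   # [(1, 2)] -- tuples are wrapped too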
"nll": "Validation/Loss",
"mca": "Validation/MCA",
"pixacc": "Validation/Pixel_Acc",
},
),
)
def _select_max(pred_tensor):
return pred_tensor.max(1)[1]
def _tensor_to_numpy(pred_tensor):
return pred_tensor.squeeze().cpu().numpy()
transform_func = compose(np_to_tb, decode_segmap(n_classes=n_classes), _tensor_to_numpy)
transform_pred = compose(transform_func, _select_max)
evaluator.add_event_handler(
Events.EPOCH_COMPLETED, create_image_writer(summary_writer, "Validation/Image", "image"),
)
evaluator.add_event_handler(
Events.EPOCH_COMPLETED,
create_image_writer(summary_writer, "Validation/Mask", "mask", transform_func=transform_func),
)
evaluator.add_event_handler(
Events.EPOCH_COMPLETED,
create_image_writer(summary_writer, "Validation/Pred", "y_pred", transform_func=transform_pred),
)
# Track the earliest start date seen across all assets in the file.
if earliest_date is None:
    earliest_date = table["day"][0]
else:
    earliest_date = min(earliest_date, table["day"][0])
# Bcolz doesn't support ints as keys in `attrs`, so convert
# assets to strings for use as attr keys.
asset_key = str(asset_id)
# Calculate the index into the array of the first and last row
# for this asset. This allows us to efficiently load single
# assets when querying the data back out of the table.
first_row[asset_key] = total_rows
last_row[asset_key] = total_rows + nrows - 1
total_rows += nrows
table_day_to_session = compose(
self._calendar.minute_to_session_label,
partial(Timestamp, unit='s', tz='UTC'),
)
asset_first_day = table_day_to_session(table['day'][0])
asset_last_day = table_day_to_session(table['day'][-1])
asset_sessions = sessions[
sessions.slice_indexer(asset_first_day, asset_last_day)
]
assert len(table) == len(asset_sessions), (
    'Got {} rows for daily bars table with first day={}, last '
    'day={}, expected {} rows.\n'
    'Missing sessions: {}\n'
    'Extra sessions: {}'.format(
        len(table),
        asset_first_day.date(),
        asset_last_day.date(),
        len(asset_sessions),
        asset_sessions.difference(
            to_datetime(np.array(table['day']), unit='s', utc=True)
        ).tolist(),
        to_datetime(
            np.array(table['day']), unit='s', utc=True
        ).difference(asset_sessions).tolist(),
    )
)
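`table_day_to_session` chains two conversions right-to-left: raw epoch seconds become a tz-aware `Timestamp`, which the calendar then snaps to a trading session. A minimal sketch of the same pipeline with an illustrative stand-in for the calendar method:

from functools import partial
from pandas import Timestamp
from toolz import compose

def minute_to_session_label(ts):
    # Stand-in for self._calendar.minute_to_session_label: truncate to midnight.
    return ts.normalize()

table_day_to_session = compose(minute_to_session_label,
                               partial(Timestamp, unit='s', tz='UTC'))
table_day_to_session(1541462400)  # Timestamp for the 2018-11-06 session (UTC)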
import os
import random
import warnings
from builtins import FileNotFoundError
from collections import defaultdict
from itertools import filterfalse
import numpy as np
import torch
from PIL import Image
from toolz import compose, take, curry
from toolz import pipe
from torchvision.datasets.utils import iterable_to_str, verify_str_arg
from torchvision.datasets.vision import VisionDataset
_open_to_array = compose(np.array, Image.open)
class DataNotSplitException(Exception):
    pass
@curry
def _pad_right_and_bottom(pad_size, numpy_array, pad_value=255):
    assert (
        len(numpy_array.shape) == 2
    ), f"_pad_right_and_bottom only accepts 2D arrays. Input is {len(numpy_array.shape)}D"
    return np.pad(numpy_array, pad_width=[(0, pad_size), (0, pad_size)], constant_values=pad_value)
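Because `_pad_right_and_bottom` is decorated with `@curry`, supplying just `pad_size` yields a one-argument padder that composes cleanly with `_open_to_array` (this reuses the helpers above; the composed loader is only an illustration):

pad_to_tile = _pad_right_and_bottom(10)      # pad_size fixed by currying
pad_to_tile(np.zeros((5, 5))).shape          # (15, 15); new cells filled with 255
load_padded_mask = compose(pad_to_tile, _open_to_array)  # open file, then pad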
def _get_classes_and_counts(mask_list):
    # Accumulate per-class pixel counts across every mask in the split.
    class_counts_dict = defaultdict(int)
    for mask in mask_list:
        for class_id, count in zip(*np.unique(mask, return_counts=True)):
            class_counts_dict[class_id] += count
    return list(class_counts_dict.keys()), list(class_counts_dict.values())
def _tree_reduce(x, aggregate, axis, keepdims, dtype, split_every=None,
                 combine=None, name=None, concatenate=True, reduced_meta=None):
    """Perform the tree reduction step of a reduction."""
    # ... `split_every` is normalized and the tree `depth` is derived from
    # the number of blocks along each reduced axis ...
    # Reduce across intermediate levels with `combine` (or `aggregate`).
    func = partial(combine or aggregate, axis=axis, keepdims=True)
    if concatenate:
        func = compose(func, partial(_concatenate2, axes=axis))
    for i in range(depth - 1):
        x = partial_reduce(
            func,
            x,
            split_every,
            True,
            dtype=dtype,
            name=(name or funcname(combine or aggregate)) + "-partial",
            reduced_meta=reduced_meta,
        )
    # Final level: apply `aggregate` exactly once, honoring `keepdims`.
    func = partial(aggregate, axis=axis, keepdims=keepdims)
    if concatenate:
        func = compose(func, partial(_concatenate2, axes=axis))
    return partial_reduce(
        func,
        x,
        split_every,
        keepdims=keepdims,
        dtype=dtype,
        name=(name or funcname(aggregate)) + "-aggregate",
        reduced_meta=reduced_meta,
    )
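The loop above applies `combine` level by level until at most one round of results remains, then `aggregate` runs exactly once. A toy tree reduction over a plain list captures the same control flow (illustrative, not dask's API):

from toolz import partition_all

def tree_reduce(combine, aggregate, blocks, split_every=2):
    # Combine groups of `split_every` partial results until one
    # level is left, then aggregate that final level.
    while len(blocks) > split_every:
        blocks = [combine(group) for group in partition_all(split_every, blocks)]
    return aggregate(blocks)

tree_reduce(sum, sum, [1, 2, 3, 4, 5, 6, 7, 8])  # 36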
def words():
    # Build a bytecode sequence that pretty-prints the values of
    # globals(), sorted by name, then discards the result.
    yield instructions.LOAD_CONST(compose(
        pprint,
        partial(sorted, key=op.attrgetter('name')),
        dict.values,
    ))
    yield instructions.LOAD_CONST(globals)
    yield instructions.CALL_FUNCTION(0)   # call globals() -> dict
    yield instructions.CALL_FUNCTION(1)   # apply the composed printer
    yield instructions.POP_TOP()          # drop the (None) result
    yield next_instruction()
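Executing those instructions is equivalent to calling the composed constant on `globals()` and discarding the result. The same pipeline in ordinary Python, demonstrated on a small dict of named objects (the registry here is made up for the example):

from collections import namedtuple
from functools import partial
from operator import attrgetter
from pprint import pprint
from toolz import compose

Item = namedtuple('Item', 'name value')
registry = {'b': Item('b', 2), 'a': Item('a', 1)}

# dict.values first, then sort by `.name`, then pretty-print.
show = compose(pprint, partial(sorted, key=attrgetter('name')), dict.values)
show(registry)  # prints [Item(name='a', value=1), Item(name='b', value=2)]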
metrics_dict={
"mIoU": "Validation/mIoU",
"nll": "Validation/Loss",
"mca": "Validation/MCA",
"pixacc": "Validation/Pixel_Acc",
},
),
)
def _select_max(pred_tensor):
    # argmax over the class dimension: max(1) returns (values, indices).
    return pred_tensor.max(1)[1]

def _tensor_to_numpy(pred_tensor):
    return pred_tensor.squeeze().cpu().numpy()

# Right-to-left: tensor -> numpy -> colorized segmentation map -> TB image.
transform_func = compose(np_to_tb, decode_segmap(n_classes=n_classes), _tensor_to_numpy)
transform_pred = compose(transform_func, _select_max)
evaluator.add_event_handler(
Events.EPOCH_COMPLETED, create_image_writer(summary_writer, "Validation/Image", "image"),
)
evaluator.add_event_handler(
Events.EPOCH_COMPLETED,
create_image_writer(summary_writer, "Validation/Mask", "mask", transform_func=transform_func),
)
evaluator.add_event_handler(
Events.EPOCH_COMPLETED,
create_image_writer(summary_writer, "Validation/Pred", "y_pred", transform_func=transform_pred),
)
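Note the argument order in `transform_pred = compose(transform_func, _select_max)`: `toolz.compose` applies right-to-left, so the argmax over the class dimension runs first and its output feeds the colorizing pipeline. A quick check of the convention:

from toolz import compose
compose(str, len)('abcd')  # '4': len runs first, then str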
from unification import var
from kanren import eq
from kanren.core import lall
from kanren.graph import applyo
from kanren.constraints import neq
from etuples import etuple, etuplize
from ...theano.meta import mt
mt.nlinalg.qr_full = mt(QRFull("reduced"))
owner_inputs = attrgetter("owner.inputs")
normal_get_size = toolz.compose(itemgetter(2), owner_inputs)
normal_get_rng = toolz.compose(itemgetter(3), owner_inputs)
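These composed getters walk a Theano-style graph: `attrgetter("owner.inputs")` fetches the apply node's input list, and `itemgetter` picks a positional argument out of it. A mock node shows the mechanics (the input ordering below is illustrative only):

from types import SimpleNamespace

node = SimpleNamespace(owner=SimpleNamespace(inputs=['mu', 'sd', 'size', 'rng']))
normal_get_size(node)  # 'size' -- owner.inputs[2]
normal_get_rng(node)   # 'rng'  -- owner.inputs[3]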
def update_name_suffix(x, old_x, suffix):  # pragma: no cover
    new_name = old_x.name + suffix
    x.name = new_name
    return x
def normal_normal_regression(Y, X, beta, Y_args_tail=None, beta_args=None):
    """Create a goal for a normal-normal regression of the form `Y ~ N(X * beta, sd**2)`."""
    Y_args_tail = Y_args_tail or var()
    beta_args = beta_args or var()
    Y_args, Y_mean_lv = var(), var()