>>> C.greater([41., 42., 43.], [42., 42., 42.]).eval()
array([ 0., 0., 1.], dtype=float32)
>>> C.greater([-1,0,1], [0]).eval()
array([ 0., 0., 1.], dtype=float32)
Args:
left: left side tensor
right: right side tensor
name (str, optional): the name of the Function instance in the network
Returns:
:class:`~cntk.ops.functions.Function`
'''
from cntk.cntk_py import greater
dtype = get_data_type(left, right)
left = sanitize_input(left, dtype)
right = sanitize_input(right, dtype)
return greater(left, right, name)
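# Example (a minimal sketch, not part of the original docstring): the comparison
# is element-wise with NumPy-style broadcasting, and the result is a float32
# 0/1 mask rather than a boolean array.
import numpy as np
import cntk as C

a = np.array([41., 42., 43.], dtype=np.float32)
b = np.array([42., 42., 42.], dtype=np.float32)

print(C.greater(a, b).eval())            # [ 0.  0.  1.]
print((a > b).astype(np.float32))        # same values via NumPy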
>>> C.less([41., 42., 43.], [42., 42., 42.]).eval()
array([ 1., 0., 0.], dtype=float32)
>>> C.less([-1,0,1], [0]).eval()
array([ 1., 0., 0.], dtype=float32)
Args:
left: left side tensor
right: right side tensor
name (str, optional): the name of the Function instance in the network
Returns:
:class:`~cntk.ops.functions.Function`
'''
from cntk.cntk_py import less
dtype = get_data_type(left, right)
left = sanitize_input(left, dtype)
right = sanitize_input(right, dtype)
return less(left, right, name)
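# Example (a minimal sketch, assuming the same environment as above): because the
# comparison returns a 0/1 float mask, it can be combined with reduce_sum to count
# the elements below a threshold.
import numpy as np
import cntk as C

values = np.array([-1., 0., 1., -3.], dtype=np.float32)
below_zero = C.less(values, [0.])            # 0/1 mask: [1., 0., 0., 1.]
print(C.reduce_sum(below_zero).eval())       # sums the mask: two elements are < 0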
>>> C.reduce_min( x * 1.0, (C.Axis.default_batch_axis(), 1)).eval({x: data}).round(4)
array([[ 1.],
       [ 2.]], dtype=float32)
Args:
x: input tensor
axis (int or :class:`~cntk.axis.Axis` or a list of integers or a list of :class:`~cntk.axis.Axis`): axis along which the reduction will be performed
name (str): the name of the Function instance in the network
Returns:
:class:`~cntk.ops.functions.Function`
Note that CNTK keeps the shape of the resulting tensors when reducing over multiple static axes.
'''
from cntk.cntk_py import reduce_min
x = sanitize_input(x)
axis = sanitize_axis_list(axis)
return reduce_min(x, axis, name)
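# Example (a minimal, self-contained sketch; the setup of `x` and `data` used in the
# fragment above is not shown, so the values here are assumed): reducing over a
# static axis keeps that axis with size 1, as stated in the note above.
import numpy as np
import cntk as C

x = C.input_variable((2, 2))
data = np.array([[[4., 2.],
                  [1., 3.]]], dtype=np.float32)    # batch of one 2x2 sample

# per-row minimum; the reduced axis is kept, so each sample has shape (2, 1)
print(C.reduce_min(x, axis=1).eval({x: data}))     # [[[ 2.], [ 1.]]]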
:math:`sigmoid(x) = {1 \over {1+\exp(-x)}}`
The output tensor has the same shape as ``x``.
Example:
>>> C.sigmoid([-2, -1., 0., 1., 2.]).eval()
array([ 0.119203, 0.268941, 0.5 , 0.731059, 0.880797], dtype=float32)
Args:
x: numpy array or any :class:`~cntk.ops.functions.Function` that outputs a tensor
name (str, optional): the name of the Function instance in the network
Returns:
:class:`~cntk.ops.functions.Function`
'''
from cntk.cntk_py import sigmoid
x = sanitize_input(x)
return sigmoid(x, name)
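# Quick NumPy cross-check of the formula above (not part of the original docstring;
# the values match the doctest):
import numpy as np
x = np.array([-2., -1., 0., 1., 2.], dtype=np.float32)
print(np.round(1.0 / (1.0 + np.exp(-x)), 6))   # [0.119203 0.268941 0.5 0.731059 0.880797]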
>>> dest.asarray()
array([[ 2., 2., 2., 2.],
       [ 2., 2., 2., 2.],
       [ 2., 2., 2., 2.]], dtype=float32)
Args:
ref: :class:`~cntk.variables.Constant` or :class:`~cntk.variables.Parameter`
input: :class:`~cntk.ops.functions.Function` that outputs a tensor
name (str, optional): the name of the Function instance in the network
Returns:
:class:`~cntk.ops.functions.Function`
'''
from cntk.cntk_py import assign
dtype = get_data_type(input)
operand = sanitize_input(input, dtype)
ref_operand = sanitize_input(ref, dtype)
return assign(ref_operand, operand, name)
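# Example (a minimal sketch completing the fragment above; the construction of
# `dest` is assumed): assign copies the value of `input` into a Parameter or
# Constant when the assign node is evaluated.
import numpy as np
import cntk as C

dest = C.parameter(shape=(3, 4), init=0)
src = C.constant(np.full((3, 4), 2., dtype=np.float32))

C.assign(dest, src).eval()      # performs the copy
print(dest.asarray())           # 3x4 array filled with 2.0, as shown above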
[ 13., 15.]]]], dtype=float32)
Args:
operand: pooling input
pooling_type: one of :const:`~cntk.ops.MAX_POOLING` or :const:`~cntk.ops.AVG_POOLING`
pooling_window_shape: dimensions of the pooling window
strides (default 1): strides of the pooling window
auto_padding (default [False,]): automatic padding flags for each input dimension
ceil_out_dim (default False): use ceiling while computing the output size
include_pad (default False): include padding values when average pooling
name (str, optional): the name of the Function instance in the network
Returns:
:class:`~cntk.ops.functions.Function`
'''
from cntk.cntk_py import pooling
operand = sanitize_input(operand)
pooling_window_shape, strides, auto_padding = sanitize_pooling_args(pooling_window_shape, strides, auto_padding)
return pooling(operand, pooling_type, pooling_window_shape, strides, auto_padding,
ceil_out_dim, include_pad, name)
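# Example (a minimal sketch with assumed input values, consistent with the truncated
# output above): 2x2 max pooling with stride 2 over a single-channel 4x4 image keeps
# the maximum of each 2x2 block.
import numpy as np
import cntk as C

img = np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4)   # batch, channel, rows, cols
x = C.input_variable((1, 4, 4))

pool = C.pooling(x, C.MAX_POOLING, (2, 2), strides=(2, 2))
print(pool.eval({x: img}))
# [[[[  5.,   7.],
#    [ 13.,  15.]]]]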
>>> s = np.array([2, 1, 3, 1], dtype=np.float32).reshape(4,1,1)
>>> n = np.array([7, 1, 3, 1], dtype=np.float32).reshape(4,1,1)
>>> C.ndcg_at_1(score, gain, group).eval({score:s, gain:n, group: g})
array(400.0, dtype=float32)
Args:
output: score of each sample
gain: gain of each sample
group: group of each sample
name (str, optional): the name of the Function instance in the network
Returns:
:class:`~cntk.ops.functions.Function`
'''
from cntk.cntk_py import ndcg_at_1
dtype = get_data_type(output, gain, group)
output = sanitize_input(output, dtype)
gain = sanitize_input(gain, dtype)
group = sanitize_input(group, dtype)
return ndcg_at_1(output, gain, group, name)
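# Example (a minimal sketch completing the fragment above; the input variables and
# the grouping vector `g` are assumptions, since their definitions are not shown):
# each row is one document, and `group` marks which query a document belongs to.
import numpy as np
import cntk as C

score = C.input_variable((1,))
gain = C.input_variable((1,))
group = C.input_variable((1,))

g = np.array([1, 1, 2, 2], dtype=np.float32).reshape(4, 1, 1)   # two groups of two docs
s = np.array([2, 1, 3, 1], dtype=np.float32).reshape(4, 1, 1)   # predicted scores
n = np.array([7, 1, 3, 1], dtype=np.float32).reshape(4, 1, 1)   # gains

print(C.ndcg_at_1(score, gain, group).eval({score: s, gain: n, group: g}))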
[ 19.]
[ 21.]]]
Args:
left: left side tensor
right: right side matrix or vector
name (str, optional): the name of the Function instance in the network
Returns:
:class:`~cntk.ops.functions.Function`
'''
from cntk.cntk_py import times_transpose
dtype = get_data_type(left, right)
left = sanitize_input(left, dtype)
rshape = sanitize_shape(right.shape)
right = sanitize_input(right, dtype, (1,rshape[0]) if len(rshape) == 1 else None)
return times_transpose(right, left, 1, name)
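# Example (a minimal sketch with assumed values): times_transpose computes
# left @ right.T, so a 2x3 matrix times the transpose of a 1x3 row vector
# yields a 2x1 column.
import numpy as np
import cntk as C

left = np.array([[1., 2., 3.],
                 [4., 5., 6.]], dtype=np.float32)
right = np.array([[1., 0., 1.]], dtype=np.float32)

print(C.times_transpose(left, right).eval())   # [[ 4.], [10.]]
print(left @ right.T)                          # same result with NumPy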
The output tensor has the same shape as ``x``.
Example:
>>> np.round(C.cosh([[1,0.5],[-0.25,-0.75]]).eval(),5)
array([[ 1.54308, 1.12763],
       [ 1.03141, 1.29468]], dtype=float32)
Args:
x: numpy array or any :class:`~cntk.ops.functions.Function` that outputs a tensor
name (str, optional): the name of the Function instance in the network
Returns:
:class:`~cntk.ops.functions.Function`
'''
from cntk.cntk_py import cosh
x = sanitize_input(x)
return cosh(x, name)
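# Cross-check of the doctest values above with NumPy (not part of the original docstring):
import numpy as np
print(np.round(np.cosh([[1, 0.5], [-0.25, -0.75]]), 5))
# [[ 1.54308  1.12763]
#  [ 1.03141  1.29468]]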