@typemap
def element_divide(left, right, name=''):
'''
The output of this operation is the element-wise division of the two input
tensors. It supports broadcasting.
Example:
>>> C.element_divide([1., 1., 1., 1.], [0.5, 0.25, 0.125, 0.]).eval()
array([ 2., 4., 8., 0.], dtype=float32)
>>> C.element_divide([5., 10., 15., 30.], [2.]).eval()
array([ 2.5, 5. , 7.5, 15. ], dtype=float32)
Args:
left: left side tensor
right: right side tensor
name (str, optional): the name of the Function instance in the network
@typemap
def learning_rate_schedule(lr, unit, epoch_size=None):
'''
Create a learning rate schedule (using the same semantics as
:func:`training_parameter_schedule`).
Args:
lr (float or list): see parameter ``schedule`` in
:func:`training_parameter_schedule`.
unit (:class:`UnitType`): see parameter
``unit`` in :func:`training_parameter_schedule`.
epoch_size (int): see parameter ``epoch_size`` in
:func:`training_parameter_schedule`.
Returns:
learning rate schedule
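# A hedged usage sketch (an addition, not from the original docstring). It
# assumes the CNTK 2.x UnitType enum, C.layers.Dense, and the C.sgd learner
# factory documented elsewhere in this API.
import cntk as C

x = C.input_variable((4,))
z = C.layers.Dense(2)(x)
# 0.05 per sample for the first 100 samples seen, then 0.01 for the next 100,
# then 0.001 for the rest of training
lr = C.learning_rate_schedule([0.05, 0.01, 0.001], C.UnitType.sample, epoch_size=100)
learner = C.sgd(z.parameters, lr)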
@typemap
def param_relu(alpha, x, name=''):
'''
Parametric rectified linear operation. Computes the element-wise parametric rectified linear
of ``x``: ``x`` for ``x >= 0`` and ``alpha*x`` otherwise.
The output tensor has the same shape as ``x``.
Example:
>>> alpha = C.constant(value=[[0.5, 0.5, 0.5, 0.5, 0.5]])
>>> C.param_relu(alpha, [[-1, -0.5, 0, 1, 2]]).eval()
array([[-0.5 , -0.25, 0. , 1. , 2. ]], dtype=float32)
Args:
alpha (:class:`~cntk.variables.Parameter`): same shape as x
x (`numpy.array` or :class:`~cntk.ops.functions.Function`): any :class:`~cntk.ops.functions.Function` that outputs a tensor.
name (`str`, defaults to ''): the name of the Function instance in the network
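# A hedged sketch (an addition, not from the original docstring): the same op
# with a learnable slope, assuming the CNTK 2.x C.parameter factory.
import cntk as C

x = C.input_variable((5,))
alpha = C.parameter(shape=(5,), init=0.25)      # one learnable slope per element
y = C.param_relu(alpha, x)
print(y.eval({x: [[-1., -0.5, 0., 1., 2.]]}))   # negative entries get scaled by 0.25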
@typemap
def backward(self, state, root_gradients, variables, as_numpy=True):
'''
Backpropagates supplied ``root_gradients`` for one or more of the output
variables of the Function, to calculate gradients with respect to
``variables``. Formally, multiplies the values of ``root_gradients`` by
the Jacobian of the Function and returns the subset of the output that
corresponds to ``variables``.
Example:
>>> # compute the value and the derivative of the sigmoid at 0
>>> v = C.input_variable(shape=(1,), needs_gradient=True)
>>> f = C.sigmoid(v)
>>> df, fv = f.forward({v:[[0]]}, [f.output], set([f.output]))
>>> value = list(fv.values())[0]
>>> grad = f.backward(df, {f.output: np.ones_like(value)}, set([v]))
>>> value
array([[ 0.5]], dtype=float32)
@typemap
def block_arguments_mapping(self):
'''
Returns the mapping from the arguments of the composite underlying this block function
to the Variables that they are bound to in the outer graph of Functions that this
block Function is part of.
'''
return super(Function, self).block_arguments_mapping()
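# A hedged sketch of inspecting the mapping (an addition). It assumes that
# cntk.layers.Dense is implemented as a block Function, so the root of the
# applied layer exposes block_arguments_mapping, and that the mapping is a
# list of (inner argument, outer variable) pairs.
import cntk as C

x = C.input_variable((4,), name='features')
z = C.layers.Dense(2, name='dense')(x)
blk = z.root_function                     # the Dense block applied to x
if blk.is_block:
    for inner, outer in blk.block_arguments_mapping:
        print(inner.name, '->', outer.name)   # e.g. the block's argument bound to 'features'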
@typemap
def mpi_communicator():
'''
Creates a non-quantized MPI communicator.
'''
return cntk_py.mpicommunicator()
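# A hedged sketch of where this communicator is typically used (an addition):
# data-parallel distributed training over MPI. It assumes the CNTK 2.x
# cntk.train.distributed module; the learner wrapper, not user code, creates
# the communicator.
import cntk as C
from cntk.train.distributed import data_parallel_distributed_learner, Communicator

x = C.input_variable((4,))
z = C.layers.Dense(2)(x)
lr = C.learning_rate_schedule(0.01, C.UnitType.minibatch)
learner = data_parallel_distributed_learner(C.sgd(z.parameters, lr))
# build a Trainer on `learner`, feed each MPI worker its share of the data, then:
Communicator.finalize()   # shut down MPI cleanly at the end of the script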
@typemap
def argmax(x, axis=None, name=''):
'''
Computes the argmax of the input tensor's elements across the specified axis.
If no axis is specified, it returns the flattened index of the largest element
in the tensor ``x``.
Example:
>>> # create 3x2 matrix in a sequence of length 1 in a batch of one sample
>>> data = [[10, 20],[30, 40],[50, 60]]
>>> C.argmax(data, 0).eval()
array([[ 2., 2.]], dtype=float32)
>>> C.argmax(data, 1).eval()
array([[ 1.],
       [ 1.],
       [ 1.]], dtype=float32)
@typemap
def combine(*operands, **kw_name):
'''
Create a new Function instance which combines the outputs of the specified list of
'operands' Functions, such that the 'Outputs' of the new 'Function' are the union of the
'Outputs' of each of the specified 'operands' Functions. For example, when creating a
classification model, the CrossEntropy loss Function and the ClassificationError Function
typically form the two roots of the computation graph; these can be combined to create a
single Function with 2 outputs, viz. the CrossEntropy loss and the ClassificationError output.
Example:
>>> in1 = C.input_variable((4,))
>>> in2 = C.input_variable((4,))
>>> in1_data = np.asarray([[1., 2., 3., 4.]], np.float32)
>>> in2_data = np.asarray([[0., 5., -3., 2.]], np.float32)
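>>> # A hedged continuation (an addition, not the original docstring's text):
>>> # combine the two roots and evaluate both outputs in a single pass.
>>> plus_op = in1 + in2
>>> minus_op = in1 - in2
>>> result = C.combine([plus_op, minus_op]).eval({in1: in1_data, in2: in2_data})
>>> # result is a dict keyed by output Variable:
>>> #   result[plus_op.output]  -> [[ 1.,  7.,  0.,  6.]]
>>> #   result[minus_op.output] -> [[ 1., -3.,  6.,  2.]]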
# If name is not a member of Function or Variable, first look for
# a user-named item in the graph.
# (Known member names cannot be overridden by user-named items,
# to ensure that the API itself keeps working.)
if not hasattr(Variable, name) and not hasattr(Function, name) \
and not name.startswith('_') and name not in ['outputs', 'output', 'this']:
# lookup of a named object inside the graph
# When 'self' is a BlockFunction (e.g. a named layer), then we only search in there,
# while when 'self' is a regular node (e.g. a named output using Label),
# we search the composite, which may return multiple hits with the same name.
# In case of multiple matches, we fail.
# BUGBUG: That is a problem if, e.g., someone used a layer (=BlockFunction) twice
# and then looks it up by name, as that will fail although both instances are identical.
from cntk.logging.graph import find_by_name
root = self.block_root if self.is_block else self
item = typemap(find_by_name)(root, name, depth=1)
if item:
return item
# If something is not found in Function, look it up in its output
# variable, if it has only one.
if name.startswith('_') or name in ['outputs', 'output', 'this']:
# These should not be looked up in self's output.
# 'outputs' and 'output' are required below to fetch the attribute
# from the Variable.
# 'this' is required by Swig, and an AttributeError must be raised
# for it while the object is first being constructed.
raise AttributeError("neither Function nor its output variable"
" has '%s'"%name)
# access an API member of 'output', such as .shape()
outputs = self.__getattribute__('outputs')
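# A hedged sketch of the attribute-style lookup this fragment implements (an
# addition; apparently from Function.__getattr__). It assumes cntk.layers.Dense
# with a name= argument, as in CNTK 2.x.
import cntk as C

x = C.input_variable((4,))
h = C.layers.Dense(8, name='hidden')(x)
z = C.layers.Dense(2, name='top')(h)
print(z.hidden)   # resolves the user-named 'hidden' layer via find_by_name
print(z.shape)    # not a Function member: forwarded to z's single output variable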
@typemap
def as_parameter(self):
'''
Converts this instance into a :class:`Parameter`
'''
if not self.is_parameter:
raise TypeError('cannot be converted into a Parameter')
return cntk_py.Parameter(self)
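# A hedged usage sketch (an addition): graph traversal yields plain Variable
# objects; when one is a parameter, as_parameter() recovers the Parameter so
# parameter-only members such as .value become available. Assumes
# cntk.layers.Dense and that Function.inputs includes parameter Variables.
import cntk as C

x = C.input_variable((4,))
z = C.layers.Dense(2)(x)
for var in z.inputs:              # includes the layer's weight and bias
    if var.is_parameter:
        p = var.as_parameter()    # now a Parameter, so .value can be read
        print(p.name, p.shape, p.value.mean())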