def expit(x):
  """Compute 1 / (1 + exp(-x))."""
  return asarray(tf.math.sigmoid(x.data))
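# Usage sketch (illustrative only; assumes `asarray` wraps a tf.Tensor into an
# ndarray as elsewhere in this module):
#   expit(asarray([-1., 0., 1.]))
#   # -> ndarray of [0.26894143, 0.5, 0.7310586], the elementwise sigmoid.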
    dtype: Optional. The type of the output array. If None, defaults to the
      dtype of `a` unless `a` is an integer type with precision less than
      `int`, in which case the output type is `int`.

  Returns:
    An ndarray with the same number of elements as `a`. If `axis` is None, the
    output is a 1-d array, else it has the same shape as `a`.
  """
  a = array_creation.asarray(a, dtype=dtype)
  if dtype is None and tf.as_dtype(a.dtype).is_integer:
    # If `a` is an integer type and its precision is less than that of `int`,
    # the output type will be `int`.
    output_type = np_promote_types(a.dtype, int)
    if output_type != a.dtype:
      a = array_creation.asarray(a, dtype=output_type)
  # If axis is None, the input is flattened.
  if axis is None:
    a = ravel(a)
    axis = 0
  if axis < 0:
    axis += a.ndim
  assert 0 <= axis < a.ndim
  return utils.tensor_to_ndarray(tf.cumsum(a.data, axis))
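# Usage sketch (illustrative; assumes the enclosing function is the module's
# `cumsum(a, axis=None, dtype=None)`, as the docstring above suggests):
#   cumsum([1, 2, 3])                 # -> ndarray of [1, 3, 6]
#   cumsum([[1, 2], [3, 4]])          # axis=None flattens first -> [1, 3, 6, 10]
#   cumsum([[1, 2], [3, 4]], axis=1)  # -> [[1, 3], [3, 7]]
# Integer inputs narrower than `int` (e.g. int8) are promoted to `int`, per the
# dtype handling above.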
def _scalar(x, tf_fn):
  """Computes `tf_fn(x)` for each element in `x`.

  Args:
    x: array_like. Could be an ndarray, a Tensor or any object that can
      be converted to a Tensor using `tf.convert_to_tensor`.
    tf_fn: function that takes a single Tensor argument.

  Returns:
    An ndarray with the same shape as `x`. The default output dtype is
    determined by `dtypes.default_float_type`, unless `x` is an ndarray with a
    floating point type, in which case the output dtype is the same as
    `x.dtype`.
  """
  x = array_creation.asarray(x)
  if x.dtype not in (np.float16, np.float32, np.float64):
    x = x.astype(dtypes.default_float_type())
  return utils.tensor_to_ndarray(tf_fn(x.data))
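# Illustrative sketch of how `_scalar` can define elementwise ops (`log` here
# is an assumed example wrapper, not necessarily how the module defines it):
def log(x):
  """Computes the natural logarithm of `x` elementwise via `_scalar`."""
  return _scalar(x, tf.math.log)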
def _bin_op(tf_fun, a, b, promote=True):
  """Applies `tf_fun` to `a` and `b`, promoting their dtypes when `promote` is True."""
  if promote:
    a, b = promote_args_types(a, b)
  else:
    a = array_creation.asarray(a)
    b = array_creation.asarray(b)
  return utils.tensor_to_ndarray(tf_fun(a.data, b.data))
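# Illustrative sketch of how `_bin_op` can define binary elementwise ops
# (`maximum` here is an assumed example wrapper, not necessarily how the
# module defines it):
def maximum(a, b):
  """Computes the elementwise maximum of `a` and `b`, with dtype promotion."""
  return _bin_op(tf.math.maximum, a, b)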
    pool_size: sequence of N ints.
    strides: sequence of N ints.
    padding: a string, the padding algorithm. Must be "SAME" or "VALID".

  Returns:
    An (N+2)-D array, of shape
      [batch_size] + output_spatial_shape + [num_channels],
    where `output_spatial_shape` depends on the value of padding:
    If padding = "SAME":
      output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
    If padding = "VALID":
      output_spatial_shape[i] =
        ceil((input_spatial_shape[i] - (pool_size[i] - 1)) / strides[i]).
  """
  x = asarray(x)
  return asarray(tf.nn.pool(input=x, window_shape=pool_size, pooling_type="MAX",
                            strides=strides, padding=padding))
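# Shape sketch (illustrative; assumes the enclosing function is a
# `max_pool(x, pool_size, strides, padding)` wrapper, as the docstring suggests):
# for an NHWC input of shape [1, 4, 4, 1] with pool_size=(2, 2) and
# strides=(2, 2), both paddings yield output shape [1, 2, 2, 1]:
#   "SAME":  ceil(4 / 2) = 2 per spatial dimension
#   "VALID": ceil((4 - (2 - 1)) / 2) = ceil(1.5) = 2 per spatial dimension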
    a: array_like. Could be an ndarray, a Tensor or any object that can
      be converted to a Tensor using `tf.convert_to_tensor`.
    repeats: 0-d or 1-d array_like. The number of times each element along
      `axis` will be repeated. If this has size 1, each element along the axis
      is repeated the same number of times.
    axis: Optional. The axis along which to repeat. If None, the input array
      is flattened.

  Returns:
    An ndarray with the same type as `a`.

  Raises:
    ValueError: If `repeats` has rank > 1 or an incompatible shape.
  """
  a = array_creation.asarray(a)
  repeats = array_creation.asarray(repeats)
  if repeats.ndim > 1:
    raise ValueError('repeats must be a scalar or 1-d array.')
  repeats = ravel(repeats)  # Convert to 1-d array.
  # As per documentation, if axis is None, the input is flattened
  # and a flattened output is returned.
  if axis is None:
    a = ravel(a)
    axis = 0
  elif axis < 0:
    axis += a.ndim
  # Broadcast repeats to match shape of axis.
  if len(repeats) == 1:
    repeats = utils.tensor_to_ndarray(tf.tile(repeats.data, [a.shape[axis]]))
  if a.shape[axis] != len(repeats):
    raise ValueError('repeats must have the same length as the dimension of '
                     '`a` along `axis`; got %s and %s.' %
                     (len(repeats), a.shape[axis]))
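# Usage sketch (illustrative; assumes the enclosing function is the module's
# `repeat(a, repeats, axis=None)` and follows numpy.repeat semantics as
# documented above):
#   repeat([[1, 2], [3, 4]], 2)
#   # axis=None flattens first -> ndarray of [1, 1, 2, 2, 3, 3, 4, 4]
#   repeat([[1, 2], [3, 4]], [1, 2], axis=0)
#   # -> ndarray of [[1, 2], [3, 4], [3, 4]]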
def transpose(a, axes=None):
  """Permutes dimensions of the array.

  Args:
    a: array_like. Could be an ndarray, a Tensor or any object that can
      be converted to a Tensor using `tf.convert_to_tensor`.
    axes: array_like. A list of ints with length rank(a) or None specifying the
      order of permutation. The i'th dimension of the output array corresponds
      to the axes[i]'th dimension of `a`. If None, the axes are reversed.

  Returns:
    An ndarray.
  """
  a = array_creation.asarray(a)
  if axes is not None:
    axes = array_creation.asarray(axes)
  return utils.tensor_to_ndarray(tf.transpose(a=a.data, perm=axes))
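# Usage sketch (illustrative):
#   transpose(np.zeros((2, 3, 4)))                  # axes=None reverses -> shape (4, 3, 2)
#   transpose(np.zeros((2, 3, 4)), axes=[1, 0, 2])  # -> shape (3, 2, 4)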
def swapaxes(a, axis1, axis2):
  """Interchange two axes of an array.

  Args:
    a: array_like. Input array.
    axis1: int. First axis.
    axis2: int. Second axis.

  Returns:
    An ndarray.
  """
  a = array_creation.asarray(a)
  # TODO(wangpeng): handling partial shapes with unknown ranks
  n = len(a.shape)
  if not -n <= axis1 < n:
    raise ValueError('axis1 must be in range [-%s, %s); got %s' % (n, n, axis1))
  if not -n <= axis2 < n:
    raise ValueError('axis2 must be in range [-%s, %s); got %s' % (n, n, axis2))
  if axis1 < 0:
    axis1 += n
  if axis2 < 0:
    axis2 += n
  perm = list(range(n))
  perm[axis1] = axis2
  perm[axis2] = axis1
  return transpose(a, perm)
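# Usage sketch (illustrative): swapping the first and last axes of a rank-3
# array builds the permutation [2, 1, 0]:
#   swapaxes(np.zeros((2, 3, 4)), 0, 2)   # -> shape (4, 3, 2)
#   swapaxes(np.zeros((2, 3, 4)), 0, -1)  # same result; negative axes are normalized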
  If `axis` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  This function is more numerically stable than log(sum(exp(input))). It avoids
  overflows caused by taking the exp of large inputs and underflows caused by
  taking the log of small inputs.

  Args:
    x: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default),
      reduces all dimensions. Must be in the range
      `[-rank(x), rank(x))`.
    keepdims: If true, retains reduced dimensions with length 1.

  Returns:
    The reduced tensor.
  """
  return asarray(tf.math.reduce_logsumexp(input_tensor=x.data, axis=axis,
                                          keepdims=keepdims))
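# Numerical-stability sketch (illustrative; assumes the enclosing function is
# the module's `logsumexp(x, axis=None, keepdims=None)`):
#   logsumexp(asarray([1000., 1000.]))
#   # -> ~1000.693 (i.e. 1000 + log(2)), whereas a naive log(sum(exp(x)))
#   # overflows to inf because exp(1000.) is not representable in float32/float64.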