import functools
import math

import tensorflow as tf

# `encode_wav` lives under `tf.audio` in TF 2.x; the snippets below refer
# to it as `audio_ops` (an assumption about the elided import).
audio_ops = tf.audio

# Default number of buckets used by `_buckets` below.
DEFAULT_BUCKET_COUNT = 30

def _write_summary(name, pb):
    """Write a summary, returning the writing op.

    Args:
      name: As passed to `summary_scope`.
      pb: A `summary_pb2.Summary` message.

    Returns:
      A tensor whose value is `True` on success, or `False` if no summary
      was written because no default summary writer was available.
    """
    raw_pb = pb.SerializeToString()
    summary_scope = (
        getattr(tf.summary.experimental, "summary_scope", None)
        or tf.summary.summary_scope
    )
    with summary_scope(name):
        return tf.summary.experimental.write_raw_pb(raw_pb, step=0)

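# Illustrative usage sketch (not part of the original source). Assumes TF 2.x
# eager mode and the TensorBoard proto module; the log directory is a
# placeholder.
def _demo_write_summary(logdir="/tmp/demo_logs"):
    from tensorboard.compat.proto import summary_pb2

    pb = summary_pb2.Summary()
    pb.value.add(tag="answer", simple_value=42.0)
    writer = tf.summary.create_file_writer(logdir)
    with writer.as_default():
        # True if a default writer was available and the proto was written.
        return _write_summary("answer", pb)
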
def _buckets(data, bucket_count=None):
    """Create a TensorFlow op to group data into histogram buckets.

    Arguments:
      data: A `Tensor` of any shape. Must be castable to `float64`.
      bucket_count: Optional positive `int` or scalar `int32` `Tensor`.

    Returns:
      A `Tensor` of shape `[k, 3]` and type `float64`. The `i`th row is
      a triple `[left_edge, right_edge, count]` for a single bucket.
      The value of `k` is either `bucket_count` or `1` or `0`.
    """
    if bucket_count is None:
        bucket_count = DEFAULT_BUCKET_COUNT
    with tf.name_scope("buckets"):
        tf.debugging.assert_scalar(bucket_count)
        tf.debugging.assert_type(bucket_count, tf.int32)
        data = tf.reshape(data, shape=[-1])  # flatten
        data = tf.cast(data, tf.float64)
        is_empty = tf.equal(tf.size(input=data), 0)

        def when_empty():
            # No data: an empty [0, 3] bucket tensor.
            return tf.constant([], shape=(0, 3), dtype=tf.float64)

        def when_nonempty():
            min_ = tf.reduce_min(input_tensor=data)
            max_ = tf.reduce_max(input_tensor=data)
            range_ = max_ - min_
            is_singular = tf.equal(range_, 0)

            def when_nonsingular():
                bucket_width = range_ / tf.cast(bucket_count, tf.float64)
                offsets = data - min_
                bucket_indices = tf.cast(
                    tf.floor(offsets / bucket_width), dtype=tf.int32
                )
                clamped_indices = tf.minimum(bucket_indices, bucket_count - 1)
                one_hots = tf.one_hot(clamped_indices, depth=bucket_count)
                bucket_counts = tf.cast(
                    tf.reduce_sum(input_tensor=one_hots, axis=0),
                    dtype=tf.float64,
                )
                edges = tf.linspace(min_, max_, bucket_count + 1)
                # Ensure edges[-1] == max_, which TF's linspace implementation
                # does not do, leaving it subject to the whim of floating point
                # rounding error.
                edges = tf.concat([edges[:-1], [max_]], 0)
                left_edges = edges[:-1]
                right_edges = edges[1:]
                return tf.transpose(
                    a=tf.stack([left_edges, right_edges, bucket_counts])
                )

            def when_singular():
                # All values identical (range 0): a single unit-width bucket
                # centered on the common value, holding every element.
                center = min_
                bucket_starts = tf.stack([center - 0.5])
                bucket_ends = tf.stack([center + 0.5])
                bucket_counts = tf.stack(
                    [tf.cast(tf.size(input=data), tf.float64)]
                )
                return tf.transpose(
                    a=tf.stack([bucket_starts, bucket_ends, bucket_counts])
                )

            return tf.cond(is_singular, when_singular, when_nonsingular)

        return tf.cond(is_empty, when_empty, when_nonempty)

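# Illustrative usage (not part of the original source): bucket a small
# sample and inspect the [left_edge, right_edge, count] rows. Assumes
# TF 2.x eager execution.
def _demo_buckets():
    sample = tf.constant([0.0, 0.5, 1.0, 1.0, 2.0])
    rows = _buckets(sample, bucket_count=4)  # shape [4, 3], dtype float64
    # The per-bucket counts in rows[:, 2] sum to tf.size(sample).
    return rows
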
# `lazy_tensor` closure from the audio summary op: `data` (a rank-3
# [clips, frames, channels] tensor), `max_outputs`, and `sample_rate` are
# free variables supplied by the enclosing function.
def lazy_tensor():
    tf.debugging.assert_rank(data, 3)
    tf.debugging.assert_non_negative(max_outputs)
    limited_audio = data[:max_outputs]
    encode_fn = functools.partial(
        audio_ops.encode_wav, sample_rate=sample_rate
    )
    encoded_audio = tf.map_fn(
        encode_fn,
        limited_audio,
        dtype=tf.string,
        name="encode_each_audio",
    )
    # Workaround for map_fn returning float dtype for an empty elems input.
    encoded_audio = tf.cond(
        tf.shape(input=encoded_audio)[0] > 0,
        lambda: encoded_audio,
        lambda: tf.constant([], tf.string),
    )
    # Pair each encoded clip with an (empty) label, yielding shape [k, 2].
    limited_labels = tf.tile([""], tf.shape(input=limited_audio)[:1])
    return tf.transpose(a=tf.stack([encoded_audio, limited_labels]))

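# Illustrative sketch (not from the original source): the per-clip encode
# step above, applied to a synthetic 440 Hz tone.
def _demo_encode_wav(sample_rate=16000):
    t = tf.linspace(0.0, 1.0, sample_rate)
    tone = tf.sin(2.0 * math.pi * 440.0 * t)[:, tf.newaxis]  # [samples, channels]
    # Returns a scalar string tensor holding the WAV file bytes.
    return audio_ops.encode_wav(tone, sample_rate=sample_rate)
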
# `lazy_tensor` closure from the image summary op: `data` (a rank-4
# [k, h, w, c] tensor) and `max_outputs` are free variables supplied by
# the enclosing function.
def lazy_tensor():
    tf.debugging.assert_rank(data, 4)
    tf.debugging.assert_non_negative(max_outputs)
    images = tf.image.convert_image_dtype(data, tf.uint8, saturate=True)
    limited_images = images[:max_outputs]
    encoded_images = tf.map_fn(
        tf.image.encode_png,
        limited_images,
        dtype=tf.string,
        name="encode_each_image",
    )
    # Workaround for map_fn returning float dtype for an empty elems input.
    encoded_images = tf.cond(
        tf.shape(input=encoded_images)[0] > 0,
        lambda: encoded_images,
        lambda: tf.constant([], tf.string),
    )
    # Prepend the width and height (as strings) to the encoded images.
    image_shape = tf.shape(input=images)
    dimensions = tf.stack(
        [
            tf.as_string(image_shape[2], name="width"),
            tf.as_string(image_shape[1], name="height"),
        ],
        name="dimensions",
    )
    return tf.concat([dimensions, encoded_images], axis=0)

# To ensure that image encoding logic is only executed when summaries
# are written, we pass a callable to the `tensor` parameter.
# (`tag`, `step`, and `summary_metadata` come from the enclosing function.)
return tf.summary.write(
    tag=tag, tensor=lazy_tensor, step=step, metadata=summary_metadata
)

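# Illustrative usage (not part of the original source): the public TF 2.x
# API that exercises the same lazy-encoding pattern. Assumes a default
# writer; the log directory is a placeholder.
def _demo_image_summary(logdir="/tmp/demo_logs"):
    writer = tf.summary.create_file_writer(logdir)
    with writer.as_default():
        batch = tf.random.uniform([4, 28, 28, 1])  # [k, h, w, c] in [0, 1)
        # Encodes at most `max_outputs` images as PNG before writing.
        return tf.summary.image("samples", batch, step=0, max_outputs=3)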