import numpy as np
import tensorflow as tf
from keras import backend as K

# (Function name assumed; the snippet's def line was cut off.)
def dice(y_true, y_pred):
    """Dice metric.

    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """
    # Binarise the prediction: 1 for the arg-max class along the class
    # axis (axis 4 of a 5-D batch/x/y/z/class tensor), 0 elsewhere.
    y_pred_decision = tf.floor((y_pred + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))
    y_sum = K.sum(y_true * y_pred_decision)
    return (2. * y_sum + K.epsilon()) / (K.sum(y_true) + K.sum(y_pred_decision) + K.epsilon())
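# A minimal usage sketch (hypothetical `model`; any Keras model with a 5-D
# one-hot segmentation output fits): custom metrics like the one above are
# passed to compile() by function reference.
#
#     model.compile(optimizer='adam',
#                   loss='categorical_crossentropy',
#                   metrics=[dice])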
def dice_cost_3(y_true, y_predicted):
    """Negative Dice coefficient of class 3, for use as a loss."""
    mask_true = K.flatten(y_true[:, :, :, :, 3])
    mask_pred = K.flatten(y_predicted[:, :, :, :, 3])
    num_sum = 2.0 * K.sum(mask_true * mask_pred) + K.epsilon()
    den_sum = K.sum(mask_true) + K.sum(mask_pred) + K.epsilon()
    # Negated so that minimising the loss maximises the Dice overlap.
    return -num_sum / den_sum
def recall(y_true, y_pred):
    """Recall metric.

    Computes the recall, a metric for multi-label classification of
    how many relevant items are selected. Note that this is only a
    batch-wise average of recall, not a global value.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
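# Usage sketch (hypothetical `model`): because the metric is batch-wise, the
# value Keras reports during training is an average over batches, not a
# recall computed over the whole epoch.
#
#     model.compile(optimizer='adam',
#                   loss='binary_crossentropy',
#                   metrics=[recall])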
def call(self, x, mask=None):
    # Sum over the timestep axis; masked positions contribute zero.
    if mask is not None:
        return K.sum(x * K.clip(mask, 0, 1), axis=1)
    else:
        return K.sum(x, axis=1)
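# For context: a call() like the one above typically lives in a small custom
# layer that consumes the mask produced by an upstream Masking or Embedding
# layer. A minimal sketch of such a wrapper (the class name and the extra
# methods are assumptions, not the original source):
from keras.layers import Layer

class MaskedSum(Layer):
    def compute_mask(self, inputs, mask=None):
        return None  # the mask is consumed here, not propagated further

    def compute_output_shape(self, input_shape):
        # (batch, timesteps, features) -> (batch, features)
        return (input_shape[0],) + input_shape[2:]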
spatial_probs_old = spatial_probs_old * tf.one_hot(indices=self.space, depth=spatial_probs_old.shape[1])
spatial_probs_old = tf.reduce_sum(spatial_probs_old, axis=1)
action_probs_old = tf.clip_by_value(act_probs_old * spatial_probs_old, 1e-10, 1.0)
'''
Keras-backend version of the same probability gathering, kept commented out:
act_probs = self.act_probs.output * K.one_hot(actions, self.act_probs.output_shape[1])
act_probs = K.sum(act_probs, axis=1)
spatial_probs = self.spatial_probs.output * K.one_hot(space, self.spatial_probs.output_shape[1])
spatial_probs = K.sum(spatial_probs, axis=1)
action_probs = K.clip(act_probs * spatial_probs, 1e-10, 1.0)
act_probs_old = self.act_probs_old.output * K.one_hot(actions, self.act_probs_old.output_shape[1])
act_probs_old = K.sum(act_probs_old, axis=1)
spatial_probs_old = self.spatial_probs_old.output * K.one_hot(space, self.spatial_probs_old.output_shape[1])
spatial_probs_old = K.sum(spatial_probs_old, axis=1)
action_probs_old = K.clip(act_probs_old * spatial_probs_old, 1e-10, 1.0)
'''
with tf.variable_scope('loss/clip'):
    # PPO clipped surrogate: the new/old probability ratio is computed in
    # log space, clipped to [1 - clip_value, 1 + clip_value], and the
    # pessimistic (elementwise minimum) term is averaged.
    spatial_ratios = tf.exp(tf.log(action_probs) - tf.log(action_probs_old))
    clipped_spatial_ratios = tf.clip_by_value(spatial_ratios, clip_value_min=1 - clip_value, clip_value_max=1 + clip_value)
    loss_spatial_clip = tf.minimum(tf.multiply(self.gaes, spatial_ratios), tf.multiply(self.gaes, clipped_spatial_ratios))
    loss_spatial_clip = tf.reduce_mean(loss_spatial_clip)
    tf.summary.scalar('loss_spatial', loss_spatial_clip)
'''
Keras-backend version of the ratio computation, kept commented out
(truncated in the original snippet):
spatial_ratios = K.exp(K.log(action_probs) - K.log(action_probs_old))
clipped_spatial_ratios = K.clip(spatial_ratios, 1 - self.clip_value, 1 + self.clip_value)
'''
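# A quick numeric illustration of the clipping above (plain NumPy, not part
# of the original source): with clip_value = 0.2, a ratio of 1.7 on a
# positive-advantage sample contributes min(1.7 * A, 1.2 * A) = 1.2 * A, so
# no single sample can pull the update too far.
ratios = np.array([0.5, 1.0, 1.7])
advantages = np.ones(3)
clipped = np.clip(ratios, 1 - 0.2, 1 + 0.2)
surrogate = np.minimum(ratios * advantages, clipped * advantages)
# surrogate == [0.5, 1.0, 1.2]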
def yoloconfidloss(y_true, y_pred, t):
    """YOLO confidence loss; `t` is a boolean tensor marking cells that
    contain an object. `lamda_confid_obj` and `lamda_confid_noobj` are
    weighting constants defined elsewhere in the source module."""
    pobj = K.sigmoid(y_pred)
    lo = K.square(y_true - pobj)
    value_if_true = lamda_confid_obj * lo
    value_if_false = lamda_confid_noobj * lo
    # tf.select is the pre-TF-1.0 name for the elementwise tf.where.
    loss1 = tf.select(t, value_if_true, value_if_false)
    loss = K.mean(loss1)
    # Average predicted confidence over all cells, and over object cells only.
    ave_anyobj = K.mean(pobj)
    obj = tf.select(t, pobj, K.zeros_like(y_pred))
    objcount = tf.select(t, K.ones_like(y_pred), K.zeros_like(y_pred))
    ave_obj = K.mean(K.sum(obj, axis=1) / (K.sum(objcount, axis=1) + 0.000001))  # prevent div by 0
    return loss, ave_anyobj, ave_obj
def mix_gaussian_loss(x, mu, log_sig, w):
    '''
    Combine the mixture-of-Gaussians distribution and the loss into a single
    function so that we can use the log-sum-exp trick for numerical stability.
    '''
    if K.backend() == "tensorflow":
        x.set_shape([None, 1])
    gauss = log_norm_pdf(K.repeat_elements(x=x, rep=mu.shape[1], axis=1), mu, log_sig)
    # TODO: get rid of clipping.
    gauss = K.clip(gauss, -40, 40)
    max_gauss = K.maximum((0.), K.max(gauss))
    # log-sum-exp trick: subtract the maximum before exponentiating, then
    # add it back after the log, so K.exp cannot overflow.
    gauss = gauss - max_gauss
    out = K.sum(w * K.exp(gauss), axis=1)
    loss = K.mean(-K.log(out) + max_gauss)
    return loss
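# The loss above calls log_norm_pdf, which is not shown in the snippet. A
# minimal sketch of such a helper, assuming it returns the elementwise log
# density of a Gaussian with mean `mu` and log standard deviation `log_sig`:
def log_norm_pdf(x, mu, log_sig):
    # log N(x; mu, sigma) = -log sigma - 0.5 log(2 pi) - (x - mu)^2 / (2 sigma^2)
    return (-log_sig - 0.5 * np.log(2. * np.pi)
            - 0.5 * K.square((x - mu) / K.exp(log_sig)))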
# Fragment of a layer call whose custom activation is a ReLU normalised to
# sum to 1 over the last axis (the enclosing layer is not shown):
activation=lambda x: K.relu(x) / K.sum(K.relu(x), axis=-1))(rnn)
# (The original def line is not shown; this signature is inferred from how
# the body uses x, y, kernel and kwargs.)
def kernel(x, y, kernel="rbf", **kwargs):
    scales = kwargs.get("scales", [])
    if kernel == "rbf":
        x_size = K.shape(x)[0]
        y_size = K.shape(y)[0]
        dim = K.shape(x)[1]
        # Tile x and y so that every row of x is compared with every row of y.
        tiled_x = K.tile(K.reshape(x, K.stack([x_size, 1, dim])), K.stack([1, y_size, 1]))
        tiled_y = K.tile(K.reshape(y, K.stack([1, y_size, dim])), K.stack([x_size, 1, 1]))
        return K.exp(-K.mean(K.square(tiled_x - tiled_y), axis=2) / K.cast(dim, tf.float32))
    elif kernel == 'raphy':
        scales = K.variable(value=np.asarray(scales))
        squared_dist = K.expand_dims(squared_distance(x, y), 0)
        scales = K.expand_dims(K.expand_dims(scales, -1), -1)
        weights = K.eval(K.shape(scales)[0])
        weights = K.variable(value=np.asarray(weights))
        weights = K.expand_dims(K.expand_dims(weights, -1), -1)
        return K.sum(weights * K.exp(-squared_dist / (K.pow(scales, 2))), 0)
    elif kernel == "multi-scale-rbf":
        sigmas = [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 20, 25, 30, 35, 100, 1e3, 1e4, 1e5, 1e6]
        # Sum an RBF kernel over a wide range of bandwidths.
        beta = 1. / (2. * (K.expand_dims(sigmas, 1)))
        distances = squared_distance(x, y)
        s = K.dot(beta, K.reshape(distances, (1, -1)))
        return K.reshape(tf.reduce_sum(input_tensor=tf.exp(-s), axis=0), K.shape(distances)) / len(sigmas)
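# The 'raphy' and 'multi-scale-rbf' branches use a squared_distance helper
# that is not shown. A minimal sketch, assuming it returns the matrix of
# pairwise squared Euclidean distances between the rows of x and of y:
def squared_distance(x, y):
    r = K.expand_dims(x, axis=1)            # (n_x, 1, dim)
    return K.sum(K.square(r - y), axis=-1)  # (n_x, n_y)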
# Computes a probability distribution over the timesteps, using the
# 'max trick' for numerical stability; the reshape works around a
# TensorFlow issue with 1-dimensional weights.
# (`k_keras` is the Keras backend, i.e. from keras import backend as k_keras.)
logits = k_keras.dot(x, self.W)
x_shape = k_keras.shape(x)
logits = k_keras.reshape(logits, (x_shape[0], x_shape[1]))
ai = k_keras.exp(logits - k_keras.max(logits, axis=-1, keepdims=True))
# Masked timesteps get zero weight.
if mask is not None:
    mask = k_keras.cast(mask, k_keras.floatx())
    ai = ai * mask
att_weights = ai / (k_keras.sum(ai, axis=1, keepdims=True) + k_keras.epsilon())
weighted_input = x * k_keras.expand_dims(att_weights)
result = k_keras.sum(weighted_input, axis=1)
if self.return_attention:
    return [result, att_weights]
return result
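# A plain-NumPy check of the 'max trick' used above (illustration only):
# subtracting the row maximum before exponentiating leaves the normalised
# weights unchanged while keeping np.exp from overflowing on large logits.
logits = np.array([1000.0, 1001.0, 1002.0])
naive = np.exp(logits)                  # overflows to inf
stable = np.exp(logits - logits.max())  # [e^-2, e^-1, e^0]
weights = stable / stable.sum()         # ~[0.090, 0.245, 0.665]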