if self.split_half:
    if idx != len(self.layer_size) - 1:
        next_hidden, direct_connect = tf.split(
            curr_out, 2 * [layer_size // 2], 1)
    else:
        direct_connect = curr_out
        next_hidden = 0
else:
    direct_connect = curr_out
    next_hidden = curr_out

final_result.append(direct_connect)
hidden_nn_layers.append(next_hidden)

result = tf.concat(final_result, axis=1)
result = reduce_sum(result, -1, keep_dims=False)
return result
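With split_half enabled, each intermediate layer_size must be even: half of the feature maps feed the next hidden layer and half are routed directly to the output. A minimal standalone sketch of that split (batch size and dimensions here are illustrative, not from the original):

import tensorflow as tf

layer_size = 128
curr_out = tf.ones([32, layer_size, 3])  # batch * layer_size * dim
# two halves of layer_size // 2 along axis 1
next_hidden, direct_connect = tf.split(curr_out, 2 * [layer_size // 2], 1)
print(next_hidden.shape, direct_connect.shape)  # (32, 64, 3) (32, 64, 3)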
if self.kernel_type == 'mat':
    p = tf.expand_dims(p, 1)
    # batch * pair
    kp = reduce_sum(
        tf.multiply(
            tf.transpose(
                reduce_sum(
                    tf.multiply(p, self.kernel),
                    -1),
                [0, 2, 1]),
            q),
        -1)
else:
    # 1 * pair * (k or 1)
    k = tf.expand_dims(self.kernel, 0)
    # batch * pair
    kp = reduce_sum(p * q * k, -1)
return kp
row = []
col = []
num_inputs = len(embed_list)
for i in range(num_inputs - 1):
    for j in range(i + 1, num_inputs):
        row.append(i)
        col.append(j)
p = tf.concat([embed_list[idx]
               for idx in row], axis=1)  # batch * num_pairs * k
q = tf.concat([embed_list[idx]
               for idx in col], axis=1)
inner_product = p * q
if self.reduce_sum:
    inner_product = reduce_sum(
        inner_product, axis=2, keep_dims=True)
return inner_product
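For context, the row/col index trick enumerates every unordered field pair exactly once. A runnable sketch with three illustrative embeddings (shapes assumed, not from the original):

import tensorflow as tf

batch, k = 4, 8
embed_list = [tf.ones([batch, 1, k]) for _ in range(3)]
row, col = [0, 0, 1], [1, 2, 2]  # pairs (0,1), (0,2), (1,2)
p = tf.concat([embed_list[i] for i in row], axis=1)  # batch * num_pairs * k
q = tf.concat([embed_list[j] for j in col], axis=1)
print((p * q).shape)  # (4, 3, 8)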
def call(self, inputs, **kwargs):
    if K.ndim(inputs) != 3:
        raise ValueError(
            "Unexpected inputs dimensions %d, expect to be 3 dimensions"
            % (K.ndim(inputs)))

    concated_embeds_value = inputs
    square_of_sum = tf.square(reduce_sum(
        concated_embeds_value, axis=1, keep_dims=True))
    sum_of_square = reduce_sum(
        concated_embeds_value * concated_embeds_value, axis=1, keep_dims=True)
    cross_term = square_of_sum - sum_of_square
    cross_term = 0.5 * reduce_sum(cross_term, axis=2, keep_dims=False)
    return cross_term
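The cross term relies on the standard FM identity sum_{i<j} <v_i, v_j> = 0.5 * ((sum_i v_i)^2 - sum_i v_i^2), which turns a quadratic number of pairwise products into two linear-time sums. A quick numeric check (plain tf.reduce_sum stands in for the snippet's reduce_sum, which appears to be a version-compatibility wrapper):

import tensorflow as tf

v = tf.constant([[[1., 2.], [3., 4.], [5., 6.]]])  # batch=1, fields=3, k=2
square_of_sum = tf.square(tf.reduce_sum(v, axis=1, keepdims=True))
sum_of_square = tf.reduce_sum(v * v, axis=1, keepdims=True)
fast = 0.5 * tf.reduce_sum(square_of_sum - sum_of_square, axis=2)
# explicit pairwise sum: <v0,v1> + <v0,v2> + <v1,v2> = 11 + 17 + 39 = 67
naive = sum(tf.reduce_sum(v[:, i] * v[:, j], axis=-1)
            for i, j in [(0, 1), (0, 2), (1, 2)])
print(float(tf.squeeze(fast)), float(tf.squeeze(naive)))  # 67.0 67.0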
def call(self, seq_value_len_list, mask=None, **kwargs):
    if self.supports_masking:
        if mask is None:
            raise ValueError(
                "When supports_masking=True, input must support masking")
        uiseq_embed_list = seq_value_len_list
        mask = tf.cast(mask, tf.float32)  # tf.to_float(mask)
        user_behavior_length = reduce_sum(mask, axis=-1, keep_dims=True)
        mask = tf.expand_dims(mask, axis=2)
    else:
        uiseq_embed_list, user_behavior_length = seq_value_len_list
        mask = tf.sequence_mask(user_behavior_length,
                                self.seq_len_max, dtype=tf.float32)
        mask = tf.transpose(mask, (0, 2, 1))

    embedding_size = uiseq_embed_list.shape[-1]
    mask = tf.tile(mask, [1, 1, embedding_size])
    uiseq_embed_list *= mask
    hist = uiseq_embed_list
    if self.mode == "max":
        return reduce_max(hist, 1, keep_dims=True)
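In the non-masking branch, tf.sequence_mask turns raw lengths into a 0/1 mask that zeroes padded timesteps before pooling. A shape sketch under assumed sizes (note that zeroing before a max pool quietly assumes padded positions never exceed the real values):

import tensorflow as tf

seq_len_max, k = 5, 4
emb = tf.ones([2, seq_len_max, k])        # batch * T * k
lengths = tf.constant([[3], [5]])         # batch * 1
mask = tf.sequence_mask(lengths, seq_len_max, dtype=tf.float32)  # batch * 1 * T
mask = tf.transpose(mask, (0, 2, 1))      # batch * T * 1
mask = tf.tile(mask, [1, 1, k])           # batch * T * k
print(tf.reduce_max(emb * mask, 1, keepdims=True).shape)  # (2, 1, 4)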
row = []
col = []
for r, c in itertools.combinations(embeds_vec_list, 2):
    row.append(r)
    col.append(c)
p = tf.concat(row, axis=1)
q = tf.concat(col, axis=1)
inner_product = p * q

bi_interaction = inner_product
attention_temp = tf.nn.relu(tf.nn.bias_add(tf.tensordot(
    bi_interaction, self.attention_W, axes=(-1, 0)), self.attention_b))
# Dense(self.attention_factor, 'relu', kernel_regularizer=l2(self.l2_reg_w))(bi_interaction)
self.normalized_att_score = softmax(tf.tensordot(
    attention_temp, self.projection_h, axes=(-1, 0)), dim=1)
attention_output = reduce_sum(
    self.normalized_att_score * bi_interaction, axis=1)
attention_output = self.dropout(attention_output)  # training
afm_out = self.tensordot([attention_output, self.projection_p])
return afm_out
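The attention stack is just two tensordot projections: pairwise interactions (batch * pair * k) are projected into an attention space, scored, softmaxed over the pair axis, and used to weight-sum the interactions back to batch * k. A shape walk-through with assumed sizes (tf.nn.softmax stands in for the snippet's softmax helper):

import tensorflow as tf

batch, pair, k, t = 2, 3, 4, 8
bi_interaction = tf.ones([batch, pair, k])
attention_W = tf.ones([k, t])
attention_b = tf.zeros([t])
projection_h = tf.ones([t, 1])
tmp = tf.nn.relu(tf.nn.bias_add(
    tf.tensordot(bi_interaction, attention_W, axes=(-1, 0)), attention_b))
score = tf.nn.softmax(tf.tensordot(tmp, projection_h, axes=(-1, 0)), axis=1)
out = tf.reduce_sum(score * bi_interaction, axis=1)  # batch * k
print(out.shape)  # (2, 4)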
row = []
col = []
num_inputs = len(embed_list)
for i in range(num_inputs - 1):
    for j in range(i + 1, num_inputs):
        row.append(i)
        col.append(j)
p = tf.concat([embed_list[idx]
               for idx in row], axis=1)  # batch * num_pairs * k
# Reshape([num_pairs, self.embedding_size])
q = tf.concat([embed_list[idx] for idx in col], axis=1)

# -------------------------
if self.kernel_type == 'mat':
    p = tf.expand_dims(p, 1)
    # kernel: k * pair * k
    # batch * pair
    kp = reduce_sum(
        # batch * pair * k
        tf.multiply(
            # batch * pair * k
            tf.transpose(
                # batch * k * pair
                reduce_sum(
                    # batch * k * pair * k
                    tf.multiply(
                        p, self.kernel),
                    -1),
                [0, 2, 1]),
            q),
        -1)
else:
    # 1 * pair * (k or 1)
    k = tf.expand_dims(self.kernel, 0)
    # batch * pair
    kp = reduce_sum(p * q * k, -1)

return kp
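To make the heavily nested 'mat' branch easier to follow, here is the same chain of operations unrolled step by step with assumed sizes (the kernel shape k * pair * k is inferred from the broadcasting the shape comments describe):

import tensorflow as tf

batch, pair, k = 2, 3, 4
p = tf.ones([batch, pair, k])
q = tf.ones([batch, pair, k])
kernel = tf.ones([k, pair, k])

p4 = tf.expand_dims(p, 1)                           # batch * 1 * pair * k
step = tf.reduce_sum(tf.multiply(p4, kernel), -1)   # batch * k * pair
step = tf.transpose(step, [0, 2, 1])                # batch * pair * k
kp = tf.reduce_sum(tf.multiply(step, q), -1)        # batch * pair
print(kp.shape)  # (2, 3)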