# (scraping artifact, commented out so the module parses)
# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
L = tf.tile(tf.expand_dims(tf.expand_dims(L, 0), 0), [N, d, 1, 1])
sL = tf.tile(tf.expand_dims(tf.expand_dims(sL, 0), 0), [N, d, 1, 1])
logb = tf.log(b + 1e-9) # [N, M, d]
logb = tf.concat(1, [tf.zeros([N, 1, d]), tf.slice(logb, [0, 1, 0], [-1, -1, -1])]) # [N, M, d]
logb = tf.expand_dims(tf.transpose(logb, [0, 2, 1]), -1) # [N, d, M, 1]
left = L * tf.exp(tf.batch_matmul(L, logb * sL)) # [N, d, M, M]
right = a * u_t # [N, M, d]
right = tf.expand_dims(tf.transpose(right, [0, 2, 1]), -1) # [N, d, M, 1]
u = tf.batch_matmul(left, right) # [N, d, M, 1]
u = tf.transpose(tf.squeeze(u, [3]), [0, 2, 1]) # [N, M, d]
print ("L : %s\nsL: %s,\nlogb : %s\na : %s, b : %s, u_t : %s, left : %s, right : %s" % (L,sL, logb, a, b, u_t, left, right))
assert False
return u
class Tower(BaseTower):
def initialize(self):
params = self.params
placeholders = self.placeholders
tensors = self.tensors
variables_dict = self.variables_dict
N, J, V, Q, M = params.batch_size, params.max_sent_size, params.vocab_size, params.max_ques_size, params.mem_size
d = params.hidden_size
L = params.mem_num_layers
att_forget_bias = params.att_forget_bias
use_vector_gate = params.use_vector_gate
wd = params.wd
initializer = tf.random_uniform_initializer(-np.sqrt(3), np.sqrt(3))
with tf.name_scope("placeholders"):
x = tf.placeholder('int32', shape=[N, M, J], name='x')
x_mask = tf.placeholder('bool', shape=[N, M, J], name='x_mask')
q = tf.placeholder('int32', shape=[N, J], name='q')