# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): this span is the interior of a larger graph-building function;
# its indentation was lost in extraction and is reconstructed conventionally
# here. Free names (params, wd, initializer, q, x, q_mask, x_mask, N, M, J, d,
# L, use_vector_gate, att_forget_bias, VariableEmbedder, PositionEncoder,
# VectorReductionLayer, ReductionLayer, linear) come from the enclosing
# scope — confirm against the full file. Written against the legacy TF 0.x
# API: tf.nn.dropout takes keep_prob, and tf.split takes
# (split_dim, num_split, value) in that order.
with tf.variable_scope("embedding"):
    # A single shared embedding matrix maps both question and context tokens
    # into d-dimensional vectors.
    A = VariableEmbedder(params, wd=wd, initializer=initializer, name='A')
    Aq = A(q, name='Aq')  # [N, S, J, d] — embedded question tokens
    Ax = A(x, name='Ax')  # [N, S, J, d] — embedded context tokens
with tf.name_scope("encoding"):
    # Position encoder collapses the word axis (J) to one vector per
    # sentence — presumably the MemN2N-style position encoding; TODO confirm.
    encoder = PositionEncoder(J, d)
    u = encoder(Aq, q_mask)  # [N, d]    — encoded question
    m = encoder(Ax, x_mask)  # [N, M, d] — encoded memory/context sentences
with tf.variable_scope("networks"):
    # m_mask flags which of the M sentence slots are non-empty: max over the
    # word axis of the (presumably 0/1) token mask — TODO confirm x_mask range.
    m_mask = tf.reduce_max(tf.cast(x_mask, 'int64'), 2, name='m_mask')  # [N, M]
    gate_mask = tf.expand_dims(m_mask, -1)  # [N, M, 1], broadcastable over d
    m_length = tf.reduce_sum(m_mask, 1, name='m_length')  # [N] — sentences per example
    # Broadcast the question encoding across all M memory slots.
    prev_u = tf.tile(tf.expand_dims(u, 1), [1, M, 1])  # [N, M, d]
    # Vector gate: one gate value per dimension (gate_size=d);
    # otherwise a single scalar gate per slot (gate_size=1).
    reg_layer = VectorReductionLayer(N, M, d) if use_vector_gate else ReductionLayer(N, M, d)
    gate_size = d if use_vector_gate else 1
    h = None  # [N, M, d] — output of the current layer; set inside the loop
    # Per-layer collections: attention gates and forward/backward reset gates.
    as_, rfs, rbs = [], [], []
    hs = []
    # L stacked gated-attention layers.
    for layer_idx in range(L):
        with tf.name_scope("layer_{}".format(layer_idx)):
            # keep_prob=0.7 under the TF 0.x dropout signature, i.e. a 30%
            # drop rate — TODO confirm this hard-coded rate is intended.
            dr_prev_u = tf.nn.dropout(prev_u, 0.7) if params.use_dropout else prev_u
            # Candidate update from the (dropped-out) query state and memory.
            u_t = tf.tanh(linear([dr_prev_u, m], d, True, wd=wd, scope='u_t'))
            # Attention gate in [0, 1): sigmoid shifted toward "forget" by
            # att_forget_bias, then zeroed on empty slots via gate_mask.
            a = tf.cast(gate_mask, 'float') * tf.sigmoid(linear([dr_prev_u * m], gate_size, True, initializer=initializer, wd=wd, scope='a') - att_forget_bias)
            # Gated reduction: combine u_t (weight a) with the previous
            # state (weight 1-a).
            h = reg_layer(u_t, a, 1.0-a, scope='h')
            # All layers except the last prepare the state for the next layer.
            if layer_idx + 1 < L:
                if params.use_reset:
                    # Forward/backward reset gates, split in two along dim 2
                    # (legacy tf.split argument order: split_dim, num_split, value).
                    rf, rb = tf.split(2, 2, tf.cast(gate_mask, 'float') *
                                      tf.sigmoid(linear([dr_prev_u * m], 2 * gate_size, True, initializer=initializer, wd=wd, scope='r')))
                else:
                    # (else-branch body lies beyond this chunk — truncated in SOURCE)