with tf.variable_scope('embedding_lookup'):
    response_embeddings = tf.nn.embedding_lookup(word_embeddings, self.response_ph)
Hr = response_embeddings
if self.is_positional and self.stack_num > 0:
    with tf.variable_scope('positional'):
        Hr = op.positional_encoding_vector(Hr, max_timescale=10)
with tf.variable_scope('expand_resp_embeddings'):
    Hr = tf.concat([sent_embedder_response, Hr], axis=1)
Hr_stack = [Hr]
for index in range(self.stack_num):
    with tf.variable_scope('self_stack_' + str(index)):
        Hr = layers.block(
            Hr, Hr, Hr,
            Q_lengths=self.response_len_ph, K_lengths=self.response_len_ph, attention_type='dot')
        Hr_stack.append(Hr)

# context part
# a list of length max_turn_num, every element is a tensor with shape [batch, max_turn_len]
list_turn_t = tf.unstack(self.utterance_ph, axis=1)
list_turn_length = tf.unstack(self.all_utterance_len_ph, axis=1)
list_turn_t_sent = tf.unstack(sent_embedder_context, axis=1)
sim_turns = []
# for every turn_t calculate matching vector
for turn_t, t_turn_length, turn_t_sent in zip(list_turn_t, list_turn_length, list_turn_t_sent):
    Hu = tf.nn.embedding_lookup(word_embeddings, turn_t)  # [batch, max_turn_len, emb_size]
    if self.is_positional and self.stack_num > 0:
        with tf.variable_scope('positional', reuse=True):
            Hu = op.positional_encoding_vector(Hu, max_timescale=10)
    with tf.variable_scope('expand_cont_embeddings'):
        Hu = tf.concat([turn_t_sent, Hu], axis=1)

    # self-attention stack over the turn, reusing the 'self_stack_*' variables built for the response
    Hu_stack = [Hu]
    for index in range(self.stack_num):
        with tf.variable_scope('self_stack_' + str(index), reuse=True):
            Hu = layers.block(
                Hu, Hu, Hu,
                Q_lengths=t_turn_length, K_lengths=t_turn_length, attention_type='dot')
            Hu_stack.append(Hu)

    # cross-attention between every granularity of the turn and of the response
    r_a_t_stack = []
    t_a_r_stack = []
    for index in range(self.stack_num + 1):
        with tf.variable_scope('t_attend_r_' + str(index)):
            try:
                t_a_r = layers.block(
                    Hu_stack[index], Hr_stack[index], Hr_stack[index],
                    Q_lengths=t_turn_length, K_lengths=self.response_len_ph, attention_type='dot')
            except ValueError:
                # variables already created for an earlier turn, so reuse them
                tf.get_variable_scope().reuse_variables()
                t_a_r = layers.block(
                    Hu_stack[index], Hr_stack[index], Hr_stack[index],
                    Q_lengths=t_turn_length, K_lengths=self.response_len_ph, attention_type='dot')

        with tf.variable_scope('r_attend_t_' + str(index)):
            try:
                r_a_t = layers.block(
                    Hr_stack[index], Hu_stack[index], Hu_stack[index],
                    Q_lengths=self.response_len_ph, K_lengths=t_turn_length, attention_type='dot')
            except ValueError:
                tf.get_variable_scope().reuse_variables()
                r_a_t = layers.block(
                    Hr_stack[index], Hu_stack[index], Hu_stack[index],
                    Q_lengths=self.response_len_ph, K_lengths=t_turn_length, attention_type='dot')

        t_a_r_stack.append(t_a_r)
        r_a_t_stack.append(r_a_t)

    t_a_r_stack.extend(Hu_stack)
    r_a_t_stack.extend(Hr_stack)

    t_a_r = tf.stack(t_a_r_stack, axis=-1)
    r_a_t = tf.stack(r_a_t_stack, axis=-1)
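
# ---------------------------------------------------------------------------
# NOTE (added for illustration, not part of the original snippet): layers.block is not shown
# here. Assuming it follows a DAM-style attentive module -- scaled dot-product attention plus
# a position-wise feed-forward sub-layer, each with a residual connection and layer
# normalization -- a minimal self-contained sketch could look like the function below.
# Masking by Q_lengths / K_lengths is omitted for brevity, and all names are illustrative.
import tensorflow as tf

def attentive_module_sketch(Q, K, V, hidden_units=256):
    # assumes the last dimension of Q is statically known (the embedding size)
    model_dim = Q.get_shape().as_list()[-1]
    # scaled dot-product attention weights: [batch, len_q, len_k]
    logits = tf.matmul(Q, K, transpose_b=True) / tf.sqrt(tf.cast(model_dim, tf.float32))
    weights = tf.nn.softmax(logits, axis=-1)
    attended = tf.matmul(weights, V)  # [batch, len_q, model_dim]
    # residual connection + layer normalization
    x = tf.contrib.layers.layer_norm(Q + attended)
    # position-wise feed-forward sub-layer, projected back to model_dim
    ffn = tf.layers.dense(tf.layers.dense(x, hidden_units, activation=tf.nn.relu), model_dim)
    return tf.contrib.layers.layer_norm(x + ffn)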
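
# ---------------------------------------------------------------------------
# NOTE (added for illustration, not part of the original snippet): op.positional_encoding_vector
# is also not shown. Assuming it adds the standard sinusoidal (Transformer-style) positional
# encoding to its input, with timescales spread between 1 and max_timescale, a minimal sketch:
import math

def positional_encoding_sketch(x, max_timescale=10, min_timescale=1.0):
    # x: [batch, length, channels]; channels is assumed statically known and even
    length = tf.shape(x)[1]
    channels = x.get_shape().as_list()[-1]
    num_timescales = channels // 2
    position = tf.cast(tf.range(length), tf.float32)
    log_timescale_increment = (math.log(float(max_timescale) / float(min_timescale)) /
                               max(num_timescales - 1, 1))
    inv_timescales = min_timescale * tf.exp(
        -tf.cast(tf.range(num_timescales), tf.float32) * log_timescale_increment)
    # [length, num_timescales]
    scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0)
    # concatenate sin and cos parts: [length, channels]
    signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
    # broadcast over the batch dimension and add to the input embeddings
    return x + tf.expand_dims(signal, 0)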