# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def C(img, activation_fn, scope='Classifier'):
    """Baseline classifier: three conv+dropout stages, then three fc layers.

    Collects every intermediate activation (c_1 ... c_6) and returns them
    as a list; the last entry is the [batch_size, n] logits tensor.
    Captures conv_info, is_train, n and self.batch_size from the enclosing scope.
    """
    with tf.variable_scope(scope) as scope:
        log.warn(scope.name)
        taps = []  # every stage's output, in order
        h = img
        # Three conv stages, each followed by dropout under a matching scope
        # (scope strings kept byte-identical to preserve checkpoint names).
        for depth, conv_name, drop_scope in zip(
                (conv_info[0], conv_info[1], conv_info[2]),
                ('c_1_conv', 'c_2_conv', 'c_3_conv'),
                ('c_1_conv/', 'c_2_conv/', 'c_3_conv/')):
            h = conv2d(h, depth, is_train, activation_fn, name=conv_name)
            h = slim.dropout(h, keep_prob=0.5, is_training=is_train, scope=drop_scope)
            log.info('{} {}'.format(scope.name, h))
            taps.append(h)
        # Flatten the last conv map and run the fc head: 16n -> 4n -> n units.
        h = fc(tf.reshape(h, [self.batch_size, -1]), 16*n, is_train, activation_fn, name='c_4_fc')
        log.info('{} {}'.format(scope.name, h))
        taps.append(h)
        # NOTE(review): this reshape is presumably a no-op on an already-flat
        # fc output; kept to reproduce the original graph exactly.
        h = fc(tf.reshape(h, [self.batch_size, -1]), 4*n, is_train, activation_fn, name='c_5_fc')
        log.info('{} {}'.format(scope.name, h))
        taps.append(h)
        h = fc(h, n, is_train, activation_fn, name='c_6_fc')
        log.info('{} {}'.format(scope.name, h))
        taps.append(h)
        assert h.get_shape().as_list() == [self.batch_size, n], h.get_shape().as_list()
        return taps
def C(img, q, scope='Classifier'):
    """CNN classifier conditioned on a question embedding q.

    Runs four conv stages over img, flattens, concatenates q along the
    feature axis, and applies a 256-256-n fc head. Returns the [batch, n]
    logits (no activation on the final layer).
    Captures conv_info, is_train, n and self.batch_size from the enclosing scope.
    """
    with tf.variable_scope(scope) as scope:
        log.warn(scope.name)
        # Convolutional trunk: the first two stages use stride 3.
        h = conv2d(img, conv_info[0], is_train, s_h=3, s_w=3, name='conv_1')
        h = conv2d(h, conv_info[1], is_train, s_h=3, s_w=3, name='conv_2')
        h = conv2d(h, conv_info[2], is_train, name='conv_3')
        h = conv2d(h, conv_info[3], is_train, name='conv_4')
        # Fuse flattened image features with the question vector.
        h = tf.concat([tf.reshape(h, [self.batch_size, -1]), q], axis=1)
        h = fc(h, 256, name='fc_1')
        h = fc(h, 256, name='fc_2')
        # NOTE(review): dropout scope 'fc_3/' sits after fc_2 — looks misnamed,
        # but it is kept verbatim because it determines TF variable-scope names.
        h = slim.dropout(h, keep_prob=0.5, is_training=is_train, scope='fc_3/')
        return fc(h, n, activation_fn=None, name='fc_3')
def g_theta(o_i, o_j, q, scope='g_theta', reuse=True):
    """Relation-network g: scores one (object_i, object_j, question) triple.

    Concatenates the pair with q and passes it through four 256-unit fc
    layers. reuse=True shares weights across all object pairs; the scope
    name is logged only on the first (non-reuse) construction.
    """
    with tf.variable_scope(scope, reuse=reuse) as scope:
        if not reuse:
            log.warn(scope.name)
        h = tf.concat([o_i, o_j, q], axis=1)
        for layer_name in ('g_1', 'g_2', 'g_3', 'g_4'):
            h = fc(h, 256, name=layer_name)
        return h
def C(img, activation_fn, scope='Classifier'):
    """Baseline classifier: conv/dropout x3 followed by an fc head.

    Returns all six intermediate activations as a list; the final element
    is the [batch_size, n] output layer.
    Captures conv_info, is_train, n and self.batch_size from the enclosing scope.
    """
    with tf.variable_scope(scope) as scope:
        log.warn(scope.name)

        def _traced(tensor):
            # Log each stage's tensor next to the enclosing scope name.
            log.info('{} {}'.format(scope.name, tensor))
            return tensor

        feats = []
        h = img
        # Conv stages; scope strings are verbatim to keep variable names stable.
        for depth, conv_name, drop_scope in (
                (conv_info[0], 'c_1_conv', 'c_1_conv/'),
                (conv_info[1], 'c_2_conv', 'c_2_conv/'),
                (conv_info[2], 'c_3_conv', 'c_3_conv/')):
            h = conv2d(h, depth, is_train, activation_fn, name=conv_name)
            h = slim.dropout(h, keep_prob=0.5, is_training=is_train, scope=drop_scope)
            feats.append(_traced(h))
        # Fully-connected head: 16n -> 4n -> n units.
        h = _traced(fc(tf.reshape(h, [self.batch_size, -1]), 16*n, is_train, activation_fn, name='c_4_fc'))
        feats.append(h)
        h = _traced(fc(tf.reshape(h, [self.batch_size, -1]), 4*n, is_train, activation_fn, name='c_5_fc'))
        feats.append(h)
        h = _traced(fc(h, n, is_train, activation_fn, name='c_6_fc'))
        feats.append(h)
        assert h.get_shape().as_list() == [self.batch_size, n], h.get_shape().as_list()
        return feats
def C(img, q, scope='Classifier'):
    """Question-conditioned CNN classifier.

    Four conv stages over img, flatten, concat with q, then a 256-256-n
    fc head; the final fc has no activation. Returns [batch, n] logits.
    Captures conv_info, is_train, n and self.batch_size from the enclosing scope.
    """
    with tf.variable_scope(scope) as scope:
        log.warn(scope.name)
        # (depth, extra stride kwargs, layer name) per conv stage; the first
        # two stages downsample with stride 3.
        conv_specs = (
            (conv_info[0], {'s_h': 3, 's_w': 3}, 'conv_1'),
            (conv_info[1], {'s_h': 3, 's_w': 3}, 'conv_2'),
            (conv_info[2], {}, 'conv_3'),
            (conv_info[3], {}, 'conv_4'),
        )
        feat = img
        for depth, strides, layer_name in conv_specs:
            feat = conv2d(feat, depth, is_train, name=layer_name, **strides)
        fused = tf.concat([tf.reshape(feat, [self.batch_size, -1]), q], axis=1)
        hidden = fc(fused, 256, name='fc_1')
        hidden = fc(hidden, 256, name='fc_2')
        # NOTE(review): dropout scope 'fc_3/' follows fc_2 — kept verbatim
        # since it fixes TF variable-scope naming.
        hidden = slim.dropout(hidden, keep_prob=0.5, is_training=is_train, scope='fc_3/')
        return fc(hidden, n, activation_fn=None, name='fc_3')
def f_phi(g, scope='f_phi'):
    """Relation-network f: maps the aggregated relation vector g to logits.

    Two 256-unit fc layers with dropout, then a linear n-unit output
    layer (activation_fn=None). Captures is_train and n from the
    enclosing scope.
    """
    with tf.variable_scope(scope) as scope:
        log.warn(scope.name)
        hidden = fc(g, 256, name='fc_1')
        hidden = fc(hidden, 256, name='fc_2')
        # NOTE(review): dropout scope 'fc_3/' sits after fc_2 — kept verbatim
        # to preserve TF variable-scope naming.
        hidden = slim.dropout(hidden, keep_prob=0.5, is_training=is_train, scope='fc_3/')
        return fc(hidden, n, activation_fn=None, name='fc_3')