# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): fragment of a discriminator forward pass — the enclosing
# `def` is above this chunk and not visible here, and indentation has been
# flattened to column 0, so this span does not parse as-is.
# --- plain-conv path: spectrally-normalised conv stack ---
# conv2 = lrelu(conv2d(conv1, spectural_normed=self.sn, iter=self.iter_power,
# output_dim=128, k_w=3, k_h=3, d_h=1, d_w=1, name='dis_conv2_1'))
conv2 = lrelu(conv2d(conv1, spectural_normed=self.sn, iter=self.iter_power,
output_dim=128, name='dis_conv2_2'))
# conv3 = lrelu(conv2d(conv2, spectural_normed=self.sn, iter=self.iter_power,
# output_dim=256, k_h=3, k_w=3, d_w=1, d_h=1, name='dis_conv3_1'))
conv3 = lrelu(conv2d(conv2, spectural_normed=self.sn, iter=self.iter_power,
output_dim=256, name='dis_conv3_2'))
conv4 = lrelu(conv2d(conv3, spectural_normed=self.sn, iter=self.iter_power,
output_dim=512, kernel=1, name='dis_conv4'))
# flatten one row per (image, rotation) pair — presumably for the
# self-supervised rotation task; TODO confirm num_rotation against caller
conv4 = tf.reshape(conv4, [self.batch_size*self.num_rotation, -1])
#for D
# real/fake head: single logit per row
gan_logits = fully_connect(conv4, spectural_normed=self.sn, iter=self.iter_power,
output_size=1, scope='dis_fully1')
if self.ssup:
# self-supervised head: 4-way rotation classification logits + softmax
rot_logits = fully_connect(conv4, spectural_normed=self.sn, iter=self.iter_power,
output_size=4, scope='dis_fully2')
rot_prob = tf.nn.softmax(rot_logits)
else:
# --- ResNet path: Residual_D blocks then global sum pooling ---
re1 = Residual_D(x_var, spectural_normed=self.sn, output_dims=128, residual_name='re1', down_sampling=True, is_start=True)
re2 = Residual_D(re1, spectural_normed=self.sn, output_dims=128, residual_name='re2', down_sampling=True)
re3 = Residual_D(re2, spectural_normed=self.sn, output_dims=128, residual_name='re3')
re4 = Residual_D(re3, spectural_normed=self.sn, output_dims=128, residual_name='re4')
re4 = tf.nn.relu(re4)
#gsp
# global sum pooling over the spatial axes
gsp = tf.reduce_sum(re4, axis=[1, 2])
gan_logits = fully_connect(gsp, spectural_normed=self.sn, iter=self.iter_power, output_size=1, scope='dis_fully1')
if self.ssup:
rot_logits = fully_connect(gsp, spectural_normed=self.sn, iter=self.iter_power, output_size=4, scope='dis_fully2')
rot_prob = tf.nn.softmax(rot_logits)
# NOTE(review): everything from the next line down to the second softmax
# looks like a corrupted/duplicated paste of the section above — the next
# line is an orphan continuation belonging to no call, and the if/else
# mirrors earlier lines. Verify against the upstream repository.
output_size=1, scope='dis_fully1')
if self.ssup:
rot_logits = fully_connect(conv4, spectural_normed=self.sn, iter=self.iter_power,
output_size=4, scope='dis_fully2')
rot_prob = tf.nn.softmax(rot_logits)
else:
re1 = Residual_D(x_var, spectural_normed=self.sn, output_dims=128, residual_name='re1', down_sampling=True, is_start=True)
re2 = Residual_D(re1, spectural_normed=self.sn, output_dims=128, residual_name='re2', down_sampling=True)
re3 = Residual_D(re2, spectural_normed=self.sn, output_dims=128, residual_name='re3')
re4 = Residual_D(re3, spectural_normed=self.sn, output_dims=128, residual_name='re4')
re4 = tf.nn.relu(re4)
#gsp
gsp = tf.reduce_sum(re4, axis=[1, 2])
gan_logits = fully_connect(gsp, spectural_normed=self.sn, iter=self.iter_power, output_size=1, scope='dis_fully1')
if self.ssup:
rot_logits = fully_connect(gsp, spectural_normed=self.sn, iter=self.iter_power, output_size=4, scope='dis_fully2')
rot_prob = tf.nn.softmax(rot_logits)
#tf.summary.histogram("logits", gan_logits)
# returns (sigmoid prob, gan logits) and, when self.ssup, also the
# rotation logits and probabilities
if self.ssup:
return tf.nn.sigmoid(gan_logits), gan_logits, rot_logits, rot_prob
else:
return tf.nn.sigmoid(gan_logits), gan_logits
# Conditional generator: concatenates the label vector `y` onto the latent
# code and onto every intermediate activation before each transform.
# NOTE(review): indentation is flattened in this chunk and the body is
# truncated after the d3 layer — the rest of the method is not visible here.
def generate(self, z_var, y, weights, biases):
#add the first layer
z_var = tf.concat([z_var, y], 1)
d1 = tf.nn.relu(batch_normal(fully_connect(z_var , weights['wd'], biases['bd']) , scope='gen_bn1'))
#add the second layer
d1 = tf.concat([d1, y], 1)
d2 = tf.nn.relu(batch_normal(fully_connect(d1 , weights['wc1'], biases['bc1']) , scope='gen_bn2'))
# reshape the fully-connected output into a 7x7x128 feature map
d2 = tf.reshape(d2 , [self.batch_size , 7 , 7 , 128])
# broadcast the label as extra constant channels on the feature map
y = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
d2 = conv_cond_concat(d2, y)
# 7x7 -> 14x14 transposed convolution with batch norm + ReLU
d3 = tf.nn.relu(batch_normal(de_conv(d2, weights['wc2'], biases['bc2'], out_shape=[self.batch_size, 14 , 14 , 64]) , scope='gen_bn3'))
d3 = conv_cond_concat(d3, y)
output_dim = np.minimum(16 * np.power(2, i+1), 256)
print output_dim
x = lrelu(conv2d(x, output_dim=output_dim, use_sp=self.use_sp, name='dis_conv_1_{}'.format(i)))
x = tf.reshape(x, shape=[self.batch_size, -1])
ful_global = fully_connect(x, output_size=output_dim, use_sp=self.use_sp, scope='dis_fu1')
x = tf.concat([local_x_left, local_x_right], axis=3)
for i in range(5):
output_dim = np.minimum(16 * np.power(2, i+1), 256)
x = lrelu(conv2d(x, output_dim=output_dim, use_sp=self.use_sp, name='dis_conv_2_{}'.format(i)))
x = tf.reshape(x, shape=[self.batch_size, -1])
ful_local = fully_connect(x, output_size=output_dim*2, use_sp=self.use_sp, scope='dis_fu2')
ful = tf.concat([ful_global, ful_local, guided_fp_left, guided_fp_right], axis=1)
ful = tf.nn.relu(fully_connect(ful, output_size=512, use_sp=self.use_sp, scope='dis_fu4'))
gan_logits = fully_connect(ful, output_size=1, use_sp=self.use_sp, scope='dis_fu5')
return gan_logits
def discriminate(self, x_var, y, weights, biases, reuse=False):
    """Conditional discriminator: scores `x_var` given label vector `y`.

    The label is injected twice as broadcast channels on the conv stack and
    twice by concatenation on the flattened/fully-connected features.
    Returns the sigmoid probability that the input is real.
    """
    # label reshaped for channel-wise broadcast onto feature maps
    label_map = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
    h = conv_cond_concat(x_var, label_map)
    # two conv layers; batch norm only on the second, per-call reuse flag
    h = lrelu(conv2d(h, weights['wc1'], biases['bc1']))
    h = conv_cond_concat(h, label_map)
    h = lrelu(batch_normal(conv2d(h, weights['wc2'], biases['bc2']), scope='dis_bn1', reuse=reuse))
    # flatten, then keep re-appending the raw label vector
    h = tf.reshape(h, [self.batch_size, -1])
    h = tf.concat([h, y], 1)
    h = lrelu(batch_normal(fully_connect(h, weights['wc3'], biases['bc3']), scope='dis_bn2', reuse=reuse))
    h = tf.concat([h, y], 1)
    # final real/fake head
    logits = fully_connect(h, weights['wd'], biases['bd'])
    return tf.nn.sigmoid(logits)
# Encoder fragment: the enclosing `def` is above this chunk and not visible;
# indentation is flattened to column 0 in this paste.
with tf.variable_scope("encode") as scope:
# share variables on repeated graph construction
if reuse == True:
scope.reuse_variables()
# 7x7 stride-1 stem, then three 4x4 stride-2 downsampling convolutions,
# each wrapped in instance norm + ReLU
conv1 = tf.nn.relu(
instance_norm(conv2d(x, output_dim=32, k_w=7, k_h=7, d_w=1, d_h=1, name='e_c1'), scope='e_in1'))
conv2 = tf.nn.relu(
instance_norm(conv2d(conv1, output_dim=64, k_w=4, k_h=4, d_w=2, d_h=2, name='e_c2'), scope='e_in2'))
conv3 = tf.nn.relu(
instance_norm(conv2d(conv2, output_dim=128, k_w=4, k_h=4, d_w=2, d_h=2, name='e_c3'), scope='e_in3'))
conv4 = tf.nn.relu(
instance_norm(conv2d(conv3, output_dim=128, k_w=4, k_h=4, d_w=2, d_h=2, name='e_c4'), scope='e_in4'))
# flatten and project to a 128-d content code; the rotation head is
# disabled (commented out) in this version
bottleneck = tf.reshape(conv4, [self.batch_size, -1])
content = fully_connect(bottleneck, output_size=128, scope='e_ful1')
#rotation = fully_connect(bottleneck, output_size=1, scope='e_ful2')
return content#, rotation
# Discriminator fragment: the enclosing `def` is above this chunk and not
# visible; indentation is flattened to column 0 in this paste.
with tf.variable_scope("discriminator") as scope:
# share variables on repeated graph construction
if reuse:
scope.reuse_variables()
conv1 = tf.nn.relu(conv2d(x_var, output_dim=32, name='dis_conv1'))
conv2= tf.nn.relu(batch_normal(conv2d(conv1, output_dim=128, name='dis_conv2'), scope='dis_bn1', reuse=reuse))
conv3= tf.nn.relu(batch_normal(conv2d(conv2, output_dim=256, name='dis_conv3'), scope='dis_bn2', reuse=reuse))
conv4 = conv2d(conv3, output_dim=256, name='dis_conv4')
# pre-activation conv4 feature map is returned alongside the logit —
# presumably for a feature-matching loss; TODO confirm with the caller
middle_conv = conv4
conv4= tf.nn.relu(batch_normal(conv4, scope='dis_bn3', reuse=reuse))
conv4= tf.reshape(conv4, [self.batch_size, -1])
fl = tf.nn.relu(batch_normal(fully_connect(conv4, output_size=256, scope='dis_fully1'), scope='dis_bn4', reuse=reuse))
output = fully_connect(fl , output_size=1, scope='dis_fully2')
# returns (intermediate feature map, unnormalised real/fake logit)
return middle_conv, output