def generate(self, z_var, reuse=False):
    with tf.variable_scope('generator') as scope:
        if reuse:
            scope.reuse_variables()
        # Project the latent vector to an 8x8x256 feature map.
        d1 = tf.nn.relu(batch_normal(fully_connect(z_var, output_size=8*8*256, scope='gen_fully1'),
                                     scope='gen_bn1', reuse=reuse))
        d2 = tf.reshape(d1, [self.batch_size, 8, 8, 256])
        # Three stride-2 deconvolutions: 8x8 -> 16x16 -> 32x32 -> 64x64.
        d2 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[self.batch_size, 16, 16, 256], name='gen_deconv2'),
                                     scope='gen_bn2', reuse=reuse))
        d3 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[self.batch_size, 32, 32, 128], name='gen_deconv3'),
                                     scope='gen_bn3', reuse=reuse))
        d4 = tf.nn.relu(batch_normal(de_conv(d3, output_shape=[self.batch_size, 64, 64, 32], name='gen_deconv4'),
                                     scope='gen_bn4', reuse=reuse))
        # Final stride-1 deconvolution maps to 3 output channels in [-1, 1].
        d5 = de_conv(d4, output_shape=[self.batch_size, 64, 64, 3], name='gen_deconv5', d_h=1, d_w=1)
        return tf.nn.tanh(d5)
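These snippets rely on several project-local helpers (fully_connect, de_conv, batch_normal) whose definitions are not included on this page. The sketch below is a minimal guess at TF 1.x implementations, with signatures inferred from the call sites above; the real project's versions may differ.

import tensorflow as tf

def fully_connect(x, output_size, scope='fc'):
    # Plain dense layer xW + b with DCGAN-style initialisation (assumed).
    with tf.variable_scope(scope):
        w = tf.get_variable('w', [int(x.get_shape()[-1]), output_size],
                            initializer=tf.random_normal_initializer(stddev=0.02))
        b = tf.get_variable('b', [output_size], initializer=tf.zeros_initializer())
        return tf.matmul(x, w) + b

def de_conv(x, output_shape, k_h=5, k_w=5, d_h=2, d_w=2, name='deconv'):
    # Transposed convolution producing the requested NHWC output_shape.
    with tf.variable_scope(name):
        w = tf.get_variable('w', [k_h, k_w, output_shape[-1], int(x.get_shape()[-1])],
                            initializer=tf.random_normal_initializer(stddev=0.02))
        return tf.nn.conv2d_transpose(x, w, output_shape=output_shape,
                                      strides=[1, d_h, d_w, 1])

def batch_normal(x, scope='bn', reuse=False, is_training=True):
    # Thin wrapper over the stock TF 1.x batch-norm layer.
    return tf.contrib.layers.batch_norm(x, decay=0.9, epsilon=1e-5, scale=True,
                                        is_training=is_training, reuse=reuse,
                                        scope=scope, updates_collections=None)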
def encode_decode_1(self, x, reuse=False):
    with tf.variable_scope("encode_decode_1") as scope:
        if reuse:
            scope.reuse_variables()
        # Encoder: three conv layers, 64 -> 128 -> 256 channels.
        conv1 = lrelu(instance_norm(conv2d(x, output_dim=64, k_w=5, k_h=5, d_w=1, d_h=1, name='e_c1'), scope='e_in1'))
        conv2 = lrelu(instance_norm(conv2d(conv1, output_dim=128, name='e_c2'), scope='e_in2'))
        conv3 = lrelu(instance_norm(conv2d(conv2, output_dim=256, name='e_c3'), scope='e_in3'))
        # Decoder for x_tilde1: two deconvs back up to 128x128, then 1x1-stride conv to RGB.
        de_conv1 = lrelu(instance_norm(de_conv(conv3, output_shape=[self.batch_size, 64, 64, 128],
                                               name='e_d1', k_h=3, k_w=3), scope='e_in4'))
        de_conv2 = lrelu(instance_norm(de_conv(de_conv1, output_shape=[self.batch_size, 128, 128, 64],
                                               name='e_d2', k_w=3, k_h=3), scope='e_in5'))
        x_tilde1 = conv2d(de_conv2, output_dim=3, d_h=1, d_w=1, name='e_c4')
        return x_tilde1
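lrelu and instance_norm are likewise project helpers. Plausible minimal versions are sketched below; these are assumptions based on the call sites, not the repo's actual code.

def lrelu(x, leak=0.2):
    # Leaky ReLU with the usual GAN default slope of 0.2.
    return tf.maximum(x, leak * x)

def instance_norm(x, scope='instance_norm'):
    # Per-sample, per-channel normalisation with learned scale and offset.
    with tf.variable_scope(scope):
        depth = int(x.get_shape()[-1])
        scale = tf.get_variable('scale', [depth],
                                initializer=tf.random_normal_initializer(1.0, 0.02))
        offset = tf.get_variable('offset', [depth], initializer=tf.zeros_initializer())
        mean, variance = tf.nn.moments(x, axes=[1, 2], keep_dims=True)
        return scale * (x - mean) * tf.rsqrt(variance + 1e-5) + offset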
# Note: this snippet begins mid-function in the original; the signature and
# variable_scope below are reconstructed from the variables it references.
def generate(self, z_var, batch_size, resnet=False, is_train=True, reuse=False):
    with tf.variable_scope('generator') as scope:
        if reuse:
            scope.reuse_variables()
        # Pick the initial spatial size so three 2x upsamplings reach the
        # target resolution (4*8 = 32, 6*8 = 48).
        if self.output_size == 32:
            s = 4
        elif self.output_size == 48:
            s = 6
        d1 = fully_connect(z_var, output_size=s*s*256, scope='gen_fully1')
        d1 = tf.reshape(d1, [-1, s, s, 256])
        if not resnet:
            # Plain DCGAN-style stack of stride-2 deconvolutions.
            d1 = tf.nn.relu(d1)
            d2 = tf.nn.relu(batch_normal(de_conv(d1, output_shape=[batch_size, s*2, s*2, 256], name='gen_deconv2'),
                                         scope='bn1', is_training=is_train))
            d3 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[batch_size, s*4, s*4, 128], name='gen_deconv3'),
                                         scope='bn2', is_training=is_train))
            d4 = tf.nn.relu(batch_normal(de_conv(d3, output_shape=[batch_size, s*8, s*8, 64], name='gen_deconv4'),
                                         scope='bn3', is_training=is_train))
            d5 = conv2d(d4, output_dim=self.channel, stride=1, kernel=3, name='gen_conv')
        else:
            # Residual variant: three upsampling residual blocks.
            d2 = Residual_G(d1, output_dims=256, up_sampling=True, residual_name='in1')
            d3 = Residual_G(d2, output_dims=256, up_sampling=True, residual_name='in2')
            d4 = Residual_G(d3, output_dims=256, up_sampling=True, residual_name='in3')
            d4 = tf.nn.relu(batch_normal(d4, scope='in4'))
            d5 = conv2d(d4, output_dim=self.channel, kernel=3, stride=1, name='gen_conv')
        return tf.tanh(d5)
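Residual_G is not defined on this page either. The block below is one plausible reading: a pre-activation residual block with optional 2x nearest-neighbour upsampling, reusing the batch_normal and conv2d helpers and assuming, as at the call sites above, that the input already has output_dims channels.

def Residual_G(x, output_dims=256, up_sampling=False, residual_name='res'):
    # Hypothetical upsampling residual block (an assumption, not the repo's code).
    with tf.variable_scope(residual_name):
        def up(t):
            h, w = t.get_shape().as_list()[1:3]
            return tf.image.resize_nearest_neighbor(t, [h * 2, w * 2])
        shortcut = up(x) if up_sampling else x
        y = tf.nn.relu(batch_normal(x, scope='bn1'))
        y = up(y) if up_sampling else y
        y = conv2d(y, output_dim=output_dims, kernel=3, stride=1, name='conv1')
        y = tf.nn.relu(batch_normal(y, scope='bn2'))
        y = conv2d(y, output_dim=output_dims, kernel=3, stride=1, name='conv2')
        return shortcut + y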
# Note: this conditional-MNIST generator snippet starts mid-function in the
# original; the first fully connected layer on [z, y] is missing.
# Add the second layer.
d1 = tf.concat([d1, y], 1)
d2 = tf.nn.relu(batch_normal(fully_connect(d1, weights['wc1'], biases['bc1']), scope='gen_bn2'))
d2 = tf.reshape(d2, [self.batch_size, 7, 7, 128])
# Tile the one-hot label onto the feature maps before each deconvolution.
y = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
d2 = conv_cond_concat(d2, y)
d3 = tf.nn.relu(batch_normal(de_conv(d2, weights['wc2'], biases['bc2'], out_shape=[self.batch_size, 14, 14, 64]), scope='gen_bn3'))
d3 = conv_cond_concat(d3, y)
# 28x28x1 output through a sigmoid, matching MNIST's [0, 1] pixel range.
output = de_conv(d3, weights['wc3'], biases['bc3'], out_shape=[self.batch_size, 28, 28, 1])
return tf.nn.sigmoid(output)
# Note: another conditional generator, again starting mid-function in the original.
yb = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
z = tf.concat([z, y], 1)
# Integer division so the reshapes below get int dimensions (e.g. 28 -> 7, 14).
c1, c2 = self.output_size // 4, self.output_size // 2
# 10 stands for the number of labels (y_dim).
d1 = tf.nn.relu(batch_normal(fully_connect(z, output_size=1024, scope='gen_fully'), scope='gen_bn1'))
d1 = tf.concat([d1, y], 1)
d2 = tf.nn.relu(batch_normal(fully_connect(d1, output_size=7*7*2*64, scope='gen_fully2'), scope='gen_bn2'))
d2 = tf.reshape(d2, [self.batch_size, c1, c1, 64 * 2])
d2 = conv_cond_concat(d2, yb)
d3 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[self.batch_size, c2, c2, 128], name='gen_deconv1'), scope='gen_bn3'))
d3 = conv_cond_concat(d3, yb)
d4 = de_conv(d3, output_shape=[self.batch_size, self.output_size, self.output_size, self.channel], name='gen_deconv2')
return tf.nn.sigmoid(d4)
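conv_cond_concat is the standard conditional-GAN trick of broadcasting the 1x1 label map across the spatial dimensions and appending it as extra channels. A minimal sketch, assuming static shapes as in the snippets above:

def conv_cond_concat(x, y):
    # Tile y (shape [N, 1, 1, y_dim]) over x's spatial dims, then concat
    # along channels so every pixel position sees the class label.
    x_shapes = x.get_shape()
    y_shapes = y.get_shape()
    return tf.concat([x, y * tf.ones([int(x_shapes[0]), int(x_shapes[1]),
                                      int(x_shapes[2]), int(y_shapes[3])])], 3)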
def encode_decode_2(self, x, reuse=False):
    with tf.variable_scope("encode_decode_2") as scope:
        if reuse:
            scope.reuse_variables()
        # Encoder: three conv layers, 64 -> 128 -> 256 channels.
        conv1 = lrelu(instance_norm(conv2d(x, output_dim=64, k_w=5, k_h=5, d_w=1, d_h=1, name='e_c1'), scope='e_in1'))
        conv2 = lrelu(instance_norm(conv2d(conv1, output_dim=128, name='e_c2'), scope='e_in2'))
        conv3 = lrelu(instance_norm(conv2d(conv2, output_dim=256, name='e_c3'), scope='e_in3'))
        # Decoder: two deconvs back up to 128x128, then a 1x1-stride conv to RGB.
        de_conv1 = lrelu(instance_norm(de_conv(conv3, output_shape=[self.batch_size, 64, 64, 128],
                                               name='e_d1', k_h=3, k_w=3), scope='e_in4'))
        de_conv2 = lrelu(instance_norm(de_conv(de_conv1, output_shape=[self.batch_size, 128, 128, 64],
                                               name='e_d2', k_w=3, k_h=3), scope='e_in5'))
        x_tilde = conv2d(de_conv2, output_dim=3, d_h=1, d_w=1, name='e_c4')
        return x_tilde
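For reference, the reuse flag used throughout these snippets is the standard TF 1.x idiom for sharing weights between two calls of the same network. A hedged usage sketch (the model object, placeholders, and 128x128x3 shapes are assumptions for illustration):

# First call creates the 'encode_decode_2/*' variables; the second shares them.
x_a = tf.placeholder(tf.float32, [model.batch_size, 128, 128, 3])
x_b = tf.placeholder(tf.float32, [model.batch_size, 128, 128, 3])
recon_a = model.encode_decode_2(x_a, reuse=False)
recon_b = model.encode_decode_2(x_b, reuse=True)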
# Note: the encoder loop that precedes this point (conv layers ending in
# scope='e_in_{}'.format(i)) is truncated in the original snippet.
bottleneck = tf.reshape(x, shape=[self.batch_size, -1])
bottleneck = fully_connect(bottleneck, output_size=256, use_sp=use_sp, scope='e_ful1')
# Concatenate the guided feature vectors onto the bottleneck.
bottleneck = tf.concat([bottleneck, guided_fp_left, guided_fp_right], axis=1)
de_x = tf.nn.relu(fully_connect(bottleneck, output_size=256*8*8, use_sp=use_sp, scope='d_ful1'))
de_x = tf.reshape(de_x, shape=[self.batch_size, 8, 8, 256])
#de_x = tf.tile(de_x, (1, 8, 8, 1), name='tile')
# Decode: five deconv steps, doubling spatial size and shrinking channels
# (floored at 16). Integer math so the shapes stay ints on Python 3.
for i in range(5):
    c_dim = int(np.maximum(256 // 2 ** i, 16))
    output_dim = 16 * 2 ** i
    print(de_x)
    de_x = tf.nn.relu(instance_norm(de_conv(de_x, output_shape=[self.batch_size, output_dim, output_dim, c_dim],
                                            use_sp=use_sp, name='g_deconv_{}'.format(i)),
                                    scope='g_in_{}'.format(i)))
#de_x = tf.concat([de_x, input_x], axis=3)
x_tilde1 = conv2d(de_x, output_dim=3, k_w=7, k_h=7, d_h=1, d_w=1, use_sp=use_sp, name='g_conv1')
return tf.nn.tanh(x_tilde1)
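The decoder loop halves the channel count and doubles the spatial size at each step. A quick standalone check of that schedule, plain Python arithmetic only:

for i in range(5):
    c_dim = max(256 // 2 ** i, 16)   # channels: 256, 128, 64, 32, 16
    output_dim = 16 * 2 ** i         # spatial:  16, 32, 64, 128, 256
    print('step %d -> %dx%dx%d' % (i, output_dim, output_dim, c_dim))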
# Note: this ResNet-style image-to-image generator also begins mid-function;
# it references x_var, self.output_size and self.channel from the missing header.
conv1 = tf.nn.relu(
    instance_norm(conv2d(x_var, output_dim=64, k_w=7, k_h=7, d_w=1, d_h=1, name='e_c1'), scope='e_in1'))
conv2 = tf.nn.relu(
    instance_norm(conv2d(conv1, output_dim=128, k_w=4, k_h=4, d_w=2, d_h=2, name='e_c2'), scope='e_in2'))
conv3 = tf.nn.relu(
    instance_norm(conv2d(conv2, output_dim=256, k_w=4, k_h=4, d_w=2, d_h=2, name='e_c3'), scope='e_in3'))
# Six residual blocks at the bottleneck resolution.
r1 = Residual(conv3, residual_name='re_1')
r2 = Residual(r1, residual_name='re_2')
r3 = Residual(r2, residual_name='re_3')
r4 = Residual(r3, residual_name='re_4')
r5 = Residual(r4, residual_name='re_5')
r6 = Residual(r5, residual_name='re_6')
# Upsample back to full resolution; integer division keeps shapes integral on Python 3.
g_deconv1 = tf.nn.relu(instance_norm(de_conv(r6, output_shape=[self.batch_size,
    self.output_size // 2, self.output_size // 2, 128], name='gen_deconv1'), scope="gen_in"))
g_deconv_1_1 = tf.nn.relu(instance_norm(de_conv(g_deconv1,
    output_shape=[self.batch_size, self.output_size, self.output_size, 32], name='g_deconv_1_1'), scope='gen_in_1_1'))
# Skip connection: concatenate the input image before the final 7x7 conv.
g_deconv_1_1_x = tf.concat([g_deconv_1_1, x_var], axis=3)
x_tilde1 = conv2d(g_deconv_1_1_x, output_dim=self.channel, k_w=7, k_h=7, d_h=1, d_w=1, name='gen_conv_1_2')
return tf.nn.tanh(x_tilde1)
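Residual is not shown on this page. A plausible minimal version matching the call sites above: two 3x3 convolutions with instance norm and an identity skip, keeping the channel count unchanged. The signature and body are assumptions.

def Residual(x, residual_name='res'):
    # Hypothetical identity residual block in the same TF 1.x style.
    with tf.variable_scope(residual_name):
        dim = int(x.get_shape()[-1])
        y = tf.nn.relu(instance_norm(
            conv2d(x, output_dim=dim, k_w=3, k_h=3, d_w=1, d_h=1, name='c1'), scope='in1'))
        y = instance_norm(
            conv2d(y, output_dim=dim, k_w=3, k_h=3, d_w=1, d_h=1, name='c2'), scope='in2')
        return x + y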