def discriminate(self, x_var, reuse=False):
    with tf.variable_scope("discriminator") as scope:
        if reuse:
            scope.reuse_variables()
        conv1 = tf.nn.relu(conv2d(x_var, output_dim=32, name='dis_conv1'))
        conv2 = tf.nn.relu(batch_normal(conv2d(conv1, output_dim=128, name='dis_conv2'), scope='dis_bn1', reuse=reuse))
        conv3 = tf.nn.relu(batch_normal(conv2d(conv2, output_dim=256, name='dis_conv3'), scope='dis_bn2', reuse=reuse))
        conv4 = conv2d(conv3, output_dim=256, name='dis_conv4')
        middle_conv = conv4
        conv4 = tf.nn.relu(batch_normal(conv4, scope='dis_bn3', reuse=reuse))
        conv4 = tf.reshape(conv4, [self.batch_size, -1])
        fl = tf.nn.relu(batch_normal(fully_connect(conv4, output_size=256, scope='dis_fully1'), scope='dis_bn4', reuse=reuse))
        output = fully_connect(fl, output_size=1, scope='dis_fully2')
        return middle_conv, output
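This discriminator returns both the pre-activation features of its fourth conv layer (middle_conv) and the final logit, which suggests a feature-level loss in the VAE-GAN style. A minimal sketch of such a loss, assuming hypothetical tensors x_real and x_rec and a model object exposing the method above:

# Hypothetical wiring: compare intermediate discriminator features of real
# and reconstructed batches (VAE-GAN-style learned similarity).
l_real, _ = model.discriminate(x_real, reuse=False)
l_rec, _ = model.discriminate(x_rec, reuse=True)   # second call reuses weights
feature_loss = tf.reduce_mean(tf.square(l_real - l_rec))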
# NOTE: this snippet begins mid-function. The signature and the lines that
# produce `d1` below are assumed (hypothetical) so the fragment parses; the
# original text resumes at the first tf.nn.relu call.
def generate(self, z_var, batch_size, is_train=True, resnet=False):
    s = 4  # assumed base spatial size (three 2x upsamplings -> 32x32 output)
    d1 = tf.reshape(fully_connect(z_var, output_size=s * s * 256, scope='gen_fully1'),
                    [batch_size, s, s, 256])
    if not resnet:
        # plain deconvolution branch
        d1 = tf.nn.relu(d1)
        d2 = tf.nn.relu(batch_normal(de_conv(d1, output_shape=[batch_size, s * 2, s * 2, 256], name='gen_deconv2'),
                                     scope='bn1', is_training=is_train))
        d3 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[batch_size, s * 4, s * 4, 128], name='gen_deconv3'),
                                     scope='bn2', is_training=is_train))
        d4 = tf.nn.relu(batch_normal(de_conv(d3, output_shape=[batch_size, s * 8, s * 8, 64], name='gen_deconv4'),
                                     scope='bn3', is_training=is_train))
        d5 = conv2d(d4, output_dim=self.channel, stride=1, kernel=3, name='gen_conv')
    else:
        # residual branch: three upsampling residual blocks
        d2 = Residual_G(d1, output_dims=256, up_sampling=True, residual_name='in1')
        d3 = Residual_G(d2, output_dims=256, up_sampling=True, residual_name='in2')
        d4 = Residual_G(d3, output_dims=256, up_sampling=True, residual_name='in3')
        d4 = tf.nn.relu(batch_normal(d4, scope='in4'))
        d5 = conv2d(d4, output_dim=self.channel, kernel=3, stride=1, name='gen_conv')
    return tf.tanh(d5)
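Both branches finish with a stride-1 3x3 convolution and tanh, so outputs lie in [-1, 1]. A usage sketch under the assumed signature above:

# Hypothetical usage: sample a latent batch and build the generator graph.
z = tf.random_normal([batch_size, 128])   # assumed latent dimension
fake_images = model.generate(z, batch_size, is_train=True, resnet=False)
# Rescale with (fake_images + 1.) / 2. before saving as images.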
def encode_z(self, x, weights, biases):
    c1 = tf.nn.relu(batch_normal(conv2d(x, weights['e1'], biases['eb1']), scope='enz_bn1'))
    c2 = tf.nn.relu(batch_normal(conv2d(c1, weights['e2'], biases['eb2']), scope='enz_bn2'))
    c2 = tf.reshape(c2, [self.batch_size, 128 * 7 * 7])
    # Original author's note: "using tanh instead of tf.nn.relu"; no activation
    # is actually applied to result_z below.
    result_z = batch_normal(fully_connect(c2, weights['e3'], biases['eb3']), scope='enz_bn3')
    # result_c = tf.nn.sigmoid(fully_connect(c2, weights['e4'], biases['eb4']))
    # Transform to one-hot form:
    # sparse_label = tf.arg_max(result_c, 1)
    # y_vec = tf.one_hot(sparse_label, 10)
    return result_z
def Encode(self, x):
    with tf.variable_scope('encode') as scope:
        conv1 = tf.nn.relu(batch_normal(conv2d(x, output_dim=64, name='e_c1'), scope='e_bn1'))
        conv2 = tf.nn.relu(batch_normal(conv2d(conv1, output_dim=128, name='e_c2'), scope='e_bn2'))
        conv3 = tf.nn.relu(batch_normal(conv2d(conv2, output_dim=256, name='e_c3'), scope='e_bn3'))
        conv3 = tf.reshape(conv3, [self.batch_size, 256 * 8 * 8])
        fc1 = tf.nn.relu(batch_normal(fully_connect(conv3, output_size=1024, scope='e_f1'), scope='e_bn4'))
        z_mean = fully_connect(fc1, output_size=128, scope='e_f2')
        z_sigma = fully_connect(fc1, output_size=128, scope='e_f3')
        return z_mean, z_sigma
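Encode returns the two linear heads of a Gaussian posterior. The snippet does not show how z_sigma is interpreted; a common convention is to treat it as log-variance, which gives this sketch of the reparameterization trick and the KL term (the log-variance reading is an assumption, not confirmed by the source):

# Reparameterization trick, assuming z_sigma holds log(sigma^2).
z_mean, z_sigma = model.Encode(x)
eps = tf.random_normal([model.batch_size, 128])
z = z_mean + tf.exp(0.5 * z_sigma) * eps
# KL(q(z|x) || N(0, I)) for a diagonal Gaussian:
kl_loss = tf.reduce_mean(
    -0.5 * tf.reduce_sum(1. + z_sigma - tf.square(z_mean) - tf.exp(z_sigma), axis=1))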
def discriminate(self, x_var, y, weights, biases, reuse=False):
    y1 = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
    x_var = conv_cond_concat(x_var, y1)
    conv1 = lrelu(conv2d(x_var, weights['wc1'], biases['bc1']))
    conv1 = conv_cond_concat(conv1, y1)
    conv2 = lrelu(batch_normal(conv2d(conv1, weights['wc2'], biases['bc2']), scope='dis_bn1', reuse=reuse))
    conv2 = tf.reshape(conv2, [self.batch_size, -1])
    conv2 = tf.concat([conv2, y], 1)
    fc1 = lrelu(batch_normal(fully_connect(conv2, weights['wc3'], biases['bc3']), scope='dis_bn2', reuse=reuse))
    fc1 = tf.concat([fc1, y], 1)
    # final D output
    output = fully_connect(fc1, weights['wd'], biases['bd'])
    return tf.nn.sigmoid(output)
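Since this discriminator returns a probability rather than a raw logit, the numerically safer tf.nn.sigmoid_cross_entropy_with_logits cannot be applied directly to its output. A minimal sketch of the vanilla GAN losses on the probabilities, with hypothetical x_real/x_fake tensors:

# Vanilla GAN losses from discriminator probabilities (hypothetical wiring).
d_real = model.discriminate(x_real, y, weights, biases, reuse=False)
d_fake = model.discriminate(x_fake, y, weights, biases, reuse=True)
eps = 1e-8   # guard against log(0)
d_loss = -tf.reduce_mean(tf.log(d_real + eps) + tf.log(1. - d_fake + eps))
g_loss = -tf.reduce_mean(tf.log(d_fake + eps))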
def encode_y(self, x, weights, biases):
    c1 = tf.nn.relu(batch_normal(conv2d(x, weights['e1'], biases['eb1']), scope='eny_bn1'))
    c2 = tf.nn.relu(batch_normal(conv2d(c1, weights['e2'], biases['eb2']), scope='eny_bn2'))
    c2 = tf.reshape(c2, [self.batch_size, 128 * 7 * 7])
    result_y = tf.nn.sigmoid(fully_connect(c2, weights['e3'], biases['eb3']))
    # y_vec = tf.one_hot(tf.arg_max(result_y, 1), 10)
    return result_y
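The commented-out line hints at turning the sigmoid scores into a one-hot label; tf.arg_max is deprecated in favor of tf.argmax, so an equivalent sketch (assuming 10 classes, as in MNIST) is:

# Convert predicted class scores to a one-hot vector.
result_y = model.encode_y(x, weights, biases)
y_vec = tf.one_hot(tf.argmax(result_y, axis=1), depth=10)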
def generate(self, z_var, y, weights, biases):
    # concatenate the label onto the latent for the first layer
    z_var = tf.concat([z_var, y], 1)
    d1 = tf.nn.relu(batch_normal(fully_connect(z_var, weights['wd'], biases['bd']), scope='gen_bn1'))
    # concatenate the label again for the second layer
    d1 = tf.concat([d1, y], 1)
    d2 = tf.nn.relu(batch_normal(fully_connect(d1, weights['wc1'], biases['bc1']), scope='gen_bn2'))
    d2 = tf.reshape(d2, [self.batch_size, 7, 7, 128])
    y = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
    d2 = conv_cond_concat(d2, y)
    d3 = tf.nn.relu(batch_normal(de_conv(d2, weights['wc2'], biases['bc2'], out_shape=[self.batch_size, 14, 14, 64]), scope='gen_bn3'))
    d3 = conv_cond_concat(d3, y)
    output = de_conv(d3, weights['wc3'], biases['bc3'], out_shape=[self.batch_size, 28, 28, 1])
    return tf.nn.sigmoid(output)
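The label y is injected three times: concatenated to z, concatenated to the first dense layer, and tiled over the feature maps via conv_cond_concat. That helper is not defined in these snippets; the customary DCGAN-tensorflow-style definition (an assumption about this codebase) is:

def conv_cond_concat(x, y):
    # Tile the [batch, 1, 1, y_dim] label over x's spatial grid and
    # concatenate it along the channel axis.
    x_shapes = x.get_shape()
    y_shapes = y.get_shape()
    return tf.concat(
        [x, y * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3)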
# NOTE: this snippet also begins mid-function; the signature and the lines
# producing `yb` and `conv1` are assumed (hypothetical) so the fragment parses.
def discriminate(self, x_var, y, reuse=False):
    with tf.variable_scope("discriminator") as scope:
        if reuse:
            scope.reuse_variables()
        yb = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
        conv1, w1 = conv2d(conv_cond_concat(x_var, yb), output_dim=10, name='dis_conv1')
        conv1 = lrelu(conv1)
        conv1 = conv_cond_concat(conv1, yb)
        tf.add_to_collection('ac_1', conv1)
        conv2, w2 = conv2d(conv1, output_dim=64, name='dis_conv2')
        tf.add_to_collection('weight_2', w2)
        conv2 = lrelu(batch_normal(conv2, scope='dis_bn1'))
        tf.add_to_collection('ac_2', conv2)
        conv2 = tf.reshape(conv2, [self.batch_size, -1])
        conv2 = tf.concat([conv2, y], 1)
        f1 = lrelu(batch_normal(fully_connect(conv2, output_size=1024, scope='dis_fully1'), scope='dis_bn2', reuse=reuse))
        f1 = tf.concat([f1, y], 1)
        out = fully_connect(f1, output_size=1, scope='dis_fully2')
        return tf.nn.sigmoid(out), out
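The tf.add_to_collection calls stash kernels and activations so other parts of the graph (for example, visualization code) can fetch them later:

# Retrieve the stashed tensors elsewhere, e.g. for activation/filter plots.
act1 = tf.get_collection('ac_1')[0]           # first-layer activations
w2_kernel = tf.get_collection('weight_2')[0]  # second conv kernel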