import tensorflow as tf   # TF 1.x style APIs (variable scopes, contrib-era layers)
import numpy as np

# Helper ops used throughout these snippets (conv2d, de_conv, lrelu, linear, concat,
# fully_connect, batch_normal, layer_norm, conv_cond_concat) are defined elsewhere in the
# source repository and are not shown here.
def discriminate(self, x_var, y, weights, biases, reuse=False):
    # tile the label vector onto the input so the discriminator is conditioned on y
    y1 = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
    x_var = conv_cond_concat(x_var, y1)

    conv1 = lrelu(conv2d(x_var, weights['wc1'], biases['bc1']))
    conv1 = conv_cond_concat(conv1, y1)

    conv2 = lrelu(batch_normal(conv2d(conv1, weights['wc2'], biases['bc2']), scope='dis_bn1', reuse=reuse))
    conv2 = tf.reshape(conv2, [self.batch_size, -1])
    conv2 = tf.concat([conv2, y], 1)

    fc1 = lrelu(batch_normal(fully_connect(conv2, weights['wc3'], biases['bc3']), scope='dis_bn2', reuse=reuse))
    fc1 = tf.concat([fc1, y], 1)

    # final fully connected layer: one sigmoid output per example for D
    output = fully_connect(fc1, weights['wd'], biases['bd'])
    return tf.nn.sigmoid(output)
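# The snippets on this page call a conv_cond_concat helper that is not shown. A minimal
# sketch of the usual implementation follows (an assumption, not this repo's exact code):
# the label tensor y, shaped [batch, 1, 1, y_dim], is tiled over the spatial dimensions of
# x and concatenated on the channel axis, so every location carries the conditioning vector.
def conv_cond_concat(x, y):
    x_shapes = x.get_shape()
    y_shapes = y.get_shape()
    return tf.concat(
        [x, y * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3)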
"""
with tf.variable_scope("discriminator") as scope:
if reuse:
scope.reuse_variables()
if not self.y_dim:
h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim * 2, name='d_h1_conv')))
h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim * 4, name='d_h2_conv')))
h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim * 8, name='d_h3_conv')))
h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h4_lin')
return tf.nn.sigmoid(h4), h4
else:
yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim])
x = conv_cond_concat(image, yb)
h0 = lrelu(conv2d(x, self.c_dim + self.y_dim, name='d_h0_conv'))
h0 = conv_cond_concat(h0, yb)
h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim + self.y_dim, name='d_h1_conv')))
h1 = tf.reshape(h1, [self.batch_size, -1])
h1 = concat([h1, y], 1)
h2 = lrelu(self.d_bn2(linear(h1, self.dfc_dim, 'd_h2_lin')))
h2 = concat([h2, y], 1)
h3 = linear(h2, 1, 'd_h3_lin')
return tf.nn.sigmoid(h3), h3
def generate(self, z_var, y, weights, biases):  # def line reconstructed from the body (assumption); the source fragment starts at the comment below
    # add the first layer: concatenate the label one-hot onto the latent vector
    z_var = tf.concat([z_var, y], 1)
    d1 = tf.nn.relu(batch_normal(fully_connect(z_var, weights['wd'], biases['bd']), scope='gen_bn1'))

    # add the second layer
    d1 = tf.concat([d1, y], 1)
    d2 = tf.nn.relu(batch_normal(fully_connect(d1, weights['wc1'], biases['bc1']), scope='gen_bn2'))
    d2 = tf.reshape(d2, [self.batch_size, 7, 7, 128])

    # tile the labels onto the feature maps and deconvolve 7x7 -> 14x14 -> 28x28
    y = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
    d2 = conv_cond_concat(d2, y)
    d3 = tf.nn.relu(batch_normal(de_conv(d2, weights['wc2'], biases['bc2'], out_shape=[self.batch_size, 14, 14, 64]), scope='gen_bn3'))
    d3 = conv_cond_concat(d3, y)

    output = de_conv(d3, weights['wc3'], biases['bc3'], out_shape=[self.batch_size, 28, 28, 1])
    return tf.nn.sigmoid(output)
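# How the two snippets above would typically be wired into a conditional GAN objective.
# A minimal sketch with hypothetical names (model, images, z, d_weights/d_biases,
# g_weights/g_biases); this is not the repo's training code.
fake_images = model.generate(z, y, g_weights, g_biases)
D_real = model.discriminate(images, y, d_weights, d_biases)
D_fake = model.discriminate(fake_images, y, d_weights, d_biases, reuse=True)

eps = 1e-8  # discriminate() already applies a sigmoid, so use the log form of the loss
d_loss = -tf.reduce_mean(tf.log(D_real + eps) + tf.log(1. - D_fake + eps))
g_loss = -tf.reduce_mean(tf.log(D_fake + eps))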
def wgan_cond(model, image, y, reuse=False):
    # no batchnorm for WGAN-GP; layer normalization is used instead
    with tf.variable_scope("discriminator") as scope:
        if reuse:
            scope.reuse_variables()

        yb = tf.reshape(y, [-1, 1, 1, model.y_dim])
        image_ = conv_cond_concat(image, yb)

        h0 = lrelu(layer_norm(conv2d(image_, model.df_dim, k_h=4, k_w=4, name='d_h0_conv', padding='VALID')))
        h0 = conv_cond_concat(h0, yb)
        h1 = lrelu(layer_norm(conv2d(h0, model.df_dim*4, k_h=4, k_w=4, name='d_h1_conv', padding='VALID')))
        h1 = conv_cond_concat(h1, yb)
        h2 = lrelu(layer_norm(conv2d(h1, model.df_dim*8, k_h=4, k_w=4, name='d_h2_conv', padding='VALID')))
        h2 = conv_cond_concat(h2, yb)
        h3 = lrelu(layer_norm(conv2d(h2, model.df_dim*16, k_h=4, k_w=4, name='d_h3_conv', padding='VALID')))
        h3 = conv_cond_concat(h3, yb)
        h4 = lrelu(layer_norm(conv2d(h3, model.df_dim*32, k_h=4, k_w=4, name='d_h4_conv', padding='VALID')))
        h4 = conv_cond_concat(h4, yb)
        h5 = lrelu(layer_norm(conv2d(h4, model.df_dim*32, k_h=4, k_w=4, name='d_h5_conv', padding='VALID')))

        shape = np.product(h5.get_shape()[1:].as_list())
        h5 = tf.reshape(h5, [-1, shape])
        h5 = concat([h5, y], 1)
        # The source snippet is truncated here; the final projection to a single critic
        # score is reconstructed by analogy with dcwgan_cond and wgan_slim_cond below.
        out = linear(h5, 1, 'd_out_lin')
        return out
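# layer_norm is used by the critics in these snippets but not defined here. A minimal
# sketch of a TF 1.x wrapper follows (an assumption about the helper, not the repo's code):
def layer_norm(x, name='layer_norm'):
    with tf.variable_scope(name):
        return tf.contrib.layers.layer_norm(x)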
def dcwgan_cond(model, image, y, reuse=False):
    with tf.variable_scope("discriminator") as scope:
        if reuse:
            scope.reuse_variables()

        yb = tf.reshape(y, [-1, 1, 1, model.y_dim])
        x = conv_cond_concat(image, yb)

        h0 = lrelu(conv2d(x, model.df_dim, name='d_h0_conv'))
        h0 = conv_cond_concat(h0, yb)
        h1 = lrelu(layer_norm(conv2d(h0, model.df_dim*2, name='d_h1_conv'), name='d_ln1'))
        h1 = conv_cond_concat(h1, yb)
        h2 = lrelu(layer_norm(conv2d(h1, model.df_dim*4, name='d_h2_conv'), name='d_ln2'))
        h2 = conv_cond_concat(h2, yb)
        h3 = lrelu(layer_norm(conv2d(h2, model.df_dim*8, name='d_h3_conv'), name='d_ln3'))

        shape = np.product(h3.get_shape()[1:].as_list())
        reshaped = tf.reshape(h3, [-1, shape])
        cond = concat([reshaped, y], 1)

        h4 = linear(cond, 1, 'd_h4_lin')
        return h4
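# The critics above return an unbounded score rather than a probability, which is the
# WGAN formulation the "no batchnorm for WGAN GP" comment refers to. A minimal sketch of
# the gradient-penalty objective follows (hypothetical names: real_images, fake_images,
# batch_size, model, y); it is not the repo's training code.
d_real = dcwgan_cond(model, real_images, y)
d_fake = dcwgan_cond(model, fake_images, y, reuse=True)

alpha = tf.random_uniform([batch_size, 1, 1, 1], minval=0., maxval=1.)
interpolates = real_images + alpha * (fake_images - real_images)
d_interp = dcwgan_cond(model, interpolates, y, reuse=True)

grads = tf.gradients(d_interp, [interpolates])[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]) + 1e-12)
gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2)

d_loss = tf.reduce_mean(d_fake) - tf.reduce_mean(d_real) + 10.0 * gradient_penalty
g_loss = -tf.reduce_mean(d_fake)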
# Second conditional generator fragment; the original def line and the earlier definition
# of yb (the labels reshaped to [batch, 1, 1, y_dim], as in the snippets above) are not
# included in the source.
z = tf.concat([z, y], 1)
c1, c2 = self.output_size // 4, self.output_size // 2   # integer division so the shapes below stay ints

# 10 stands for the number of labels
d1 = tf.nn.relu(batch_normal(fully_connect(z, output_size=1024, scope='gen_fully'), scope='gen_bn1'))
d1 = tf.concat([d1, y], 1)

d2 = tf.nn.relu(batch_normal(fully_connect(d1, output_size=7*7*2*64, scope='gen_fully2'), scope='gen_bn2'))
d2 = tf.reshape(d2, [self.batch_size, c1, c1, 64 * 2])
d2 = conv_cond_concat(d2, yb)

d3 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[self.batch_size, c2, c2, 128], name='gen_deconv1'), scope='gen_bn3'))
d3 = conv_cond_concat(d3, yb)

d4 = de_conv(d3, output_shape=[self.batch_size, self.output_size, self.output_size, self.channel], name='gen_deconv2')
return tf.nn.sigmoid(d4)
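# Minimal usage sketch for the conditional generators above (hypothetical names z, y,
# fake_images, sess): sample a latent batch plus one-hot labels and fetch generated digits.
labels = np.random.randint(0, 10, size=64)                                # 64 = batch size, 10 = y_dim
y_feed = np.eye(10)[labels].astype(np.float32)                            # one-hot labels, [64, 10]
z_feed = np.random.uniform(-1., 1., size=(64, 100)).astype(np.float32)    # 100 = z_dim (assumed)
# samples = sess.run(fake_images, feed_dict={z: z_feed, y: y_feed})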
def wgan_slim_cond(model, image, y, reuse=False):
    with tf.variable_scope("discriminator") as scope:
        if reuse:
            scope.reuse_variables()

        yb = tf.reshape(y, [-1, 1, 1, model.y_dim])
        image_ = conv_cond_concat(image, yb)

        h0 = lrelu(layer_norm(conv2d(image_, model.df_dim, k_h=4, k_w=4, name='d_h0_conv', padding='VALID')))
        h0 = conv_cond_concat(h0, yb)
        h1 = lrelu(layer_norm(conv2d(h0, model.df_dim*4, k_h=4, k_w=4, name='d_h1_conv', padding='VALID')))
        h1 = conv_cond_concat(h1, yb)
        h2 = lrelu(layer_norm(conv2d(h1, model.df_dim*8, k_h=4, k_w=4, name='d_h2_conv', padding='VALID')))
        h2 = conv_cond_concat(h2, yb)

        shape = np.product(h2.get_shape()[1:].as_list())
        h3 = tf.reshape(h2, [-1, shape])
        h3 = concat([h3, y], 1)

        r_out = linear(h3, 1, 'd_ro_lin')
        return r_out