def discriminator(self, image, train, reuse=False):
    # 3-D convolutional discriminator: four conv3d blocks with batch norm and
    # leaky ReLU, followed by a linear layer that produces a single logit.
    if reuse:
        tf.get_variable_scope().reuse_variables()
    reshaped_img = tf.reshape(image, [self.batch_size, self.image_size[0], self.image_size[1], self.image_size[1], 1])
    h0 = ops.conv3d(reshaped_img, self.d_size, name='d_h0_conv')
    h0 = ops.lrelu(self.d_bn0(h0, train))
    h1 = ops.conv3d(h0, self.d_size*2, name='d_h1_conv')
    h1 = ops.lrelu(self.d_bn1(h1, train))
    h2 = ops.conv3d(h1, self.d_size*4, name='d_h2_conv')
    h2 = ops.lrelu(self.d_bn2(h2, train))
    h3 = ops.conv3d(h2, self.d_size*8, name='d_h3_conv')
    h3 = ops.lrelu(self.d_bn3(h3, train))
    h3 = tf.reshape(h3, [self.batch_size, -1])
    h4 = ops.linear(h3, h3.get_shape()[1], 1, scope='d_h5_lin')
    # Return the sigmoid probability, the raw logit, and an intermediate
    # feature map (useful for feature-matching losses).
    return tf.nn.sigmoid(h4), h4, h2
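# The discriminators above and below rely on a project-specific `ops` module
# (ops.lrelu, ops.conv2d/conv3d, ops.linear) plus batch-norm wrappers
# (self.d_bn*). The following is only a minimal sketch of what such helpers
# typically look like in TF 1.x DCGAN-style code; the actual repos' versions
# may differ in kernel sizes, strides, initializers, and padding.
import tensorflow as tf

def lrelu(x, leak=0.2, name='lrelu'):
    # Leaky ReLU: max(x, leak * x).
    return tf.maximum(x, leak * x, name=name)

def conv2d(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name='conv2d'):
    # Strided 2-D convolution with SAME padding (a conv3d helper is analogous,
    # using tf.nn.conv3d and a 5-D kernel).
    with tf.variable_scope(name):
        w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        b = tf.get_variable('b', [output_dim], initializer=tf.constant_initializer(0.0))
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
        return tf.nn.bias_add(conv, b)

def linear(input_, input_size, output_size, scope='linear', stddev=0.02):
    # Fully connected layer: input_ @ w + b, producing `output_size` units.
    with tf.variable_scope(scope):
        w = tf.get_variable('w', [input_size, output_size],
                            initializer=tf.random_normal_initializer(stddev=stddev))
        b = tf.get_variable('b', [output_size], initializer=tf.constant_initializer(0.0))
        return tf.matmul(input_, w) + b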
def discriminator(self, image, train, reuse=False):
    # 2-D variant of the discriminator above: three conv2d blocks with batch
    # norm and leaky ReLU, then a linear layer that outputs one logit per image.
    if reuse:
        tf.get_variable_scope().reuse_variables()
    reshaped_img = tf.reshape(image, [self.batch_size, self.image_size[0], self.image_size[1], 1])
    h0 = ops.conv2d(reshaped_img, self.d_size, name='d_h0_conv')
    h0 = ops.lrelu(self.d_bn0(h0, train))
    h1 = ops.conv2d(h0, self.d_size*2, name='d_h1_conv')
    h1 = ops.lrelu(self.d_bn1(h1, train))
    h2 = ops.conv2d(h1, self.d_size*4, name='d_h2_conv')
    h2_tensor = ops.lrelu(self.d_bn2(h2, train))
    h2 = tf.reshape(h2_tensor, [self.batch_size, -1])
    h3 = ops.linear(h2, h2.get_shape()[1], 1, scope='d_h5_lin')
    # Return the probability, the logit, and the last conv feature map.
    return tf.nn.sigmoid(h3), h3, h2_tensor
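# Hypothetical usage inside the model's graph-building code (the tensor names
# `self.images` and `self.G` are assumptions): build the discriminator once on
# real images, then reuse the same variables for generated samples.
D_real, D_real_logits, feat_real = self.discriminator(self.images, train=True)
D_fake, D_fake_logits, feat_fake = self.discriminator(self.G, train=True, reuse=True)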
def discriminate(self, x_var, y, weights, biases, reuse=False):
    # Conditional discriminator: the label y is concatenated onto the input and
    # onto each intermediate representation (conv_cond_concat / tf.concat).
    y1 = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
    x_var = conv_cond_concat(x_var, y1)
    conv1 = lrelu(conv2d(x_var, weights['wc1'], biases['bc1']))
    conv1 = conv_cond_concat(conv1, y1)
    conv2 = lrelu(batch_normal(conv2d(conv1, weights['wc2'], biases['bc2']), scope='dis_bn1', reuse=reuse))
    conv2 = tf.reshape(conv2, [self.batch_size, -1])
    conv2 = tf.concat([conv2, y], 1)
    fc1 = lrelu(batch_normal(fully_connect(conv2, weights['wc3'], biases['bc3']), scope='dis_bn2', reuse=reuse))
    fc1 = tf.concat([fc1, y], 1)
    # Final fully connected layer produces the discriminator output for D.
    output = fully_connect(fc1, weights['wd'], biases['bd'])
    return tf.nn.sigmoid(output)
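# `conv_cond_concat` is not defined in this snippet; a common implementation
# (seen in DCGAN-style TF 1.x repos) tiles the label tensor over the spatial
# dimensions of the feature map and concatenates along the channel axis:
def conv_cond_concat(x, y):
    # x: [batch, h, w, c] feature map, y: [batch, 1, 1, y_dim] label tensor.
    x_shapes = x.get_shape()
    y_shapes = y.get_shape()
    return tf.concat(
        [x, y * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3)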
def discriminator(images, options, reuse=False, repeat_num=6, name='disc'):  # original version
    # StarGAN-style discriminator: no instance normalization is used.
    with tf.variable_scope(name):
        # reuse or not
        if reuse:
            tf.get_variable_scope().reuse_variables()
        else:
            assert tf.get_variable_scope().reuse is False
        # input & hidden layers: six stride-2 convolutions, so the spatial
        # resolution shrinks by a factor of 2^6 = 64.
        h1 = lrelu(conv2d(images, options.nf, ks=4, s=2, name='disc_conv1'))
        h2 = lrelu(conv2d(h1, 2*options.nf, ks=4, s=2, name='disc_conv2'))
        h3 = lrelu(conv2d(h2, 4*options.nf, ks=4, s=2, name='disc_conv3'))
        h4 = lrelu(conv2d(h3, 8*options.nf, ks=4, s=2, name='disc_conv4'))
        h5 = lrelu(conv2d(h4, 16*options.nf, ks=4, s=2, name='disc_conv5'))
        h6 = lrelu(conv2d(h5, 32*options.nf, ks=4, s=2, name='disc_conv6'))
        # h6: (batch, h/64, w/64, 32*nf)
        # output layers
        # PatchGAN head: (batch, h/64, w/64, 32*nf) ==> (batch, h/64, w/64, 1)
        src = conv2d(h6, 1, ks=3, s=1, name='disc_conv7_patch')
        # Classification head: a VALID conv whose kernel covers the remaining
        # feature map, (batch, h/64, w/64, 32*nf) ==> (batch, 1, 1, num_cls)
        k_size = int(options.image_size / np.power(2, repeat_num))
        aux = conv2d(h6, options.n_label, ks=k_size, s=1, padding='VALID', name='disc_conv8_aux')
        aux = tf.reshape(aux, [-1, options.n_label])
        return src, aux
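# Hypothetical wiring of the two heads (the names real_images, fake_images,
# real_labels, and lambda_cls are assumptions, and the sigmoid cross-entropy
# adversarial loss is only one common choice): the PatchGAN map `src` feeds
# the adversarial loss, while the `aux` logits feed a classification loss.
src_real, cls_real = discriminator(real_images, options, reuse=False)
src_fake, cls_fake = discriminator(fake_images, options, reuse=True)
d_loss_real = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=src_real, labels=tf.ones_like(src_real)))
d_loss_fake = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=src_fake, labels=tf.zeros_like(src_fake)))
d_loss_cls = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=cls_real, labels=real_labels))
d_loss = d_loss_real + d_loss_fake + lambda_cls * d_loss_cls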
def encode_decode_2(self, x, reuse=False):
    # Convolutional encoder-decoder: three conv layers down, two deconv layers
    # up, then a stride-1 conv back to a 3-channel image.
    with tf.variable_scope("encode_decode_2") as scope:
        if reuse:
            scope.reuse_variables()
        conv1 = lrelu(instance_norm(conv2d(x, output_dim=64, k_w=5, k_h=5, d_w=1, d_h=1, name='e_c1'), scope='e_in1'))
        conv2 = lrelu(instance_norm(conv2d(conv1, output_dim=128, name='e_c2'), scope='e_in2'))
        conv3 = lrelu(instance_norm(conv2d(conv2, output_dim=256, name='e_c3'), scope='e_in3'))
        # for x_{1}
        de_conv1 = lrelu(instance_norm(de_conv(conv3, output_shape=[self.batch_size, 64, 64, 128],
                                               name='e_d1', k_h=3, k_w=3), scope='e_in4'))
        de_conv2 = lrelu(instance_norm(de_conv(de_conv1, output_shape=[self.batch_size, 128, 128, 64],
                                               name='e_d2', k_w=3, k_h=3), scope='e_in5'))
        x_tilde = conv2d(de_conv2, output_dim=3, d_h=1, d_w=1, name='e_c4')
        return x_tilde
def encode_decode_1(self, x, reuse=False):
    # Same encoder-decoder architecture as encode_decode_2, but under its own
    # variable scope so the two networks keep separate weights.
    with tf.variable_scope("encode_decode_1") as scope:
        if reuse:
            scope.reuse_variables()
        conv1 = lrelu(instance_norm(conv2d(x, output_dim=64, k_w=5, k_h=5, d_w=1, d_h=1, name='e_c1'), scope='e_in1'))
        conv2 = lrelu(instance_norm(conv2d(conv1, output_dim=128, name='e_c2'), scope='e_in2'))
        conv3 = lrelu(instance_norm(conv2d(conv2, output_dim=256, name='e_c3'), scope='e_in3'))
        # for x_{1}
        de_conv1 = lrelu(instance_norm(de_conv(conv3, output_shape=[self.batch_size, 64, 64, 128],
                                               name='e_d1', k_h=3, k_w=3), scope='e_in4'))
        de_conv2 = lrelu(instance_norm(de_conv(de_conv1, output_shape=[self.batch_size, 128, 128, 64],
                                               name='e_d2', k_w=3, k_h=3), scope='e_in5'))
        x_tilde1 = conv2d(de_conv2, output_dim=3, d_h=1, d_w=1, name='e_c4')
        return x_tilde1
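# Hypothetical call site (inside the model's graph-building code): the fixed
# deconv output_shapes suggest 128x128 inputs, assuming the repo's conv2d
# default stride is 2 so the second and third convs halve the resolution twice
# and the two deconvs restore it.
x1 = tf.placeholder(tf.float32, [self.batch_size, 128, 128, 3])
x_tilde1 = self.encode_decode_1(x1)                     # first call creates the variables
x_tilde1_again = self.encode_decode_1(x1, reuse=True)   # later calls share them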
def discriminator(self, image, reuse=False):
    # DCGAN-style discriminator without batch norm: four convolutions with
    # leaky ReLU, then a linear layer to a single logit. The hard-coded
    # 4*4*self.d_size*8 flattened size assumes 64x64 inputs with stride-2 convs.
    if reuse:
        tf.get_variable_scope().reuse_variables()
    h0 = ops.lrelu(ops.conv2d(image, self.d_size, name='d_h0_conv'))
    h1 = ops.lrelu(ops.conv2d(h0, self.d_size*2, name='d_h1_conv'))
    h2 = ops.lrelu(ops.conv2d(h1, self.d_size*4, name='d_h2_conv'))
    h3 = ops.lrelu(ops.conv2d(h2, self.d_size*8, name='d_h3_conv'))
    h4 = ops.linear(tf.reshape(h3, [self.batch_size, -1]), 4*4*self.d_size*8, 1, scope='d_h5_lin')
    return tf.nn.sigmoid(h4), h4
def _dcgan_critic(self, X, reuse=False):
    '''
    K-Lipschitz function.
    WGAN-GP does not use batch norm in the critic.
    '''
    with tf.variable_scope('critic', reuse=reuse):
        net = X
        with slim.arg_scope([slim.conv2d], kernel_size=[5, 5], stride=2, padding='SAME', activation_fn=ops.lrelu):
            # Four stride-2 convolutions take a 64x64 input down to 4x4.
            net = slim.conv2d(net, 64)
            expected_shape(net, [32, 32, 64])
            net = slim.conv2d(net, 128)
            expected_shape(net, [16, 16, 128])
            net = slim.conv2d(net, 256)
            expected_shape(net, [8, 8, 256])
            net = slim.conv2d(net, 512)
            expected_shape(net, [4, 4, 512])
        net = slim.flatten(net)
        # No sigmoid: the critic outputs an unbounded score.
        net = slim.fully_connected(net, 1, activation_fn=None)
        return net
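# A minimal WGAN-GP sketch built on this critic (the tensor names `real`,
# `fake` and the weight `lambda_gp` are assumptions, not from the source):
# the gradient penalty pushes the critic's gradient norm toward 1 on random
# interpolates between real and generated samples.
d_real = self._dcgan_critic(real, reuse=False)
d_fake = self._dcgan_critic(fake, reuse=True)
eps = tf.random_uniform([tf.shape(real)[0], 1, 1, 1], 0., 1.)
x_hat = eps * real + (1. - eps) * fake
d_hat = self._dcgan_critic(x_hat, reuse=True)
grads = tf.gradients(d_hat, x_hat)[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))
gradient_penalty = tf.reduce_mean(tf.square(slopes - 1.))
critic_loss = tf.reduce_mean(d_fake) - tf.reduce_mean(d_real) + lambda_gp * gradient_penalty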