def generator_mnist(self, z, is_train=True, reuse=False):
    """Fully connected MNIST generator: maps z through widening ReLU
    layers to a 28x28x1 image with sigmoid pixels."""
    if reuse:
        tf.get_variable_scope().reuse_variables()
    h0 = linear(z, 64, 'g_h0_lin', stddev=self.config.init)
    h1 = linear(tf.nn.relu(h0), 256, 'g_h1_lin', stddev=self.config.init)
    h2 = linear(tf.nn.relu(h1), 256, 'g_h2_lin', stddev=self.config.init)
    h3 = linear(tf.nn.relu(h2), 1024, 'g_h3_lin', stddev=self.config.init)
    h4 = linear(tf.nn.relu(h3), 28 * 28 * 1, 'g_h4_lin', stddev=self.config.init)
    return tf.reshape(tf.nn.sigmoid(h4), [self.batch_size, 28, 28, 1])
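# A minimal sketch of the `linear` helper the snippet above relies on.
# Assumed semantics: a variable-scoped fully connected layer with a
# truncated-normal initializer; the original helper may differ.
import tensorflow as tf  # TF 1.x

def linear(x, out_dim, scope, stddev=0.02):
    in_dim = x.get_shape().as_list()[-1]
    with tf.variable_scope(scope):
        w = tf.get_variable(
            'w', [in_dim, out_dim],
            initializer=tf.truncated_normal_initializer(stddev=stddev))
        b = tf.get_variable('b', [out_dim], initializer=tf.zeros_initializer())
    return tf.matmul(x, w) + b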
# Fragment: body of a DCGAN-style encoder. `opts`, `input_`, `num_filters`,
# `is_training`, `prefix`, and `reuse` come from the enclosing function.
with tf.variable_scope(prefix, reuse=reuse):
    h0 = ops.conv2d(opts, input_, num_filters // 8, scope='h0_conv')
    h0 = ops.batch_norm(opts, h0, is_training, reuse, scope='bn_layer1')
    h0 = tf.nn.relu(h0)
    h1 = ops.conv2d(opts, h0, num_filters // 4, scope='h1_conv')
    h1 = ops.batch_norm(opts, h1, is_training, reuse, scope='bn_layer2')
    h1 = tf.nn.relu(h1)
    h2 = ops.conv2d(opts, h1, num_filters // 2, scope='h2_conv')
    h2 = ops.batch_norm(opts, h2, is_training, reuse, scope='bn_layer3')
    h2 = tf.nn.relu(h2)
    h3 = ops.conv2d(opts, h2, num_filters, scope='h3_conv')
    h3 = ops.batch_norm(opts, h3, is_training, reuse, scope='bn_layer4')
    h3 = tf.nn.relu(h3)
    # Debugging note from the original author: NaNs were observed in the
    # activations by this point.
    latent_mean = ops.linear(opts, h3, opts['latent_space_dim'], scope='h3_lin')
    log_latent_sigmas = ops.linear(opts, h3, opts['latent_space_dim'], scope='h3_lin_sigma')
    return latent_mean, log_latent_sigmas
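# Hypothetical downstream use of the two heads above: draw a latent code
# with the reparameterization trick, assuming `log_latent_sigmas` holds
# log-variances (the usual VAE convention; the original may differ).
import tensorflow as tf

def sample_latent(latent_mean, log_latent_sigmas):
    eps = tf.random_normal(tf.shape(latent_mean))
    return latent_mean + tf.exp(log_latent_sigmas / 2.) * eps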
# Fragment: adversarial part of the reconstruction cost. The tensors
# `reconstructed_reencoded_sg` (the same encoder applied to a stop-gradient
# copy of the reconstructions) and `encoded_training` are defined earlier
# in the enclosing function.
reconstructed_reencoded = self.encoder(
    opts, reconstructed_training, is_training=is_training_ph,
    keep_prob=keep_prob_ph, reuse=True)
if opts['e_is_random']:
    reconstructed_reencoded = reconstructed_reencoded[0]
# The line below keeps the forward value equal to reconstructed_reencoded,
# while on the backward pass the shared encoder-weight gradients cancel:
# gradients reach the decoder through the reconstructions, but the encoder
# itself is not changed.
crazy_hack = reconstructed_reencoded - reconstructed_reencoded_sg + \
    tf.stop_gradient(reconstructed_reencoded_sg)
encoded_training_sg = self.encoder(
    opts, tf.stop_gradient(real_points),
    is_training=is_training_ph, keep_prob=keep_prob_ph, reuse=True)
if opts['e_is_random']:
    encoded_training_sg = encoded_training_sg[0]

adv_fake_layer = ops.linear(opts, reconstructed_reencoded_sg, 1, scope='adv_layer')
adv_true_layer = ops.linear(opts, encoded_training_sg, 1, scope='adv_layer', reuse=True)
adv_fake = tf.nn.sigmoid_cross_entropy_with_logits(
    logits=adv_fake_layer, labels=tf.zeros_like(adv_fake_layer))
adv_true = tf.nn.sigmoid_cross_entropy_with_logits(
    logits=adv_true_layer, labels=tf.ones_like(adv_true_layer))
adv_fake = tf.reduce_mean(adv_fake)
adv_true = tf.reduce_mean(adv_true)
adv_c_loss = adv_fake + adv_true
emb_c = tf.reduce_sum(tf.square(crazy_hack - tf.stop_gradient(encoded_training)), 1)
emb_c_loss = tf.reduce_mean(tf.sqrt(emb_c + 1e-5))
# Normalize the loss so that it does not depend on how good the
# discriminator is.
emb_c_loss = emb_c_loss / tf.stop_gradient(emb_c_loss)
return adv_c_loss, emb_c_loss
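# Toy demonstration of the stop-gradient identity behind `crazy_hack`:
# y = a - b + tf.stop_gradient(b) equals `a` in the forward pass, while any
# gradient path shared by `a` and `b` cancels in the backward pass.
import tensorflow as tf

w = tf.Variable(2.0)
x = tf.constant(3.0)
a = w * x                       # full path
b = w * tf.stop_gradient(x)     # same weight, detached input
y = a - b + tf.stop_gradient(b)
grad_w = tf.gradients(y, w)[0]  # x - x = 0: the shared weight gradient cancels

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run([y, a, grad_w]))  # y == a == 6.0, grad_w == 0.0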
# Fragment: set-embedding network. `state`, `d_e`, `mask`, and the
# `relation_layer` and `linear` helpers come from the enclosing module.
d_o = [128, 128]

# Build graph:
initial_elems = state

# Embedding part: a stack of relation layers over the set elements.
for i, layer in enumerate(d_e):
    el = initial_elems
    el, _ = relation_layer(layer, el, mask, name='l' + str(i))

c = mask_and_pool(el, mask)  # pool to get context for the next block

# Fully connected part
fc = c
for i, layer in enumerate(d_o):
    fc, _, _ = linear(fc, layer, name='lO_' + str(i))

# Output
embedding = fc

# Return the network output and an (empty) parameter list.
return embedding, []
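# `mask_and_pool` is not shown in the excerpt; a plausible sketch, assuming
# it zeroes out padded set elements and pools over the element axis:
import tensorflow as tf

def mask_and_pool(el, mask):
    # el: [batch, num_elems, dim]; mask: [batch, num_elems], 1.0 for valid.
    masked = el * tf.expand_dims(mask, -1)
    return tf.reduce_max(masked, axis=1)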
def discriminator(self, opts, input_,
                  prefix='DISCRIMINATOR', reuse=False):
    """Discriminator function, suitable for simple toy experiments."""
    shape = input_.get_shape().as_list()
    num_filters = opts['d_num_filters']
    assert len(shape) > 0, 'No inputs to discriminate.'

    with tf.variable_scope(prefix, reuse=reuse):
        h0 = ops.linear(opts, input_, num_filters, 'h0_lin')
        h0 = tf.nn.relu(h0)
        h1 = ops.linear(opts, h0, num_filters, 'h1_lin')
        h1 = tf.nn.relu(h1)
        h2 = ops.linear(opts, h1, 1, 'h2_lin')
        return h2
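# Hypothetical call site for the discriminator above: non-saturating GAN
# losses built on its raw logits (`model`, `real_batch`, and `fake_batch`
# are illustrative names, not from the original).
d_real = model.discriminator(opts, real_batch, reuse=False)
d_fake = model.discriminator(opts, fake_batch, reuse=True)
d_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_real, labels=tf.ones_like(d_real)) +
    tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_fake, labels=tf.zeros_like(d_fake)))
g_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_fake, labels=tf.ones_like(d_fake)))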
# Fragment: generic encoder that optionally perturbs its input during
# training, then dispatches on the architecture chosen in `opts`. The
# `do_nothing` header is reconstructed from the call below; its
# `add_noise(x)` counterpart is defined just above this excerpt.
def do_nothing(x):
    return x
input_ = tf.cond(is_training, lambda: add_noise(input_), lambda: do_nothing(input_))

num_units = opts['e_num_filters']
num_layers = opts['e_num_layers']
with tf.variable_scope("ENCODER", reuse=reuse):
    if not opts['convolutions']:
        # Fully connected encoder.
        hi = input_
        for i in range(num_layers):
            hi = ops.linear(opts, hi, num_units, scope='h%d_lin' % i)
            if opts['batch_norm']:
                hi = ops.batch_norm(opts, hi, is_training, reuse, scope='bn%d' % i)
            hi = tf.nn.relu(hi)
        if opts['e_is_random']:
            latent_mean = ops.linear(
                opts, hi, opts['latent_space_dim'], 'h%d_lin' % (i + 1))
            log_latent_sigmas = ops.linear(
                opts, hi, opts['latent_space_dim'], 'h%d_lin_sigma' % (i + 1))
            return latent_mean, log_latent_sigmas
        else:
            return ops.linear(opts, hi, opts['latent_space_dim'], 'h%d_lin' % (i + 1))
    elif opts['e_arch'] == 'dcgan':
        return self.dcgan_encoder(opts, input_, is_training, reuse, keep_prob)
    elif opts['e_arch'] == 'ali':
        return self.ali_encoder(opts, input_, is_training, reuse, keep_prob)
    elif opts['e_arch'] == 'began':
        return self.began_encoder(opts, input_, is_training, reuse, keep_prob)
    else:
        raise ValueError('Unknown encoder architecture: %s' % opts['e_arch'])
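# Illustrative `opts` for the fully connected branch above (keys taken from
# the snippet; the values are placeholders, not the original defaults):
opts = {
    'e_add_noise': False,
    'e_num_filters': 1024,
    'e_num_layers': 3,
    'convolutions': False,
    'batch_norm': True,
    'e_is_random': True,
    'latent_space_dim': 64,
    'e_arch': 'dcgan',  # only consulted when 'convolutions' is True
}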
# Fragment: tail of a convolutional encoder. `layer_x`, `height`, `width`,
# `num_units`, and `i` come from the convolutional loop above this excerpt.
    layer_x = ops.batch_norm(opts, layer_x, is_training, reuse, scope='bn%d' % i)
    layer_x = ops.lrelu(layer_x, 0.1)
assert height == 1
assert width == 1
# Then two 1x1 convolutions.
layer_x = ops.conv2d(opts, layer_x, num_units * 2, d_h=1, d_w=1,
                     scope='conv2d_1x1', conv_filters_dim=1)
if opts['batch_norm']:
    layer_x = ops.batch_norm(opts, layer_x, is_training, reuse, scope='bnlast')
layer_x = ops.lrelu(layer_x, 0.1)
layer_x = ops.conv2d(opts, layer_x, num_units // 2, d_h=1, d_w=1,
                     scope='conv2d_1x1_2', conv_filters_dim=1)
if opts['e_is_random']:
    latent_mean = ops.linear(
        opts, layer_x, opts['latent_space_dim'], scope='hlast_lin')
    log_latent_sigmas = ops.linear(
        opts, layer_x, opts['latent_space_dim'], scope='hlast_lin_sigma')
    return latent_mean, log_latent_sigmas
else:
    return ops.linear(opts, layer_x, opts['latent_space_dim'], scope='hlast_lin')
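# Why 1x1 convolutions here: on a [batch, 1, 1, channels] tensor they act as
# per-position fully connected layers. A small equivalence check:
import tensorflow as tf

x = tf.random_normal([8, 1, 1, 256])
w = tf.get_variable('w_1x1_demo', [1, 1, 256, 128])
conv = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
fc = tf.reshape(
    tf.matmul(tf.reshape(x, [8, 256]), tf.reshape(w, [256, 128])),
    [8, 1, 1, 128])
# `conv` and `fc` are numerically identical.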
# Fragment: the same 1x1-convolution head in a sibling encoder that uses
# opts['zdim'] and an 'e_noise' switch instead of 'e_is_random'.
# Then two 1x1 convolutions.
layer_x = ops.conv2d(opts, layer_x, num_units * 2, d_h=1, d_w=1,
                     scope='conv2d_1x1', conv_filters_dim=1)
if opts['batch_norm']:
    layer_x = ops.batch_norm(opts, layer_x, is_training,
                             reuse, scope='hfinal_bn')
layer_x = ops.lrelu(layer_x, 0.1)
layer_x = ops.conv2d(opts, layer_x, num_units // 2, d_h=1, d_w=1,
                     scope='conv2d_1x1_2', conv_filters_dim=1)

if opts['e_noise'] != 'gaussian':
    # Deterministic code.
    res = ops.linear(opts, layer_x, opts['zdim'], scope='hlast_lin')
    return res
else:
    # Gaussian case: return mean and log-sigma heads.
    mean = ops.linear(opts, layer_x, opts['zdim'], scope='mean_lin')
    log_sigmas = ops.linear(opts, layer_x,
                            opts['zdim'], scope='log_sigmas_lin')
    return mean, log_sigmas
def generator(self, z, y=None):
    """Defines the G network structure."""
    with tf.variable_scope("generator") as scope:
        if not self.y_dim:
            s_h, s_w = self.output_height, self.output_width
            s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
            s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
            s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
            s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)

            # Project `z` and reshape.
            self.z_, self.h0_w, self.h0_b = linear(
                z, self.gf_dim * 8 * s_h16 * s_w16, 'g_h0_lin', with_w=True)
            self.h0 = tf.reshape(
                self.z_, [-1, s_h16, s_w16, self.gf_dim * 8])
            h0 = tf.nn.relu(self.g_bn0(self.h0))

            self.h1, self.h1_w, self.h1_b = deconv2d(
                h0, [self.batch_size, s_h8, s_w8, self.gf_dim * 4], name='g_h1', with_w=True)
            h1 = tf.nn.relu(self.g_bn1(self.h1))

            h2, self.h2_w, self.h2_b = deconv2d(
                h1, [self.batch_size, s_h4, s_w4, self.gf_dim * 2], name='g_h2', with_w=True)
            h2 = tf.nn.relu(self.g_bn2(h2))

            h3, self.h3_w, self.h3_b = deconv2d(
                h2, [self.batch_size, s_h2, s_w2, self.gf_dim * 1], name='g_h3', with_w=True)
            # The excerpt ends here; the tail below is assumed to follow the
            # standard DCGAN generator (a final deconvolution to the image
            # resolution and a tanh output).
            h3 = tf.nn.relu(self.g_bn3(h3))

            h4, self.h4_w, self.h4_b = deconv2d(
                h3, [self.batch_size, s_h, s_w, self.c_dim], name='g_h4', with_w=True)
            return tf.nn.tanh(h4)
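# `conv_out_size_same` computes the spatial size produced by a stride-s SAME
# convolution; a sketch matching the standard DCGAN helper (assumed):
import math

def conv_out_size_same(size, stride):
    return int(math.ceil(float(size) / float(stride)))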