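# Assumed preamble (not shown in the original snippet): the `cortex` library
# plus the script-level hyperparameters referenced below. The values here are
# placeholders, not the original configuration.
import cortex

dim_in = 100                     # assumed: dimensionality of the noise prior
dim = 28 * 28                    # assumed: data dimensionality
distribution_type = 'gaussian'   # assumed: generator output distribution
d_dropout = 0.5                  # assumed: discriminator dropout rate
batch_size = 100                 # assumed
n_posterior_samples = 10         # assumed: samples drawn from generator.P
learning_rate = 1e-4             # assumed
optimizer = 'rmsprop'            # assumed
test = False                     # assumed: whether to build the session in test mode

# A dataset registered as 'data' (providing 'data.input' and an 'image_shape')
# is also assumed to have been prepared before this point.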
def build_normal_GAN():
    '''Adds the standard GAN costs: discriminator cross-entropy on fake (X=0.)
    and real (X=1.) samples, plus `gen_cost` for the generator.'''
    cortex.add_step('discriminator._cost', P='fake.P', X=0.,
                    name='fake_cost')
    cortex.add_step('discriminator._cost', P='real.P', X=1.,
                    name='real_cost')
    cortex.build()
    cortex.add_cost(
        lambda x, y: x + y, 'fake_cost.output', 'real_cost.output',
        name='discriminator_cost')
    cortex.add_cost(gen_cost, F='fake.P',
                    cells=['discriminator'],
                    name='generator_cost')
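# `gen_cost` is referenced above but not defined in this snippet. Below is a
# minimal, hypothetical sketch of a non-saturating generator objective,
# -E[log D(fake)], written against Theano (which cortex builds on); the name
# and signature are assumptions, not the original implementation.
from theano import tensor as T

def gen_cost(F=None):
    '''Hypothetical generator cost: push D's output on fake samples toward 1.'''
    return -T.log(F + 1e-7).mean()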
# Generative model
cortex.prepare_cell('gaussian', name='noise', dim=dim_in)
cortex.prepare_cell('DistributionMLP', name='generator', dim_hs=[500, 500, 500],
                    h_act='softplus', batch_normalization=True, dim=dim,
                    weight_normalization=False, bn_mean_only=False,
                    distribution_type=distribution_type)

# Discriminator
cortex.prepare_cell('DistributionMLP', name='discriminator',
                    distribution_type='binomial',
                    dim=1, dropout=d_dropout,
                    dim_in=dim, dim_hs=[500, 200], h_act='softplus')

# GRAPH --------------------------------------------------------------------
# Discriminator pass on real data, then on generator samples.
cortex.add_step('discriminator', 'data.input', name='real')
cortex.prepare_samples('noise', batch_size)
cortex.add_step('generator', 'noise.samples', constants=['noise.samples'])
cortex.prepare_samples('generator.P', n_posterior_samples)
cortex.add_step('discriminator', 'generator.samples', name='fake',
                constants=['generator.samples'])
cortex.add_step('discriminator._cost', P='fake.P', X=0., name='fake_cost')
cortex.add_step('discriminator._cost', P='real.P', X=1., name='real_cost')
# 2D grid over the noise space, used to visualize generator output.
cortex.add_step('noise.grid2d', random_idx=True, name='noise_grid')
cortex.add_step('generator', 'noise_grid.output', name='gen_grid')
cortex.build()

#cortex.add_cost('l2_decay', 0.002, 'discriminator.mlp.weights')
cortex.add_cost('l2_decay', 0.002, 'generator.mlp.weights')
cortex.add_cost(lambda x, y: x + y, 'fake_cost.output', 'real_cost.output',
                name='discriminator_cost')
# Alternative generator cost: reweighted maximum likelihood on the
# generator's own samples.
cortex.add_cost(reweighted_MLE, G='generator.P',
                G_samples='generator.samples')

# Convolutional discriminator variant: the CNN arguments are collected in a
# dict and passed to the DistributionMLP wrapper via `mlp=`.
discriminator = dict(
    cell_type='CNN2D',
    input_shape=cortex._manager.datasets['data']['image_shape'],
    filter_shapes=((4, 4), (4, 4), (4, 4)),
    strides=((2, 2), (2, 2), (1, 1)),
    pads=((1, 1), (1, 1), (1, 1)),
    n_filters=[128, 256, 1028], dim_out=1, h_act='softplus',
    batch_normalization=True)
cortex.prepare_cell('DistributionMLP', name='discriminator',
                    mlp=discriminator,
                    dim=1,
                    distribution_type='binomial')
cortex.prepare_cell('Baseline', name='baseline')
cortex.add_step('discriminator', 'data.input', name='real')
cortex.prepare_samples('noise', batch_size)
cortex.add_step('generator', 'noise.samples')
cortex.add_step('discriminator', 'generator.output', name='fake')
build_normal_GAN()

cortex.profile()

optimizer_args = {}
train_session = cortex.create_session(batch_size=batch_size)
cortex.build_session(test=test)

trainer = cortex.setup_trainer(
    train_session,
    optimizer=optimizer,
    epochs=10000,
    learning_rate=learning_rate,
    learning_rate_decay=.99,
    batch_size=batch_size,
)
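# The original snippet ends at the trainer setup. A run would normally follow
# with monitoring and the training call itself; the invocation below is a
# hypothetical sketch (name and argument assumed, check the cortex version in
# use), not taken from the original script.
cortex.train(eval_every=10)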