# Inception-style mixed block: four parallel branches concatenated on the
# channel axis, with each group of ops optionally pinned to a device.
with tf.variable_scope('mixed_8x8x2048b'):
    with tf.variable_scope('branch1x1'):
        with tf.device(devices[get_dev_id(3)]) if devices else ExitStack() as gs:
            branch1x1 = ops.conv2d(net, 320, [1, 1])
    with tf.variable_scope('branch3x3'):
        with tf.device(devices[get_dev_id(0)]) if devices else ExitStack() as gs:
            branch3x3 = ops.conv2d(net, 384, [1, 1])
            branch3x3 = tf.concat(axis=3, values=[ops.conv2d(branch3x3, 384, [1, 3]),
                                                  ops.conv2d(branch3x3, 384, [3, 1])])
    with tf.variable_scope('branch3x3dbl'):
        with tf.device(devices[get_dev_id(2)]) if devices else ExitStack() as gs:
            branch3x3dbl = ops.conv2d(net, 448, [1, 1])
        with tf.device(devices[get_dev_id(0)]) if devices else ExitStack() as gs:
            branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
        with tf.device(devices[get_dev_id(3)]) if devices else ExitStack() as gs:
            branch3x3dbl = tf.concat(axis=3, values=[ops.conv2d(branch3x3dbl, 384, [1, 3]),
                                                     ops.conv2d(branch3x3dbl, 384, [3, 1])])
    with tf.variable_scope('branch_pool'):
        with tf.device(devices[get_dev_id(3)]) if devices else ExitStack() as gs:
            branch_pool = ops.avg_pool(net, [3, 3])
        with tf.device(devices[get_dev_id(0)]) if devices else ExitStack() as gs:
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
    with tf.device(devices[get_dev_id(3)]) if devices else ExitStack() as gs:
        net = tf.concat(axis=3, values=[branch1x1, branch3x3, branch3x3dbl, branch_pool])
        end_points['mixed_8x8x2048b'] = net

# Final pooling and prediction
with tf.device(devices[get_dev_id(0)]) if devices else ExitStack() as gs:
    with tf.variable_scope('logits'):
        shape = net.get_shape()
        net = ops.avg_pool(net, shape[1:3], padding='VALID', scope='pool')
        # 1 x 1 x 2048
        net = ops.dropout(net, dropout_keep_prob, scope='dropout')
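# A minimal, self-contained sketch of the device-or-no-op pattern used above.
# `maybe_device` is a hypothetical helper, not part of the original code:
# contextlib.ExitStack() serves as a do-nothing context manager when no
# device list is supplied, so the same graph-building code also runs on
# CPU-only machines.
from contextlib import ExitStack

import tensorflow as tf

def maybe_device(devices, idx):
    """Pin ops to devices[idx] if a device list is supplied, else do nothing."""
    return tf.device(devices[idx % len(devices)]) if devices else ExitStack()

with maybe_device(['/gpu:0', '/gpu:1'], 3):  # wraps around to '/gpu:1'
    x = tf.zeros([1, 8, 8, 2048])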
def discriminator(self, image, reuse=False):
    if reuse:
        tf.get_variable_scope().reuse_variables()
    h0 = ops.lrelu(ops.conv2d(image, self.d_size, name='d_h0_conv'))
    h1 = ops.lrelu(ops.conv2d(h0, self.d_size * 2, name='d_h1_conv'))
    h2 = ops.lrelu(ops.conv2d(h1, self.d_size * 4, name='d_h2_conv'))
    h3 = ops.lrelu(ops.conv2d(h2, self.d_size * 8, name='d_h3_conv'))
    h4 = ops.linear(tf.reshape(h3, [self.batch_size, -1]), 4 * 4 * self.d_size * 8, 1, scope='d_h5_lin')
    return tf.nn.sigmoid(h4), h4
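# Typical TF1 usage of a discriminator like the one above (a sketch; the
# surrounding model class is assumed): the second call passes reuse=True so
# the real and generated batches are scored with one shared set of weights.
#
#   d_real, d_real_logits = self.discriminator(real_images)
#   d_fake, d_fake_logits = self.discriminator(fake_images, reuse=True)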
def discriminate(self, x_var, reuse=False):
    with tf.variable_scope("discriminator") as scope:
        if reuse:
            scope.reuse_variables()
        conv1 = tf.nn.relu(conv2d(x_var, output_dim=32, name='dis_conv1'))
        conv2 = tf.nn.relu(batch_normal(conv2d(conv1, output_dim=128, name='dis_conv2'), scope='dis_bn1', reuse=reuse))
        conv3 = tf.nn.relu(batch_normal(conv2d(conv2, output_dim=256, name='dis_conv3'), scope='dis_bn2', reuse=reuse))
        conv4 = conv2d(conv3, output_dim=256, name='dis_conv4')
        middle_conv = conv4
        conv4 = tf.nn.relu(batch_normal(conv4, scope='dis_bn3', reuse=reuse))
        conv4 = tf.reshape(conv4, [self.batch_size, -1])
        fl = tf.nn.relu(batch_normal(fully_connect(conv4, output_size=256, scope='dis_fully1'), scope='dis_bn4', reuse=reuse))
        output = fully_connect(fl, output_size=1, scope='dis_fully2')
        return middle_conv, output
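# A plausible definition of the batch_normal helper used above, built on
# tf.contrib.layers.batch_norm (a sketch; the decay and epsilon values are
# assumptions, and the project's own helper may differ):
def batch_normal(x, scope='bn', reuse=False):
    return tf.contrib.layers.batch_norm(x, decay=0.9, epsilon=1e-5, scale=True,
                                        reuse=reuse, scope=scope,
                                        updates_collections=None)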
def discriminator(x, label):
    with tf.variable_scope('Discriminator'):
        # Secondary path: downsample the input and feed it through the
        # previous resolution's from-RGB layer.
        if layers > 1:
            with tf.variable_scope('rgb_layer_{}'.format(layers - 2)):
                d0 = pool(x)
                d0 = leaky_relu(conv2d(d0, self.channels[layers - 1], 1))
        with tf.variable_scope('rgb_layer_{}'.format(layers - 1)):
            d1 = leaky_relu(conv2d(x, self.channels[layers], 1))
        for i in reversed(range(layers)):
            with tf.variable_scope('layer_{}'.format(i)):
                if i == 0:
                    d1 = minibatch_stddev(d1)
                with tf.variable_scope('1'):
                    d1 = leaky_relu(conv2d(d1, self.channels[i]))
                with tf.variable_scope('2'):
                    if i == 0:
                        d1 = leaky_relu(conv2d(d1, self.channels[0], 2, 2))
                    else:
                        d1 = leaky_relu(conv2d(d1, self.channels[i]))
                if i > 0:
                    d1 = pool(d1)
                # At the top layer, combine with the downsampled-input path.
                if i == layers - 1 and layers > 1:
                    d1 = self._reparameterize(d0, d1)
        with tf.variable_scope('dense'):
            ...
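# A minimal sketch of the minibatch standard deviation trick invoked above
# (after Karras et al., "Progressive Growing of GANs"; this is not the
# project's own implementation). One extra feature map holding the average
# across-batch standard deviation is appended, which lets the discriminator
# detect low-variety, mode-collapsed batches.
def minibatch_stddev_sketch(x):                      # x: [N, H, W, C]
    mean = tf.reduce_mean(x, axis=0, keepdims=True)
    var = tf.reduce_mean(tf.square(x - mean), axis=0, keepdims=True)
    stddev = tf.reduce_mean(tf.sqrt(var + 1e-8))     # scalar statistic
    shape = tf.shape(x)
    extra = tf.fill([shape[0], shape[1], shape[2], 1], stddev)
    return tf.concat([x, extra], axis=3)             # [N, H, W, C + 1]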
for i, (kernel, stride, channels) in enumerate(layer_params):
    height = (height - 1) * stride + kernel
    width = height
    layer_x = ops.deconv2d(
        opts, layer_x, [batch_size, height, width, channels],
        d_h=stride, d_w=stride, scope='h%d_deconv' % i,
        conv_filters_dim=kernel, padding='VALID')
    if opts['batch_norm']:
        layer_x = ops.batch_norm(opts, layer_x, is_training,
                                 reuse, scope='h%d_bn' % i)
    layer_x = ops.lrelu(layer_x, 0.1)
assert height == data_height
assert width == data_width

# Then two 1x1 convolutions.
layer_x = ops.conv2d(opts, layer_x, num_units // 8, d_h=1, d_w=1,
                     scope='conv2d_1x1', conv_filters_dim=1)
if opts['batch_norm']:
    layer_x = ops.batch_norm(opts, layer_x,
                             is_training, reuse, scope='hfinal_bn')
layer_x = ops.lrelu(layer_x, 0.1)
layer_x = ops.conv2d(opts, layer_x, data_channels, d_h=1, d_w=1,
                     scope='conv2d_1x1_2', conv_filters_dim=1)
if opts['input_normalize_sym']:
    return tf.nn.tanh(layer_x), layer_x
else:
    return tf.nn.sigmoid(layer_x), layer_x
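# Worked example of the size recurrence in the loop above. For 'VALID'
# transposed convolutions, out = (in - 1) * stride + kernel. Starting from a
# 1x1 input with a hypothetical layer_params = [(4, 1, 128), (4, 2, 64),
# (4, 2, 32)] (kernel, stride, channels):
#   layer 0: (1 - 1) * 1 + 4 = 4
#   layer 1: (4 - 1) * 2 + 4 = 10
#   layer 2: (10 - 1) * 2 + 4 = 22
# The asserts therefore only pass when layer_params is chosen so the final
# size lands exactly on (data_height, data_width); here that would be 22 x 22.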
def discriminate(self, x_var, reuse=False):
    with tf.variable_scope("discriminator") as scope:
        if reuse:
            scope.reuse_variables()
        conv1 = lrelu(conv2d(x_var, output_dim=64, name='dis_conv1'))
        conv2 = lrelu(instance_norm(conv2d(conv1, output_dim=128, name='dis_conv2'), scope='dis_bn1'))
        conv3 = lrelu(instance_norm(conv2d(conv2, output_dim=256, name='dis_conv3'), scope='dis_bn2'))
        conv4 = conv2d(conv3, output_dim=512, name='dis_conv4')
        middle_conv = conv4
        conv4 = lrelu(instance_norm(conv4, scope='dis_bn3'))
        conv5 = lrelu(instance_norm(conv2d(conv4, output_dim=1024, name='dis_conv5'), scope='dis_bn4'))
        conv6 = conv2d(conv5, output_dim=2, k_w=4, k_h=4, d_h=1, d_w=1, padding='VALID', name='dis_conv6')
        return conv6, middle_conv
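# A minimal sketch of instance normalization as called above, assuming the
# standard formulation (Ulyanov et al.): each channel of each sample is
# normalized over its spatial dimensions, with a learned scale and offset.
# The epsilon and initializers here are assumptions.
def instance_norm_sketch(x, scope='instance_norm'):
    with tf.variable_scope(scope):
        depth = x.get_shape()[-1]
        scale = tf.get_variable('scale', [depth], initializer=tf.ones_initializer())
        offset = tf.get_variable('offset', [depth], initializer=tf.zeros_initializer())
        mean, var = tf.nn.moments(x, axes=[1, 2], keep_dims=True)
        return scale * (x - mean) / tf.sqrt(var + 1e-5) + offset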
def D(img, scope='Discriminator', reuse=True):
    with tf.variable_scope(scope, reuse=reuse) as scope:
        if not reuse: log.warn(scope.name)
        d_1 = conv2d(img, conv_info[0], is_train, name='d_1_conv')
        d_1 = slim.dropout(d_1, keep_prob=0.5, is_training=is_train, scope='d_1_conv/')
        if not reuse: log.info('{} {}'.format(scope.name, d_1))
        d_2 = conv2d(d_1, conv_info[1], is_train, name='d_2_conv')
        d_2 = slim.dropout(d_2, keep_prob=0.5, is_training=is_train, scope='d_2_conv/')
        if not reuse: log.info('{} {}'.format(scope.name, d_2))
        d_3 = conv2d(d_2, conv_info[2], is_train, name='d_3_conv')
        d_3 = slim.dropout(d_3, keep_prob=0.5, is_training=is_train, scope='d_3_conv/')
        if not reuse: log.info('{} {}'.format(scope.name, d_3))
        d_4 = slim.fully_connected(
            tf.reshape(d_3, [self.batch_size, -1]), n + 1, scope='d_4_fc', activation_fn=None)
        if not reuse: log.info('{} {}'.format(scope.name, d_4))
        output = d_4
        assert output.get_shape().as_list() == [self.batch_size, n + 1]
        pred_rf = tf.reshape(tf.sigmoid(output[:, -1]), [self.batch_size, 1])
        if self.config.model in ('VA', 'BOTH'):
            ...
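# How an (n+1)-way head like d_4 above is typically consumed (a sketch; the
# first n logits are class scores and the last one the real/fake score, as
# the pred_rf line already shows):
#   pred_class = tf.nn.softmax(output[:, :n])   # [batch, n] class probabilities
#   pred_rf    = tf.sigmoid(output[:, -1:])     # [batch, 1] real/fake probability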
def discriminator(self, image, y=None, reuse=False):
    """Defines the D network structure."""
    with tf.variable_scope("discriminator") as scope:
        if reuse:
            scope.reuse_variables()
        if not self.y_dim:
            h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
            h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim * 2, name='d_h1_conv')))
            h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim * 4, name='d_h2_conv')))
            h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim * 8, name='d_h3_conv')))
            h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h4_lin')
            return tf.nn.sigmoid(h4), h4
        else:
            # Conditional path: append the label map to the input and to
            # the intermediate representations.
            yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim])
            x = conv_cond_concat(image, yb)
            h0 = lrelu(conv2d(x, self.c_dim + self.y_dim, name='d_h0_conv'))
            h0 = conv_cond_concat(h0, yb)
            h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim + self.y_dim, name='d_h1_conv')))
            h1 = tf.reshape(h1, [self.batch_size, -1])
            h1 = concat([h1, y], 1)
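# A common definition of the conv_cond_concat helper used above (a sketch
# matching the classic DCGAN utilities; the project's version may differ):
# broadcast the [N, 1, 1, y_dim] label block across the spatial grid and
# append it as extra channels.
def conv_cond_concat_sketch(x, y):
    x_shape = x.get_shape().as_list()    # static [N, H, W, C]
    y_dim = int(y.get_shape()[-1])
    ones = tf.ones([x_shape[0], x_shape[1], x_shape[2], y_dim])
    return tf.concat([x, y * ones], axis=3)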
def create(self, inputs, kernel_size=None, seed=None, reuse_variables=None):
    output = inputs
    with tf.variable_scope(self.name, reuse=reuse_variables):
        for index, kernel in enumerate(self.kernels):
            # Do not use batch norm in the first layer.
            bnorm = index != 0
            name = 'conv' + str(index)
            output = conv2d(
                inputs=output,
                name=name,
                kernel_size=kernel_size,
                filters=kernel[0],
                strides=kernel[1],
                bnorm=bnorm,
                activation=tf.nn.leaky_relu,
                seed=seed
            )
            if kernel[2] > 0:
                output = tf.nn.dropout(output, keep_prob=1 - kernel[2],
                                       name='dropout_' + name, seed=seed)
        output = conv2d(
            inputs=output,
            name='conv_last',
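# Hypothetical usage, assuming each entry of self.kernels is a
# (filters, strides, dropout_rate) triple, which is how the loop above reads
# kernel[0], kernel[1], and kernel[2]; the class name and constructor are
# illustrative only:
#   net = ConvStack(name='encoder', kernels=[(64, 2, 0.0), (128, 2, 0.25)])
#   features = net.create(images, kernel_size=5, seed=42)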
# for 4
g_deconv_4_1 = tf.nn.relu(instance_norm(de_conv(g_deconv1,
    output_shape=[self.batch_size, self.output_size, self.output_size, sn],
    name='gen_deconv4_1'), scope='gen_in_4_1'))
g_deconv_4_1_x = tf.concat([g_deconv_4_1, x], axis=3)
g_deconv_4_2 = conv2d(g_deconv_4_1_x, output_dim=32, k_w=3, k_h=3, d_h=1, d_w=1,
                      name='gen_conv_4_2')
x_tilde4 = conv2d(g_deconv_4_2, output_dim=3, k_h=3, k_w=3, d_h=1, d_w=1, name='gen_conv_4_3')

# for 5
g_deconv_5_1 = tf.nn.relu(instance_norm(de_conv(g_deconv1,
    output_shape=[self.batch_size, self.output_size, self.output_size, sn],
    name='gen_deconv5_1'), scope='gen_in_5_1'))
g_deconv_5_1_x = tf.concat([g_deconv_5_1, x], axis=3)
g_deconv_5_2 = conv2d(g_deconv_5_1_x, output_dim=32, k_w=3, k_h=3, d_h=1, d_w=1,
                      name='gen_conv_5_2')
x_tilde5 = conv2d(g_deconv_5_2, output_dim=3, k_h=3, k_w=3, d_h=1, d_w=1, name='gen_conv_5_3')

# for 6
g_deconv_6_1 = tf.nn.relu(instance_norm(de_conv(g_deconv1,
    output_shape=[self.batch_size, self.output_size, self.output_size, sn],
    name='gen_deconv6_1'), scope='gen_in_6_1'))
g_deconv_6_1_x = tf.concat([g_deconv_6_1, x], axis=3)
g_deconv_6_2 = conv2d(g_deconv_6_1_x, output_dim=32, k_w=3, k_h=3, d_h=1, d_w=1,
                      name='gen_conv_6_2')
x_tilde6 = conv2d(g_deconv_6_2, output_dim=3, k_h=3, k_w=3, d_h=1, d_w=1, name='gen_conv_6_3')

# for 7
g_deconv_7_1 = tf.nn.relu(instance_norm(de_conv(g_deconv1,
    output_shape=[self.batch_size, self.output_size, self.output_size, sn],
    name='g_deconv_7_1'), scope='gen_in_7_1'))
g_deconv_7_1_x = tf.concat([g_deconv_7_1, x], axis=3)
x_tilde7 = conv2d(g_deconv_7_1_x, output_dim=self.channel, k_w=7, k_h=7, d_h=1, d_w=1, name='gen_conv_7_2')
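# A minimal sketch of a de_conv (transposed convolution) helper compatible
# with the calls above, in the common TF1 style; the kernel size, strides,
# and initializer here are assumptions rather than the project's values.
def de_conv_sketch(x, output_shape, k_h=4, k_w=4, d_h=2, d_w=2, name='de_conv'):
    with tf.variable_scope(name):
        # Filter layout for conv2d_transpose: [h, w, out_channels, in_channels].
        w = tf.get_variable(
            'w', [k_h, k_w, output_shape[-1], int(x.get_shape()[-1])],
            initializer=tf.random_normal_initializer(stddev=0.02))
        return tf.nn.conv2d_transpose(x, w, output_shape=output_shape,
                                      strides=[1, d_h, d_w, 1])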