conv1 = tf.layers.conv2d(x, 256, [5, 5], kernel_initializer=k_in(5e-2))
conv1 = tf.nn.relu(conv1)
conv1 = tf.layers.batch_normalization(conv1)
conv1 = tf.layers.max_pooling2d(conv1, pool_size=[3, 3], strides=[2, 2], padding='valid')
conv2 = tf.layers.conv2d(conv1, 256, [4, 4], padding='valid', kernel_initializer=k_in(5e-2))
conv2 = tf.nn.relu(conv2)
conv2 = tf.layers.batch_normalization(conv2)
conv3 = tf.layers.conv2d(conv2, 512, [3, 3], padding='valid', kernel_initializer=k_in(5e-2))
conv3 = tf.nn.relu(conv3)
conv3 = tf.layers.batch_normalization(conv3)
conv4 = tf.layers.conv2d(conv3, 1024, [2, 2], padding='valid', kernel_initializer=k_in(5e-2))
conv4 = tf.nn.relu(conv4)
conv4 = tf.layers.batch_normalization(conv4)
conv4 = tf.layers.max_pooling2d(conv4, 3, strides=[2, 2], padding='valid')
conv_flat = tf.reshape(conv4, [-1, np.prod(conv4.get_shape().as_list()[1:])])
dense1 = tf.layers.dense(conv_flat, 1024, activation=tf.nn.relu, kernel_initializer=k_in(.004))
dense1 = tf.layers.batch_normalization(dense1)
dense2 = tf.layers.dense(dense1, 512, activation=tf.nn.relu, kernel_initializer=k_in(.004))
dense2 = tf.layers.batch_normalization(dense2)
logits = tf.layers.dense(dense2, data.NUM_CLASSES, kernel_initializer=k_in(1./200))
softmax = tf.nn.softmax(logits, axis=1, name='softmax')
return logits, softmax
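# The model above assumes a `k_in` helper that builds kernel initializers;
# a minimal sketch under that assumption (not from the original source):
def k_in(stddev):
    # Truncated-normal initializer with the given standard deviation.
    return tf.truncated_normal_initializer(stddev=stddev)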
pool2 = tf.nn.max_pool(conv2,  # head reconstructed; input name `conv2` assumed
                       ksize=[1, 3, 3, 1],
                       strides=[1, 2, 2, 1],
                       padding='VALID',
                       name='pool2')
print_activations(pool2)
# conv3
with tf.name_scope('conv3') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 192, 384],
dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[384], dtype=tf.float32),
trainable=True, name='biases')
bias = tf.nn.bias_add(conv, biases)
conv3 = tf.nn.relu(bias, name=scope)
parameters += [kernel, biases]
print_activations(conv3)
# conv4
with tf.name_scope('conv4') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 384, 256],
dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
trainable=True, name='biases')
bias = tf.nn.bias_add(conv, biases)
conv4 = tf.nn.relu(bias, name=scope)
parameters += [kernel, biases]
print_activations(conv4)
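# `print_activations` is assumed to log each tensor's name and static shape,
# as in the TensorFlow AlexNet benchmark; a minimal sketch:
def print_activations(t):
    print(t.op.name, ' ', t.get_shape().as_list())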
def focusFnn(x): # todo: only update the last layer, init with pre-trained values
with tf.name_scope("focusFnn"):
# same weights as decoder
x_drop = tf.nn.dropout(x, keep_prob_hidden)
layer_1 = tf.nn.relu(tf.add(tf.matmul(x_drop, decoder_params['w1']),
decoder_params['b1']))
layer_1_drop = tf.nn.dropout(layer_1, keep_prob_hidden)
layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1_drop, decoder_params['w2']),
decoder_params['b2']))
# focusFnn weights on last layer
layer_2_drop = tf.nn.dropout(layer_2, keep_prob_hidden)
layer_3 = tf.nn.relu(tf.add(tf.matmul(layer_2_drop, focusFnn_params['w1']),
focusFnn_params['b1']))
variable_summaries('decoder_w1', decoder_params['w1'])
variable_summaries('decoder_w2', decoder_params['w2'])
variable_summaries('decoder_b1', decoder_params['b1'])
variable_summaries('decoder_b2', decoder_params['b2'])
variable_summaries('decoder_a1', layer_1)
variable_summaries('decoder_a2', layer_2)
variable_summaries('fnn_w1', focusFnn_params['w1'])
variable_summaries('fnn_b1', focusFnn_params['b1'])
variable_summaries('fnn_a1', layer_3)
return layer_3
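# `variable_summaries` is assumed to attach TensorBoard summaries under the
# given tag; a minimal sketch matching the (name, tensor) call order above:
def variable_summaries(name, var):
    with tf.name_scope(name):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        tf.summary.scalar('stddev', tf.sqrt(tf.reduce_mean(tf.square(var - mean))))
        tf.summary.histogram('histogram', var)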
# Convolve
conv1 = slim.conv2d(
inputs=scaledImage, num_outputs=32, kernel_size=[8, 8], stride=[4, 4],
padding='VALID', biases_initializer=None)
conv2 = slim.conv2d(
inputs=conv1, num_outputs=64, kernel_size=[4, 4], stride=[2, 2],
padding='VALID', biases_initializer=None)
conv3 = slim.conv2d(
inputs=conv2, num_outputs=64, kernel_size=[3, 3], stride=[1, 1],
padding='VALID', biases_initializer=None)
# Flatten and Feedforward
flattened = tf.contrib.layers.flatten(conv3)
net = tf.contrib.layers.fully_connected(
inputs=flattened,
num_outputs=self.h_size,
activation_fn=tf.nn.relu)
return net
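# With the standard 84x84x4 Atari input (an assumption; the snippet itself is
# size-agnostic), the three VALID strided convs give 20x20x32 -> 9x9x64 ->
# 7x7x64, so `flattened` has 7*7*64 = 3136 units before the dense layer.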
from analysis import rocstories as rocstories_analysis
from text_utils import TextEncoder
from utils import encode_dataset, flatten, iter_data, find_trainable_variables, get_ema_vars, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path
def gelu(x):
return 0.5*x*(1+tf.tanh(math.sqrt(2/math.pi)*(x+0.044715*tf.pow(x, 3))))
def swish(x):
return x*tf.nn.sigmoid(x)
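# gelu above is the tanh approximation of the Gaussian Error Linear Unit
# (Hendrycks & Gimpel, 2016); swish is x * sigmoid(x) (Ramachandran et al., 2017).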
opt_fns = {
'adam':adam,
}
act_fns = {
'relu':tf.nn.relu,
'swish':swish,
'gelu':gelu
}
lr_schedules = {
'warmup_cosine':warmup_cosine,
'warmup_linear':warmup_linear,
'warmup_constant':warmup_constant,
}
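# The dicts act as string-keyed registries; a typical lookup, assuming a
# hypothetical `args` namespace populated from the command line:
# activation = act_fns[args.afn]              # e.g. args.afn == 'gelu'
# schedule   = lr_schedules[args.lr_schedule] # e.g. 'warmup_linear'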
def _norm(x, g=None, b=None, e=1e-5, axis=[1]):
u = tf.reduce_mean(x, axis=axis, keep_dims=True)
s = tf.reduce_mean(tf.square(x-u), axis=axis, keep_dims=True)
x = (x - u) * tf.rsqrt(s + e)
if g is not None and b is not None:
    x = x*g + b
return x
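# _norm is layer normalization over `axis`; g and b are learned gain and bias.
# A hypothetical call on features along the last dimension:
# h = _norm(h, g, b, axis=[-1])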
# conv1 head reconstructed: weights/bias assumed for a 32x32x1 input (input
# name `x` assumed), consistent with the 400-unit flatten below
conv1_w = tf.Variable(
    tf.truncated_normal(shape=[5, 5, 1, 6], mean=mu, stddev=sigma))
conv1_b = tf.Variable(tf.zeros(6))
conv1 = tf.nn.conv2d(
    x, conv1_w, strides=[1, 1, 1, 1], padding='VALID') + conv1_b
conv1 = tf.nn.relu(conv1)
pool_1 = tf.nn.max_pool(conv1,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='VALID')
conv2_w = tf.Variable(
tf.truncated_normal(shape=[5, 5, 6, 16], mean=mu, stddev=sigma))
conv2_b = tf.Variable(tf.zeros(16))
conv2 = tf.nn.conv2d(
pool_1, conv2_w, strides=[1, 1, 1, 1], padding='VALID') + conv2_b
conv2 = tf.nn.relu(conv2)
pool_2 = tf.nn.max_pool(conv2,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='VALID')
fc1 = flatten(pool_2)
fc1_w = tf.Variable(
tf.truncated_normal(shape=(400, 120), mean=mu, stddev=sigma))
fc1_b = tf.Variable(tf.zeros(120))
fc1 = tf.matmul(fc1, fc1_w) + fc1_b
fc1 = tf.nn.relu(fc1)
# continuation assumed: the classic LeNet 120 -> 84 fully connected layer
fc2_w = tf.Variable(
    tf.truncated_normal(shape=(120, 84), mean=mu, stddev=sigma))
fc2_b = tf.Variable(tf.zeros(84))
fc2 = tf.matmul(fc1, fc2_w) + fc2_b
# head reconstructed: the dense layer producing h1; input name `z` is an
# assumption, and the unit count is inferred from the reshape below
h1 = tf.layers.dense(inputs=z, units=self.dim_16[0] * self.dim_16[1] * 64,
                     kernel_initializer=self.initializer,
                     name='dense_1', reuse=scope.reuse)
h1_relu = tf.nn.relu(self.normalize(h1))
h1_reshape = tf.reshape(h1_relu, shape=[self.batch_size, self.dim_16[0], self.dim_16[1], 64])
h1_concat = tf.concat(axis=3, values=[h1_reshape,yneed_4])
h2 = tf.layers.conv2d_transpose(inputs=h1_concat, filters = 64,
kernel_size=[5,5], strides=[2,2], padding='SAME', activation=None,
kernel_initializer=self.initializer,
reuse=scope.reuse,name='conv_1')
h2_relu = tf.nn.relu(self.normalize(h2))
h2_concat = tf.concat(axis=3, values=[h2_relu, yneed_3])
h3 = tf.layers.conv2d_transpose(inputs=h2_concat, filters = 32,
kernel_size=[5,5], strides=[2,2], padding='SAME', activation=None,
kernel_initializer=self.initializer,
reuse=scope.reuse,name='conv_2')
h3_relu = tf.nn.relu(self.normalize(h3))
h3_concat = tf.concat(axis=3, values=[h3_relu, yneed_1])
h4 = tf.layers.conv2d_transpose(inputs=h3_concat, filters = 32,
kernel_size=[5,5], strides=[2,2], padding='SAME', activation=None,
kernel_initializer=self.initializer,
reuse=scope.reuse,name='conv_3')
h4_relu = tf.nn.relu(self.normalize(h4))
h4_concat = tf.concat(axis=3, values=[h4_relu, yneed_2])
h5 = tf.layers.conv2d_transpose(inputs=h4_concat, filters=self.dim_channel,
kernel_size=[4,4],strides=[2,2], padding='SAME', activation=None,
reuse=scope.reuse, name="conv_4")
return tf.nn.sigmoid(h5)
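# Each of the four stride-2 transposed convs (h2..h5) doubles the spatial
# size, a 16x upsampling overall; with dim_16 = [16, 16] (an assumption) the
# sigmoid output is a 256x256 image in [0, 1].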
def encoder_image(self, image, scope):
res2 = res_block(dconv2, 'res2', self.is_train, cfg.norm)
# output shape: [14, 14, 256]
with tf.variable_scope('dconv3'):
    #feat14 = tf.nn.relu(norm(conv2d(feat14, 128, 'feat14', kernel_size=1),self.is_train,'norm3_1'))
    dconv3 = tf.nn.relu(norm(deconv2d(res2, 128, 'dconv3',
        kernel_size=4, strides=2), self.is_train, 'norm3_2'))
res3 = res_block(dconv3, 'res3',self.is_train, cfg.norm)
#output shape: [28, 28, 128]
with tf.variable_scope('dconv4'):
#feat28 = tf.nn.relu(norm(conv2d(feat28, 64, 'feat28', kernel_size=1),self.is_train,'norm4_1'))
dconv4 = tf.nn.relu(norm(deconv2d(res3, 64, 'dconv4',
kernel_size=4, strides = 2),self.is_train,'norm4_2'))
res4 = res_block(dconv4, 'res4',self.is_train, cfg.norm)
#output shape: [56, 56, 64]
with tf.variable_scope('dconv5'):
dconv5 = tf.nn.relu(norm(deconv2d(res4, 32, 'dconv5', kernel_size=4, strides = 2),self.is_train,'norm5'))
res5 = res_block(dconv5, 'res5',self.is_train, cfg.norm)
#output shape: [112, 112, 32]
with tf.variable_scope('dconv6'):
dconv6 = tf.nn.relu(norm(deconv2d(res5, 32, 'dconv6', kernel_size=4, strides = 2),self.is_train,'norm6'))
res6 = res_block(dconv6, 'res6',self.is_train, cfg.norm)
#output shape: [224, 224, 32]
with tf.variable_scope('cw_conv'):
gen = tf.nn.tanh(conv2d(res6, 3, 'pw_conv', kernel_size=1, strides = 1))
return (gen + 1) * 127.5
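# tanh keeps `gen` in [-1, 1]; (gen + 1) * 127.5 rescales it to [0, 255].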
# conv5
with tf.name_scope('conv5') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256],
dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(conv4, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
trainable=True, name='biases')
bias = tf.nn.bias_add(conv, biases)
conv5 = tf.nn.relu(bias, name=scope)
parameters += [kernel, biases]
print_activations(conv5)
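# conv3-conv5 follow the AlexNet pattern: 3x3 SAME convolutions with stride 1,
# each followed by bias-add and ReLU. Assuming the 224x224 AlexNet input, where
# pool2 is [batch, 13, 13, 192], the shapes are:
# conv3: [batch, 13, 13, 384], conv4: [batch, 13, 13, 256], conv5: [batch, 13, 13, 256]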
def convLayer(x, filter_size=5, filter_depth=64, pool_size=2):
    # SAME conv + bias + ReLU, then max-pool; halves H and W when pool_size=2.
    x_depth = x.get_shape()[-1].value
    W = weight_variable([filter_size, filter_size, x_depth, filter_depth])
    conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
    b = bias_variable([filter_depth])
    relu = tf.nn.relu(conv + b)
    pool = tf.nn.max_pool(relu,
                          ksize=[1, pool_size, pool_size, 1],
                          strides=[1, pool_size, pool_size, 1],
                          padding='SAME')
    return pool
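# Hypothetical usage, assuming `weight_variable`/`bias_variable` create
# tf.Variables of the given shape (e.g. truncated-normal weights, small
# constant biases):
# x  = tf.placeholder(tf.float32, [None, 28, 28, 1])
# h1 = convLayer(x,  filter_depth=32)   # -> [None, 14, 14, 32]
# h2 = convLayer(h1, filter_depth=64)   # -> [None, 7, 7, 64]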