import os
import tensorflow as tf


def build_graph(test=False):
    with tf.name_scope('imgholder'):  # The placeholder is just a holder and doesn't contain the actual data.
        imgholder = tf.placeholder(tf.float32, [None, 256, 256, 3])  # The 3 is the number of color channels.
    with tf.name_scope('bias_holder'):
        bias_holder = tf.placeholder(tf.float32, [None, 16, 16, 4])  # The bias (x, y, w, h) for the 16x16 feature maps.
    with tf.name_scope('conf_holder'):
        conf_holder = tf.placeholder(tf.float32, [None, 16, 16, 1])  # The confidence for the 16x16 feature maps.
    with tf.name_scope('croppedholder'):
        croppedholder = tf.placeholder(tf.float32, [None, 32, 32, 3])  # 32x32 RGB crops fed to the verification network.
    with tf.name_scope('veri_conf_holder2'):
        veri_conf_holder = tf.placeholder(tf.float32, [None, 1])
    # with tf.name_scope('veri_bias_holder'):
    #     veri_bias_holder = tf.placeholder(tf.float32, [None, 4])  # The verification outputs: x, y, w, h.
    with tf.name_scope('mask'):
        maskholder = tf.placeholder(tf.float32, [None, 16, 16, 1])

    # RPN and verify_net are network builders defined elsewhere in the project.
    conf, bias, feature_map = RPN(imgholder, test)
    veri_conf = verify_net(croppedholder, test)

    bias_loss = tf.reduce_sum(tf.reduce_mean(tf.square(bias * conf_holder - bias_holder), axis=0))
    conf_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=conf, labels=conf_holder))
    # veri_bias_loss = tf.reduce_sum(tf.reduce_mean(tf.square(veri_bias * veri_conf_holder - veri_bias_holder), axis=0))
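
    # Sketch (assumption, not from the original source): a verification loss and a
    # combined objective would typically complete this graph; the Adam learning rate
    # below is illustrative.
    veri_conf_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=veri_conf, labels=veri_conf_holder))
    total_loss = conf_loss + bias_loss + veri_conf_loss
    train_step = tf.train.AdamOptimizer(1e-4).minimize(total_loss)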
if not os.path.isdir(filewriter_path): os.mkdir(filewriter_path)
if not os.path.isdir(checkpoint_path): os.mkdir(checkpoint_path)

x = tf.placeholder(tf.float32, [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3])
y = tf.placeholder(tf.float32, [None, num_classes])

model = AlexNet(x, num_classes)
score = model.fc5

# Only fine-tune the variables that belong to the layers listed in train_layers.
var_list = [v for v in tf.trainable_variables() if v.name.split('/')[0] in train_layers]

initial_x_batch = tf.placeholder(tf.float32, [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3])
dist_x_batch = distorted_batch(initial_x_batch, IMAGE_SIZE)

with tf.name_scope("cross_ent"):
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        logits=score, labels=y))

with tf.name_scope('train'):
    gradients = tf.gradients(loss, var_list)
    gradients = list(zip(gradients, var_list))
    global_step = tf.Variable(0, trainable=False)  # Step counter; advanced by apply_gradients, not trained.
    learning_rate = tf.train.exponential_decay(initial_learning_rate,
                                               global_step,
                                               decay_steps,
                                               learning_rate_decay_factor,
                                               staircase=True)
    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
    train_op = optimizer.apply_gradients(grads_and_vars=gradients, global_step=global_step)
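
# Usage sketch (assumption, not from the original source): one training step feeds a
# raw batch through the distortion pipeline and then runs train_op. `raw_images` and
# `labels` are illustrative numpy arrays of shape [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3]
# and [batch_size, num_classes].
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    distorted = sess.run(dist_x_batch, feed_dict={initial_x_batch: raw_images})
    _, batch_loss = sess.run([train_op, loss], feed_dict={x: distorted, y: labels})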
def max_pool_2x2(x):  # Only the strides/padding line appears in the excerpt; the rest is the assumed standard form.
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
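
# The layers below also call weight_variable, bias_variable and conv2d, which are not
# defined in this excerpt. The definitions here are a sketch of the usual TF1 helpers,
# not the original implementations.
def weight_variable(shape, name=None):
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1), name=name)


def bias_variable(shape, name=None):
    return tf.Variable(tf.constant(0.1, shape=shape), name=name)


def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')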
with tf.name_scope("Input"):
x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
x_ = tf.image.random_brightness(x, 5)
x_ = tf.image.random_contrast(x_, lower=0.2, upper=1.8)
with tf.name_scope("gt"):
y_ = tf.placeholder(tf.float32, shape=[None, 8])
with tf.name_scope("Conv1"):
W_conv1 = weight_variable([5, 5, 3, 20], name="W_conv1")
b_conv1 = bias_variable([20], name="b_conv1")
h_conv1 = tf.nn.relu(conv2d(x_, W_conv1) + b_conv1)
with tf.name_scope("MaxPool1"):
h_pool1 = max_pool_2x2(h_conv1)
with tf.name_scope("Conv2"):
W_conv2 = weight_variable([5, 5, 20, 40], name="W_conv2")
b_conv2 = bias_variable([40], name="b_conv2")
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
with tf.name_scope("Conv2_1"):
W_conv2_1 = weight_variable([5, 5, 40, 40], name="W_conv2_1")
b_conv2_1= bias_variable([40], name="b_conv2_1")
h_conv2_1 = tf.nn.relu(conv2d(h_conv2, W_conv2_1) + b_conv2_1)
with tf.name_scope("MaxPool2"):
h_pool2 = max_pool_2x2(h_conv2_1)
with tf.name_scope("Conv3"):
W_conv3 = weight_variable([5, 5, 40, 60], name="W_conv3")
b_conv3 = bias_variable([60], name="b_conv3")
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
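
# Shape sketch (assumption, not from the original source): with SAME padding and two
# 2x2 poolings, the 32x32 input ends up as an 8x8 map with 60 channels.
print(h_conv3.get_shape())  # (?, 8, 8, 60)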
# Number of negative entries to select.
max_neg_entries = tf.cast(tf.reduce_sum(fnmask), tf.int32)
n_neg = tf.cast(negative_ratio * n_positives, tf.int32)
n_neg = tf.minimum(n_neg, max_neg_entries)
# Keep n_neg at least 1 so that tf.nn.top_k below does not fail with k == 0.
n_neg = tf.maximum(n_neg, 1)

val, idxes = tf.nn.top_k(-nvalues_flat, k=n_neg)
max_hard_pred = -val[-1]
# Final negative mask: hard negative mining.
nmask = tf.logical_and(nmask, nvalues <= max_hard_pred)
fnmask = tf.cast(nmask, dtype)

# Add cross-entropy loss.
with tf.name_scope('cross_entropy_pos'):
    total_cross_pos = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=gclasses)
    total_cross_pos = tf.reduce_sum(total_cross_pos * fpmask, name="cross_entropy_pos")
    tf.losses.add_loss(total_cross_pos)

with tf.name_scope('cross_entropy_neg'):
    total_cross_neg = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=no_classes)
    total_cross_neg = tf.reduce_sum(total_cross_neg * fnmask, name="cross_entropy_neg")
    tf.losses.add_loss(total_cross_neg)

# Add localization loss: smooth L1, L2, ...
with tf.name_scope('localization'):
    # Weights tensor: positive mask + random negative.
    weights = tf.expand_dims(alpha * fpmask, axis=-1)
    total_loc = custom_layers.abs_smooth_2(localisations - glocalisations)
    total_loc = tf.reduce_sum(total_loc * weights, name="localization")
    tf.losses.add_loss(total_loc)
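
# Usage sketch (assumption, not from the original source): because the three terms are
# registered with tf.losses.add_loss, the overall training objective is usually
# recovered with tf.losses.get_total_loss(), which also folds in regularization losses.
total_loss = tf.losses.get_total_loss()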
if os.path.exists(config.use_output_path):
    os.system('rm ' + config.use_output_path)
if os.path.exists(config.use_log_path):
    os.system('rm ' + config.use_log_path)

if config.mode == 'forward' or config.mode == 'use':
    with tf.name_scope("forward_train"):
        with tf.variable_scope("forward", reuse=None):
            m_forward = PTBModel(is_training=True)
    with tf.name_scope("forward_test"):
        with tf.variable_scope("forward", reuse=True):
            mtest_forward = PTBModel(is_training=False)
    var = tf.trainable_variables()
    var_forward = [x for x in var if x.name.startswith('forward')]
    saver_forward = tf.train.Saver(var_forward, max_to_keep=1)

if config.mode == 'backward' or config.mode == 'use':
    with tf.name_scope("backward_train"):
        with tf.variable_scope("backward", reuse=None):
            m_backward = PTBModel(is_training=True)
    with tf.name_scope("backward_test"):
        with tf.variable_scope("backward", reuse=True):
            mtest_backward = PTBModel(is_training=False)
    var = tf.trainable_variables()
    var_backward = [x for x in var if x.name.startswith('backward')]
    saver_backward = tf.train.Saver(var_backward, max_to_keep=1)

init = tf.global_variables_initializer()

configs = tf.ConfigProto()
configs.gpu_options.allow_growth = True
with tf.Session(config=configs) as session:
    session.run(init)
    if config.mode == 'forward':
output = tf.layers.dense(hidden, self.num_classes,  # (assumed call head; only the two initializers below are from the excerpt)
                         kernel_initializer=tf.glorot_uniform_initializer(),
                         bias_initializer=tf.zeros_initializer())
with tf.name_scope("Loss"):
self.loss_op = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.y, logits=output),
reduction_indices=0)
with tf.name_scope("Optimize"):
self.train_op = tf.train.MomentumOptimizer(self.learning_rate,momentum=0.9).minimize(self.loss_op)
if self.max_l2_norm is not None:
clip_op=[var.assign(tf.clip_by_norm(var, clip_norm=self.max_l2_norm))
for var in tf.trainable_variables()]
self.train_op=tf.group([self.train_op,clip_op])
with tf.name_scope("Accuracy"):
self.predicted = tf.argmax(tf.nn.softmax(output), axis=1, output_type=tf.int32)
correct_or_not = tf.equal(self.predicted, self.y)
self.acc_op = tf.reduce_mean(tf.cast(correct_or_not, tf.float32))
with tf.name_scope("Summaries"):
loss = None
accuracy = None
self.loss_accuracy_summary = tf.Summary()
self.loss_accuracy_summary.value.add(tag='Loss', simple_value=loss)
self.loss_accuracy_summary.value.add(tag='Accuracy', simple_value=accuracy)
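
# Usage sketch (assumption, not from the original source): at run time the two
# simple_value fields are filled in and the protobuf handed to a tf.summary.FileWriter.
# `summary_writer`, `batch_loss`, `batch_accuracy` and `step` are illustrative names.
self.loss_accuracy_summary.value[0].simple_value = batch_loss
self.loss_accuracy_summary.value[1].simple_value = batch_accuracy
summary_writer.add_summary(self.loss_accuracy_summary, global_step=step)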
def pad_up_to(value, size, axis, name=None):
    """Pad a tensor with zeros on the right along axis to at least the given size.

    Args:
      value: Tensor to pad.
      size: Minimum size along axis.
      axis: A nonnegative integer.
      name: Optional name for this operation.

    Returns:
      Padded value.
    """
    with tf.name_scope(name, 'pad_up_to') as name:
        value = tf.convert_to_tensor(value, name='value')
        axis = tf.convert_to_tensor(axis, name='axis')
        need = tf.nn.relu(size - tf.shape(value)[axis])
        ids = tf.stack([tf.stack([axis, 1])])
        paddings = tf.sparse_to_dense(ids, tf.stack([tf.rank(value), 2]), need)
        padded = tf.pad(value, paddings, name=name)
        # Fix shape inference.
        axis = tf.contrib.util.constant_value(axis)
        shape = value.get_shape()
        if axis is not None and shape.ndims is not None:
            shape = shape.as_list()
            shape[axis] = None
            padded.set_shape(shape)
        return padded
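
# Usage sketch (assumption, not from the original source): pad a [2, 3] tensor to at
# least width 5 along axis 1; the missing positions are filled with zeros on the right.
batch = tf.constant([[1, 2, 3],
                     [4, 5, 6]])
padded = pad_up_to(batch, size=5, axis=1)  # shape becomes [2, 5]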
with tf.name_scope('adv_train'):
    self.adv_learning_rate_input = tf.placeholder(
        tf.float32, [], name='adv_learning_rate_input')
    self.adv_optimizer = tf.train.GradientDescentOptimizer(
        self.adv_learning_rate_input)
    self.adv_train_step = self.adv_optimizer.minimize(self.adv_cross_entropy_mean)

self.adv_predicted_indices = tf.argmax(net_output[1], 1)
self.adv_expected_indices = tf.argmax(self.noise_labels, 1)
self.adv_correct_prediction = tf.equal(self.adv_predicted_indices, self.adv_expected_indices)
self.adv_confusion_matrix = tf.confusion_matrix(self.adv_expected_indices, self.adv_predicted_indices,
                                                num_classes=self.noise_label_count)
self.adv_evaluation_step = tf.reduce_mean(tf.cast(self.adv_correct_prediction, tf.float32))
else:
    with tf.name_scope('cross_entropy'):
        self.cross_entropy_mean = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(
                labels=self.ground_truth_input, logits=net_output))
    tf.summary.scalar('cross_entropy', self.cross_entropy_mean)

    with tf.name_scope('train'), tf.control_dependencies(control_dependencies):
        self.learning_rate_input = tf.placeholder(
            tf.float32, [], name='learning_rate_input')
        self.optimizer = tf.train.AdamOptimizer(
            self.learning_rate_input)
        self.grads_and_vars = self.optimizer.compute_gradients(self.cross_entropy_mean)
        self.train_step = self.optimizer.apply_gradients(self.grads_and_vars)
        # self.train_step = self.optimizer.minimize(self.cross_entropy_mean)

    self.predicted_indices = tf.argmax(net_output, 1)
    self.expected_indices = tf.argmax(self.ground_truth_input, 1)
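    # Sketch (assumption; the excerpt stops here): mirroring the adversarial branch above,
    # an evaluation op would typically be derived from the two index tensors.
    self.correct_prediction = tf.equal(self.predicted_indices, self.expected_indices)
    self.evaluation_step = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))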
def _calc_loss(self, targets, final_dists):
    with tf.name_scope('loss'):
        dec = tf.shape(targets)[1]
        batch_nums = tf.shape(targets)[0]
        dec = tf.range(0, limit=dec)
        dec = tf.expand_dims(dec, axis=0)
        dec = tf.tile(dec, [batch_nums, 1])
        # Pair each decoder position with its target id so gather_nd can pick the
        # predicted probability of the gold token at every step.
        indices = tf.stack([dec, targets], axis=2)  # [batch_size, dec, 2]
        loss = tf.map_fn(fn=lambda x: tf.gather_nd(x[1], x[0]), elems=(indices, final_dists), dtype=tf.float32)
        loss = -tf.log(loss)
        # Mask out padding positions before averaging.
        nonpadding = tf.to_float(tf.not_equal(targets, self.token2idx["<pad>"]))  # 0: <pad>
        loss = tf.reduce_sum(loss * nonpadding) / (tf.reduce_sum(nonpadding) + 1e-7)
        return loss
def dense(inputs, hidden, name_scope, use_bias=True, scope="dense"):
    with tf.name_scope(name_scope):
        with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
            shape = tf.shape(inputs)
            dim = inputs.get_shape().as_list()[-1]
            out_shape = [shape[idx] for idx in range(
                len(inputs.get_shape().as_list()) - 1)] + [hidden]
            # Collapse all leading dimensions so a single matmul handles any input rank.
            flat_inputs = tf.reshape(inputs, [-1, dim])
            with tf.name_scope('weights'):
                W = tf.get_variable("W", [dim, hidden])
                # variable_summaries(W)
            res = tf.matmul(flat_inputs, W)
            if use_bias:
                b = tf.get_variable(
                    "b", [hidden], initializer=tf.constant_initializer(0.))
                res = tf.nn.bias_add(res, b)
            # Restore the original leading dimensions, with the new feature size last.
            res = tf.reshape(res, out_shape)
            return res
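
# Usage sketch (assumption, not from the original source): project a batch of
# 128-dimensional vectors to 64 units; all leading dimensions are preserved.
word_vectors = tf.placeholder(tf.float32, [None, 20, 128])    # [batch, time, dim]
projected = dense(word_vectors, 64, name_scope="projection")  # [batch, time, 64]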