import numpy as np
# Fragment of the dataset's batch-construction method: scale the annotated
# joints, build score-map and location-refinement targets, and pack them into
# the batch dict. `Batch` and `data_to_input` come from the project's own
# dataset module.
stride = self.cfg.stride

if mirror:
    joints = [self.mirror_joints(person_joints, self.symmetric_joints, image.shape[1])
              for person_joints in joints]

sm_size = np.ceil(scaled_img_size / (stride * 2)).astype(int) * 2

scaled_joints = [person_joints[:, 1:3] * scale for person_joints in joints]
joint_id = [person_joints[:, 0].astype(int) for person_joints in joints]

part_score_targets, part_score_weights, locref_targets, locref_mask = self.compute_target_part_scoremap(
    joint_id, scaled_joints, data_item, sm_size, scale)

batch.update({
    Batch.part_score_targets: part_score_targets,
    Batch.part_score_weights: part_score_weights,
    Batch.locref_targets: locref_targets,
    Batch.locref_mask: locref_mask
})

batch = {key: data_to_input(data) for (key, data) in batch.items()}
batch[Batch.data_item] = data_item
return batch
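
# data_to_input is used above to turn each numpy target into a network input.
# A minimal sketch of one plausible implementation (illustrative only; the
# project's actual helper may differ): it adds the leading batch dimension
# that the TensorFlow graph expects.
import numpy as np

def data_to_input_sketch(data):
    # [H, W, C] -> [1, H, W, C], as float for feeding the input placeholder
    return np.expand_dims(data, axis=0).astype(float)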
# Fragment of target construction over a list of data items: pick an
# approximate per-image scale, then build either Gaussian or standard
# score-map targets for each item.
for i in range(len(data_items)):
    # Approximating the scale
    scale = min(target_size[0] / data_items[i].im_size[1],
                target_size[1] / data_items[i].im_size[2])

    if self.cfg.get("scmap_type", None) == "gaussian":
        part_score_target, part_score_weight, locref_target, locref_mask = self.gaussian_scmap(
            joint_ids[i], [joints[i]], data_items[i], sm_size, scale)
    else:
        part_score_target, part_score_weight, locref_target, locref_mask = self.compute_target_part_scoremap_numpy(
            joint_ids[i], [joints[i]], data_items[i], sm_size, scale)

    part_score_targets.append(part_score_target)
    part_score_weights.append(part_score_weight)
    locref_targets.append(locref_target)
    locref_masks.append(locref_mask)

return {
    Batch.part_score_targets: part_score_targets,
    Batch.part_score_weights: part_score_weights,
    Batch.locref_targets: locref_targets,
    Batch.locref_mask: locref_masks
}
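
# The "gaussian" branch above replaces binary disk targets with a soft score
# map. A minimal, self-contained sketch of that idea (illustrative only; the
# function name and `sigma` parameter are assumptions, not this project's API):
import numpy as np

def gaussian_scoremap_sketch(sm_size, joint_xy, sigma=2.0):
    """Return an (h, w) map with a Gaussian bump centred on one joint."""
    h, w = sm_size
    yy, xx = np.mgrid[0:h, 0:w]
    dx = xx - joint_xy[0]
    dy = yy - joint_xy[1]
    return np.exp(-(dx ** 2 + dy ** 2) / (2.0 * sigma ** 2))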
import tensorflow as tf
# `Batch` (the enum of batch fields) and `losses` (which provides huber_loss)
# come from the project's own dataset and loss modules.


def train(self, batch):
    cfg = self.cfg
    heads = self.get_net(batch[Batch.inputs])

    weigh_part_predictions = cfg.weigh_part_predictions
    part_score_weights = batch[Batch.part_score_weights] if weigh_part_predictions else 1.0

    def add_part_loss(pred_layer):
        return tf.losses.sigmoid_cross_entropy(batch[Batch.part_score_targets],
                                                heads[pred_layer],
                                                part_score_weights)

    loss = {}
    loss['part_loss'] = add_part_loss('part_pred')
    total_loss = loss['part_loss']

    if cfg.intermediate_supervision:
        loss['part_loss_interm'] = add_part_loss('part_pred_interm')
        total_loss = total_loss + loss['part_loss_interm']

    if cfg.location_refinement:
        locref_pred = heads['locref']
        locref_targets = batch[Batch.locref_targets]
        locref_weights = batch[Batch.locref_mask]
        loss_func = losses.huber_loss if cfg.locref_huber_loss else tf.losses.mean_squared_error
        loss['locref_loss'] = cfg.locref_loss_weight * loss_func(locref_targets, locref_pred, locref_weights)
        total_loss = total_loss + loss['locref_loss']

    if cfg.pairwise_predict:
        pairwise_pred = heads['pairwise_pred']
        pairwise_targets = batch[Batch.pairwise_targets]
        pairwise_weights = batch[Batch.pairwise_mask]
        loss_func = losses.huber_loss if cfg.pairwise_huber_loss else tf.losses.mean_squared_error
        loss['pairwise_loss'] = cfg.pairwise_loss_weight * loss_func(pairwise_targets, pairwise_pred,
                                                                     pairwise_weights)
        total_loss = total_loss + loss['pairwise_loss']

    # loss['total_loss'] = slim.losses.get_total_loss(add_regularization_losses=params.regularize)
    loss['total_loss'] = total_loss
    return loss
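
# A typical way the loss dictionary returned by train() is consumed: minimize
# loss['total_loss'] with a TF1-style optimizer. Sketch only; the optimizer
# type and learning rate here are assumptions, not this project's settings.
def build_train_op_sketch(loss, learning_rate=0.0005):
    optimizer = tf.train.AdamOptimizer(learning_rate)
    return optimizer.minimize(loss['total_loss'])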
# Fragment of batch assembly for a single data item: start from the augmented
# image, attach the ground-truth targets when available, and convert every
# field to a network input.
im_file = data_item.im_path
# logging.debug('image %s', im_file)
# print('image: {}'.format(im_file))
# logging.debug('mirror %r', mirror)

img = data_item.image  # augmented image
batch = {Batch.inputs: img}

if self.has_gt:
    batch.update({
        Batch.part_score_targets: part_score_targets,
        Batch.part_score_weights: part_score_weights,
        Batch.locref_targets: locref_targets,
        Batch.locref_mask: locref_mask
    })

batch = {key: data_to_input(data) for (key, data) in batch.items()}
batch[Batch.data_item] = data_item
return batch
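
# Batch.* keys are used throughout these snippets as dictionary keys for the
# batch fields. A minimal sketch of what such an enum could look like (the
# members listed are exactly the ones referenced above; the project's real
# `Batch` definition may contain more fields or different values):
from enum import Enum

class BatchSketch(Enum):
    inputs = 1
    part_score_targets = 2
    part_score_weights = 3
    locref_targets = 4
    locref_mask = 5
    pairwise_targets = 6
    pairwise_mask = 7
    data_item = 8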
import logging
import numpy as np
import matplotlib.pyplot as plt
# load_config and the dataset factory (dataset_create) come from the project's
# own config and dataset modules.


def display_dataset():
    logging.basicConfig(level=logging.DEBUG)

    cfg = load_config()
    dataset = dataset_create(cfg)
    dataset.set_shuffle(False)

    while True:
        batch = dataset.next_batch()

        for frame_id in range(1):
            img = batch[Batch.inputs][frame_id, :, :, :]
            img = np.squeeze(img).astype('uint8')

            scmap = batch[Batch.part_score_targets][frame_id, :, :, :]
            scmap = np.squeeze(scmap)

            # scmask = batch[Batch.part_score_weights]
            # if scmask.size > 1:
            #     scmask = np.squeeze(scmask).astype('uint8')
            # else:
            #     scmask = np.zeros(img.shape)

            subplot_height = 4
            subplot_width = 5
            num_plots = subplot_width * subplot_height
            f, axarr = plt.subplots(subplot_height, subplot_width)
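            # Illustrative continuation (an assumption, not the original
            # code): fill the subplot grid with the image followed by the
            # per-joint score maps, then display the figure.
            for plot_idx in range(num_plots):
                ax = axarr.flat[plot_idx]
                ax.axis('off')
                if plot_idx == 0:
                    ax.imshow(img)
                elif plot_idx - 1 < scmap.shape[2]:
                    ax.imshow(scmap[:, :, plot_idx - 1])
            plt.show()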
def get_batch_spec(cfg):
    num_joints = cfg.num_joints
    batch_size = cfg.batch_size
    return {
        Batch.inputs: [batch_size, None, None, 3],
        Batch.part_score_targets: [batch_size, None, None, num_joints],
        Batch.part_score_weights: [batch_size, None, None, num_joints],
        Batch.locref_targets: [batch_size, None, None, num_joints * 2],
        Batch.locref_mask: [batch_size, None, None, num_joints * 2]
    }
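
# One common way a batch spec like this is consumed is to create one
# placeholder per batch field. A minimal sketch under that assumption
# (TF1-style graph code, matching the tf.losses usage above; not necessarily
# this project's exact input pipeline):
import tensorflow as tf

def build_placeholders_sketch(cfg):
    batch_spec = get_batch_spec(cfg)
    return {name: tf.placeholder(tf.float32, shape=shape)
            for name, shape in batch_spec.items()}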