# we cannot use "self.config.session_creator.create_session()" here since it finalizes the graph
sess = tfv1.Session(config=tfv1.ConfigProto(allow_soft_placement=True))
self.config.session_init._run_init(sess)

builder = tfv1.saved_model.builder.SavedModelBuilder(filename)
prediction_signature = tfv1.saved_model.signature_def_utils.build_signature_def(
    inputs=inputs_signatures,
    outputs=outputs_signatures,
    method_name=tfv1.saved_model.signature_constants.PREDICT_METHOD_NAME)
builder.add_meta_graph_and_variables(
    sess, tags,
    signature_def_map={signature_name: prediction_signature})
builder.save()
logger.info("SavedModel created at {}.".format(filename))
def _run_init(self, sess):
    logger.info(
        "Restoring checkpoint from {} ...".format(self.path))

    def f(reader, name, v):
        val = reader.get_tensor(name)
        SessionUpdate.load_value_to_var(v, val)

    with sess.as_default():
        self._match_vars(f)
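A standalone sketch of the same restore-by-name idea using the plain TF1 checkpoint reader; the checkpoint prefix is a hypothetical placeholder.

import tensorflow.compat.v1 as tfv1
tfv1.disable_v2_behavior()

reader = tfv1.train.NewCheckpointReader("/path/to/model-10000")   # hypothetical prefix
shapes = reader.get_variable_to_shape_map()                       # {name: shape} stored in the checkpoint
with tfv1.Session() as sess:
    for var in tfv1.global_variables():
        if var.op.name in shapes:
            # assign the stored tensor to the matching in-graph variable
            var.load(reader.get_tensor(var.op.name), session=sess)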
extreme_name = 'maximum' if self.reverse else 'minimum'
if not path.endswith(str(self.best[0])):
    logger.warn("[MinSaver] New {} '{}' found at global_step={}, but the latest checkpoint is {}.".format(
        extreme_name, self.monitor_stat, self.best[0], path))
    logger.warn("MinSaver will do nothing this time. "
                "The callbacks may have inconsistent frequency or wrong order.")
    return

newname = os.path.join(self.checkpoint_dir,
                       self.filename or
                       ('max-' + self.monitor_stat if self.reverse else 'min-' + self.monitor_stat))
files_to_copy = tf.gfile.Glob(path + '*')
for file_to_copy in files_to_copy:
    tf.gfile.Copy(file_to_copy, file_to_copy.replace(path, newname), overwrite=True)
logger.info("Model at global_step={} with {} {}={:.5g} saved.".format(
    self.best[0], extreme_name, self.monitor_stat, self.best[1]))
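A hedged usage sketch, assuming tensorpack's ModelSaver/MinSaver callbacks: MinSaver only copies the checkpoint that ModelSaver has just written, so it should come after ModelSaver in the callback list (the wrong order is exactly what the warning above reports); the monitored stat name 'val-error' is an assumption.

from tensorpack.callbacks import ModelSaver, MinSaver

callbacks = [
    ModelSaver(),            # writes the periodic checkpoint first
    MinSaver('val-error'),   # then copies it to min-val-error* when a new minimum appears
]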
def _trigger(self):
    try:
        self.saver.save(
            tf.get_default_session(),
            self.path,
            global_step=tf.train.get_global_step(),
            write_meta_graph=False)
        logger.info("Model saved to %s." % tf.train.get_checkpoint_state(self.checkpoint_dir).model_checkpoint_path)
    except (OSError, IOError, tf.errors.PermissionDeniedError,
            tf.errors.ResourceExhaustedError):   # disk error sometimes.. just ignore it
        logger.exception("Exception in ModelSaver!")
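A minimal sketch of the underlying TF1 saver calls used above, outside tensorpack; the variable, directory, and step value are made up for illustration.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

w = tf.get_variable("w", shape=[3], initializer=tf.zeros_initializer())
saver = tf.train.Saver(max_to_keep=10)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # write variables only, mirroring write_meta_graph=False above
    saver.save(sess, "/tmp/train_log/model", global_step=0, write_meta_graph=False)
    ckpt = tf.train.get_checkpoint_state("/tmp/train_log")
    print(ckpt.model_checkpoint_path)   # e.g. /tmp/train_log/model-0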
    anchor_boxes: (h', w', NA, 4)
    gt_boxes: (N, 4)
    gt_labels: (N,)
    If MODE_MASK, gt_masks: (N, h, w)
"""
roidbs = DetectionDataset().load_training_roidbs(cfg.DATA.TRAIN)
print_class_histogram(roidbs)

# Valid training images should have at least one fg box.
# But this filter shall not be applied for testing.
num = len(roidbs)
roidbs = list(filter(lambda img: len(img['boxes'][img['is_crowd'] == 0]) > 0, roidbs))
logger.info("Filtered {} images which contain no non-crowd groundtruth boxes. Total #images for training: {}".format(
    num - len(roidbs), len(roidbs)))

ds = DataFromList(roidbs, shuffle=True)
aug = imgaug.AugmentorList(
    [CustomResize(cfg.PREPROC.TRAIN_SHORT_EDGE_SIZE, cfg.PREPROC.MAX_SIZE),
     imgaug.Flip(horiz=True)])

def preprocess(roidb):
    fname, boxes, klass, is_crowd = roidb['file_name'], roidb['boxes'], roidb['class'], roidb['is_crowd']
    boxes = np.copy(boxes)
    im = cv2.imread(fname, cv2.IMREAD_COLOR)
    assert im is not None, fname
    im = im.astype('float32')
    # assume floatbox as input
    assert boxes.dtype == np.float32, "Loader has to return floating point boxes!"
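A tiny numpy illustration of the non-crowd filter applied above: boxes are kept where is_crowd == 0, and an image is dropped when no such box remains; the values are made up.

import numpy as np

roidb = {
    'boxes': np.array([[0., 0., 10., 10.], [5., 5., 20., 20.]], dtype=np.float32),
    'is_crowd': np.array([1, 0]),
}
non_crowd = roidb['boxes'][roidb['is_crowd'] == 0]
print(len(non_crowd))   # 1 -> this image passes the len(...) > 0 filter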
def run(model_path, image_path):
    predictor = tp.OfflinePredictor(tp.PredictConfig(
        model=Model(),
        session_init=tp.get_model_loader(model_path),
        input_names=['image'],
        output_names=['saliency']))
    im_orig = cv2.imread(image_path)
    assert im_orig is not None and im_orig.ndim == 3, image_path
    original_size = im_orig.shape
    logger.info('imsize {}'.format(im_orig.shape))

    # ResNet expects RGB inputs of 224x224x3
    im = cv2.resize(im_orig, (IMAGE_SIZE, IMAGE_SIZE))
    im = im.astype(np.float32)[:, :, ::-1]   # OpenCV loads BGR; reverse the channels to get RGB

    saliency_images = predictor([im])[0]
    saliency_images = cv2.resize(saliency_images, (original_size[1], original_size[0]))

    abs_saliency = np.abs(saliency_images).max(axis=-1)
    pos_saliency = np.maximum(0, saliency_images)
    neg_saliency = np.maximum(0, -saliency_images)

    pos_saliency -= pos_saliency.min()
    pos_saliency /= pos_saliency.max()
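A small numpy sketch of the same channel reduction and [0, 1] rescaling on a fake saliency map; the shape, values, and the epsilon guard are assumptions.

import numpy as np

saliency = np.random.randn(4, 4, 3).astype(np.float32)    # hypothetical HxWx3 gradient map
abs_sal = np.abs(saliency).max(axis=-1)                    # strongest channel response per pixel
pos = np.maximum(0, saliency)
pos = (pos - pos.min()) / (pos.max() - pos.min() + 1e-8)   # rescale to [0, 1]; epsilon avoids /0
neg = np.maximum(0, -saliency)
neg = (neg - neg.min()) / (neg.max() - neg.min() + 1e-8)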
actual_args = copy.copy(get_arg_scope()[func.__name__])
actual_args.update(kwargs)

if name is not None:
    with tf.variable_scope(name) as scope:
        do_log_shape = log_shape and scope.name not in _layer_logged
        do_summary = do_summary and scope.name not in _layer_logged
        if do_log_shape:
            logger.info("{} input: {}".format(scope.name, get_shape_str(inputs)))

        # run the actual function
        outputs = func(*args, **actual_args)

        if do_log_shape:
            # log shape info and add activation
            logger.info("{} output: {}".format(
                scope.name, get_shape_str(outputs)))
            _layer_logged.add(scope.name)

        if do_summary:
            if isinstance(outputs, list):
                for x in outputs:
                    add_activation_summary(x, scope.name)
            else:
                add_activation_summary(outputs, scope.name)
else:
    # run the actual function
    outputs = func(*args, **actual_args)
return outputs
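A stripped-down sketch of the decorator pattern above: run the wrapped function inside a variable scope and log input/output shapes only once per scope name. This is a simplification for illustration, not tensorpack's actual layer registry.

import functools
import tensorflow.compat.v1 as tf

_logged = set()

def simple_layer(func):
    @functools.wraps(func)
    def wrapper(name, inputs, *args, **kwargs):
        with tf.variable_scope(name) as scope:
            outputs = func(inputs, *args, **kwargs)
            if scope.name not in _logged:
                # log shapes only the first time this scope name is seen
                print("{} input: {} output: {}".format(scope.name, inputs.shape, outputs.shape))
                _logged.add(scope.name)
            return outputs
    return wrapper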
def log(self):
    """ Log the time spent in the heaviest callbacks. """
    if self.tot < 3:
        return
    msgs = []
    for name, t in self.times:
        if t / self.tot > 0.3 and t > 1:
            msgs.append(name + ": " + humanize_time_delta(t))
    logger.info(
        "Callbacks took {:.3f} sec in total. {}".format(
            self.tot, '; '.join(msgs)))
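A standalone sketch of the reporting rule above: a callback is only named in the summary when it took more than 1 second and more than 30% of the total time; the timings are made up.

times = [("ModelSaver", 0.2), ("InferenceRunner", 4.5)]   # hypothetical (name, seconds) pairs
tot = sum(t for _, t in times)
slow = ["{}: {:.1f}s".format(name, t) for name, t in times if t / tot > 0.3 and t > 1]
print("Callbacks took {:.3f} sec in total. {}".format(tot, "; ".join(slow)))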
    The value to assign to the variable.

Note:
    Subclasses will implement the abstract method
    :meth:`_get_value_to_set`, which should return a new value to
    set, or return None to do nothing.
"""
ret = self._get_value_to_set()
if ret is not None and ret != self._last_value:
    if self.epoch_num != self._last_epoch_set:
        # Print this message at most once every epoch
        if self._last_value is None:
            logger.info("[HyperParamSetter] At global_step={}, {} is set to {:.6f}".format(
                self.global_step, self.param.readable_name, ret))
        else:
            logger.info("[HyperParamSetter] At global_step={}, {} changes from {:.6f} to {:.6f}".format(
                self.global_step, self.param.readable_name, self._last_value, ret))
        self._last_epoch_set = self.epoch_num
    self._last_value = ret
return ret
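A hedged sketch of the subclass contract described in the note above: _get_value_to_set returns a new value or None. The epoch-keyed schedule here is an illustrative assumption, not tensorpack's ScheduledHyperParamSetter.

class StepwiseSetter(HyperParamSetter):
    def __init__(self, param, schedule):
        super(StepwiseSetter, self).__init__(param)
        self._schedule = dict(schedule)    # hypothetical {epoch: value} mapping

    def _get_value_to_set(self):
        # return the value scheduled for the current epoch, or None to leave the param unchanged
        return self._schedule.get(self.epoch_num, None)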