import sys

import dlib

# In this example we are going to train a shape predictor (face landmarking
# model) based on the small faces dataset in the examples/faces directory.
# This means you need to supply the path to this faces folder as a command
# line argument so we will know where it is.
if len(sys.argv) != 2:
    print(
        "Give the path to the examples/faces directory as the argument to this "
        "program. For example, if you are in the python_examples folder then "
        "execute this program by running:\n"
        "    ./train_shape_predictor.py ../examples/faces")
    exit()
faces_folder = sys.argv[1]

# Now make the object responsible for training the model.
# This algorithm has a bunch of parameters you can mess with. The
# documentation for the shape_predictor_trainer explains all of them.
# You should also read Kazemi's paper, which explains all the parameters
# in great detail. However, here I'm just setting three of them
# differently than their default values. I'm doing this because we
# have a very small dataset. In particular, setting the oversampling
# to a high amount (300) effectively boosts the training set size, so
# that helps this example.
options = dlib.shape_predictor_training_options()
options.oversampling_amount = 300
# I'm also reducing the capacity of the model by explicitly increasing
# the regularization (making nu smaller) and by using trees with
# smaller depths.
options.nu = 0.05
options.tree_depth = 2
options.be_verbose = True
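
# The options above only configure the trainer; the excerpt stops before the
# actual training call. A minimal sketch of the next step follows. It assumes
# the annotation files shipped with dlib's examples/faces folder
# ("training_with_face_landmarks.xml" and "testing_with_face_landmarks.xml");
# if your dataset uses different XML files, substitute their names here.
import os

training_xml_path = os.path.join(faces_folder, "training_with_face_landmarks.xml")
testing_xml_path = os.path.join(faces_folder, "testing_with_face_landmarks.xml")

# Train the model and serialize it to predictor.dat.
dlib.train_shape_predictor(training_xml_path, "predictor.dat", options)

# Report the mean landmark error on the training and testing sets.
print("Training error: {}".format(
    dlib.test_shape_predictor(training_xml_path, "predictor.dat")))
print("Testing error: {}".format(
    dlib.test_shape_predictor(testing_xml_path, "predictor.dat")))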
# check_int is assumed to be the integer counterpart of check_float, defined
# just above this excerpt.
check_float = partial(checks.check_multi_scale_param, self.n_scales,
                      (float,))
feature_padding = check_int('feature_padding', feature_padding)
n_pixel_pairs = check_int('n_pixel_pairs', n_pixel_pairs)
distance_prior_weighting = check_float('distance_prior_weighting',
                                       distance_prior_weighting)
regularisation_weight = check_float('regularisation_weight',
                                    regularisation_weight)
n_split_tests = check_int('n_split_tests', n_split_tests)
n_trees = check_int('n_trees', n_trees)
n_dlib_perturbations = check_int('n_dlib_perturbations',
                                 n_dlib_perturbations)
n_tree_levels = check_int('n_tree_levels', n_tree_levels)

self._dlib_options_templates = []
for j in range(self.n_scales):
    new_opts = dlib.shape_predictor_training_options()

    # Size of the region within which to sample features for the feature
    # pool, e.g. a padding of 0.5 would cause the algorithm to sample
    # pixels from a box that was 2x2 pixels
    new_opts.feature_pool_region_padding = feature_padding[j]

    # P parameter from the Kazemi paper
    new_opts.feature_pool_size = n_pixel_pairs[j]

    # Controls how tight the feature sampling should be. Lower values
    # enforce closer features. Opposite of the explanation in the Kazemi
    # paper, lambda
    new_opts.lambda_param = distance_prior_weighting[j]

    # Boosting regularization parameter - nu from the Kazemi paper. Larger
    # values may cause overfitting but improve performance on training
    # data
    new_opts.nu = regularisation_weight[j]

    # S from Kazemi paper - Number of split features at each node to
def copy_dlib_options(options):
    # dlib's training options object cannot simply be deep-copied, so create
    # a fresh instance and copy every public attribute across.
    new_options = dlib.shape_predictor_training_options()
    for p in sorted(filter(lambda x: '__' not in x, dir(options))):
        setattr(new_options, p, getattr(options, p))
    return new_options
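
# A minimal usage sketch of copy_dlib_options (the attribute names are real
# dlib options; the values are arbitrary placeholders): copying lets you
# derive per-scale variants from a shared template without mutating it.
template = dlib.shape_predictor_training_options()
template.tree_depth = 3
template.nu = 0.1

per_scale = copy_dlib_options(template)
per_scale.nu = 0.05          # tweak the copy only

assert template.nu == 0.1    # the template is left untouched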
def __init__(self, model):
    if isinstance(model, STRING_TYPES) or isinstance(model, Path):
        m_path = Path(model)
        if not m_path.exists():
            raise ValueError('Model {} does not exist.'.format(m_path))
        model = dlib.shape_predictor(str(m_path))
    # Dlib doesn't expose any information about how the model was built,
    # so we just create dummy options
    self.algorithm = DlibAlgorithm(dlib.shape_predictor_training_options(),
                                   n_iterations=0)
    self.algorithm.dlib_model = model
    self.scales = [1]
def train_shape_predictor(self):
    self.__print_training_message('shape predictor')
    opt = dlib.shape_predictor_training_options()
    opt.oversampling_amount = 300
    opt.nu = 0.05
    opt.tree_depth = 2
    opt.num_threads = self.cpu_cores
    opt.be_verbose = True
    dlib.train_shape_predictor(self.xml, PREDICTOR_DAT, opt)
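
# Once training has produced a predictor file (PREDICTOR_DAT above), it can be
# loaded and applied to new images. A minimal sketch, assuming a file named
# "predictor.dat" and an input image "face.jpg" (both names are placeholders):
import dlib

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("predictor.dat")

img = dlib.load_rgb_image("face.jpg")
for rect in detector(img, 1):
    # The predictor returns a full_object_detection with one point per landmark.
    shape = predictor(img, rect)
    print("Detected {} landmarks, first point: {}".format(
        shape.num_parts, shape.part(0)))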