import dlib
import fcn.utils
import nose.tools
import numpy as np
import rfcn.utils


def test_label_rois():
    # get_instance_segmentation_data() is a test helper defined alongside this test.
    img, lbl_cls, lbl_inst = get_instance_segmentation_data()
    rects = []
    dlib.find_candidate_object_locations(img, rects)
    rois = []
    for rect in rects:
        x1, y1, x2, y2 = rect.left(), rect.top(), rect.right(), rect.bottom()
        rois.append((x1, y1, x2, y2))
    rois = np.array(rois)
    roi_clss, roi_inst_masks = rfcn.utils.label_rois(
        rois, lbl_inst, lbl_cls, overlap_thresh=0.5)
    n_rois = len(rois)
    nose.tools.assert_equal(len(roi_clss), n_rois)
    nose.tools.assert_equal(len(roi_inst_masks), n_rois)
    np.testing.assert_equal(np.unique(roi_clss), [0, 1])
    viz_imgs = []
    colors = fcn.utils.labelcolormap(21)
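# The test above exercises rfcn.utils.label_rois, which labels each ROI against the
# ground-truth instances. A minimal sketch of one way such overlap-based labeling can
# work (a hypothetical helper, not rfcn's implementation), assuming lbl_inst holds
# instance ids with -1 as background and lbl_cls holds per-pixel class ids:
def label_rois_sketch(rois, lbl_inst, lbl_cls, overlap_thresh=0.5):
    roi_clss = []
    for x1, y1, x2, y2 in rois:
        roi_inst = lbl_inst[y1:y2, x1:x2]
        roi_cls_patch = lbl_cls[y1:y2, x1:x2]
        best_cls, best_iou = 0, 0.0
        for inst_id in np.unique(lbl_inst):
            if inst_id < 0:
                continue  # skip background
            inter = np.sum(roi_inst == inst_id)
            union = np.sum(lbl_inst == inst_id) + roi_inst.size - inter
            iou = inter / float(union)
            if iou > best_iou:
                best_iou = iou
                # majority class among the instance pixels inside the ROI
                best_cls = np.bincount(roi_cls_patch[roi_inst == inst_id]).argmax()
        # below the overlap threshold the ROI is treated as background (class 0)
        roi_clss.append(best_cls if best_iou >= overlap_thresh else 0)
    return np.array(roi_clss, dtype=np.int32)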
import random

import dlib
import numpy as np


def findSelectiveSearchRois(img, kvals, minSize, max_merging_iterations, nmsThreshold):
    # imconvertCv2Ski, ToFloats and nmsPython are helpers defined elsewhere in this project.
    tmp = []
    dlib.find_candidate_object_locations(imconvertCv2Ski(img), tmp, kvals, minSize, max_merging_iterations)
    rois = [[d.left(), d.top(), d.right(), d.bottom()] for d in tmp]
    if nmsThreshold is not None:
        assert 0 < nmsThreshold < 1
        dets = [ToFloats(r) + [abs((r[2] - r[0]) * (r[3] - r[1]))] for r in rois]
        keepInds = nmsPython(np.array(dets), nmsThreshold)
        # print("findSelectiveSearchRois using nms threshold: {}: before nms nrRois={}, after nms nrRois={}".format(nmsThreshold, len(rois), len(keepInds)))
        # groupedRectangles, weights = cv2.groupRectangles(np.asanyarray(rectsInput, np.float).tolist(), 1, 0.3)
        rois = [rois[i] for i in keepInds]
    random.shuffle(rois)  # randomize ROI order to avoid introducing unintended ordering effects later
    return rois
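# nmsPython above is the project's own non-maximum-suppression helper. A minimal greedy
# NMS sketch with the same input layout (rows of [x1, y1, x2, y2, score]; in the function
# above the box area is used as the score) would look like this. It is an illustration
# under those assumptions, not the project's code:
def nms_sketch(dets, thresh):
    x1, y1, x2, y2 = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3]
    scores = dets[:, 4]
    areas = (x2 - x1) * (y2 - y1)
    order = scores.argsort()[::-1]  # highest score first
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # intersection of the best remaining box with all other remaining boxes
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        order = order[np.where(iou <= thresh)[0] + 1]  # drop boxes that overlap too much
    return keep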
# Also note that this example requires scikit-image which can be installed
# via the command:
# pip install -U scikit-image
# Or downloaded from http://scikit-image.org/download.html.
import dlib
from skimage import io
image_file = '../examples/faces/2009_004587.jpg'
img = io.imread(image_file)
# Locations of candidate objects will be saved into rects
rects = []
dlib.find_candidate_object_locations(img, rects, min_size=500)
print("number of rectangles found {}".format(len(rects)))
for k, d in enumerate(rects):
    print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
        k, d.left(), d.top(), d.right(), d.bottom()))
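# Optionally, the candidate boxes from the example above can be drawn with dlib's
# built-in viewer; this assumes a GUI-enabled dlib build and an available display:
win = dlib.image_window()
win.set_image(img)
for d in rects:
    win.add_overlay(d)
dlib.hit_enter_to_continue()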
import dlib
import numpy as np


def get_bboxes(orig_img, im_scale, min_size, dedup_boxes=1. / 16):
    rects = []
    dlib.find_candidate_object_locations(orig_img, rects, min_size=min_size)
    rects = [[0, d.left(), d.top(), d.right(), d.bottom()] for d in rects]
    rects = np.asarray(rects, dtype=np.float32)
    # bbox pre-processing: scale the boxes to the network input size, then drop
    # near-duplicates that round to the same coarse (dedup_boxes) grid cell
    rects *= im_scale
    v = np.array([1, 1e3, 1e6, 1e9, 1e12])
    hashes = np.round(rects * dedup_boxes).dot(v)
    _, index, inv_index = np.unique(hashes, return_index=True,
                                    return_inverse=True)
    rects = rects[index, :]
    return rects
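# A small usage sketch for get_bboxes, assuming an image read with scikit-image and a
# network input scale of 600 px on the shorter side (path and values are illustrative):
from skimage import io

img = io.imread('example.jpg')          # illustrative path
im_scale = 600.0 / min(img.shape[:2])   # scale factor applied to the proposals
proposals = get_bboxes(img, im_scale, min_size=500)
print('{} deduplicated proposals'.format(len(proposals)))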
import dlib


def selective_search_dlib(img, max_img_size=(500, 500),
                          kvals=(50, 200, 2), min_size=2200, check=True,
                          debug_window=False):
    # logger and _scale_down_image are defined elsewhere in this module.
    if debug_window:
        org_img = img
    org_h, org_w = img.shape[0:2]
    # Resize the image to speed up selective search
    img, resize_scale = _scale_down_image(img, max_img_size)
    # Selective search
    drects = []
    dlib.find_candidate_object_locations(img, drects, kvals=kvals,
                                         min_size=min_size)
    rects = [(int(drect.left() * resize_scale),
              int(drect.top() * resize_scale),
              int(drect.width() * resize_scale),
              int(drect.height() * resize_scale)) for drect in drects]
    # Check the validity of the rectangles
    if check:
        if len(rects) == 0:
            logger.error('No selective search rectangle '
                         '(Please tune the parameters)')
        for rect in rects:
            x, y = rect[0], rect[1]
            w, h = rect[2], rect[3]
            x2, y2 = x + w, y + h
            if x < 0 or y < 0 or org_w < x2 or org_h < y2 or w <= 0 or h <= 0: