def _merge_leafs(self, leafs):
    """Merge the leaf contours of a single nest into one polygon."""
    nest_polygons = []
    for leaf in leafs:
        # leaf names look like 'polygon-<index into contours_slice>'
        leafidx = int(leaf.split('polygon-')[1])
        nest = dict(self.contours_slice.loc[leafidx, :])
        coords = _parse_annot_coords(nest)
        nest_polygons.append(Polygon(coords))
    return self._merge_polygons(nest_polygons)
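
The final step presumably unions the accumulated nest polygons into a single geometry. A minimal sketch of what that union could look like using shapely's unary_union (merge_polygons here is a hypothetical stand-in for the class method, not HistomicsTK's implementation):

from shapely.geometry import Polygon
from shapely.ops import unary_union

def merge_polygons(polygons):
    # Union a list of shapely Polygons; the result is a single Polygon
    # when the inputs touch or overlap, otherwise a MultiPolygon
    return unary_union(polygons)

# Two overlapping squares merge into one outline whose area counts
# the overlap only once: 4 + 4 - 1 = 7
squares = [Polygon([(0, 0), (2, 0), (2, 2), (0, 2)]),
           Polygon([(1, 1), (3, 1), (3, 3), (1, 3)])]
print(merge_polygons(squares).area)  # 7.0
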
# run adaptive multi-scale LoG filter
im_log_max, im_sigma_max = htk_shape_filters.cdog(
    im_nuclei_stain, im_nuclei_fgnd_mask,
    sigma_min=min_radius / np.sqrt(2),
    sigma_max=max_radius / np.sqrt(2)
)
# apply local maximum clustering
im_nuclei_seg_mask, seeds, maxima = htk.segmentation.nuclear.max_clustering(
    im_log_max, im_nuclei_fgnd_mask, local_max_search_radius)
if seeds is None:
    return im_nuclei_seg_mask
# split any objects with disconnected fragments
im_nuclei_seg_mask = htk.segmentation.label.split(im_nuclei_seg_mask, conn=8)
# filter out small objects (np.int was removed in NumPy 1.24; use int)
im_nuclei_seg_mask = htk.segmentation.label.area_open(
    im_nuclei_seg_mask, min_nucleus_area).astype(int)
return im_nuclei_seg_mask
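
The sigma bounds passed to cdog come from the standard LoG blob-detection relation: the scale-normalized Laplacian-of-Gaussian response to a disc of radius r peaks at sigma = r / sqrt(2), so a nuclear radius range in pixels maps directly onto the filter's scale range. A quick check of that mapping in plain NumPy:

import numpy as np

def radius_to_sigma(radius_px):
    # LoG response to a blob of radius r is maximal at sigma = r / sqrt(2)
    return radius_px / np.sqrt(2)

print(radius_to_sigma(4.0))   # ~2.83 - lower scale bound for 4 px nuclei
print(radius_to_sigma(30.0))  # ~21.21 - upper scale bound for 30 px nuclei
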
# identify contour seed points
X, Y, Min, Max = seed_contours(I, Delta)
# trace contours from seeds
cXs, cYs = trace_contours(I, X, Y, Min, Max, MaxLength=255)
# score successfully traced contours
Scores = score_contours(I, cXs, cYs)
# construct label image from scored contours
Label = label_contour(I.shape, cXs, cYs, Scores)
# compact contours to remove spurs - the paper calls this "optimization"
Label = label.compact(Label, Compaction)
# cleanup label image
Label = label.split(Label)
Label = label.area_open(Label, MinArea)
Label = label.width_open(Label, MinWidth)
# split objects with concavities
Label = split_concavities(Label, MinDepth, MinConcavity)
return Label
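
To make the cleanup steps concrete: label.split relabels any object whose pixels form multiple connected components, and label.area_open discards objects below an area threshold. A rough analogue of the split step using scipy.ndimage, shown only for illustration (HistomicsTK's label module has its own implementations):

import numpy as np
from scipy import ndimage

def split_disconnected(label_img):
    # Give every connected fragment of every label its own new label,
    # so no object in the output spans disconnected pixels
    out = np.zeros_like(label_img)
    next_label = 1
    for lab in np.unique(label_img):
        if lab == 0:  # background
            continue
        comps, n = ndimage.label(label_img == lab)
        for c in range(1, n + 1):
            out[comps == c] = next_label
            next_label += 1
    return out
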
- self.ordinary_contours: dict: indexed by maskname, each entry
  is a contours dataframe
- self.edge_contours: dict: indexed by maskname, each entry is
  a contours dataframe
- self.merged_contours: pandas DataFrame: single dataframe to
  save all merged contours
"""
ordinary_contours = dict()
edge_contours = dict()
for midx, maskpath in enumerate(self.maskpaths):
    # extract contours
    MASK = imread(maskpath)
    contours_df = get_contours_from_mask(
        MASK=MASK,
        monitorPrefix="%s: mask %d of %d" % (
            monitorPrefix, midx, len(self.maskpaths)),
        **self.contkwargs)
    # separate edge from non-edge contours
    edgeids = []
    for edge in ['top', 'left', 'bottom', 'right']:
        edgeids.extend(list(contours_df.loc[contours_df.loc[
            :, 'touches_edge-%s' % edge] == 1, :].index))
    edgeids = list(set(edgeids))
    roiname = os.path.split(maskpath)[1]
    edge_contours[roiname] = contours_df.loc[edgeids, :].copy()
    ordinary_contours[roiname] = contours_df.drop(edgeids, axis=0)
self.ordinary_contours = ordinary_contours
self.edge_contours = edge_contours
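
The edge/non-edge split hinges entirely on the touches_edge-* indicator columns that get_contours_from_mask attaches to each contour. The same selection logic on a tiny synthetic dataframe:

import pandas as pd

df = pd.DataFrame({
    'touches_edge-top':    [1, 0, 0],
    'touches_edge-left':   [0, 0, 1],
    'touches_edge-bottom': [0, 0, 0],
    'touches_edge-right':  [0, 0, 0],
})
edgeids = []
for edge in ['top', 'left', 'bottom', 'right']:
    edgeids.extend(list(
        df.loc[df.loc[:, 'touches_edge-%s' % edge] == 1, :].index))
print(sorted(set(edgeids)))  # [0, 2] - contours 0 and 2 touch a tile edge
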
def visualize_individual_superpixels(self):
    """Visualize individual spixels, color-coded by cellularity."""
    # Define GTCodes dataframe
    GTCodes_df = DataFrame(columns=['group', 'GT_code', 'color'])
    for spval, sp in self.fdata.iterrows():
        spstr = 'spixel-%d_cellularity-%d' % (
            spval, self.cluster_props[sp['cluster']]['cellularity'])
        GTCodes_df.loc[spstr, 'group'] = spstr
        GTCodes_df.loc[spstr, 'GT_code'] = spval
        GTCodes_df.loc[spstr, 'color'] = \
            self.cluster_props[sp['cluster']]['color']
    # get contours df
    contours_df = get_contours_from_mask(
        MASK=self.spixel_mask, GTCodes_df=GTCodes_df,
        get_roi_contour=False, MIN_SIZE=0, MAX_SIZE=None,
        verbose=self.cd.verbose == 3, monitorPrefix=self.monitorPrefix)
    contours_df.loc[:, "group"] = [
        j.split('_')[-1] for j in contours_df.loc[:, "group"]]
    # get annotation docs
    annprops = {
        'F': (self.ymax - self.ymin) / self.tissue_rgb.shape[0],
        'X_OFFSET': self.xmin,
        'Y_OFFSET': self.ymin,
        'opacity': self.cd.opacity,
        'lineWidth': self.cd.lineWidth,
    }
    annotation_docs = get_annotation_documents_from_contours(
        contours_df.copy(), docnamePrefix='spixel', annprops=annprops)
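
The annprops dict above carries the affine map from mask coordinates back to slide coordinates: F is the scale factor (slide-space extent over mask extent) and X_OFFSET/Y_OFFSET translate to the ROI's position, so a mask vertex presumably lands at slide_x = mask_x * F + X_OFFSET (and likewise for y). A worked example under that assumption, with made-up numbers:

# Hypothetical: a 1000-px-tall mask covering a 4000-px-tall ROI whose
# top-left corner sits at (xmin, ymin) = (25000, 38000)
F = 4000 / 1000                   # = 4.0, mask pixels -> slide pixels
X_OFFSET, Y_OFFSET = 25000, 38000
mask_x, mask_y = 120, 340
slide_x = mask_x * F + X_OFFSET   # 25480.0
slide_y = mask_y * F + Y_OFFSET   # 39360.0
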
process_whole_image = False
#
# Initiate Dask client
#
print('\n>> Creating Dask client ...\n')
start_time = time.time()
c = cli_utils.create_dask_client(args)
print(c)
dask_setup_time = time.time() - start_time
print('Dask setup time = {}'.format(
    cli_utils.disp_time_hms(dask_setup_time)))
#
# Read Input Image
#
print('\n>> Reading input image ... \n')
ts = large_image.getTileSource(args.inputImageFile)
ts_metadata = ts.getMetadata()
print(json.dumps(ts_metadata, indent=2))
is_wsi = ts_metadata['magnification'] is not None
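
The is_wsi flag works because large_image reports a native magnification only for whole-slide formats; a plain raster such as a PNG yields None there, which is what routes the image past the WSI-only steps below. A minimal probe of the same metadata (the slide path is hypothetical):

import large_image

ts = large_image.getTileSource('example.svs')  # hypothetical path
meta = ts.getMetadata()
print(meta['sizeX'], meta['sizeY'], meta.get('magnification'))
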
annot_fname = os.path.splitext(
    os.path.basename(args.outputNucleiAnnotationFile))[0]
annotation = {
    "name": annot_fname + '-nuclei-' + args.nuclei_annotation_format,
    "elements": nuclei_list
}
with open(args.outputNucleiAnnotationFile, 'w') as annotation_file:
    json.dump(annotation, annotation_file, indent=2, sort_keys=False)
total_time_taken = time.time() - total_start_time
print('Total analysis time = {}'.format(
    cli_utils.disp_time_hms(total_time_taken)))
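
Each entry in nuclei_list is an element in the girder/HistomicsUI annotation schema; for boundary-style output these are closed polylines. A representative element, with invented coordinates, could look like:

# Illustrative only - coordinates are made up; z is 0 for 2D slides
element = {
    'type': 'polyline',
    'points': [[2100, 3400, 0], [2112, 3398, 0], [2120, 3411, 0]],
    'closed': True,
    'lineColor': 'rgb(0, 255, 0)',
}
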
if is_wsi:
    #
    # Compute tissue/foreground mask at low-res for whole slide images
    #
    print('\n>> Computing tissue/foreground mask at low-res ...\n')
    start_time = time.time()
    im_fgnd_mask_lres, fgnd_seg_scale = \
        cli_utils.segment_wsi_foreground_at_low_res(ts)
    fgnd_time = time.time() - start_time
    print('low-res foreground mask computation time = {}'.format(
        cli_utils.disp_time_hms(fgnd_time)))
it_kwargs = {
    'tile_size': {'width': args.analysis_tile_size},
    'scale': {'magnification': args.analysis_mag},
}
#
# Compute foreground fraction of tiles in parallel using Dask
#
print('\n>> Computing foreground fraction of all tiles ...\n')
start_time = time.time()
num_tiles = \
    ts.getSingleTile(**it_kwargs)['iterator_range']['position']
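
What follows this tile count in the CLI is a typical Dask fan-out: one delayed task per tile position, gathered in a single compute call. A stripped-down sketch of that pattern (the per-tile metric here is a placeholder, not the CLI's actual foreground-fraction computation):

import dask
import large_image
import numpy as np

def tile_metric(position):
    # Placeholder per-tile statistic; the real CLI estimates each
    # tile's tissue fraction from the low-res foreground mask
    tile = ts.getSingleTile(
        tile_position=position,
        format=large_image.tilesource.TILE_FORMAT_NUMPY,
        **it_kwargs)
    return np.mean(tile['tile'])

tasks = [dask.delayed(tile_metric)(pos) for pos in range(num_tiles)]
tile_values = np.array(dask.compute(*tasks))
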