# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): this chunk begins mid-function; load_as_type, OBJECTS_NAME,
# IMAGE_NAME, m, url, pathname, filename, series, index and channel are all
# defined outside the visible region.  Indentation reconstructed from the
# flattened source.
if load_as_type == cellprofiler.modules.namesandtypes.LOAD_AS_OBJECTS:
    # Objects are addressed through the C_OBJECTS_* measurement categories.
    url_feature = cellprofiler.measurement.C_OBJECTS_URL + "_" + OBJECTS_NAME
    path_feature = cellprofiler.measurement.C_OBJECTS_PATH_NAME + "_" + OBJECTS_NAME
    file_feature = cellprofiler.measurement.C_OBJECTS_FILE_NAME + "_" + OBJECTS_NAME
    series_feature = cellprofiler.measurement.C_OBJECTS_SERIES + "_" + OBJECTS_NAME
    frame_feature = cellprofiler.measurement.C_OBJECTS_FRAME + "_" + OBJECTS_NAME
    channel_feature = (
        cellprofiler.measurement.C_OBJECTS_CHANNEL + "_" + OBJECTS_NAME
    )
    names = javabridge.make_list([OBJECTS_NAME])
else:
    # Images use the plain C_* measurement categories.
    url_feature = cellprofiler.measurement.C_URL + "_" + IMAGE_NAME
    path_feature = cellprofiler.measurement.C_PATH_NAME + "_" + IMAGE_NAME
    file_feature = cellprofiler.measurement.C_FILE_NAME + "_" + IMAGE_NAME
    series_feature = cellprofiler.measurement.C_SERIES + "_" + IMAGE_NAME
    frame_feature = cellprofiler.measurement.C_FRAME + "_" + IMAGE_NAME
    channel_feature = cellprofiler.measurement.C_CHANNEL + "_" + IMAGE_NAME
    names = javabridge.make_list([IMAGE_NAME])
# Record the location measurements on image set 1; series/frame/channel are
# optional and only recorded when a value was supplied.
m.image_set_number = 1
m.add_measurement(cellprofiler.measurement.IMAGE, url_feature, url)
m.add_measurement(cellprofiler.measurement.IMAGE, path_feature, pathname)
m.add_measurement(cellprofiler.measurement.IMAGE, file_feature, filename)
if series is not None:
    m.add_measurement(cellprofiler.measurement.IMAGE, series_feature, series)
if index is not None:
    m.add_measurement(cellprofiler.measurement.IMAGE, frame_feature, index)
if channel is not None:
    m.add_measurement(cellprofiler.measurement.IMAGE, channel_feature, channel)
m.add_measurement(
    cellprofiler.measurement.IMAGE, cellprofiler.measurement.GROUP_NUMBER, 1
)
# NOTE(review): chunk from a make_workspace-style test helper (it ends with a
# bare return); mdict, dose_measurements and module are defined outside the
# visible region.  Indentation reconstructed from the flattened source.
pipeline = cellprofiler.pipeline.Pipeline()
pipeline.add_module(module)
m = cellprofiler.measurement.Measurements()
nimages = None
for object_name in list(mdict.keys()):
    odict = mdict[object_name]
    for feature in list(odict.keys()):
        m.add_all_measurements(object_name, feature, odict[feature])
        if nimages is None:
            nimages = len(odict[feature])
        else:
            # Every feature vector must cover the same number of image sets.
            assert nimages == len(odict[feature])
        if (
            object_name == cellprofiler.measurement.IMAGE
            and feature in dose_measurements
        ):
            # Reuse the module's existing dose-value slot for the first dose
            # measurement, adding new slots afterwards.
            if len(module.dose_values) > 1:
                module.add_dose_value()
            dv = module.dose_values[-1]
            dv.measurement.value = feature
m.image_set_number = nimages
image_set_list = cellprofiler.image.ImageSetList()
for i in range(nimages):
    image_set = image_set_list.get_image_set(i)
# NOTE(review): the workspace is built with the image_set left over from the
# loop above -- reconstructed structure; confirm against the original file.
workspace = cellprofiler.workspace.Workspace(
    pipeline, module, image_set, cellprofiler.object.ObjectSet(), m, image_set_list
)
return workspace, module
# NOTE(review): chunk begins mid-test; expected, guide_labels, gridding,
# diameter, x_locations and y_locations come from outside the visible region.
# Remove the highest-numbered label from both the expected and guide images.
expected[expected == numpy.max(guide_labels)] = 0
guide_labels[guide_labels == numpy.max(guide_labels)] = 0
workspace, module = make_workspace(gridding, guide_labels)
assert isinstance(
    module, cellprofiler.modules.identifyobjectsingrid.IdentifyObjectsInGrid
)
# Configure a manually-sized, natural-circle shape and run the module.
module.diameter_choice.value = cellprofiler.modules.identifyobjectsingrid.AM_MANUAL
module.diameter.value = diameter
module.shape_choice.value = (
    cellprofiler.modules.identifyobjectsingrid.SHAPE_CIRCLE_NATURAL
)
module.run(workspace)
labels = workspace.object_set.get_objects(OUTPUT_OBJECTS_NAME).segmented
assert numpy.all(labels == expected[0 : labels.shape[0], 0 : labels.shape[1]])
m = workspace.measurements
assert isinstance(m, cellprofiler.measurement.Measurements)
# The removed object's centre is expected to be recorded as NaN.
xm = m.get_current_measurement(OUTPUT_OBJECTS_NAME, "Location_Center_X")
assert len(xm) == 96
assert numpy.all(xm[:-1] == x_locations[1:-1])
assert numpy.isnan(xm[-1])
ym = m.get_current_measurement(OUTPUT_OBJECTS_NAME, "Location_Center_Y")
assert numpy.all(ym[:-1] == y_locations[1:-1])
count = m.get_current_image_measurement("Count_%s" % OUTPUT_OBJECTS_NAME)
assert count == gridding.rows * gridding.columns
def test_by_order():
    """Exercise NamesAndTypes order-based matching of two rule-defined channels."""
    n = cellprofiler.modules.namesandtypes.NamesAndTypes()
    n.assignment_method.value = cellprofiler.modules.namesandtypes.ASSIGN_RULES
    n.matching_choice.value = cellprofiler.modules.namesandtypes.MATCH_BY_ORDER
    # One assignment exists by default; add the second channel's slot.
    n.add_assignment()
    n.assignments[0].image_name.value = C0
    n.assignments[1].image_name.value = C1
    n.assignments[0].rule_filter.value = 'file does contain "%s"' % C0
    n.assignments[1].rule_filter.value = 'file does contain "%s"' % C1
    # Two files per channel, named "<channel><index>", with metadata from md().
    data = {
        C0: [("%s%d" % (C0, i + 1), m) for i, m in enumerate(md([(M0, 2)]))],
        C1: [("%s%d" % (C1, i + 1), m) for i, m in enumerate(md([(M1, 2)]))],
    }
    # NOTE(review): do_teest (sic) is a helper defined outside the visible region.
    do_teest(n, data, [(cellprofiler.measurement.IMAGE_NUMBER,)], [(C0, M0), (C1, M1)])
# NOTE(review): chunk begins mid-test; image_set, object_set, image_set_list,
# pixel_data, expected_mask and labels come from outside the visible region.
pipeline = cellprofiler.pipeline.Pipeline()
module = cellprofiler.modules.maskimage.MaskImage()
# Mask IMAGE_NAME by OBJECTS_NAME without inverting the mask.
module.source_choice.value = cellprofiler.modules.maskimage.IO_OBJECTS
module.object_name.value = OBJECTS_NAME
module.image_name.value = IMAGE_NAME
module.masked_image_name.value = MASKED_IMAGE_NAME
module.invert_mask.value = False
module.set_module_num(1)
workspace = cellprofiler.workspace.Workspace(
    pipeline,
    module,
    image_set,
    object_set,
    cellprofiler.measurement.Measurements(),
    image_set_list,
)
module.run(workspace)
masked_image = workspace.image_set.get_image(MASKED_IMAGE_NAME)
assert isinstance(masked_image, cellprofiler.image.Image)
# Pixels inside the mask are preserved; pixels outside are zeroed.
assert np.all(masked_image.pixel_data[expected_mask] == pixel_data[expected_mask])
assert np.all(masked_image.pixel_data[~expected_mask] == 0)
assert np.all(masked_image.mask == expected_mask)
assert np.all(masked_image.masking_objects.segmented == labels)
def save_filename_measurements(self, workspace):
    """Record the saved image's file name, path and URL as image measurements.

    Does nothing unless the user asked for file names to be written back
    (self.update_file_names).  Existing values are overwritten.
    """
    if not self.update_file_names.value:
        return
    target = self.get_filename(
        workspace, make_dirs=False, check_overwrite=False
    )
    directory, basename = os.path.split(target)
    target_url = cellprofiler.modules.loadimages.pathname2url(target)
    # Write the three location features in the same order as before:
    # file name, then path, then URL.
    for feature, value in (
        (self.file_name_feature, basename),
        (self.path_name_feature, directory),
        (self.url_feature, target_url),
    ):
        workspace.measurements.add_measurement(
            cellprofiler.measurement.IMAGE,
            feature,
            value,
            can_overwrite=True,
        )
# NOTE(review): chunk from an analysis worker's job loop; self, cpmeas,
# all_measurements, job_measurements, job and current_pipeline come from
# outside the visible region.  Indentation reconstructed.
logger.debug("Getting initial measurements")
# Fetch the initial measurements from the cache, requesting them from the
# server the first time this analysis id is seen.
current_measurements = self.initial_measurements.get(
    self.current_analysis_id
)
if current_measurements is None:
    logger.debug("Sending initial measurements request")
    rep = self.send(InitialMeasurementsRequest(self.current_analysis_id))
    logger.debug("Got initial measurements")
    # Decode and cache the measurements for subsequent jobs.
    current_measurements = self.initial_measurements[
        self.current_analysis_id
    ] = cpmeas.load_measurements_from_buffer(rep.buf)
else:
    logger.debug("Has initial measurements")
# Make a copy of the measurements for writing during this job
current_measurements = cpmeas.Measurements(copy=current_measurements)
all_measurements.add(current_measurements)
job_measurements.append(current_measurements)
successful_image_set_numbers = []
image_set_numbers = job.image_set_numbers
worker_runs_post_group = job.worker_runs_post_group
logger.info("Doing job: " + ",".join(map(str, image_set_numbers)))
self.pipeline_listener.image_set_number = image_set_numbers[0]
if not worker_runs_post_group:
    # Get the shared state from the first imageset in this run.
    shared_dicts = self.send(
        SharedDictionaryRequest(self.current_analysis_id)
    ).dictionaries
    assert len(shared_dicts) == len(current_pipeline.modules())
def prepare_run(self, workspace):
    """Initialize graph files"""
    # Skip entirely when the user did not request skeleton-graph output.
    if not self.wants_objskeleton_graph:
        return True
    edge_files = set()
    vertex_files = set()
    m = workspace.measurements
    assert isinstance(m, cpmeas.Measurements)
    # Collect the edge/vertex file paths for every image set and warn the
    # user before overwriting any file that already exists.
    for image_number in m.get_image_numbers():
        edge_path, vertex_path = self.get_graph_file_paths(m, image_number)
        edge_files.add(edge_path)
        vertex_files.add(vertex_path)
        for file_path, header in (
            (edge_path, self.edge_file_columns),
            (vertex_path, self.vertex_file_columns),
        ):
            if os.path.exists(file_path):
                # Imported lazily: wx is only needed when a GUI is present.
                import wx
                if (
                    wx.MessageBox(
                        "%s already exists. Do you want to overwrite it?" % file_path,
                        "Warning: overwriting file",
                        # NOTE(review): the source chunk is truncated here; the
                        # remainder of this call and of the method is not visible.
                        """
# NOTE(review): chunk from a CreateBatchFiles-style batch-save routine; self,
# workspace, outf, F_BATCH_DATA_H5 and the cp* aliases come from outside the
# visible region.  Indentation reconstructed.
if outf is None:
    # Resolve the target directory: default output dir or the custom choice.
    if self.wants_default_output_directory.value:
        path = cpprefs.get_default_output_directory()
    else:
        path = cpprefs.get_absolute_path(self.custom_output_directory.value)
    h5_path = os.path.join(path, F_BATCH_DATA_H5)
else:
    h5_path = outf
image_set_list = workspace.image_set_list
pipeline = workspace.pipeline
# Copy the measurements directly into the batch HDF5 file.
m = cpmeas.Measurements(copy=workspace.measurements, filename=h5_path)
try:
    assert isinstance(pipeline, cpp.Pipeline)
    assert isinstance(m, cpmeas.Measurements)
    orig_pipeline = pipeline
    pipeline = pipeline.copy()
    # this use of workspace.frame is okay, since we're called from
    # prepare_run which happens in the main wx thread.
    target_workspace = cpw.Workspace(
        pipeline, None, None, None, m, image_set_list, workspace.frame
    )
    pipeline.prepare_to_create_batch(target_workspace, self.alter_path)
    # Rewrite this module's copy in the batch pipeline so it records the
    # CellProfiler version and the translated output path.
    bizarro_self = pipeline.module(self.module_num)
    bizarro_self.revision.value = int(
        re.sub(r"\.|rc\d{1}", "", cellprofiler.__version__)
    )
    # NOTE(review): unlike the check above, this tests the Setting object
    # itself rather than its .value -- looks like a bug; confirm upstream.
    if self.wants_default_output_directory:
        bizarro_self.custom_output_directory.value = self.alter_path(
            cpprefs.get_default_output_directory()
            # NOTE(review): source chunk truncated here; the closing parens and
            # the rest of the try block are not visible.
# NOTE(review): chunk from a grouping-preview UI updater; self, m (a
# cpmeas.Measurements) and np come from outside the visible region.
self.image_set_list.clear_columns()
self.image_set_list.clear_rows()
# Metadata keys chosen by the user, skipping the "None" placeholder entry.
metadata_key_names = [
    group.metadata_choice.value
    for group in self.grouping_metadata
    if group.metadata_choice.value != "None"
]
metadata_feature_names = [
    "_".join((cpmeas.C_METADATA, key)) for key in metadata_key_names
]
# Strip the "Metadata_" prefix back off to normalize the key names.
metadata_key_names = [
    x[(len(cpmeas.C_METADATA) + 1) :] for x in metadata_feature_names
]
image_set_feature_names = [
    cpmeas.GROUP_NUMBER,
    cpmeas.GROUP_INDEX,
] + metadata_feature_names
self.image_set_list.insert_column(0, "Group number")
self.image_set_list.insert_column(1, "Group index")
# One column per metadata key in both lists; the image-set list offsets past
# its two fixed columns.  The grouping list gets a trailing Count column.
for i, key in enumerate(metadata_key_names):
    for l, offset in ((self.grouping_list, 0), (self.image_set_list, 2)):
        l.insert_column(i + offset, "Group: %s" % key)
self.grouping_list.insert_column(len(metadata_key_names), "Count")
image_numbers = m.get_image_numbers()
group_indexes = m[cpmeas.IMAGE, cpmeas.GROUP_INDEX, image_numbers][:]
group_numbers = m[cpmeas.IMAGE, cpmeas.GROUP_NUMBER, image_numbers][:]
# Per-group image counts and the position of each group's first image set.
counts = np.bincount(group_numbers)
first_indexes = np.argwhere(group_indexes == 1).flatten()
# NOTE(review): source chunk truncated mid-statement below.
group_keys = [