# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# --- Peak picking over SRM/MRM chromatograms ---------------------------------
# Configure the peak picker from the CLI options, load the chromatograms and
# the TraML transition list, run the picking algorithm and store the features.
# NOTE(review): `pp`, `options`, `algorithm`, `chromat_in`, `traml_in` and
# `out` are defined elsewhere in this script — confirm against the full file.
# (Stray C-style trailing semicolons removed; no behavior change.)
pp_params = pp.getDefaults()
pp_params.setValue("PeakPickerMRM:remove_overlapping_peaks", options.remove_overlapping_peaks, '')
pp_params.setValue("PeakPickerMRM:method", options.method, '')
pp.setParameters(pp_params)

# Load the chromatogram experiment and the targeted transition list (TraML).
chromatograms = pyopenms.MSExperiment()
fh = pyopenms.FileHandler()
fh.loadExperiment(chromat_in, chromatograms)
targeted = pyopenms.TargetedExperiment()
tramlfile = pyopenms.TraMLFile()
tramlfile.load(traml_in, targeted)

# Run the algorithm and persist the resulting feature map as featureXML.
output = algorithm(chromatograms, targeted, pp)
pyopenms.FeatureXMLFile().store(out, output)
# NOTE(review): this fragment looks like the tail of a "determine reference
# map" loop — the identical statements appear further down in this file inside
# a `for i, in_f in enumerate(in_files):` loop — but its `for` header and
# indentation were lost in this paste. Confirm against the original script
# before relying on it; left byte-identical here.
mse.updateRanges()  # refresh cached ranges so getSize() reflects loaded data
sizes.append((mse.getSize(), in_f))  # remember (peak count, filename) pair
plog.setProgress(i)
plog.endProgress()
# Pick the input file with the most peaks as the alignment reference.
__, file_ = max(sizes)
f_fmxl = pms.FeatureXMLFile()
# When no per-file outputs are requested, skip loading convex hulls and
# subordinate features — only the feature positions are needed downstream.
# NOTE(review): indentation was lost in this snippet; the four lines below are
# restored as the body of the `if`, matching the standard pyOpenMS
# map-alignment example — confirm against the original script.
if not out_files:
    options = f_fmxl.getOptions()
    options.setLoadConvexHull(False)
    options.setLoadSubordinates(False)
    f_fmxl.setOptions(options)
# Load the reference map chosen earlier (`file_`) and hand it to the aligner:
# a FeatureMap when aligning featureXML inputs, otherwise a raw MSExperiment.
# NOTE(review): indentation restored to match the standard pyOpenMS
# map-alignment example — confirm against the original script.
if align_features:
    map_ref = pms.FeatureMap()
    f_fxml_tmp = pms.FeatureXMLFile()
    # Skip convex hulls / subordinates: only positions matter for alignment.
    options = f_fmxl.getOptions()
    options.setLoadConvexHull(False)
    options.setLoadSubordinates(False)
    f_fxml_tmp.setOptions(options)
    f_fxml_tmp.load(file_, map_ref)
    algorithm.setReference(map_ref)
else:
    map_ref = pms.MSExperiment()
    pms.MzMLFile().load(file_, map_ref)
    algorithm.setReference(map_ref)
plog.startProgress(0, len(in_files), "Align input maps")
# NOTE(review): this fragment is truncated — the loop body clearly continues
# past the visible snippet. Indentation restored per the pyOpenMS
# map-alignment example; confirm the remainder against the original script.
for i, in_file in enumerate(in_files):
    trafo = pms.TransformationDescription()
    if align_features:
        map_ = pms.FeatureMap()
# Decide whether we are linking features (featureXML inputs) or consensus
# maps (consensusXML inputs); a mix of input types is rejected outright.
# NOTE(review): indentation restored per the pyOpenMS feature-grouping
# example — confirm against the original script.
if in_types == set((pms.Type.CONSENSUSXML,)):
    link_features = False
elif in_types == set((pms.Type.FEATUREXML,)):
    link_features = True
else:
    raise Exception("different kinds of input files")

# Configure the QT feature-grouping algorithm from the "algorithm:" subtree
# of the tool parameters.
algorithm_parameters = params.copy("algorithm:", True)
algorithm = pms.FeatureGroupingAlgorithmQT()
algorithm.setParameters(algorithm_parameters)
out_map = pms.ConsensusMap()
fds = out_map.getColumnHeaders()
# NOTE(review): indentation restored per the pyOpenMS feature-grouping
# example — confirm against the original script.
if link_features:
    f = pms.FeatureXMLFile()
    maps = []
    for i, in_file in enumerate(in_files):
        map_ = pms.FeatureMap()
        f.load(in_file, map_)
        # Record per-input column metadata (filename, size, unique id) so the
        # consensus map can trace each column back to its source file.
        fd = fds.get(i, pms.ColumnHeader())
        fd.filename = in_file
        fd.size = map_.size()
        fd.unique_id = map_.getUniqueId()
        fds[i] = fd
        maps.append(map_)
    out_map.setColumnHeaders(fds)
    algorithm.group(maps, out_map)
else:
    f = pms.ConsensusXMLFile()
# Annotate quantitative data (consensusXML / featureXML / mzQuantML) with
# peptide and protein identifications loaded from an idXML file.
pms.IdXMLFile().load(id_file, protein_ids, peptide_ids)
mapper = pms.IDMapper()
mapper.setParameters(params)
# NOTE(review): indentation restored per the pyOpenMS id-mapper example —
# confirm against the original script.
if in_type == pms.Type.CONSENSUSXML:
    file_ = pms.ConsensusXMLFile()
    map_ = pms.ConsensusMap()
    file_.load(in_file, map_)
    mapper.annotate(map_, peptide_ids, protein_ids, use_subelements)
    addDataProcessing(map_, params, pms.ProcessingAction.IDENTIFICATION_MAPPING)
    file_.store(out_file, map_)
elif in_type == pms.Type.FEATUREXML:
    file_ = pms.FeatureXMLFile()
    map_ = pms.FeatureMap()
    file_.load(in_file, map_)
    mapper.annotate(map_, peptide_ids, protein_ids, use_centroid_rt,
                    use_centroid_mz)
    addDataProcessing(map_, params, pms.ProcessingAction.IDENTIFICATION_MAPPING)
    file_.store(out_file, map_)
elif in_type == pms.Type.MZQ:
    file_ = pms.MzQuantMLFile()
    msq = pms.MSQuantifications()
    file_.load(in_file, msq)
    maps = msq.getConsensusMaps()
    for map_ in maps:
        mapper.annotate(map_, peptide_ids, protein_ids, use_subelements)
        addDataProcessing(map_, params,
                          pms.ProcessingAction.IDENTIFICATION_MAPPING)
    msq.setConsensusMaps(maps)
def parse_featureXML_GT(feature_file):
    """Load a featureXML file and return its convex-hull bounding boxes.

    Each row of the returned DataFrame describes one convex hull of one
    feature: the RT/mz bounding box of its hull points, plus bookkeeping
    columns ``detected`` (initialised to False) and ``pic_id`` (-1).

    NOTE(review): indentation of this function body was lost in the paste and
    has been restored. The original grew the DataFrame one row at a time via
    ``hulls.loc[len(hulls)]`` (quadratic); rows are now collected in a list
    and the DataFrame is built once.
    """
    featuremap = pyopenms.FeatureMap()
    featurexml = pyopenms.FeatureXMLFile()
    featurexml.load(feature_file, featuremap)
    rows = []
    for i in range(featuremap.size()):
        feature = featuremap[i]
        chs = feature.getConvexHulls()
        for j in range(len(chs)):
            # Hull points form an (n, 2) array; min/max along axis 0 give the
            # bounding box — assumes columns are (rt, mz), TODO confirm.
            pts = chs[j].getHullPoints()
            rows.append([pts.min(0)[0], pts.max(0)[0],
                         pts.min(0)[1], pts.max(0)[1], False, -1])
    return pd.DataFrame(
        rows,
        columns=['rt_min', 'rt_max', 'mz_min', 'mz_max', 'detected', 'pic_id'])
# Set up the pose-clustering map aligner and pick the reference map: an
# explicit file, an explicit 1-based index, or — failing both — the largest
# input (most features / most peaks).
# NOTE(review): indentation restored per the pyOpenMS map-alignment example —
# confirm against the original script.
algorithm = pms.MapAlignmentAlgorithmPoseClustering()
alignment_params = params.copy("algorithm:", True)
algorithm.setParameters(alignment_params)
algorithm.setLogType(pms.LogType.CMD)

plog = pms.ProgressLogger()
plog.setLogType(pms.LogType.CMD)

if reference_file:
    file_ = reference_file
elif reference_index > 0:
    # reference_index is 1-based on the command line.
    file_ = in_files[reference_index - 1]
else:
    sizes = []
    if align_features:
        fh = pms.FeatureXMLFile()
        plog.startProgress(0, len(in_files), "Determine Reference map")
        for i, in_f in enumerate(in_files):
            # loadSize avoids parsing the whole featureXML file.
            sizes.append((fh.loadSize(in_f), in_f))
            plog.setProgress(i)
    else:
        fh = pms.MzMLFile()
        mse = pms.MSExperiment()
        plog.startProgress(0, len(in_files), "Determine Reference map")
        for i, in_f in enumerate(in_files):
            fh.load(in_f, mse)
            mse.updateRanges()
            sizes.append((mse.getSize(), in_f))
            plog.setProgress(i)
    plog.endProgress()
    # Largest (size, filename) tuple wins; its filename becomes the reference.
    __, file_ = max(sizes)
# Annotate quantitative data (consensusXML / featureXML / mzQuantML) with
# peptide and protein identifications loaded from an idXML file.
# NOTE(review): this span duplicates an earlier fragment in this paste;
# indentation restored per the pyOpenMS id-mapper example — confirm against
# the original script.
pms.IdXMLFile().load(id_file, protein_ids, peptide_ids)
mapper = pms.IDMapper()
mapper.setParameters(params)
if in_type == pms.Type.CONSENSUSXML:
    file_ = pms.ConsensusXMLFile()
    map_ = pms.ConsensusMap()
    file_.load(in_file, map_)
    mapper.annotate(map_, peptide_ids, protein_ids, use_subelements)
    addDataProcessing(map_, params, pms.ProcessingAction.IDENTIFICATION_MAPPING)
    file_.store(out_file, map_)
elif in_type == pms.Type.FEATUREXML:
    file_ = pms.FeatureXMLFile()
    map_ = pms.FeatureMap()
    file_.load(in_file, map_)
    mapper.annotate(map_, peptide_ids, protein_ids, use_centroid_rt,
                    use_centroid_mz)
    addDataProcessing(map_, params, pms.ProcessingAction.IDENTIFICATION_MAPPING)
    file_.store(out_file, map_)
elif in_type == pms.Type.MZQ:
    file_ = pms.MzQuantMLFile()
    msq = pms.MSQuantifications()
    file_.load(in_file, msq)
    maps = msq.getConsensusMaps()
    for map_ in maps:
        mapper.annotate(map_, peptide_ids, protein_ids, use_subelements)
        addDataProcessing(map_, params,
                          pms.ProcessingAction.IDENTIFICATION_MAPPING)
    msq.setConsensusMaps(maps)