from collections import Counter

import pyopenms as pms

# addDataProcessing is a small helper shipped alongside these pyTOPP example
# scripts; it records the given ProcessingAction in the map's meta data.
from common import addDataProcessing


def id_mapper(in_file, id_file, out_file, params, use_centroid_rt,
              use_centroid_mz, use_subelements):

    in_type = pms.FileHandler.getType(in_file)

    # Load the peptide/protein identifications that should be mapped.
    protein_ids = []
    peptide_ids = []
    pms.IdXMLFile().load(id_file, protein_ids, peptide_ids)

    mapper = pms.IDMapper()
    mapper.setParameters(params)

    if in_type == pms.Type.CONSENSUSXML:
        file_ = pms.ConsensusXMLFile()
        map_ = pms.ConsensusMap()
        file_.load(in_file, map_)
        mapper.annotate(map_, peptide_ids, protein_ids, use_subelements)
        addDataProcessing(map_, params,
                          pms.ProcessingAction.IDENTIFICATION_MAPPING)
        file_.store(out_file, map_)

    elif in_type == pms.Type.FEATUREXML:
        file_ = pms.FeatureXMLFile()
        map_ = pms.FeatureMap()
        file_.load(in_file, map_)
        mapper.annotate(map_, peptide_ids, protein_ids, use_centroid_rt,
                        use_centroid_mz)
        addDataProcessing(map_, params,
                          pms.ProcessingAction.IDENTIFICATION_MAPPING)
        file_.store(out_file, map_)
    elif in_type == pms.Type.MZQ:
        # mzQuantML input: annotate each embedded consensus map (assumes the
        # MzQuantMLFile/MSQuantifications wrappers available in pyopenms).
        file_ = pms.MzQuantMLFile()
        msq = pms.MSQuantifications()
        file_.load(in_file, msq)
        maps = msq.getConsensusMaps()
        for map_ in maps:
            mapper.annotate(map_, peptide_ids, protein_ids, use_subelements)
            addDataProcessing(map_, params,
                              pms.ProcessingAction.IDENTIFICATION_MAPPING)
        msq.setConsensusMaps(maps)
        file_.store(out_file, msq)
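A minimal call might look like the following sketch (the file names are placeholders, and the parameters are simply IDMapper's defaults rather than tuned values):

params = pms.IDMapper().getDefaults()
id_mapper("input.featureXML", "ids.idXML", "annotated.featureXML",
          params, use_centroid_rt=False, use_centroid_mz=False,
          use_subelements=False)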

def link(in_files, out_file, keep_subelements, params):
    # All inputs must share one type: featureXML or consensusXML.
    in_types = set(pms.FileHandler.getType(in_) for in_ in in_files)
    if in_types == set((pms.Type.FEATUREXML,)):
        link_features = True
    elif in_types == set((pms.Type.CONSENSUSXML,)):
        link_features = False
    else:
        raise Exception("all input files must share one type")

    algorithm = pms.FeatureGroupingAlgorithmQT()
    algorithm.setParameters(params.copy("algorithm:", True))

    out_map = pms.ConsensusMap()
    fds = out_map.getColumnHeaders()

    if link_features:
        f = pms.FeatureXMLFile()
        maps = []
        for i, in_file in enumerate(in_files):
            map_ = pms.FeatureMap()
            f.load(in_file, map_)
            # set filedescriptions
            fd = fds.get(i, pms.ColumnHeader())
            fd.filename = in_file
            fd.size = map_.size()
            fd.unique_id = map_.getUniqueId()
            fds[i] = fd
            maps.append(map_)
        out_map.setColumnHeaders(fds)
        algorithm.group(maps, out_map)
    else:
        f = pms.ConsensusXMLFile()
        maps = []
        for i, in_file in enumerate(in_files):
            map_ = pms.ConsensusMap()
            f.load(in_file, map_)
            maps.append(map_)
        algorithm.group(maps, out_map)
        if not keep_subelements:
            for i in range(len(in_files)):
                # set filedescriptions
                fd = fds.get(i, pms.ColumnHeader())
                fd.filename = in_files[i]
                fd.size = maps[i].size()
                fd.unique_id = maps[i].getUniqueId()
                fds[i] = fd
            out_map.setColumnHeaders(fds)
        else:
            # keep the original consensus features as subelements
            algorithm.transferSubelements(maps, out_map)

    out_map.setUniqueIds()
    addDataProcessing(out_map, params, pms.ProcessingAction.FEATURE_GROUPING)
    pms.ConsensusXMLFile().store(out_file, out_map)

    # Report the size distribution of the resulting consensus features.
    sizes = [feat.size() for feat in out_map]
    c = Counter(sizes)
    print("Number of consensus features:")
    for size, count in c.most_common():
        print("   of size %2d : %6d" % (size, count))
    print("        total : %6d" % out_map.size())