2 changes: 1 addition & 1 deletion docs/source/user_guide/PSM_to_features.rst
@@ -45,7 +45,7 @@ Next, load the PeptideIdentifications from an `.idXML` file:

.. code-block:: python

- peptide_ids = []
+ peptide_ids = oms.PeptideIdentificationList()
protein_ids = []
oms.IdXMLFile().load(idxml_file, protein_ids, peptide_ids)

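For reference, a minimal sketch of how the updated loading pattern is used end to end; the file name `small.idXML` is purely illustrative, and iterating the returned container follows the same pattern shown in identification_data.rst further down:

.. code-block:: python

    import pyopenms as oms

    protein_ids = []
    peptide_ids = oms.PeptideIdentificationList()
    oms.IdXMLFile().load("small.idXML", protein_ids, peptide_ids)

    # the container iterates like a sequence of PeptideIdentification objects
    for peptide_id in peptide_ids:
        print("Peptide ID m/z:", peptide_id.getMZ())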
2 changes: 1 addition & 1 deletion docs/source/user_guide/export_files_GNPS.rst
@@ -47,7 +47,7 @@ from your :py:class:`~.ConsensusMap` that have no :term:`MS2` spectra annotated.
filtered_map = oms.ConsensusMap(consensus_map)
filtered_map.clear(False)
for feature in consensus_map:
- if feature.getPeptideIdentifications():
+ if feature.getPeptideIdentifications().size() > 0:
filtered_map.push_back(feature)

consensusXML_file = "filtered.consensusXML"
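The explicit `.size() > 0` check replaces plain truthiness, presumably because the returned :py:class:`~.PeptideIdentificationList` wrapper does not behave like an empty Python list. A condensed sketch of the whole filter-and-store step under that assumption, with `oms.ConsensusXMLFile` as the writer for the resulting `.consensusXML` file:

.. code-block:: python

    # keep only consensus features that carry at least one identification
    filtered_map = oms.ConsensusMap(consensus_map)  # copies the map-level meta data
    filtered_map.clear(False)                       # drops the features themselves
    for feature in consensus_map:
        if feature.getPeptideIdentifications().size() > 0:
            filtered_map.push_back(feature)

    oms.ConsensusXMLFile().store("filtered.consensusXML", filtered_map)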
2 changes: 1 addition & 1 deletion docs/source/user_guide/export_pandas_dataframe.rst
@@ -110,7 +110,7 @@ PeptideIdentification

urlretrieve(url + "small.idXML", "small.idXML")
prot_ids = []
- pep_ids = []
+ pep_ids = oms.PeptideIdentificationList()
oms.IdXMLFile().load("small.idXML", prot_ids, pep_ids)

df = oms.peptide_identifications_to_df(pep_ids)
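The conversion itself is unchanged; only the input container type differs. A quick sketch for inspecting the result, where the exact column set is best checked interactively rather than assumed:

.. code-block:: python

    df = oms.peptide_identifications_to_df(pep_ids)
    print(df.shape)    # roughly one row per peptide identification
    print(df.columns)  # inspect which fields were exported
    print(df.head())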
5 changes: 3 additions & 2 deletions docs/source/user_guide/identification_data.rst
@@ -161,7 +161,8 @@ We can now display the peptides we just stored:
:linenos:

# Iterate over PeptideIdentification
- peptide_ids = [peptide_id]
+ peptide_ids = oms.PeptideIdentificationList()
+ peptide_ids.push_back(peptide_id)
for peptide_id in peptide_ids:
# Peptide identification values
print("Peptide ID m/z:", peptide_id.getMZ())
@@ -193,7 +194,7 @@ discussed :ref:`anchor-other-id-data`) which we would do as follows:
oms.IdXMLFile().store("out.idXML", [protein_id], peptide_ids)
# and load it back into memory
prot_ids = []
- pep_ids = []
+ pep_ids = oms.PeptideIdentificationList()
oms.IdXMLFile().load("out.idXML", prot_ids, pep_ids)

# Iterate over all protein hits
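Taken together, the two hunks above amount to the following round trip; this condensed sketch assumes `peptide_id` and `protein_id` were built earlier in that guide:

.. code-block:: python

    # build the container explicitly instead of wrapping the object in a Python list
    peptide_ids = oms.PeptideIdentificationList()
    peptide_ids.push_back(peptide_id)

    # store together with the protein identification, then read everything back
    oms.IdXMLFile().store("out.idXML", [protein_id], peptide_ids)

    prot_ids = []
    pep_ids = oms.PeptideIdentificationList()
    oms.IdXMLFile().load("out.idXML", prot_ids, pep_ids)
    print(pep_ids.size(), "peptide identifications loaded")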
17 changes: 7 additions & 10 deletions docs/source/user_guide/interactive_plots.rst
@@ -35,7 +35,7 @@ interactively zoomed-in if you execute the code in a notebook
exp.updateRanges()
expandcols = ["RT", "mz", "inty"]
spectraarrs2d = exp.get2DPeakDataLong(
- exp.getMinRT(), exp.getMaxRT(), exp.getMinMZ(), exp.getMaxMZ()
+ exp.getMinRT(), exp.getMaxRT(), exp.getMinMZ(), exp.getMaxMZ(), 1
)
spectradf = pd.DataFrame(dict(zip(expandcols, spectraarrs2d)))
spectradf = spectradf.set_index(["RT", "mz"])
@@ -75,17 +75,14 @@ interactively zoomed-in if you execute the code in a notebook
min_alpha=0,
)
.opts(active_tools=["box_zoom"], tools=["hover"], hooks=[new_bounds_hook])
- .opts( # weird.. I have no idea why one has to do this. But with one opts you will get an error
-     plot=dict(
-         width=800,
-         height=800,
-         xlabel="Retention time (s)",
-         ylabel="mass/charge (Da)",
-     )
- )
)

- hd.dynspread(raster, threshold=0.7, how="add", shape="square")
+ hd.dynspread(raster, threshold=0.7, how="add", shape="square").opts(
+     width=800,
+     height=800,
+     xlabel="Retention time (s)",
+     ylabel="mass/charge (Da)",
+ )


Result:
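Two independent fixes in this file: `get2DPeakDataLong` gains a trailing argument (the `1` presumably selects the MS level to export; check the current API documentation), and the sizing options move onto the dynspread output as flat `.opts()` keywords, the current holoviews idiom, instead of the nested `plot=dict(...)` form. A condensed sketch of the data-extraction half, assuming `exp` is a loaded :py:class:`~.MSExperiment`:

.. code-block:: python

    import pandas as pd

    exp.updateRanges()
    cols = ["RT", "mz", "inty"]
    # the final argument is new in this change; assumed to restrict the export to MS level 1
    peaks2d = exp.get2DPeakDataLong(
        exp.getMinRT(), exp.getMaxRT(), exp.getMinMZ(), exp.getMaxMZ(), 1
    )
    spectradf = pd.DataFrame(dict(zip(cols, peaks2d))).set_index(["RT", "mz"])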
6 changes: 3 additions & 3 deletions docs/source/user_guide/other_ms_data_formats.rst
@@ -17,7 +17,7 @@ You can store and load identification data from an `idXML` file as follows:
gh = "https://raw.githubusercontent.com/OpenMS/pyopenms-docs/master"
urlretrieve(gh + "/src/data/IdXMLFile_whole.idXML", "test.idXML")
protein_ids = []
- peptide_ids = []
+ peptide_ids = oms.PeptideIdentificationList()
oms.IdXMLFile().load("test.idXML", protein_ids, peptide_ids)
oms.IdXMLFile().store("test.out.idXML", protein_ids, peptide_ids)

@@ -31,7 +31,7 @@ You can store and load identification data from an `mzIdentML` file as follows:
gh = "https://raw.githubusercontent.com/OpenMS/pyopenms-docs/master"
urlretrieve(gh + "/src/data/MzIdentML_3runs.mzid", "test.mzid")
protein_ids = []
- peptide_ids = []
+ peptide_ids = oms.PeptideIdentificationList()
oms.MzIdentMLFile().load("test.mzid", protein_ids, peptide_ids)
oms.MzIdentMLFile().store("test.out.mzid", protein_ids, peptide_ids)
.. # alternatively: -- don't do this, doesn't work
@@ -48,7 +48,7 @@ You can store and load identification data from a TPP `pepXML` file as follows:
gh = "https://raw.githubusercontent.com/OpenMS/pyopenms-docs/master"
urlretrieve(gh + "/src/data/PepXMLFile_test.pepxml", "test.pepxml")
protein_ids = []
- peptide_ids = []
+ peptide_ids = oms.PeptideIdentificationList()
oms.PepXMLFile().load("test.pepxml", protein_ids, peptide_ids)
oms.PepXMLFile().store("test.out.pepxml", protein_ids, peptide_ids)

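All three handlers share the same load/store signature, so the same container pair round-trips through any of them. A compact sketch, assuming the three test files above have already been downloaded:

.. code-block:: python

    for handler, path in [
        (oms.IdXMLFile(), "test.idXML"),
        (oms.MzIdentMLFile(), "test.mzid"),
        (oms.PepXMLFile(), "test.pepxml"),
    ]:
        protein_ids = []
        peptide_ids = oms.PeptideIdentificationList()
        handler.load(path, protein_ids, peptide_ids)
        print(path, "->", peptide_ids.size(), "peptide identifications")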
10 changes: 5 additions & 5 deletions docs/source/user_guide/peptide_search.rst
@@ -31,7 +31,7 @@ a fasta database of protein sequences:
urlretrieve(gh + "/src/data/SimpleSearchEngine_1.mzML", "searchfile.mzML")
urlretrieve(gh + "/src/data/SimpleSearchEngine_1.fasta", "search.fasta")
protein_ids = []
- peptide_ids = []
+ peptide_ids = oms.PeptideIdentificationList()
oms.SimpleSearchEngineAlgorithm().search(
"searchfile.mzML", "search.fasta", protein_ids, peptide_ids
)
@@ -143,9 +143,9 @@ ppm\ (\pm 2\ ppm)`, we expect that we will not find the hit at :math:`775.38` m/
salgo.setParameters(p)

protein_ids = []
- peptide_ids = []
+ peptide_ids = oms.PeptideIdentificationList()
salgo.search("searchfile.mzML", "search.fasta", protein_ids, peptide_ids)
print("Found", len(peptide_ids), "peptides")
print("Found", peptide_ids.size(), "peptides")

As we can see, using a smaller precursor mass tolerance leads the algorithm to
find only one hit instead of two. Similarly, if we use the wrong enzyme for
@@ -189,7 +189,7 @@ Now include some additional decoy database generation step as well as subsequent

# Run SimpleSearchAlgorithm, store protein and peptide ids
protein_ids = []
- peptide_ids = []
+ peptide_ids = oms.PeptideIdentificationList()

# set some custom search parameters
simplesearch = oms.SimpleSearchEngineAlgorithm()
@@ -224,7 +224,7 @@ This is done by applying one of the available protein inference algorithms on th
:linenos:

protein_ids = []
- peptide_ids = []
+ peptide_ids = oms.PeptideIdentificationList()

# Re-run search since we need to keep decoy hits for inference
simplesearch.search(searchfile, target_decoy_database, protein_ids, peptide_ids)
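Because the container is no longer a plain Python list, counting goes through `.size()`; iteration and the usual hit-level accessors are unaffected by this change. A brief sketch, where `getHits()`, `getSequence()` and `getScore()` are the standard PeptideIdentification/PeptideHit methods:

.. code-block:: python

    print("Found", peptide_ids.size(), "peptides")
    for peptide_id in peptide_ids:
        for hit in peptide_id.getHits():
            # each PeptideHit carries the matched sequence and its search score
            print(" -", hit.getSequence(), "score:", hit.getScore())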
2 changes: 1 addition & 1 deletion docs/source/user_guide/quality_control.rst
@@ -43,7 +43,7 @@ proteomics and metabolomics quality metrics.
oms.FeatureXMLFile().load("features.featureXML", feature_map)

prot_ids = [] # list of ProteinIdentification()
- pep_ids = [] # list of PeptideIdentification()
+ pep_ids = oms.PeptideIdentificationList() # list of PeptideIdentification()
# OPTIONAL: get protein and peptide identifications from idXML file
urlretrieve(gh + "/src/data/OpenPepXL_output.idXML", "ids.idXML")
oms.IdXMLFile().load("ids.idXML", prot_ids, pep_ids)
@@ -141,7 +141,7 @@ Map :term:`MS2` spectra to features as :py:class:`~.PeptideIdentification` objec
if feature_map.getMetaValue("spectra_data")[
0
].decode() == exp.getMetaValue("mzML_path"):
- peptide_ids = []
+ peptide_ids = oms.PeptideIdentificationList()
protein_ids = []
mapper.annotate(
feature_map,
@@ -161,10 +161,10 @@ Map :term:`MS2` spectra to features as :py:class:`~.PeptideIdentification` objec
prot_ids.append(prot_id)
fm_new.setProteinIdentifications(prot_ids)
for feature in feature_map:
- pep_ids = []
+ pep_ids = oms.PeptideIdentificationList()
for pep_id in feature.getPeptideIdentifications():
pep_id.setIdentifier(f"Identifier_{i}")
- pep_ids.append(pep_id)
+ pep_ids.push_back(pep_id)
feature.setPeptideIdentifications(pep_ids)
fm_new.push_back(feature)
feature_maps_mapped.append(fm_new)
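The per-feature rebuild follows the same pattern; the only behavioural difference with the new container is `push_back` instead of `append`. Note that the identifier written onto each PeptideIdentification should match the one set on the corresponding ProteinIdentification so the two stay linked; in this sketch the label string is purely illustrative:

.. code-block:: python

    run_label = f"Identifier_{i}"  # illustrative label shared by both sides

    prot_id.setIdentifier(run_label)
    prot_ids.append(prot_id)

    pep_ids = oms.PeptideIdentificationList()
    for pep_id in feature.getPeptideIdentifications():
        pep_id.setIdentifier(run_label)
        pep_ids.push_back(pep_id)
    feature.setPeptideIdentifications(pep_ids)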
1 change: 1 addition & 0 deletions requirements.txt
@@ -6,6 +6,7 @@ scikit-learn
tabulate
requests
bokeh
+ jupyter_bokeh
datashader
holoviews
pyviz_comms