From 98731ef604150ad2a609d7e8b7ec6bcc90e25674 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jochen=20Nie=C3=9Fer?= Date: Mon, 25 Nov 2024 18:47:14 +0100 Subject: [PATCH 1/5] fix typos, update bibliography --- docs/source/literature.bib | 24 +++++++++++++++++------- paper/literature.bib | 33 +++++++++++++++++---------------- paper/paper.md | 13 +++++-------- 3 files changed, 39 insertions(+), 31 deletions(-) diff --git a/docs/source/literature.bib b/docs/source/literature.bib index bf3dec7..bc524d3 100644 --- a/docs/source/literature.bib +++ b/docs/source/literature.bib @@ -82,11 +82,12 @@ @article{RN150 @book{RN162, author = {Kruschke, John K.}, title = {Doing Bayesian Data Analysis}, - edition = {1st Edition}, + edition = {Second Edition}, publisher={Academic Press}, isbn = {9780123814852}, - year = {2010}, - type = {Book} + year = {2015}, + type = {Book}, + doi = {http://dx.doi.org/10.1016/B978-0-12-405888-0.00001-5} } @article{RN144, @@ -96,7 +97,8 @@ @article{RN144 volume = {12}, pages = {171-178}, year = {1985}, - type = {Journal Article} + type = {Journal Article}, + url = {http://www.jstor.org/stable/4615982}, } @@ -107,7 +109,8 @@ @article{RN152 volume = {7}, number = {4}, year = {1992}, - type = {Journal Article} + type = {Journal Article}, + doi = {10.1214/ss/1177011136} } @article{RN153, @@ -176,7 +179,7 @@ @article{RN146 volume = {11}, pages = {3571-3594}, year = {2010}, - type = {Journal Article} + type = {Journal Article}, } @article{RN147, @@ -221,6 +224,7 @@ @article{vivo2012bayesian number={6}, pages={2622--2630}, year={2012}, + doi={https://doi.org/10.1021/ac202124t}, publisher={ACS Publications} } @@ -232,6 +236,7 @@ @article{woldegebriel2015probabilistic number={14}, pages={7345--7355}, year={2015}, + doi={https://doi.org/10.1021/acs.analchem.5b01521}, publisher={ACS Publications} } @@ -242,6 +247,7 @@ @article{briskot2019prediction volume={1587}, pages={101--110}, year={2019}, + doi={https://doi.org/10.1016/j.chroma.2018.11.076}, 
publisher={Elsevier} } @@ -252,6 +258,7 @@ @article{yamamoto2021uncertainty volume={175}, pages={223--237}, year={2021}, + doi={https://doi.org/10.1016/j.cherd.2021.09.003}, publisher={Elsevier} } @@ -263,6 +270,7 @@ @article{wiczling2016much number={1}, pages={997--1002}, year={2016}, + doi={https://doi.org/10.1021/acs.analchem.5b03859}, publisher={ACS Publications} } @@ -274,16 +282,18 @@ @article{kelly1971estimation number={10}, pages={1170--1183}, year={1971}, + doi={https://doi.org/10.1021/ac60304a011}, publisher={ACS Publications} } @article{kelly1971application, - title={Application of method of maximum posterior probability to estimation of gas-chromatographic peak parmeters}, + title={Application of method of maximum posterior probability to estimation of gas-chromatographic peak parameters}, author={Kelly, PC and Harris, WE}, journal={Analytical Chemistry}, volume={43}, number={10}, pages={1184--1195}, year={1971}, + doi={https://doi.org/10.1021/ac60304a005}, publisher={ACS Publications} } diff --git a/paper/literature.bib b/paper/literature.bib index c699eb8..c233db1 100644 --- a/paper/literature.bib +++ b/paper/literature.bib @@ -1,4 +1,4 @@ -@softmisc{nutpie, +@misc{nutpie, author = {Seyboldt, Adrian and {PyMC Developers}}, keywords = {Software}, license = {MIT}, @@ -44,7 +44,7 @@ @article{matplotlib year = 2007 } -@softmisc{matplotlibzenodo, +@misc{matplotlibzenodo, author = {{The Matplotlib Development Team}}, title = {Matplotlib: Visualization with Python}, keywords = {software}, @@ -62,7 +62,6 @@ @article{RN173 journal = {Journal of Machine Learning Research}, volume = {15}, year = {2014}, - type = {Journal Article} } @article{RN150, @@ -76,16 +75,17 @@ @article{RN150 doi = {10.7717/peerj-cs.1516}, url = {https://www.ncbi.nlm.nih.gov/pubmed/37705656}, year = {2023}, - type = {Journal Article} } @book{RN162, author = {Kruschke, John K.}, title = {Doing Bayesian Data Analysis}, - edition = {1st Edition}, + edition = {Second Edition}, + 
publisher={Academic Press}, isbn = {9780123814852}, - year = {2010}, - type = {Book} + year = {2015}, + type = {Book}, + doi = {http://dx.doi.org/10.1016/B978-0-12-405888-0.00001-5} } @article{RN144, @@ -95,7 +95,7 @@ @article{RN144 volume = {12}, pages = {171-178}, year = {1985}, - type = {Journal Article} + url = {http://www.jstor.org/stable/4615982}, } @@ -106,7 +106,7 @@ @article{RN152 volume = {7}, number = {4}, year = {1992}, - type = {Journal Article} + doi = {10.1214/ss/1177011136} } @article{RN153, @@ -121,7 +121,6 @@ @article{RN153 doi = {10.1021/ac60319a011}, url = {https://www.ncbi.nlm.nih.gov/pubmed/22324584}, year = {1972}, - type = {Journal Article} } @article{RN149, @@ -136,7 +135,6 @@ @article{RN149 doi = {10.1002/biot.201700141}, url = {https://www.ncbi.nlm.nih.gov/pubmed/29283217}, year = {2018}, - type = {Journal Article} } @article{RN148, @@ -151,7 +149,6 @@ @article{RN148 doi = {10.1002/1097-0290(20010205)72:3<346::aid-bit12>3.0.co;2-x}, url = {https://www.ncbi.nlm.nih.gov/pubmed/11135205}, year = {2001}, - type = {Journal Article} } @article{RN145, @@ -165,7 +162,6 @@ @article{RN145 1573-1375}, doi = {10.1007/s11222-016-9696-4}, year = {2016}, - type = {Journal Article} } @article{RN146, @@ -175,7 +171,6 @@ @article{RN146 volume = {11}, pages = {3571-3594}, year = {2010}, - type = {Journal Article} } @article{RN147, @@ -187,7 +182,6 @@ @article{RN147 issn = {2475-9066}, doi = {10.21105/joss.01143}, year = {2019}, - type = {Journal Article} } @article{harris2020array, @@ -220,6 +214,7 @@ @article{vivo2012bayesian number={6}, pages={2622--2630}, year={2012}, + doi={https://doi.org/10.1021/ac202124t}, publisher={ACS Publications} } @@ -231,6 +226,7 @@ @article{woldegebriel2015probabilistic number={14}, pages={7345--7355}, year={2015}, + doi={https://doi.org/10.1021/acs.analchem.5b01521}, publisher={ACS Publications} } @@ -241,6 +237,7 @@ @article{briskot2019prediction volume={1587}, pages={101--110}, year={2019}, + 
doi={https://doi.org/10.1016/j.chroma.2018.11.076}, publisher={Elsevier} } @@ -251,6 +248,7 @@ @article{yamamoto2021uncertainty volume={175}, pages={223--237}, year={2021}, + doi={https://doi.org/10.1016/j.cherd.2021.09.003}, publisher={Elsevier} } @@ -262,6 +260,7 @@ @article{wiczling2016much number={1}, pages={997--1002}, year={2016}, + doi={https://doi.org/10.1021/acs.analchem.5b03859}, publisher={ACS Publications} } @@ -273,16 +272,18 @@ @article{kelly1971estimation number={10}, pages={1170--1183}, year={1971}, + doi={https://doi.org/10.1021/ac60304a011}, publisher={ACS Publications} } @article{kelly1971application, - title={Application of method of maximum posterior probability to estimation of gas-chromatographic peak parmeters}, + title={Application of method of maximum posterior probability to estimation of gas-chromatographic peak parameters}, author={Kelly, PC and Harris, WE}, journal={Analytical Chemistry}, volume={43}, number={10}, pages={1184--1195}, year={1971}, + doi={https://doi.org/10.1021/ac60304a005}, publisher={ACS Publications} } diff --git a/paper/paper.md b/paper/paper.md index adf6c88..1dae8ec 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -56,8 +56,8 @@ Since this is a time-consuming, not to mention tedious, procedure and introduces The advantage of this approach is the complete integration of all relevant parameters – i.e. baseline, peak area and height, mean, signal-to-noise ratio etc. – into one single model through which all parameters are estimated simultaneously. Furthermore, Bayesian inference comes with uncertainty quantification for all peak model parameters, and thus does not merely yield a point estimate as would commonly be the case. It also grants access to novel metrics for avoiding false positives and negatives by rejecting signals where a) a convergence criterion of the peak fitting procedure was not fulfilled or b) the uncertainty of the estimated parameters exceeded a user-defined threshold. 
-By employing peak fitting to uncover peak parameters – most importantly the area –, this approach thus differs from recent applications of Bayesian statistics to chromatographic peak data which e.g. focussed on peak detection [@vivo2012bayesian; @woldegebriel2015probabilistic], method optimization [@wiczling2016much] and simulations of chromatography [@briskot2019prediction; @yamamoto2021uncertainty]. -The first studies to be published about this topic contain perhaps the technique most similar in spirit to the present one since functions made of an idealized peak shape and a noise term are fitted but beyond this common starting point the methodolody is quiet distinct [@kelly1971estimation; @kelly1971application]. +By employing peak fitting to uncover peak parameters – most importantly the area – this approach thus differs from recent applications of Bayesian statistics to chromatographic peak data which e.g. focussed on peak detection [@vivo2012bayesian; @woldegebriel2015probabilistic], method optimization [@wiczling2016much] and simulations of chromatography [@briskot2019prediction; @yamamoto2021uncertainty]. +The first studies to be published about this topic contain perhaps the technique most similar in spirit to the present one since functions made of an idealized peak shape and a noise term are fitted but beyond this common starting point the methodology is quite distinct [@kelly1971estimation; @kelly1971application]. # Materials and Methods ## Implementation @@ -77,22 +77,19 @@ Since the inference data is stored alongside graphs and report sheets, users may $\texttt{PeakPerformance}$ accommodates the use of a pre-manufactured data pipeline for standard applications (Fig. 1) as well as the creation of custom data pipelines using only its core functions. The provided data analysis pipeline was designed in a user-friendly way and is covered by multiple example notebooks. 
-Before using $\texttt{PeakPerformance}$, the user has to supply raw data files containing a NumPy array with time in the first and intensity in the second dimension for each peak according as described in detail in the documentation. +Before using $\texttt{PeakPerformance}$, the user has to supply raw data files containing a NumPy array with time in the first and intensity in the second dimension for each peak as described in detail in the documentation. Using the $\texttt{prepare\_model\_selection()}$ method, an Excel template file ("Template.xlsx") for inputting user information is prepared and stored in the raw data directory. Since targeted LC-MS/MS analyses essentially cycle through a list of mass traces for every sample, a model type has to be assigned to each mass trace. If this is not done by the user, an optional automated model selection step will be performed, where one exemplary peak per mass trace is analyzed with all models to identify the most appropriate one. -The automated model selection can be started using the $\texttt{model\_selection()}$ function from the pipeline module and will be performed successively for each mass trace. -The results for each model are ranked with the $\texttt{compare()}$ function of the ArviZ package based on Pareto-smoothed importance sampling leave-one-out cross-validation (LOO-PIT) [@RN146; @RN145]. +Its results for each model are ranked based on Pareto-smoothed importance sampling leave-one-out cross-validation (LOO-PIT) [@RN146; @RN145]. ![](./Fig3_PP-standalone.png) __Figure 1:__ Overview of the pre-manufactured data analysis pipeline featured in $\texttt{PeakPerformance}$. Subsequently, the peak analysis pipeline can be started with the function $\texttt{pipeline()}$ from the $\texttt{pipeline}$ module. Depending on whether the "pre-filtering" option was selected, an optional filtering step will be executed to reject signals where clearly no peak is present before sampling, thus saving computation time. 
-This filtering step combines the $\texttt{find\_peaks()}$ function from the SciPy package [@scipy] with a user-defined minimum signal-to-noise threshold and may reject a great many signals before sampling, e.g. in the case of isotopic labeling experiments where every theoretically achievable mass isotopomer needs to be investigated, yet depending on the input labeling mixture, the majority of them might not be present in actuality. Upon passing the first filter, a Markov chain Monte Carlo (MCMC) simulation is conducted using a No-U-Turn Sampler (NUTS) [@RN173], preferably - if installed in the Python environment - the nutpie sampler [@nutpie] due to its highly increased performance compared to the default sampler of PyMC. -Before sampling from the posterior distribution, a prior predictive check is performed. When a posterior distribution has been obtained, the main filtering step is next in line which checks the convergence of the Markov chains via the potential scale reduction factor [@RN152] or $\hat{R}$ statistic and based on the uncertainty of the determined peak parameters. If a signal was accepted as a peak, a posterior predictive check is conducted and added to the inference data object resulting from the model simulation. Regarding the performance of the simulation, in our tests an analysis of a single peaks took 20 s to 30 s and of a double peaks 25 s to 90 s. @@ -124,7 +121,7 @@ __Table 2:__ Depiction of the results for the most important peak parameters of ![](./summary_joint.svg){width="100%"} In this case, the fits were successful and convergence was reached for all parameters. 
-Most notably and for the first time, the measurement noise was taken into account when determining the peak area as represented by its standard deviation and as can be observed in the posterior predictive plots where the noisy data points fall within the boundary of the 95 % HDI.\\ +Most notably and for the first time, the measurement noise was taken into account when determining the peak area as represented by its standard deviation and as can be observed in the posterior predictive plots where the noisy data points fall within the boundary of the 95 % HDI. In the documentation, there is a study featuring simulated and experimental data to validate $\texttt{PeakPerformance}$'s results against a commercially available vendor software for peak integration showing that comparable results are indeed obtained. From 931594746d1dc6638b00f0e6fa60af1ebaabe7e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jochen=20Nie=C3=9Fer?= Date: Mon, 25 Nov 2024 19:00:17 +0100 Subject: [PATCH 2/5] unite conclusion and summary, then remove conclusion --- paper/paper.md | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/paper/paper.md b/paper/paper.md index 1dae8ec..6d4a8c5 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -42,8 +42,9 @@ bibliography: A major bottleneck of chromatography-based analytics has been the elusive fully automated identification and integration of peak data without the need of extensive human supervision. The presented Python package $\texttt{PeakPerformance}$ applies Bayesian inference to chromatographic peak fitting, and provides an automated approach featuring model selection and uncertainty quantification. -Currently, its application is focused on data from targeted liquid chromatography tandem mass spectrometry (LC-MS/MS), but its design allows for an expansion to other chromatographic techniques. 
-$\texttt{PeakPerformance}$ is implemented in Python and the source code is available on [GitHub](https://github.com/JuBiotech/peak-performance). +Regarding peak acceptance, it improves on vendor software solutions with more sophisticated, multi-layered metrics for decision making based on convergence of the parameter estimation, as well as the uncertainties of peak parameters. +Currently, its application is focused on data from targeted liquid chromatography tandem mass spectrometry (LC-MS/MS), but its design allows for an expansion to other chromatographic techniques and accommodates users with little programming experience by supplying convenience functions and relying on Microsoft Excel for data input and reporting. +$\texttt{PeakPerformance}$ is implemented in Python, its source code is available on [GitHub](https://github.com/JuBiotech/peak-performance), and a thorough documentation is available under [https://peak-performance.rtfd.io](https://peak-performance.rtfd.io). It is unit-tested on Linux and Windows and accompanied by documentation as well as example notebooks. # Statement of need @@ -124,16 +125,6 @@ In this case, the fits were successful and convergence was reached for all param Most notably and for the first time, the measurement noise was taken into account when determining the peak area as represented by its standard deviation and as can be observed in the posterior predictive plots where the noisy data points fall within the boundary of the 95 % HDI. In the documentation, there is a study featuring simulated and experimental data to validate $\texttt{PeakPerformance}$'s results against a commercially available vendor software for peak integration showing that comparable results are indeed obtained. - -# Conclusions -$\texttt{PeakPerformance}$ is a tool for automated LC-MS/MS peak data analysis employing Bayesian inference. 
-It provides built-in uncertainty quantification by Bayesian parameter estimation and thus for the first time takes the measurement noise of an LC-MS/MS device into account when integrating peaks. -Regarding peak acceptance, it improves on vendor software solutions with more sophisticated, multi-layered metrics for decision making based on convergence of the parameter estimation, as well as the uncertainties of peak parameters. -Finally, it allows the addition of new models to describe peak intensity functions with just a few minor code changes, thus lending itself to expansion to data from other chromatographic techniques. -The design of $\texttt{PeakPerformance}$ accommodates users with little programming experience by supplying convenience functions and relying on Microsoft Excel for data input and reporting. -Its code repository on GitHub features automated unit tests, and an automatically built documentation [https://peak-performance.rtfd.io](https://peak-performance.rtfd.io). - - ### Author contributions $\texttt{PeakPerformance}$ was conceptualized by JN and MO. Software implementation was conducted by JN with code review by MO. 
From 6732e12b43ff77c416a4d08f59b72a50f1357485 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jochen=20Nie=C3=9Fer?= Date: Mon, 25 Nov 2024 19:08:17 +0100 Subject: [PATCH 3/5] test protecting cases --- paper/literature.bib | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paper/literature.bib b/paper/literature.bib index c233db1..08bdaf9 100644 --- a/paper/literature.bib +++ b/paper/literature.bib @@ -153,7 +153,7 @@ @article{RN148 @article{RN145, author = {Vehtari, Aki and Gelman, Andrew and Gabry, Jonah}, - title = {Practical Bayesian model evaluation using leave-one-out cross-validation and WAIC}, + title = {{Practical Bayesian model evaluation using leave-one-out cross-validation and WAIC}}, journal = {Statistics and Computing}, volume = {27}, number = {5}, From cfbde1603dc6ce8c73d45c2366e800850190bed6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jochen=20Nie=C3=9Fer?= Date: Mon, 25 Nov 2024 19:12:37 +0100 Subject: [PATCH 4/5] fix all cases --- paper/literature.bib | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/paper/literature.bib b/paper/literature.bib index 08bdaf9..af7f6e2 100644 --- a/paper/literature.bib +++ b/paper/literature.bib @@ -46,7 +46,7 @@ @article{matplotlib @misc{matplotlibzenodo, author = {{The Matplotlib Development Team}}, - title = {Matplotlib: Visualization with Python}, + title = {{Matplotlib: Visualization with Python}}, keywords = {software}, month = may, year = 2024, @@ -58,7 +58,7 @@ @misc{matplotlibzenodo @article{RN173, author = {Hoffmann, Matthew D. and Gelman, Andrew}, - title = {The No-U-Turn Sampler: Adaptively Setting Path Lengths in Hamiltonian Monte Carlo}, + title = {{The No-U-Turn Sampler: Adaptively Setting Path Lengths in Hamiltonian Monte Carlo}}, journal = {Journal of Machine Learning Research}, volume = {15}, year = {2014}, @@ -66,7 +66,7 @@ @article{RN173 @article{RN150, author = {Abril-Pla, O. and Andreani, V. and Carroll, C. and Dong, L. and Fonnesbeck, C. J. 
and Kochurov, M. and Kumar, R. and Lao, J. and Luhmann, C. C. and Martin, O. A. and Osthege, M. and Vieira, R. and Wiecki, T. and Zinkov, R.}, - title = {{PyMC}: a modern, and comprehensive probabilistic programming framework in Python}, + title = {{PyMC}: a modern, and comprehensive probabilistic programming framework in {P}ython}, journal = {PeerJ Comput Sci}, volume = {9}, pages = {e1516}, @@ -79,7 +79,7 @@ @article{RN150 @book{RN162, author = {Kruschke, John K.}, - title = {Doing Bayesian Data Analysis}, + title = {{Doing Bayesian Data Analysis}}, edition = {Second Edition}, publisher={Academic Press}, isbn = {9780123814852}, @@ -101,7 +101,7 @@ @article{RN144 @article{RN152, author = {Gelman, Andrew and Rubin, Donald B.}, - title = {Inference from Iterative Simulation Using Multiple Sequences}, + title = {{Inference from Iterative Simulation Using Multiple Sequences}}, journal = {Statistical Science}, volume = {7}, number = {4}, @@ -111,7 +111,7 @@ @article{RN152 @article{RN153, author = {Grushka, E.}, - title = {Characterization of exponentially modified Gaussian peaks in chromatography}, + title = {{Characterization of exponentially modified Gaussian peaks in chromatography}}, journal = {Anal Chem}, volume = {44}, number = {11}, @@ -125,7 +125,7 @@ @article{RN153 @article{RN149, author = {Hemmerich, J. and Noack, S. and Wiechert, W. 
and Oldiges, M.}, - title = {Microbioreactor Systems for Accelerated Bioprocess Development}, + title = {{Microbioreactor Systems for Accelerated Bioprocess Development}}, journal = {Biotechnol J}, volume = {13}, number = {4}, @@ -166,7 +166,7 @@ @article{RN145 @article{RN146, author = {Watanabe, Sumio}, - title = {Asymptotic Equivalence of Bayes Cross Validation and Widely Applicable Information Criterion in Singular Learning Theory}, + title = {{Asymptotic Equivalence of Bayes Cross Validation and Widely Applicable Information Criterion in Singular Learning Theory}}, journal = {Journal of machine learning research}, volume = {11}, pages = {3571-3594}, @@ -175,7 +175,7 @@ @article{RN146 @article{RN147, author = {Kumar, Ravin and Carroll, Colin and Hartikainen, Ari and Martin, Osvaldo}, - title = {ArviZ a unified library for exploratory analysis of Bayesian models in Python}, + title = {{ArviZ a unified library for exploratory analysis of Bayesian models in Python}}, journal = {Journal of Open Source Software}, volume = {4}, number = {33}, From 5e256e7fc14d5c12fcf60ab409f6336e87982402 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jochen=20Nie=C3=9Fer?= Date: Mon, 25 Nov 2024 19:17:47 +0100 Subject: [PATCH 5/5] actually fix all cases --- paper/literature.bib | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/paper/literature.bib b/paper/literature.bib index af7f6e2..0d9fdd5 100644 --- a/paper/literature.bib +++ b/paper/literature.bib @@ -207,7 +207,7 @@ @article{harris2020array } @article{vivo2012bayesian, - title={Bayesian approach for peak detection in two-dimensional chromatography}, + title={{Bayesian approach for peak detection in two-dimensional chromatography}}, author={Viv{\'o}-Truyols, Gabriel}, journal={Analytical chemistry}, volume={84}, @@ -219,7 +219,7 @@ @article{vivo2012bayesian } @article{woldegebriel2015probabilistic, - title={Probabilistic model for untargeted peak detection in LC--MS using Bayesian statistics}, + 
title={{Probabilistic model for untargeted peak detection in LC--MS using Bayesian statistics}}, author={Woldegebriel, Michael and Viv{\'o}-Truyols, Gabriel}, journal={Analytical chemistry}, volume={87}, @@ -231,7 +231,7 @@ @article{woldegebriel2015probabilistic } @article{briskot2019prediction, - title={Prediction uncertainty assessment of chromatography models using Bayesian inference}, + title={{Prediction uncertainty assessment of chromatography models using Bayesian inference}}, author={Briskot, Till and St\"{u}ckler, Ferdinand and Wittkopp, Felix and Williams, Christopher and Yang, Jessica and Konrad, Susanne and Doninger, Katharina and Griesbach, Jan and Bennecke, Moritz and Hepbildikler, Stefan and others}, journal={Journal of Chromatography A}, volume={1587}, @@ -242,7 +242,7 @@ @article{briskot2019prediction } @article{yamamoto2021uncertainty, - title={Uncertainty quantification for chromatography model parameters by Bayesian inference using sequential Monte Carlo method}, + title={{Uncertainty quantification for chromatography model parameters by Bayesian inference using sequential Monte Carlo method}}, author={Yamamoto, Yota and Yajima, Tomoyuki and Kawajiri, Yoshiaki}, journal={Chemical Engineering Research and Design}, volume={175}, @@ -253,7 +253,7 @@ @article{yamamoto2021uncertainty } @article{wiczling2016much, - title={How much can we learn from a single chromatographic experiment? A Bayesian perspective}, + title={{How much can we learn from a single chromatographic experiment? A Bayesian perspective}}, author={Wiczling, Pawe{\l} and Kaliszan, Roman}, journal={Analytical chemistry}, volume={88},