generated from alshedivat/al-folio
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathpapers.bib
166 lines (150 loc) · 19.8 KB
/
papers.bib
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
---
---
@string{aps = {American Physical Society}}
@article{ke2024gibbsbps,
  preview  = {GibbsBPS.jpg},
  title    = {Fused {$L_{1/2}$} prior for large scale linear inverse problem with Gibbs bouncy particle sampler},
  author   = {Xiongwen Ke and Yanan Fan and Qingping Zhou},
  abstract = {In this paper, we study Bayesian approach for solving large scale linear inverse problems arising in various scientific and engineering fields. We propose a fused $L_{1/2}$ prior with edge-preserving and sparsity-promoting properties and show that it can be formulated as a Gaussian mixture Markov random field. Since the density function of this family of prior is neither log-concave nor Lipschitz, gradient-based Markov chain Monte Carlo methods can not be applied to sample the posterior. Thus, we present a Gibbs sampler in which all the conditional posteriors involved have closed form expressions. The Gibbs sampler works well for small size problems but it is computationally intractable for large scale problems due to the need for sample high dimensional Gaussian distribution. To reduce the computation burden, we construct a Gibbs bouncy particle sampler (Gibbs-BPS) based on a piecewise deterministic Markov process. This new sampler combines elements of Gibbs sampler with bouncy particle sampler and its computation complexity is an order of magnitude smaller. We show that the new sampler converges to the target distribution. With computed tomography examples, we demonstrate that the proposed method shows competitive performance with existing popular Bayesian methods and is highly efficient in large scale problems.},
  year     = {2024},
  arxiv    = {2409.07874},
  selected = {true},
}
@article{zhou2023deep,
  preview  = {DuNets.jpg},
  title    = {Deep unrolling networks with recurrent momentum acceleration for nonlinear inverse problems},
  author   = {Qingping Zhou and Jiayu Qian and Junqi Tang and Jinglai Li},
  abstract = {Combining the strengths of model-based iterative algorithms and data-driven deep learning solutions, deep unrolling networks (DuNets) have become a popular tool to solve inverse imaging problems. Although DuNets have been successfully applied to many linear inverse problems, their performance tends to be impaired by nonlinear problems. Inspired by momentum acceleration techniques that are often used in optimization algorithms, we propose a recurrent momentum acceleration (RMA) framework that uses a long short-term memory recurrent neural network (LSTM-RNN) to simulate the momentum acceleration process. The RMA module leverages the ability of the LSTM-RNN to learn and retain knowledge from the previous gradients. We apply RMA to two popular DuNets – the learned proximal gradient descent (LPGD) and the learned primal-dual (LPD) methods, resulting in LPGD-RMA and LPD-RMA, respectively. We provide experimental results on two nonlinear inverse problems: a nonlinear deconvolution problem, and an electrical impedance tomography problem with limited boundary measurements. In the first experiment we have observed that the improvement due to RMA largely increases with respect to the nonlinearity of the problem. The results of the second example further demonstrate that the RMA schemes can significantly improve the performance of DuNets in strongly ill-posed problems.},
  journal  = {Inverse Problems},
  year     = {2024},
  html     = {http://iopscience.iop.org/article/10.1088/1361-6420/ad35e3},
  pdf      = {https://arxiv.org/abs/2307.16120},
  slides   = {DuNets_slides.pdf},
  selected = {true},
}
@article{wang2024comparative,
  preview   = {dgmEIT.jpg},
  title     = {A comparative study of variational autoencoders, normalizing flows, and score-based diffusion models for electrical impedance tomography},
  author    = {Huihui Wang and Guixian Xu and Qingping Zhou},
  abstract  = {Electrical Impedance Tomography (EIT) is a widely employed imaging technique in industrial inspection, geophysical prospecting, and medical imaging. However, the inherent nonlinearity and ill-posedness of EIT image reconstruction present challenges for classical regularization techniques, such as the critical selection of regularization terms and the lack of prior knowledge. Deep generative models (DGMs) have been shown to play a crucial role in learning implicit regularizers and prior knowledge. This study aims to investigate the potential of three DGMs – variational autoencoder networks, normalizing flow, and score-based diffusion model – to learn implicit regularizers in learning-based EIT imaging. We first introduce background information on EIT imaging and its inverse problem formulation. Next, we propose three algorithms for performing EIT inverse problems based on corresponding DGMs. Finally, we present numerical and visual experiments, which reveal that (1) no single method consistently outperforms the others across all settings, and (2) when reconstructing an object with two anomalies using a well-trained model based on a training dataset containing four anomalies, the conditional normalizing flow (CNF) model exhibits the best generalization in low-level noise, while the conditional score-based diffusion model (CSD*) demonstrates the best generalization in high-level noise settings. We hope our preliminary efforts will encourage other researchers to assess their DGMs in EIT and other nonlinear inverse problems.},
  journal   = {Journal of Inverse and Ill-posed Problems},
  year      = {2024},
  publisher = {De Gruyter},
  html      = {https://doi.org/10.1515/jiip-2023-0037},
  pdf       = {https://arxiv.org/abs/2310.15831},
  selected  = {true},
}
@article{hu2024surrogate,
  preview  = {surrogateForBI.jpg},
  title    = {A MCMC Method Based on Surrogate Model and Gaussian Process Parameterization for Infinite Bayesian PDE Inversion},
  author   = {Zheng Hu and Hongqiao Wang and Qingping Zhou},
  abstract = {This work focuses on an inversion problem derived from parametric partial differential equations (PDEs) with an infinite-dimensional parameter, represented as a coefficient function. The objective is to estimate this coefficient function accurately despite having only noisy measurements of the PDE solution at sparse input points. Conventional methods for inversion require numerous calls to a refined PDE solver, resulting in significant computational complexity, especially for challenging PDE problems, making the inference of the coefficient function practically infeasible. To address this issue, we propose a MCMC method that combines an deep learning-based surrogate and Gaussian process parameterization to efficiently infer the posterior of the coefficient function. The surrogate model is a combination of a cost-effective coarse PDE solver and a neural network-based transformation which provides an approximate solution derived from the refined PDE solver but based on the coarse PDE solution. The coefficient function is represented by Gaussian process with finite number of spatially dependent parameters and this parametric representation will be beneficial for the preconditioned Crank-Nicolson (pCN) Markov chain Monte Carlo method to sample efficiently from the posterior distribution. Approximate Bayesian computation method is used for constructing an informative dataset for the transformation learning. Our numerical examples demonstrate the effectiveness of this approach in accurately estimating the coefficient function while significantly reducing the computational burden associated with traditional inversion techniques.},
  journal  = {Journal of Computational Physics},
  pages    = {112970},
  year     = {2024},
  doi      = {10.1016/j.jcp.2024.112970},
  html     = {https://www.sciencedirect.com/science/article/pii/S0021999124002195},
}
@article{xu2024enhancing,
  preview   = {HQSnet.jpg},
  title     = {Enhancing electrical impedance tomography reconstruction using learned half-quadratic splitting networks with Anderson acceleration},
  author    = {Guixian Xu and Huihui Wang and Qingping Zhou},
  abstract  = {Electrical Impedance Tomography (EIT) is widely applied in medical diagnosis, industrial inspection, and environmental monitoring. Combining the physical principles of the imaging system with the advantages of data-driven deep learning networks, physics-embedded deep unrolling networks have recently emerged as a promising solution in computational imaging. However, the inherent nonlinear and ill-posed properties of EIT image reconstruction still present challenges to existing methods in terms of accuracy and stability. To tackle this challenge, we propose the learned half-quadratic splitting (HQSNet) algorithm for incorporating physics into learning-based EIT imaging. We then apply Anderson acceleration (AA) to the HQSNet algorithm, denoted as AA-HQSNet, which can be interpreted as AA applied to the Gauss-Newton step and the learned proximal gradient descent step of the HQSNet, respectively. AA is a widely-used technique for accelerating the convergence of fixed-point iterative algorithms and has gained significant interest in numerical optimization and machine learning. However, the technique has received little attention in the inverse problems community thus far. Employing AA enhances the convergence rate compared to the standard HQSNet while simultaneously avoiding artifacts in the reconstructions. Lastly, we conduct rigorous numerical and visual experiments to show that the AA module strengthens the HQSNet, leading to robust, accurate, and considerably superior reconstructions compared to state-of-the-art methods. Our Anderson acceleration scheme to enhance HQSNet is generic and can be applied to improve the performance of various physics-embedded deep learning methods.},
  journal   = {Journal of Scientific Computing},
  volume    = {98},
  number    = {2},
  pages     = {49},
  year      = {2024},
  publisher = {Springer},
  html      = {https://doi.org/10.1007/s10915-023-02439-4},
  pdf       = {https://arxiv.org/abs/2304.14491},
}
@article{qian2023bayesian,
  preview  = {roundtrip.jpg},
  title    = {Bayesian imaging inverse problem with SA-Roundtrip prior via HMC-pCN sampler},
  author   = {Jiayu Qian and Yuanyuan Liu and Jingya Yang and Qingping Zhou},
  abstract = {Bayesian inference with deep generative prior has received considerable interest for solving imaging inverse problems in many scientific and engineering fields. The selection of the prior distribution is learned from, and therefore an important representation learning of, available prior measurements. The SA-Roundtrip, a novel deep generative prior, is introduced to enable controlled sampling generation and identify the data's intrinsic dimension. This prior incorporates a self-attention structure within a bidirectional generative adversarial network. Subsequently, Bayesian inference is applied to the posterior distribution in the low-dimensional latent space using the Hamiltonian Monte Carlo with preconditioned Crank-Nicolson (HMC-pCN) algorithm, which is proven to be ergodic under specific conditions. Experiments conducted on computed tomography (CT) reconstruction with the MNIST and TomoPhantom datasets reveal that the proposed method outperforms state-of-the-art comparisons, consistently yielding a robust and superior point estimator along with precise uncertainty quantification.},
  journal  = {Computational Statistics \& Data Analysis},
  pages    = {107930},
  year     = {2024},
  keywords = {Bayesian inference, inverse problems, Deep generative prior, Generative adversarial network, Hamiltonian Monte Carlo},
  html     = {https://doi.org/10.1016/j.csda.2024.107930},
  url      = {https://www.sciencedirect.com/science/article/pii/S0167947324000148},
}
@article{wang2023uncertainty,
  preview  = {uq-guided.jpg},
  title    = {An Uncertainty-Guided Deep Learning Method Facilitates Rapid Screening of CYP3A4 Inhibitors},
  author   = {Ruixuan Wang and Zhikang Liu and Jiahao Gong and Qingping Zhou and Xiaoqing Guan and Guangbo Ge},
  abstract = {Cytochrome P450 3A4 (CYP3A4), a prominent member of the P450 enzyme superfamily, plays a crucial role in metabolizing various xenobiotics, including over 50% of clinically significant drugs. Evaluating CYP3A4 inhibition before drug approval is essential to avoiding potentially harmful pharmacokinetic drug-drug interactions (DDIs) and adverse drug reactions (ADRs). Despite the development of several CYP inhibitor prediction models, the primary approach for screening CYP inhibitors still relies on experimental methods. This might stem from the limitations of existing models, which only provide deterministic classification outcomes instead of precise inhibition intensity (e.g., IC50) and often suffer from inadequate prediction reliability. To address this challenge, we propose an uncertainty-guided regression model to accurately predict the IC50 values of anti-CYP3A4 activities. First, a comprehensive data set of CYP3A4 inhibitors was compiled, consisting of 27,045 compounds with classification labels, including 4395 compounds with explicit IC50 values. Second, by integrating the predictions of the classification model trained on a larger data set and introducing an evidential uncertainty method to rank prediction confidence, we obtained a high-precision and reliable regression model. Finally, we use the evidential uncertainty values as a trustworthy indicator to perform a virtual screening of an in-house compound set. The in vitro experiment results revealed that this new indicator significantly improved the hit ratio and reduced false positives among the top-ranked compounds. Specifically, among the top 20 compounds ranked with uncertainty, 15 compounds were identified as novel CYP3A4 inhibitors, and three of them exhibited activities less than 1 μM. In summary, our findings highlight the effectiveness of incorporating uncertainty in compound screening, providing a promising strategy for drug discovery and development.},
  journal  = {Journal of Chemical Information and Modeling},
  volume   = {63},
  number   = {24},
  pages    = {7699--7710},
  year     = {2023},
  doi      = {10.1021/acs.jcim.3c01241},
  html     = {https://doi.org/10.1021/acs.jcim.3c01241},
  slides   = {uq_guided_screening_slides.pdf},
  website  = {https://mp.weixin.qq.com/s/L_PAA3ZGO9ljkv8qB0r4NA},
}
@article{zhou2020bayesian,
  preview  = {BayesianPET.jpg},
  title    = {Bayesian Inference and Uncertainty Quantification for Medical Image Reconstruction with Poisson Data},
  author   = {Qingping Zhou and Tengchao Yu and Xiaoqun Zhang and Jinglai Li},
  abstract = {We provide a complete framework for performing infinite dimensional Bayesian inference and uncertainty quantification for image reconstruction with Poisson data. In particular, we address the following issues to make the Bayesian framework applicable in practice. We first introduce a positivity-preserving reparametrization, and we prove that under the reparametrization and a hybrid prior, the posterior distribution is well-posed in the infinite dimensional setting. Second, we provide a dimension-independent Markov chain Monte Carlo algorithm, based on the preconditioned Crank--Nicolson Langevin method, in which we use a primal-dual scheme to compute the offset direction. Third, we give a method combining the model discrepancy method and maximum likelihood estimation to determine the regularization parameter in the hybrid prior. Finally we propose to use the obtained posterior distribution to detect artifacts in a recovered image. We provide an example to demonstrate the effectiveness of the proposed method.},
  journal  = {SIAM Journal on Imaging Sciences},
  volume   = {13},
  number   = {1},
  pages    = {29--52},
  year     = {2020},
  doi      = {10.1137/19M1248352},
  html     = {https://epubs.siam.org/doi/abs/10.1137/19M1248352},
  pdf      = {https://arxiv.org/abs/1903.02075},
  slides   = {BayesPET_slides.pdf},
  selected = {true},
}
@article{lv2020nonlocal,
  preview  = {nonlocalTV.jpg},
  title    = {Nonlocal TV-Gaussian prior for Bayesian inverse problems with applications to limited CT reconstruction},
  author   = {Didi Lv and Qingping Zhou and Jae Kyu Choi and Jinglai Li and Xiaoqun Zhang},
  abstract = {Bayesian inference methods have been widely applied in inverse problems due to the ability of uncertainty characterization of the estimation. The prior distribution of the unknown plays an essential role in the Bayesian inference, and a good prior distribution can significantly improve the inference results. In this paper, we propose a hybrid prior distribution on combining the nonlocal total variation regularization (NLTV) and the Gaussian distribution, namely NLTG prior. The advantage of this hybrid prior is two-fold. The proposed prior models both texture and geometric structures present in images through the NLTV. The Gaussian reference measure also provides a flexibility of incorporating structure information from a reference image. Some theoretical properties are established for the hybrid prior. We apply the proposed prior to limited tomography reconstruction problem that is difficult due to severe data missing. Both maximum a posteriori and conditional mean estimates are computed through two efficient methods and the numerical experiments validate the advantages and feasibility of the proposed NLTG prior.},
  journal  = {Inverse Problems \& Imaging},
  volume   = {14},
  number   = {1},
  year     = {2020},
  html     = {https://www.aimsciences.org/article/doi/10.3934/ipi.2019066},
  pdf      = {https://arxiv.org/abs/1901.00262},
}
@article{Zhou_2018,
  preview   = {linearGauss.jpg},
  title     = {An approximate empirical Bayesian method for large-scale linear-Gaussian inverse problems},
  author    = {Qingping Zhou and Wenqing Liu and Jinglai Li and Youssef M Marzouk},
  abstract  = {We study Bayesian inference methods for solving linear inverse problems, focusing on hierarchical formulations where the prior or the likelihood function depend on unspecified hyperparameters. In practice, these hyperparameters are often determined via an empirical Bayesian method that maximizes the marginal likelihood function, i.e. the probability density of the data conditional on the hyperparameters. Evaluating the marginal likelihood, however, is computationally challenging for large-scale problems. In this work, we present a method to approximately evaluate marginal likelihood functions, based on a low-rank approximation of the update from the prior covariance to the posterior covariance. We show that this approximation is optimal in a minimax sense. Moreover, we provide an efficient algorithm to implement the proposed method, based on a combination of the randomized SVD and a spectral approximation method to compute square roots of the prior covariance matrix. Several numerical examples demonstrate good performance of the proposed method.},
  journal   = {Inverse Problems},
  volume    = {34},
  number    = {9},
  pages     = {095001},
  month     = jun,
  year      = {2018},
  publisher = {IOP Publishing},
  doi       = {10.1088/1361-6420/aac287},
  url       = {https://dx.doi.org/10.1088/1361-6420/aac287},
  html      = {https://iopscience.iop.org/article/10.1088/1361-6420/aac287},
  pdf       = {https://arxiv.org/abs/1705.07646},
  slides    = {EB_hyper_slides.pdf},
}
@article{zhou2017hybrid,
  preview  = {adaptiveMCMC.jpg},
  title    = {A Hybrid Adaptive MCMC Algorithm in Function Spaces},
  author   = {Qingping Zhou and Zixi Hu and Zhewei Yao and Jinglai Li},
  abstract = {The preconditioned Crank--Nicolson (pCN) method is a Markov chain Monte Carlo (MCMC) scheme, specifically designed to perform Bayesian inferences in function spaces. Unlike many standard MCMC algorithms, the pCN method can preserve the sampling efficiency under the mesh refinement, a property referred to as being dimension independent. In this work we consider an adaptive strategy to further improve the efficiency of pCN. In particular we develop a hybrid adaptive MCMC method: the algorithm performs an adaptive Metropolis scheme in a chosen finite dimensional subspace and a standard pCN algorithm in the complement space of the chosen subspace. We show that the proposed algorithm satisfies certain important ergodicity conditions. Finally with numerical examples we demonstrate that the proposed method has competitive performance with existing adaptive algorithms.},
  journal  = {SIAM/ASA Journal on Uncertainty Quantification},
  volume   = {5},
  number   = {1},
  pages    = {621--639},
  year     = {2017},
  doi      = {10.1137/16M1082950},
  html     = {https://epubs.siam.org/doi/abs/10.1137/16M1082950},
  pdf      = {https://arxiv.org/abs/1607.01458},
  slides   = {adap_pCN_slides.pdf},
}