Merge pull request #519 from reneeotten/PR512
updates to complement PR512
newville authored Nov 26, 2018
2 parents 69dfd02 + deff523 commit 7fec373
Showing 8 changed files with 62 additions and 43 deletions.
Binary file modified doc/_images/emcee_corner.png
Binary file modified doc/_images/emcee_dbl_exp.png
Binary file modified doc/_images/emcee_dbl_exp2.png
71 changes: 37 additions & 34 deletions doc/fitting.rst
@@ -415,7 +415,7 @@ exponential decay. A small amount of Gaussian noise is also added in::
>>> x = np.linspace(1, 10, 250)
>>> np.random.seed(0)
>>> y = 3.0 * np.exp(-x / 2) - 5.0 * np.exp(-(x - 0.1) / 10.) + 0.1 * np.random.randn(len(x))
>>> plt.plot(x, y)
>>> plt.plot(x, y, 'b')
>>> plt.show()

.. image:: _images/emcee_dbl_exp.png
@@ -436,12 +436,12 @@ Solving with :func:`minimize` gives the Maximum Likelihood solution::
>>> mi = lmfit.minimize(residual, p, method='Nelder', nan_policy='omit')
>>> lmfit.printfuncs.report_fit(mi.params, min_correl=0.5)
[[Variables]]
a1: 2.98623689 (init = 4)
a2: -4.33525597 (init = 4)
t1: 1.30993186 (init = 3)
t2: 11.8240752 (init = 3)
a1: 2.98623689 (init = 4)
a2: -4.33525597 (init = 4)
t1: 1.30993186 (init = 3)
t2: 11.8240752 (init = 3)

>>> plt.plot(x, y)
>>> plt.plot(x, y, 'b')
>>> plt.plot(x, residual(mi.params) + y, 'r')
>>> plt.show()

@@ -457,21 +457,24 @@ log-prior probability and log-likelihood functions. The log-prior probability is
assumed to be zero if all the parameters are within their bounds and ``-np.inf``
if any of the parameters are outside their bounds.
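
As a rough illustration (a sketch only, not the exact code lmfit runs internally), such a
flat log-prior could be written as::

    import numpy as np

    def lnprior(params):
        # zero when every parameter lies inside its bounds, -np.inf otherwise
        for par in params.values():
            if not (par.min <= par.value <= par.max):
                return -np.inf
        return 0.0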

>>> # add a noise parameter
>>> mi.params.add('noise', value=1, min=0.001, max=2)
If the objective function returns an array of unweighted residuals (i.e.,
``data-model``) as is the case here, you can use ``is_weighted=False`` as an
argument for ``emcee``. In that case, ``emcee`` will automatically add/use the
``__lnsigma`` parameter to estimate the true uncertainty in the data. To
place boundaries on this parameter one can do::

>>> # This is the log-likelihood probability for the sampling. We're going to estimate the
>>> # size of the uncertainties on the data as well.
>>> def lnprob(p):
... noise = p['noise']
... return -0.5 * np.sum((residual(p) / noise)**2 + np.log(2 * np.pi * noise**2))
>>> mi.params.add('__lnsigma', value=np.log(0.1), min=np.log(0.001), max=np.log(2))

Now we have to set up the minimizer and do the sampling::

>>> mini = lmfit.Minimizer(lnprob, mi.params)
>>> res = mini.emcee(burn=300, steps=1000, thin=20, params=mi.params)
>>> res = lmfit.minimize(residual, method='emcee', nan_policy='omit', burn=300, steps=1000, thin=20,
params=mi.params, is_weighted=False)

Let's have a look at those posterior distributions for the parameters. This requires
Of note, the ``is_weighted`` argument will be ignored if your objective function
returns a float instead of an array. See the Notes in :meth:`Minimizer.emcee` for
more information.
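
Purely for illustration (a hypothetical sketch, not part of the documented example), an
objective that returns a float log-posterior, for which ``is_weighted`` is therefore
ignored, could look like the following; it assumes the ``__lnsigma`` parameter was added
to ``p`` as above::

    def lnprob(p):
        # fold the (log) noise scale into a Gaussian log-likelihood by hand
        lnsigma = p['__lnsigma'].value
        resid = residual(p)
        return -0.5 * np.sum((resid / np.exp(lnsigma))**2 + 2 * lnsigma + np.log(2 * np.pi))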

Let's have a look at those posterior distributions for the parameters. This requires
installation of the `corner` package::

>>> import corner
@@ -493,19 +496,18 @@ You can see that we recovered the right uncertainty level on the data::
median of posterior probability distribution
--------------------------------------------
[[Variables]]
a1: 2.99342394 +/- 0.15851315 (5.30%) (init = 2.986237)
a2: -4.34384999 +/- 0.12454831 (2.87%) (init = -4.335256)
t1: 1.32338403 +/- 0.14120290 (10.67%) (init = 1.309932)
t2: 11.7962437 +/- 0.48632272 (4.12%) (init = 11.82408)
noise: 0.09761521 +/- 0.00431795 (4.42%) (init = 1)
a1: 3.00096797 +/- 0.14231292 (4.74%) (init = 2.986237)
a2: -4.34208406 +/- 0.12652899 (2.91%) (init = -4.335256)
t1: 1.31910309 +/- 0.14180921 (10.75%) (init = 1.309932)
t2: 11.7812397 +/- 0.49805858 (4.23%) (init = 11.82408)
__lnsigma: -2.32838725 +/- 0.04403968 (1.89%) (init = -2.302585)
[[Correlations]] (unreported correlations are < 0.100)
C(a2, t1) = -0.965
C(a2, t2) = 0.959
C(t1, t2) = -0.927
C(a1, a2) = -0.241
C(a1, t2) = -0.168
C(a2, noise) = -0.116
C(t1, noise) = 0.107
C(a2, t2) = 0.982
C(a2, t1) = -0.940
C(t1, t2) = -0.897
C(a1, t1) = -0.504
C(a1, a2) = 0.214
C(a1, t2) = 0.174

>>> # find the maximum likelihood solution
>>> highest_prob = np.argmax(res.lnprob)
@@ -519,16 +521,17 @@ You can see that we recovered the right uncertainty level on the data::
>>> print(p)
Maximum likelihood Estimation
-----------------------------
Parameters([('a1', <Parameter 'a1', 2.9684811738216754, bounds=[-inf:inf]>),
('a2', <Parameter 'a2', -4.355238699173162, bounds=[-inf:inf]>),
('t1', <Parameter 't1', 1.3337647386777762, bounds=[-inf:inf]>),
('t2', <Parameter 't2', 11.758394302818514, bounds=[-inf:inf]>)])
Parameters([('a1', <Parameter 'a1', 3.003878755004621, bounds=[-inf:inf]>),
('a2', <Parameter 'a2', -4.327965057455135, bounds=[-inf:inf]>),
('t1', <Parameter 't1', 1.3006869431937975, bounds=[-inf:inf]>),
('t2', <Parameter 't2', 11.848423244767798, bounds=[-inf:inf]>)])

>>> # Finally, let's work out 1- and 2-sigma error estimates for 't1'
>>> quantiles = np.percentile(res.flatchain['t1'], [2.28, 15.9, 50, 84.2, 97.7])
>>> print("1 sigma spread", 0.5 * (quantiles[3] - quantiles[1]))
>>> print("2 sigma spread", 0.5 * (quantiles[4] - quantiles[0]))
1 sigma spread 0.1414604069179637
2 sigma spread 0.453234685099423
1 sigma spread 0.14235120263891188
2 sigma spread 0.2873717185860183
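
Since ``res.flatchain`` is returned as a pandas DataFrame (when pandas is installed), an
equivalent way to pull the same quantiles, sketched here for illustration, would be::

    >>> quantiles = res.flatchain['t1'].quantile([0.0228, 0.159, 0.50, 0.842, 0.977])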

Getting and Printing Fit Reports
===========================================
9 changes: 6 additions & 3 deletions doc/whatsnew.rst
@@ -16,15 +16,17 @@ consult the `lmfit GitHub repository`_.
Version 0.9.12 Release Notes
==========================================

The lmfit package is now licensed under BSD-3.

New features:

- SkewedVoigtModel was added as a built-in model (Issue #493)
- Parameter uncertainties and correlations are reported for least_squares
- Plotting of complex-valued models is now handled in ModelResult class (PR #503)
- A model's independent variable is allowed to be an object (Issue #492)
- Added `usersyms` to Parameters() initialization to make it easier to add custom functions and symbols (Issue #507)
- the `numdifftools` package can be used to calculate parameter uncertainties and correlations for all solvers that do not natively support this.

- the `numdifftools` package can be used to calculate parameter uncertainties and correlations for all solvers that do not natively support this (PR #506)
- `emcee` can now be used as a method keyword argument to Minimizer.minimize and the minimize function, which allows using `emcee` in the Model class (PR #512; see `examples/example_emcee_with_Model.py` and the sketch below)

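A minimal usage sketch of this new keyword (the model, data, and parameter names below are
illustrative only, not taken from the lmfit examples)::

    import numpy as np
    import lmfit

    def residual(params, x, data):
        # simple single-exponential decay model
        v = params.valuesdict()
        return v['amp'] * np.exp(-x / v['tau']) - data

    x = np.linspace(0, 10, 201)
    data = 2.5 * np.exp(-x / 3.0) + np.random.normal(scale=0.1, size=x.size)

    params = lmfit.Parameters()
    params.add('amp', value=1.0, min=0)
    params.add('tau', value=1.0, min=0.1)

    # 'emcee' can now be passed as the method, just like any other solver
    result = lmfit.minimize(residual, params, args=(x, data), method='emcee',
                            steps=500, burn=100, is_weighted=False)
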
(Bug)fixes:

@@ -33,7 +35,8 @@ New features:
- loading a saved ModelResult now restores all attributes (Issue #491)
- development versions of scipy and emcee are now supported (Issue #497 and PR #496)
- ModelResult.eval() no longer overwrites the userkws dictionary (Issue #499)
- running the test suite requires `pytest` only (Issue #504).
- running the test suite requires `pytest` only (Issue #504)
- improved FWHM calculation for VoigtModel (PR #514)


.. _whatsnew_0910_label:
8 changes: 4 additions & 4 deletions examples/doc_fitting_emcee.py
@@ -22,7 +22,7 @@
y = (3.0*np.exp(-x/2) - 5.0*np.exp(-(x-0.1) / 10.) +
0.1*np.random.randn(len(x)))
if HASPYLAB:
plt.plot(x, y)
plt.plot(x, y, 'b')
# plt.savefig('../doc/_images/emcee_dbl_exp.png')
plt.show()

@@ -39,8 +39,8 @@ def residual(p):
lmfit.printfuncs.report_fit(mi.params, min_correl=0.5)
if HASPYLAB:
plt.figure()
plt.plot(x, y)
plt.plot(x, residual(mi.params) + y, 'k')
plt.plot(x, y, 'b')
plt.plot(x, residual(mi.params) + y, 'r')
# plt.savefig('../doc/_images/emcee_dbl_exp2.png')
plt.show()

@@ -73,7 +73,7 @@ def residual(p):

if HASPYLAB:
plt.figure()
plt.plot(x, y)
plt.plot(x, y, 'b')
plt.plot(x, residual(mi.params) + y, 'r', label='Nelder-Mead')
plt.plot(x, residual(res.params) + y, 'k--', label='emcee')
plt.legend()
3 changes: 1 addition & 2 deletions lmfit/minimizer.py
@@ -341,7 +341,7 @@ def _calculate_statistics(self):
self.chisqr = self.residual
self.ndata = 1
self.nfree = 1
self.redchi = self.chisqr / self.nfree
self.redchi = self.chisqr / max(1, self.nfree)
# this is -2*loglikelihood
_neg2_log_likel = self.ndata * np.log(self.chisqr / self.ndata)
self.aic = _neg2_log_likel + 2 * self.nvarys
@@ -1312,7 +1312,6 @@ def emcee(self, params=None, steps=1000, nwalkers=100, burn=0, thin=1,
# Handle special case unique to emcee:
# This should eventually be moved into result._calculate_statistics.
elif float_behavior == 'posterior':
result.nvarys = len(result.init_vals)
result.ndata = 1
result.nfree = 1

14 changes: 14 additions & 0 deletions tests/test_nose.py
@@ -431,6 +431,20 @@ def test_emcee(self):

check_paras(out.params, self.p_true, sig=3)

@dec.slow
def test_emcee_method_kwarg(self):
# test with emcee as method keyword argument
if not HAS_EMCEE:
return True

np.random.seed(123456)
out = self.mini.minimize(method='emcee', nwalkers=100, steps=200,
burn=50, thin=10)
assert out.method == 'emcee'
assert out.nfev == 100*200

check_paras(out.params, self.p_true, sig=3)

@dec.slow
def test_emcee_PT(self):
# test emcee with parallel tempering
