diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c08e003ea1..5d4fa605f8 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -49,7 +49,7 @@ jobs: OVERRIDE_VERSION: ${{ github.event.inputs.override_version }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-python@v4 name: Install Python @@ -57,7 +57,7 @@ jobs: # For the sdist we should be as conservative as possible with our # Python version. This should be the lowest supported version. This # means that no unsupported syntax can sneak through. - python-version: '3.9' + python-version: '3.10' - name: Install pip build run: | @@ -90,7 +90,7 @@ jobs: zip "$zipfile" -r "$stem" rm -r "$stem" - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 with: name: sdist path: | @@ -107,21 +107,21 @@ jobs: matrix: os: [ubuntu-latest, windows-latest, macos-latest] env: - # Set up wheels matrix. This is CPython 3.9--3.12 for all OS targets. - CIBW_BUILD: "cp3{9,10,11,12}-*" + # Set up wheels matrix. This is CPython 3.10--3.12 for all OS targets. + CIBW_BUILD: "cp3{10,11,12}-*" # Numpy and SciPy do not supply wheels for i686 or win32 for # Python 3.10+, so we skip those: CIBW_SKIP: "*-musllinux* cp3{10,11,12}-manylinux_i686 cp3{10,11,12}-win32" OVERRIDE_VERSION: ${{ github.event.inputs.override_version }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-python@v4 name: Install Python with: # This is about the build environment, not the released wheel version. - python-version: '3.9' + python-version: '3.10' - name: Install cibuildwheel run: | @@ -137,7 +137,7 @@ jobs: if [[ ! 
-z "$OVERRIDE_VERSION" ]]; then echo "$OVERRIDE_VERSION" > VERSION; fi python -m cibuildwheel --output-dir wheelhouse - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 with: name: wheels path: ./wheelhouse/*.whl @@ -160,17 +160,17 @@ jobs: steps: - name: Download build artifacts to local runner - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 - uses: actions/setup-python@v4 name: Install Python with: - python-version: '3.9' + python-version: '3.10' - name: Verify this is not a dev version shell: bash run: | - python -m pip install wheels/*-cp39-cp39-manylinux*.whl + python -m pip install wheels/*-cp310-cp310-manylinux*.whl python -c 'import qutip; print(qutip.__version__); assert "dev" not in qutip.__version__; assert "+" not in qutip.__version__' # We built the zipfile for convenience distributing to Windows users on @@ -193,17 +193,17 @@ jobs: steps: - name: Download build artifacts to local runner - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 - uses: actions/setup-python@v4 name: Install Python with: - python-version: '3.9' + python-version: '3.10' - name: Verify this is not a dev version shell: bash run: | - python -m pip install wheels/*-cp39-cp39-manylinux*.whl + python -m pip install wheels/*-cp310-cp310-manylinux*.whl python -c 'import qutip; print(qutip.__version__); assert "dev" not in qutip.__version__; assert "+" not in qutip.__version__' # We built the zipfile for convenience distributing to Windows users on diff --git a/.github/workflows/build_documentation.yml b/.github/workflows/build_documentation.yml index b5c6eb9cf6..f1f7a258df 100644 --- a/.github/workflows/build_documentation.yml +++ b/.github/workflows/build_documentation.yml @@ -9,12 +9,12 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-python@v4 name: Install Python with: - python-version: '3.8' + python-version: '3.11' - name: Install 
documentation dependencies run: | @@ -27,7 +27,7 @@ jobs: run: | # Build without build isolation so that we use the build # dependencies already installed from doc/requirements.txt. - python -m pip install -e .[full] --no-build-isolation + python -m pip install -e .[full] --no-build-isolation --config-settings editable_mode=compat # Install in editable mode so it doesn't matter if we import from # inside the installation directory, otherwise we can get some errors # because we're importing from the wrong location. @@ -43,7 +43,7 @@ jobs: # -T : display a full traceback if a Python exception occurs - name: Upload built PDF files - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: qutip_pdf_docs path: doc/_build/latex/* @@ -59,7 +59,7 @@ jobs: # -T : display a full traceback if a Python exception occurs - name: Upload built HTML files - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: qutip_html_docs path: doc/_build/html/* diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index c046f02a4e..55d6693be4 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -29,96 +29,102 @@ jobs: # matrix size; make sure to test all supported versions in some form. python-version: ["3.11"] case-name: [defaults] - numpy-requirement: [">=1.22"] - scipy-requirement: [">=1.8"] + # Version 2 not yet available on conda's default channel + condaforge: [1] + numpy-build: [""] + numpy-requirement: [""] + scipy-requirement: [">=1.9"] coverage-requirement: ["==6.5"] # Extra special cases. In these, the new variable defined should always # be a truth-y value (hence 'nomkl: 1' rather than 'mkl: 0'), because # the lack of a variable is _always_ false-y, and the defaults lack all # the special cases. 
include: - # Python 3.9, Scipy 1.7, numpy 1.22 - # On more version than suggested by SPEC 0 - # https://scientific-python.org/specs/spec-0000/ - # There are deprecation warnings when using cython 0.29.X - - case-name: Old setup + - case-name: p312 numpy 2 os: ubuntu-latest - python-version: "3.9" - scipy-requirement: ">=1.8,<1.9" - numpy-requirement: ">=1.22,<1.23" - condaforge: 1 - oldcython: 1 - pytest-extra-options: "-W ignore:dep_util:DeprecationWarning" - - # Python 3.10, no mkl, scipy 1.9, numpy 1.23 - # Scipy 1.9 did not support cython 3.0 yet. - # cython#17234 - - case-name: no mkl - os: ubuntu-latest - python-version: "3.10" - scipy-requirement: ">=1.9,<1.10" - numpy-requirement: ">=1.23,<1.24" - condaforge: 1 - oldcython: 1 - nomkl: 1 - pytest-extra-options: "-W ignore:dep_util:DeprecationWarning" + python-version: "3.12" + numpy-build: ">=2.0.0" + numpy-requirement: ">=2.0.0" + pypi: 1 - # Python 3.10, no cython, scipy 1.10, numpy 1.24 - - case-name: no cython + - case-name: p310 numpy 1.22 os: ubuntu-latest python-version: "3.10" + numpy-build: ">=1.22.0,<1.23.0" + numpy-requirement: ">=1.22.0,<1.23.0" scipy-requirement: ">=1.10,<1.11" - numpy-requirement: ">=1.24,<1.25" - nocython: 1 - - # Python 3.11 and recent numpy - # Use conda-forge to provide Python 3.11 and latest numpy - # Ignore deprecation of the cgi module in Python 3.11 that is - # still imported by Cython.Tempita. This was addressed in - # https://github.com/cython/cython/pull/5128 but not backported - # to any currently released version. 
- - case-name: Python 3.11 - os: ubuntu-latest - python-version: "3.11" - condaforge: 1 - scipy-requirement: ">=1.11,<1.12" - numpy-requirement: ">=1.25,<1.26" - conda-extra-pkgs: "suitesparse" # for compiling cvxopt + semidefinite: 1 + oldcython: 1 + pypi: 1 + pytest-extra-options: "-W ignore:dep_util:DeprecationWarning -W \"ignore:The 'renderer' parameter of do_3d_projection\"" # Python 3.12 and latest numpy # Use conda-forge to provide Python 3.11 and latest numpy - - case-name: Python 3.12 + - case-name: p312, numpy fallback os: ubuntu-latest python-version: "3.12" - scipy-requirement: ">=1.12,<1.13" numpy-requirement: ">=1.26,<1.27" + scipy-requirement: ">=1.11,<1.12" condaforge: 1 - pytest-extra-options: "-W ignore:datetime:DeprecationWarning" # Install mpi4py to test mpi_pmap # Should be enough to include this in one of the runs includempi: 1 + # Python 3.10, no mkl, scipy 1.9, numpy 1.23 + # Scipy 1.9 did not support cython 3.0 yet. + # cython#17234 + - case-name: p310 no mkl + os: ubuntu-latest + python-version: "3.10" + numpy-requirement: ">=1.23,<1.24" + scipy-requirement: ">=1.9,<1.10" + semidefinite: 1 + condaforge: 1 + oldcython: 1 + nomkl: 1 + pytest-extra-options: "-W ignore:dep_util:DeprecationWarning -W \"ignore:The 'renderer' parameter of do_3d_projection\"" + # Mac # Mac has issues with MKL since september 2022. - case-name: macos - os: macos-latest + # setup-miniconda not compatible with macos-latest presently. + # https://github.com/conda-incubator/setup-miniconda/issues/344 + os: macos-12 + python-version: "3.12" + numpy-build: ">=2.0.0" + numpy-requirement: ">=2.0.0" + condaforge: 1 + nomkl: 1 + + - case-name: macos - numpy fallback + os: macos-12 python-version: "3.11" + numpy-build: ">=2.0.0" + numpy-requirement: ">=1.25,<1.26" condaforge: 1 nomkl: 1 - # Windows. Once all tests pass without special options needed, this - # can be moved to the main os list in the test matrix. 
All the tests - # that fail currently seem to do so because mcsolve uses - # multiprocessing under the hood. Windows does not support fork() - # well, which makes transfering objects to the child processes - # error prone. See, e.g., https://github.com/qutip/qutip/issues/1202 - case-name: Windows os: windows-latest python-version: "3.11" + numpy-build: ">=2.0.0" + numpy-requirement: ">=2.0.0" + pypi: 1 + + - case-name: Windows - numpy fallback + os: windows-latest + python-version: "3.10" + numpy-build: ">=2.0.0" + numpy-requirement: ">=1.24,<1.25" + semidefinite: 1 + oldcython: 1 + nocython: 1 + condaforge: 1 + pytest-extra-options: "-W ignore:dep_util:DeprecationWarning -W \"ignore:The 'renderer' parameter of do_3d_projection\"" steps: - - uses: actions/checkout@v3 - - uses: conda-incubator/setup-miniconda@v2 + - uses: actions/checkout@v4 + - uses: conda-incubator/setup-miniconda@v3 with: auto-update-conda: true python-version: ${{ matrix.python-version }} @@ -128,23 +134,28 @@ jobs: # In the run, first we handle any special cases. We do this in bash # rather than in the GitHub Actions file directly, because bash gives us # a proper programming language to use. + # We install without build isolation so qutip is compiled with the + # version of cython, scipy, numpy in the test matrix, not a temporary + # version use in the installation virtual environment. 
run: | - QUTIP_TARGET="tests,graphics,semidefinite,ipython,extras" - if [[ -z "${{ matrix.nocython }}" ]]; then - QUTIP_TARGET="$QUTIP_TARGET,runtime_compilation" - fi - if [[ "${{ matrix.oldcython }}" ]]; then - pip install cython==0.29.36 - fi - export CI_QUTIP_WITH_OPENMP=${{ matrix.openmp }} - if [[ -z "${{ matrix.nomkl }}" ]]; then - conda install blas=*=mkl "numpy${{ matrix.numpy-requirement }}" "scipy${{ matrix.scipy-requirement }}" + # Install the extra requirement + python -m pip install pytest>=5.2 pytest-rerunfailures # tests + python -m pip install ipython # ipython + python -m pip install loky tqdm # extras + python -m pip install "coverage${{ matrix.coverage-requirement }}" chardet + python -m pip install pytest-cov coveralls pytest-fail-slow + + if [[ "${{ matrix.pypi }}" ]]; then + pip install "numpy${{ matrix.numpy-build }}" + pip install "scipy${{ matrix.scipy-requirement }}" + elif [[ -z "${{ matrix.nomkl }}" ]]; then + conda install blas=*=mkl "numpy${{ matrix.numpy-build }}" "scipy${{ matrix.scipy-requirement }}" elif [[ "${{ matrix.os }}" =~ ^windows.*$ ]]; then # Conda doesn't supply forced nomkl builds on Windows, so we rely on # pip not automatically linking to MKL. - pip install "numpy${{ matrix.numpy-requirement }}" "scipy${{ matrix.scipy-requirement }}" + pip install "numpy${{ matrix.numpy-build }}" "scipy${{ matrix.scipy-requirement }}" else - conda install nomkl "numpy${{ matrix.numpy-requirement }}" "scipy${{ matrix.scipy-requirement }}" + conda install nomkl "numpy${{ matrix.numpy-build }}" "scipy${{ matrix.scipy-requirement }}" fi if [[ -n "${{ matrix.conda-extra-pkgs }}" ]]; then conda install "${{ matrix.conda-extra-pkgs }}" @@ -153,9 +164,31 @@ jobs: # Use openmpi because mpich causes problems. 
Note, environment variable names change in v5 conda install "openmpi<5" mpi4py fi - python -m pip install -e .[$QUTIP_TARGET] - python -m pip install "coverage${{ matrix.coverage-requirement }}" - python -m pip install pytest-cov coveralls pytest-fail-slow + if [[ "${{ matrix.oldcython }}" ]]; then + python -m pip install cython==0.29.36 filelock matplotlib==3.5 + else + python -m pip install cython filelock + fi + + python -m pip install -e . -v --no-build-isolation + + if [[ "${{ matrix.nocython }}" ]]; then + python -m pip uninstall cython -y + fi + + if [[ "${{ matrix.pypi }}" ]]; then + python -m pip install "numpy${{ matrix.numpy-requirement }}" + elif [[ -z "${{ matrix.nomkl }}" ]]; then + conda install "numpy${{ matrix.numpy-requirement }}" + elif [[ "${{ matrix.os }}" =~ ^windows.*$ ]]; then + python -m pip install "numpy${{ matrix.numpy-requirement }}" + else + conda install nomkl "numpy${{ matrix.numpy-requirement }}" + fi + if [[ -n "${{ matrix.semidefinite }}" ]]; then + python -m pip install cvxpy>=1.0 cvxopt + fi + python -m pip install matplotlib>=1.2.1 # graphics - name: Package information run: | @@ -226,7 +259,7 @@ jobs: name: Verify Towncrier entry added runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 0 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 104456854c..1d260bd44a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,6 +2,6 @@ You are most welcome to contribute to QuTiP development by forking this repository and sending pull requests, or filing bug reports at the [issues page](https://github.com/qutip/qutip/issues). You can also help out with users' questions, or discuss proposed changes in the [QuTiP discussion group](https://groups.google.com/g/qutip). -All code contributions are acknowledged in the [contributors](https://qutip.org/docs/latest/contributors.html) section in the documentation. 
+All code contributions are acknowledged in the [contributors](https://qutip.readthedocs.io/en/stable/contributors.html) section in the documentation. -For more information, including technical advice, please see the ["contributing to QuTiP development" section of the documentation](https://qutip.org/docs/latest/development/contributing.html). +For more information, including technical advice, please see the ["contributing to QuTiP development" section of the documentation](https://qutip.readthedocs.io/en/stable/development/contributing.html). diff --git a/README.md b/README.md index ba6dbf7c6c..98f1b2790b 100644 --- a/README.md +++ b/README.md @@ -12,6 +12,8 @@ QuTiP: Quantum Toolbox in Python [J. Lishman](https://github.com/jakelishman), [S. Cross](https://github.com/hodgestar), [A. Galicia](https://github.com/AGaliciaMartinez), +[P. Menczel](https://github.com/pmenczel), +[P. Hopf](https://github.com/flowerthrower/), [P. D. Nation](https://github.com/nonhermitian), and [J. R. Johansson](https://github.com/jrjohansson) @@ -22,19 +24,6 @@ and [J. R. Johansson](https://github.com/jrjohansson) [![PyPi Downloads](https://img.shields.io/pypi/dm/qutip?label=downloads%20%7C%20pip&logo=PyPI)](https://pypi.org/project/qutip) [![Conda-Forge Downloads](https://img.shields.io/conda/dn/conda-forge/qutip?label=downloads%20%7C%20conda&logo=Conda-Forge)](https://anaconda.org/conda-forge/qutip) -> **Note** -> -> The master branch now contains the alpha version of QuTiP 5. This is major -> revision that breaks compatibility in many small ways withh QuTiP 4.7. -> -> If you need to track QuTiP 4.7 changes or submit pull requests for 4.7, -> please use the `qutip-4.7.X` branch. -> -> If you need to track QuTiP 5 changes or submit pull request for 5, -> please use the `master` branch (and not the `dev.major` branch). -> -> The change to master happened on 16 January 2023 in commit @fccec5d. - QuTiP is open-source software for simulating the dynamics of closed and open quantum systems. 
It uses the excellent Numpy, Scipy, and Cython packages as numerical backends, and graphical output is provided by Matplotlib. QuTiP aims to provide user-friendly and efficient numerical simulations of a wide variety of quantum mechanical problems, including those with Hamiltonians and/or collapse operators with arbitrary time-dependence, commonly found in a wide range of physics applications. @@ -70,10 +59,10 @@ pip install qutip to get the minimal installation. You can instead use the target `qutip[full]` to install QuTiP with all its optional dependencies. -For more details, including instructions on how to build from source, see [the detailed installation guide in the documentation](https://qutip.org/docs/latest/installation.html). +For more details, including instructions on how to build from source, see [the detailed installation guide in the documentation](https://qutip.readthedocs.io/en/stable/installation.html). All back releases are also available for download in the [releases section of this repository](https://github.com/qutip/qutip/releases), where you can also find per-version changelogs. -For the most complete set of release notes and changelogs for historic versions, see the [changelog](https://qutip.org/docs/latest/changelog.html) section in the documentation. +For the most complete set of release notes and changelogs for historic versions, see the [changelog](https://qutip.readthedocs.io/en/stable/changelog.html) section in the documentation. The pre-release of QuTiP 5.0 is available on PyPI and can be installed using pip: @@ -107,9 +96,9 @@ Contribute You are most welcome to contribute to QuTiP development by forking this repository and sending pull requests, or filing bug reports at the [issues page](https://github.com/qutip/qutip/issues). You can also help out with users' questions, or discuss proposed changes in the [QuTiP discussion group](https://groups.google.com/g/qutip). 
-All code contributions are acknowledged in the [contributors](https://qutip.org/docs/latest/contributors.html) section in the documentation. +All code contributions are acknowledged in the [contributors](https://qutip.readthedocs.io/en/stable/contributors.html) section in the documentation. -For more information, including technical advice, please see the ["contributing to QuTiP development" section of the documentation](https://qutip.org/docs/latest/development/contributing.html). +For more information, including technical advice, please see the ["contributing to QuTiP development" section of the documentation](https://qutip.readthedocs.io/en/stable/development/contributing.html). Citing QuTiP diff --git a/VERSION b/VERSION index 6f5f6c392d..5ef8a7e04a 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -5.0.0.dev +5.1.0.dev diff --git a/doc/apidoc/classes.rst b/doc/apidoc/classes.rst index c8182bcb94..3240b4aafe 100644 --- a/doc/apidoc/classes.rst +++ b/doc/apidoc/classes.rst @@ -7,7 +7,7 @@ Classes .. _classes-qobj: Qobj --------------- +---- .. autoclass:: qutip.core.qobj.Qobj :members: @@ -16,17 +16,25 @@ Qobj .. _classes-qobjevo: QobjEvo --------------- +------- .. autoclass:: qutip.core.cy.qobjevo.QobjEvo :members: :special-members: __call__ +.. _classes-coreoptions: + +CoreOptions +----------- + +.. autoclass:: qutip.core.options.CoreOptions + :members: + .. _classes-bloch: Bloch sphere ---------------- +------------ .. autoclass:: qutip.bloch.Bloch :members: @@ -240,20 +248,22 @@ Solver Options and Results :inherited-members: :exclude-members: add_processor, add -.. autoclass:: qutip.solver.result.MultiTrajResult +.. autoclass:: qutip.solver.multitrajresult.MultiTrajResult :members: :inherited-members: :exclude-members: add_processor, add, add_end_condition -.. autoclass:: qutip.solver.result.McResult +.. 
autoclass:: qutip.solver.result.TrajectoryResult + :show-inheritance: :members: - :inherited-members: - :exclude-members: add_processor, add, add_end_condition -.. autoclass:: qutip.solver.result.NmmcResult +.. autoclass:: qutip.solver.multitrajresult.McResult + :show-inheritance: + :members: + +.. autoclass:: qutip.solver.multitrajresult.NmmcResult + :show-inheritance: :members: - :inherited-members: - :exclude-members: add_processor, add, add_end_condition .. _classes-piqs: @@ -274,6 +284,12 @@ Distribution functions .. autoclass:: qutip.distributions.Distribution :members: +CompilationOptions +------------------ + +.. autoclass:: qutip.core.coefficient.CompilationOptions + + .. Docstrings are empty... diff --git a/doc/apidoc/functions.rst b/doc/apidoc/functions.rst index f64707a415..475c69aa98 100644 --- a/doc/apidoc/functions.rst +++ b/doc/apidoc/functions.rst @@ -21,6 +21,13 @@ Quantum Operators :members: charge, commutator, create, destroy, displace, fcreate, fdestroy, jmat, num, qeye, identity, momentum, phase, position, qdiags, qutrit_ops, qzero, sigmam, sigmap, sigmax, sigmay, sigmaz, spin_Jx, spin_Jy, spin_Jz, spin_Jm, spin_Jp, squeeze, squeezing, tunneling, qeye_like, qzero_like +Quantum Gates +----------------- + +.. automodule:: qutip.core.gates + :members: rx, ry, rz, sqrtnot, snot, phasegate, qrot, cy_gate, cz_gate, s_gate, t_gate, cs_gate, ct_gate, cphase, cnot, csign, berkeley, swapalpha, swap, iswap, sqrtswap, sqrtiswap, fredkin, molmer_sorensen, toffoli, hadamard_transform, qubit_clifford_group, globalphase + + Energy Restricted Operators --------------------------- @@ -34,7 +41,10 @@ Quantum Objects --------------- .. automodule:: qutip.core.qobj - :members: ptrace, issuper, isoper, isoperket, isoperbra, isket, isbra, isherm + :members: ptrace + +.. 
automodule:: qutip.core.properties + :members: issuper, isoper, isoperket, isoperbra, isket, isbra, isherm Random Operators and States diff --git a/doc/biblio.rst b/doc/biblio.rst index 1bdabce891..9ae69b60df 100644 --- a/doc/biblio.rst +++ b/doc/biblio.rst @@ -40,6 +40,10 @@ Bibliography C. Wood, J. Biamonte, D. G. Cory, *Tensor networks and graphical calculus for open quantum systems*. :arxiv:`1111.6950` +.. [AKN98] + D. Aharonov, A. Kitaev, and N. Nisan, *Quantum circuits with mixed states*, in Proceedings of the + thirtieth annual ACM symposium on Theory of computing, 20-30 (1998). :arxiv:`quant-ph/9806029` + .. [dAless08] D. d’Alessandro, *Introduction to Quantum Control and Dynamics*, (Chapman & Hall/CRC, 2008). diff --git a/doc/changelog.rst b/doc/changelog.rst index a916a657c5..0d4afdb04a 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -6,6 +6,530 @@ Change Log .. towncrier release notes start +QuTiP 5.0.3 (2024-06-20) +======================== + +Micro release to add support for numpy 2. + +Bug Fixes +--------- + +- Bug Fix in Process Matrix Rendering. (#2400, by Anush Venkatakrishnan) +- Fix steadystate permutation being reversed. (#2443) +- Add parallelizing support for `vernN` methods with `mcsolve`. (#2454 by Utkarsh) + + +Documentation +------------- + +- Added `qutip.core.gates` to apidoc/functions.rst and a Gates section to guide-states.rst. (#2441, by alan-nala) + + +Miscellaneous +------------- + +- Add support for numpy 2 (#2421, #2457) +- Add support for scipy 1.14 (#2469) + + +QuTiP 5.0.2 (2024-05-16) +======================== + +Bug Fixes +--------- + +- Use CSR as the default for expand_operator (#2380, by BoxiLi) +- Fix import of the partial_transpose function. + Ensures that the negativity function can handle both kets and density operators as input. 
(#2371, by vikas-chaudhary-2802) +- Ensure that end_condition of mcsolve result doesn't say target tolerance reached when it hasn't (#2382, by magzpavz) +- Fix two bugs in steadystate floquet solver, and adjust tests to be sensitive to this issue. (#2393, by Neill Lambert) + + +Documentation +------------- + +- Correct a mistake in the doc (#2401, by PositroniumJS) +- Fix #2156: Correct a sample of code in the doc (#2409, by PositroniumJS) + + +Miscellaneous +------------- + +- Better metadata management in operators creation functions (#2388) +- Implicitly set minimum python version to 3.9 (#2413) +- Qobj.__eq__ uses core's settings rtol. (#2425) +- Only normalize solver states when the initial state is already normalized. (#2427) + + +QuTiP 5.0.1 (2024-04-03) +======================== + + +Patch update fixing small issues with v5.0.0 release + +- Fix broken links in the documentation when migrating to readthedocs +- Fix readthedocs search feature +- Add setuptools to runtime compilation requirements +- Fix mcsolve documentation for open systems +- Fix OverFlowError in progress bars + + +QuTiP 5.0.0 (2024-03-26) +======================== + + +QuTiP 5 is a redesign of many of the core components of QuTiP (``Qobj``, +``QobjEvo``, solvers) to make them more consistent and more flexible. + +``Qobj`` may now be stored in either sparse or dense representations, +and the two may be mixed sensibly as needed. ``QobjEvo`` is now used +consistently throughout QuTiP, and the implementation has been +substantially cleaned up. A new ``Coefficient`` class is used to +represent the time-dependent factors inside ``QobjEvo``. + +The solvers have been rewritten to work well with the new data layer +and the concept of ``Integrators`` which solve ODEs has been introduced. +In future, new data layers may provide their own ``Integrators`` +specialized to their representation of the underlying data. 
+ +Much of the user-facing API of QuTiP remains familiar, but there have +had to be many small breaking changes. If we can make changes to +easy migrating code from QuTiP 4 to QuTiP 5, please let us know. + +An extensive list of changes follows. + +Contributors +------------ + +QuTiP 5 has been a large effort by many people over the last three years. + +In particular: + +- Jake Lishman led the implementation of the new data layer and coefficients. +- Eric Giguère led the implementation of the new QobjEvo interface and solvers. +- Boxi Li led the updating of QuTiP's QIP support and the creation of ``qutip_qip``. + +Other members of the QuTiP Admin team have been heavily involved in reviewing, +testing and designing QuTiP 5: + +- Alexander Pitchford +- Asier Galicia +- Nathan Shammah +- Shahnawaz Ahmed +- Neill Lambert +- Simon Cross +- Paul Menczel + +Two Google Summer of Code contributors updated the tutorials and benchmarks to +QuTiP 5: + +- Christian Staufenbiel updated many of the tutorials (``). +- Xavier Sproken update the benchmarks (``). + +During an internship at RIKEN, Patrick Hopf created a new quantum control method and +improved the existing methods interface: + +- Patrick Hopf created new quantum control package (``). + +Four experimental data layers backends were written either as part of Google Summer +of Code or as separate projects. While these are still alpha quality, they helped +significantly to test the data layer API: + +- ``qutip-tensorflow``: a TensorFlow backend by Asier Galicia (``) +- ``qutip-cupy``: a CuPy GPU backend by Felipe Bivort Haiek (``)` +- ``qutip-tensornetwork``: a TensorNetwork backend by Asier Galicia (``) +- ``qutip-jax``: a JAX backend by Eric Giguère (``) + +Finally, Yuji Tamakoshi updated the visualization function and added animation +functions as part of Google Summer of Code project. 
+ +We have also had many other contributors, whose specific contributions are +detailed below: + +- Pieter Eendebak (updated the required SciPy to 1.5+, `#1982 `). +- Pieter Eendebak (reduced import times by setting logger names, `#1981 `) +- Pieter Eendebak (Allow scipy 1.12 to be used with qutip, `#2354 `) +- Xavier Sproken (included C header files in the source distribution, `#1971 `) +- Christian Staufenbiel (added support for multiple collapse operators to the Floquet solver, `#1962 `) +- Christian Staufenbiel (fixed the basis used in the Floquet Master Equation solver, `#1952 `) +- Christian Staufenbiel (allowed the ``bloch_redfield_tensor`` function to accept strings and callables for `a_ops`, `#1951 `) +- Christian Staufenbiel (Add a guide on Superoperators, Pauli Basis and Channel Contraction, `#1984 `) +- Henrique Silvéro (allowed ``qutip_qip`` to be imported as ``qutip.qip``, `#1920 `) +- Florian Hopfmueller (added a vastly improved implementations of ``process_fidelity`` and ``average_gate_fidelity``, `#1712 `, `#1748 `, `#1788 `) +- Felipe Bivort Haiek (fixed inaccuracy in docstring of the dense implementation of negation, `#1608 `) +- Rajath Shetty (added support for specifying colors for individual points, vectors and states display by `qutip.Bloch`, `#1335 `) +- Rochisha Agarwal (Add dtype to printed ouput of qobj, `#2352 `) +- Kosuke Mizuno (Add arguments of plot_wigner() and plot_wigner_fock_distribution() to specify parameters for wigner(), `#2057 `) +- Matt Ord (Only pre-compute density matrices if keep_runs_results is False, `#2303 `) +- Daniel Moreno Galán (Add the possibility to customize point colors as in V4 and fix point plot behavior for 'l' style, `#2303 `) +- Sola85 (Fixed simdiag not returning orthonormal eigenvectors, `#2269 `) +- Edward Thomas (Fix LaTeX display of Qobj state in Jupyter cell outputs, `#2272 `) +- Bogdan Reznychenko (Rework `kraus_to_choi` making it faster, `#2284 `) +- gabbence95 (Fix typos in `expect` documentation, 
`#2331 `) +- lklivingstone (Added __repr__ to QobjEvo, `#2111 `) +- Yuji Tamakoshi (Improve print(qutip.settings) by make it shorter, `#2113 `) +- khnikhil (Added fermionic annihilation and creation operators, `#2166 `) +- Daniel Weiss (Improved sampling algorithm for mcsolve, `#2218 `) +- SJUW (Increase missing colorbar padding for matrix_histogram_complex() from 0 to 0.05, `#2181 `) +- Valan Baptist Mathuranayagam (Changed qutip-notebooks to qutip-tutorials and fixed the typo in the link redirecting to the changelog section in the PR template, `#2107 `) +- Gerardo Jose Suarez (Added information on sec_cutoff to the documentation, `#2136 `) +- Cristian Emiliano Godinez Ramirez (Added inherited members to API doc of MESolver, SMESolver, SSESolver, NonMarkovianMCSolver, `#2167 `) +- Andrey Rakhubovsky (Corrected grammar in Bloch-Redfield master equation documentation, `#2174 `) +- Rushiraj Gadhvi (qutip.ipynbtools.version_table() can now be called without Cython installed, `#2110 `) +- Harsh Khilawala (Moved HTMLProgressBar from qutip/ipynbtools.py to qutip/ui/progressbar.py, `#2112 `) +- Avatar Srinidhi P V (Added new argument bc_type to take boundary conditions when creating QobjEvo, `#2114 `) +- Andrey Rakhubovsky (Fix types in docstring of projection(), `#2363 `) + + +Qobj changes +------------ + +Previously ``Qobj`` data was stored in a SciPy-like sparse matrix. Now the +representation is flexible. Implementations for dense and sparse formats are +included in QuTiP and custom implementations are possible. QuTiP's performance +on dense states and operators is significantly improved as a result. + +Some highlights: + +- The data is still acessible via the ``.data`` attribute, but is now an + instance of the underlying data type instead of a SciPy-like sparse matrix. + The operations available in ``qutip.core.data`` may be used on ``.data``, + regardless of the data type. +- ``Qobj`` with different data types may be mixed in arithmetic and other + operations. 
A sensible output type will be automatically determined. +- The new ``.to(...)`` method may be used to convert a ``Qobj`` from one data type + to another. E.g. ``.to("dense")`` will convert to the dense representation and + ``.to("csr")`` will convert to the sparse type. +- Many ``Qobj`` methods and methods that create ``Qobj`` now accepted a ``dtype`` + parameter that allows the data type of the returned ``Qobj`` to specified. +- The new ``&`` operator may be used to obtain the tensor product. +- The new ``@`` operator may be used to obtain the matrix / operator product. + ``bar @ ket`` returns a scalar. +- The new ``.contract()`` method will collapse 1D subspaces of the dimensions of + the ``Qobj``. +- The new ``.logm()`` method returns the matrix logarithm of an operator. +- The methods ``.set_data``, ``.get_data``, ``.extract_state``, ``.eliminate_states``, + ``.evaluate`` and ``.check_isunitary`` have been removed. +- The property ``dtype`` return the representation of the data used. +- The new ``data_as`` allow to obtain the data as a common python formats: + numpy array, scipy sparse matrix, JAX Array, etc. + +QobjEvo changes +--------------- + +The ``QobjEvo`` type for storing time-dependent quantum objects has been +significantly expanded, standardized and extended. The time-dependent +coefficients are now represented using a new ``Coefficient`` type that +may be independently created and manipulated if required. + +Some highlights: + +- The ``.compile()`` method has been removed. Coefficients specified as + strings are automatically compiled if possible and the compilation is + cached across different Python runs and instances. +- Mixing coefficient types within a single ``Qobj`` is now supported. +- Many new attributes were added to ``QobjEvo`` for convenience. Examples + include ``.dims``, ``.shape``, ``.superrep`` and ``.isconstant``. +- Many old attributes such as ``.cte``, ``.use_cython``, ``.type``, ``.const``, + and ``.coeff_file`` were removed. 
+- A new ``Spline`` coefficient supports spline interpolations of different + orders. The old ``Cubic_Spline`` coefficient has been removed. +- The new ``.arguments(...)`` method allows additional arguments to the + underlying coefficient functions to be updated. +- The ``_step_func_coeff`` argument has been replaced by the ``order`` + parameter. ``_step_func_coeff=False`` is equivalent to ``order=3``. + ``_step_func_coeff=True`` is equivalent to ``order=0``. Higher values + of ``order`` give spline interpolations of higher orders. +- The spline type can take ``bc_type`` to control the boundary conditions. +- QobjEvo can be created from the multiplication of a Qobj with a coefficient: + ``oper * qutip.coefficient(f, args=args)`` is equivalent to + ``qutip.QobjEvo([[oper, f]], args=args)``. +- Coefficient functions can be defined in a pythonic manner: ``def f(t, A, w)``. + The ``args`` dictionary as a second argument is no longer needed. + Functions using the exact ``f(t, args)`` signature will use the old method for + backward compatibility. + +Solver changes +-------------- + +The solvers in QuTiP have been heavily reworked and standardized. +Under the hood solvers now make use of swappable ODE ``Integrators``. +Many ``Integrators`` are included (see the list below) and +custom implementations are possible. Solvers now consistently +accept a ``QobjEvo`` instance as the Hamiltonian or Liouvillian, or +any object which can be passed to the ``QobjEvo`` constructor. + +A breakdown of highlights follows. + +All solvers: + +- Solver options are now supplied in an ordinary Python dict. + ``qutip.Options`` is deprecated and returns a dict for backwards + compatibility. +- A specific ODE integrator may be selected by supplying a + ``method`` option. +- Each solver provides a class interface.
Creating an instance + of the class allows a solver to be run multiple times for the + same system without having to repeatedly reconstruct the + right-hand side of the ODE to be integrated. +- A ``QobjEvo`` instance is accepted for most operators, e.g., + ``H``, ``c_ops``, ``e_ops``, ``a_ops``. +- The progress bar is now selected using the ``progress_bar`` option. + A new progress bar using the ``tqdm`` Python library is provided. +- Dynamic arguments, where the value of an operator depends on + the current state of the evolution, have had their interface reworked. Now a property of the + solver is to be used as an argument: + ``args={"state": MESolver.StateFeedback(default=rho0)}`` + +Integrators: + +- The SciPy zvode integrator is available with the BDF and + Adams methods as ``bdf`` and ``adams``. +- The SciPy dop853 integrator (an eighth order Runge-Kutta method by + Dormand & Prince) is available as ``dop853``. +- The SciPy lsoda integrator is available as ``lsoda``. +- QuTiP's own implementation of Verner's "most efficient" Runge-Kutta methods + of order 7 and 9 are available as ``vern7`` and ``vern9``. See + http://people.math.sfu.ca/~jverner/ for a description of the methods. +- QuTiP's own implementation of a solver that directly diagonalizes the + system to be integrated is available as ``diag``. It only works on + time-independent systems and is slow to set up, but once the diagonalization + is complete, it generates solutions very quickly. +- QuTiP's own implementation of an approximate Krylov subspace integrator is + available as ``krylov``. This integrator is only usable with ``sesolve``. + +Result class: + +- A new ``.e_data`` attribute provides expectation values as a dictionary. + Unlike ``.expect``, the values are provided in a Python list rather than + a numpy array, which better supports non-numeric types. +- The contents of the ``.stats`` attribute changed significantly and are + now more consistent across solvers.
+ +Monte-Carlo Solver (mcsolve): + +- The system, H, may now be a super-operator. +- The ``seed`` parameter now supports supplying numpy ``SeedSequence`` or + ``Generator`` types. +- The new ``timeout`` and ``target_tol`` parameters allow the solver to exit + early if a timeout or target tolerance is reached. +- The ntraj option no longer supports a list of numbers of trajectories. + Instead, just run the solver multiple times and use the class ``MCSolver`` + if setting up the solver uses a significant amount of time. +- The ``map_func`` parameter has been replaced by the ``map`` option. +- A loky-based parallel map has been added. +- An mpi-based parallel map has been added. +- The result returned by ``mcsolve`` now supports calculating photocurrents + and calculating the steady state over N trajectories. +- The old ``parfor`` parallel execution function has been removed from + ``qutip.parallel``. Use ``parallel_map``, ``loky_map`` or ``mpi_pmap`` instead. +- Added improved sampling options which converge much faster when the + probability of collapse is small. + +Non Markovian Monte-Carlo Solver (nm_mcsolve): + +- New Monte-Carlo Solver supporting negative decay rates. +- Based on the influence martingale approach, Donvil et al., Nat Commun 13, 4140 (2022). +- Most of the improvements made to the regular Monte-Carlo solver are also available here. +- The value of the influence martingale is available through the ``.trace`` attribute of the result. + +Stochastic Equation Solvers (ssesolve, smesolve): + +- Function calls have changed greatly: many keyword arguments are now options. +- m_ops and dW_factors can now be changed from the default through the new class interface only. +- Use the same parallel maps as mcsolve: support for loky and mpi map added. +- End conditions ``timeout`` and ``target_tol`` added. +- The ``seed`` parameter now supports supplying numpy ``SeedSequence``. +- The Wiener function is now available as a feedback.
+ +Bloch-Redfield Master Equation Solver (brmesolve): + +- The ``a_ops`` and ``spectra`` support implementations have been heavily reworked to + reuse the techniques from the new Coefficient and QobjEvo classes. +- The ``use_secular`` parameter has been removed. Use ``sec_cutoff=-1`` instead. +- The required tolerance is now read from ``qutip.settings``. + +Krylov Subspace Solver (krylovsolve): + +- The Krylov solver is now implemented using ``SESolver`` and the ``krylov`` + ODE integrator. The function ``krylovsolve`` is maintained for convenience + and now supports many more options. +- The ``sparse`` parameter has been removed. Supply a sparse ``Qobj`` for the + Hamiltonian instead. + +Floquet Solver (fsesolve and fmmesolve): + +- The Floquet solver has been rewritten to use a new ``FloquetBasis`` class + which manages the transformations from lab to Floquet basis and back. +- Many of the internal methods used by the old Floquet solvers have + been removed. The Floquet tensor may still be retrieved using + the function ``floquet_tensor``. +- The Floquet Markov Master Equation solver has had many changes and + new options added. The environment temperature may be specified using + ``w_th``, and the result states are stored in the lab basis and optionally + in the Floquet basis using ``store_floquet_state``. +- The spectra functions supplied to ``fmmesolve`` must now be vectorized + (i.e. accept and return numpy arrays for frequencies and densities) and + must accept negative frequencies (i.e. usually include a ``w > 0`` factor + so that the returned densities are zero for negative frequencies). +- The number of sidebands to keep, ``kmax``, may only be supplied when using + the ``FMESolver``. +- The ``Tsteps`` parameter has been removed from both ``fsesolve`` and + ``fmmesolve``. The ``precompute`` option to ``FloquetBasis`` may be used + instead. + +Evolution of State Solver (essolve): + +- The function ``essolve`` has been removed.
Use the ``diag`` integration + method with ``sesolve`` or ``mesolve`` instead. + +Steady-state solvers (steadystate module): + +- The ``method`` parameter and ``solver`` parameters have been separated. Previously + they were mixed together in the ``method`` parameter. +- The previous options are now passed as parameters to the steady state + solver and mostly passed through to the underlying SciPy functions. +- The logging and statistics have been removed. + +Correlation functions (correlation module): + +- A new ``correlation_3op`` function has been added. It supports ``MESolver`` + or ``BRMESolver``. +- The ``correlation``, ``correlation_4op``, and ``correlation_ss`` functions have been + removed. +- Support for calculating correlation with ``mcsolve`` has been removed. + +Propagators (propagator module): + +- A class interface, ``qutip.Propagator``, has been added for propagators. +- Propagation of time-dependent systems is now supported using ``QobjEvo``. +- The ``unitary_mode`` and ``parallel`` options have been removed. + +Correlation spectra (spectrum module): + +- The functions ``spectrum_ss`` and ``spectrum_pi`` have been removed and + are now internal functions. +- The ``use_pinv`` parameter for ``spectrum`` has been removed and the + functionality merged into the ``solver`` parameter. Use ``solver="pi"`` + instead. + +Hierarchical Equation of Motion Solver (HEOM) + +- Updated the solver to use the new QuTiP integrators and data layer. +- Updated all the HEOM tutorials to QuTiP 5. +- Added support for combining bosonic and fermionic baths. +- Sped up the construction of the RHS of the HEOM solver by a factor of 4x. +- As in QuTiP 4, the HEOM supports arbitrary spectral densities, bosonic and fermionic baths, Páde and Matsubara expansions of the correlation functions, calculating the Matsubara terminator and inspection of the ADOs (auxiliary density operators). 
+ + +QuTiP core +---------- + +There have been numerous other small changes to core QuTiP features: + +- ``qft(...)``, the function that returns the quantum Fourier + transform operator, was moved from ``qutip.qip.algorithm`` into ``qutip``. +- The Bloch-Redfield solver tensor, ``brtensor``, has been moved into + ``qutip.core``. See the section above on the Bloch-Redfield solver + for details. +- The functions ``mat2vec`` and ``vec2mat`` for transforming states to and + from super-operator states have been renamed to ``stack_columns`` and + ``unstack_columns``. +- The function ``liouvillian_ref`` has been removed. Use ``liouvillian`` + instead. +- The superoperator transforms ``super_to_choi``, ``choi_to_super``, + ``choi_to_kraus``, ``choi_to_chi`` and ``chi_to_choi`` have been removed. + Use ``to_choi``, ``to_super``, ``to_kraus`` and ``to_chi`` instead. +- All of the random object creation functions now accept a + numpy ``Generator`` as a seed. +- The ``dims`` parameter of all random object creation functions has + been removed. Supply the dimensions as the first parameter if + explicit dimensions are required. +- The function ``rand_unitary_haar`` has been removed. Use + ``rand_unitary(distribution="haar")`` instead. +- The functions ``rand_dm_hs`` and ``rand_dm_ginibre`` have been removed. + Use ``rand_dm(distribution="hs")`` and ``rand_dm(distribution="ginibre")`` + instead. +- The function ``rand_ket_haar`` has been removed. Use + ``rand_ket(distribution="haar")`` instead. +- The measurement functions have had the ``target`` parameter for + expanding the measurement operator removed. Use ``expand_operator`` + to expand the operator instead. +- ``qutip.Bloch`` now supports applying colours per-point, state or vector in + ``add_point``, ``add_states``, and ``add_vectors``. +- Dimensions use a class instead of layered lists. +- Allow measurement functions to support degenerate operators. +- Add ``qeye_like`` and ``qzero_like``.
+- Added fermionic annihilation and creation operators. + +QuTiP settings +-------------- + +Previously ``qutip.settings`` was an ordinary module. Now ``qutip.settings`` is +an instance of a settings class. All the runtime modifiable settings for +core operations are in ``qutip.settings.core``. The other settings are not +modifiable at runtime. + +- Removed ``load``. ``reset`` and ``save`` functions. +- Removed ``.debug``, ``.fortran``, ``.openmp_thresh``. +- New ``.compile`` stores the compilation options for compiled coefficients. +- New ``.core["rtol"]`` core option gives the default relative tolerance used by QuTiP. +- The absolute tolerance setting ``.atol`` has been moved to ``.core["atol"]``. + +Visualization +------------- + +- Added arguments to ``plot_wigner`` and ``plot_wigner_fock_distribution`` to specify parameters for ``wigner``. +- Removed ``Bloch3D``. The same functionality is provided by ``Bloch``. +- Added ``fig``, ``ax`` and ``cmap`` keyword arguments to all visualization functions. +- Most visualization functions now respect the ``colorblind_safe`` setting. +- Added new functions to create animations from a list of ``Qobj`` or directly from solver results with saved states. + + +Package reorganization +---------------------- + +- ``qutip.qip`` has been moved into its own package, qutip-qip. Once installed, qutip-qip is available as either ``qutip.qip`` or ``qutip_qip``. Some widely useful gates have been retained in ``qutip.gates``. +- ``qutip.control`` has been moved to qutip-qtrl and once installed qutip-qtrl is available as either ``qutip.control`` or ``qutip_qtrl``. Note that ``quitp_qtrl`` is provided primarily for backwards compatibility. Improvements to optimal control will take place in the new ``qutip_qoc`` package. +- ``qutip.lattice`` has been moved into its own package, qutip-lattice. It is available from ``. +- ``qutip.sparse`` has been removed. 
It contained the old sparse matrix representation and is replaced by the new implementation in ``qutip.data``. +- ``qutip.piqs`` functions are no longer available from the ``qutip`` namespace. They are accessible from ``qutip.piqs`` instead. + +Miscellaneous +------------- + +- Support has been added for 64-bit integer sparse matrix indices, allowing + sparse matrices with up to 2**63 rows and columns. This support needs to + be enabled at compilation time by calling ``setup.py`` and passing + ``--with-idxint-64``. + +Feature removals +---------------- + +- Support for OpenMP has been removed. If there is enough demand and a good plan for how to organize it, OpenMP support may return in a future QuTiP release. +- The ``qutip.parfor`` function has been removed. Use ``qutip.parallel_map`` instead. +- ``qutip.graph`` has been removed and replaced by SciPy's graph functions. +- ``qutip.topology`` has been removed. It contained only one function ``berry_curvature``. +- The ``~/.qutip/qutiprc`` config file is no longer supported. It contained settings for the OpenMP support. +- Deprecate ``three_level_atom`` +- Deprecate ``orbital`` + + +Changes from QuTiP 5.0.0b1: +--------------------------- + +Features +-------- + +- Add dtype to printed ouput of qobj (#2352 by Rochisha Agarwal) + + +Miscellaneous +------------- + +- Allow scipy 1.12 to be used with qutip. (#2354 by Pieter Eendebak) + + QuTiP 5.0.0b1 (2024-03-04) ========================== @@ -58,7 +582,7 @@ Features - Change the order of parameters in expand_operator (#1991) - Add `svn` and `solve` to dispatched (#2002) - Added nm_mcsolve to provide support for Monte-Carlo simulations of master equations with possibly negative rates. The method implemented here is described in arXiv:2209.08958 [quant-ph]. 
(#2070 by pmenczel) -- Add support for combining bosinic and fermionic HEOM baths (#2089) +- Add support for combining bosonic and fermionic HEOM baths (#2089) - Added __repr__ to QobjEvo (#2111 by lklivingstone) - Improve print(qutip.settings) by make it shorter (#2113 by tamakoshi2001) - Create the `trace_oper_ket` operation (#2126) diff --git a/doc/changes/2318.feature b/doc/changes/2318.feature new file mode 100644 index 0000000000..d8b7f54b23 --- /dev/null +++ b/doc/changes/2318.feature @@ -0,0 +1,2 @@ +Create `run_from_experiment`, which allows to run stochastic evolution from +know noise or measurements. diff --git a/doc/changes/2327.feature b/doc/changes/2327.feature new file mode 100644 index 0000000000..de21691907 --- /dev/null +++ b/doc/changes/2327.feature @@ -0,0 +1 @@ +Add types hints in core solvers functions. diff --git a/doc/changes/2329.misc b/doc/changes/2329.misc new file mode 100644 index 0000000000..73e562b4a3 --- /dev/null +++ b/doc/changes/2329.misc @@ -0,0 +1 @@ +Add auto_real_casting options. diff --git a/doc/changes/2369.feature b/doc/changes/2369.feature new file mode 100644 index 0000000000..3eb02eb8a9 --- /dev/null +++ b/doc/changes/2369.feature @@ -0,0 +1 @@ +Weighted trajectories in trajectory solvers (enables improved sampling for nm_mcsolve) \ No newline at end of file diff --git a/doc/changes/2403.doc b/doc/changes/2403.doc new file mode 100644 index 0000000000..64c49283c6 --- /dev/null +++ b/doc/changes/2403.doc @@ -0,0 +1 @@ +Improve guide-settings page. \ No newline at end of file diff --git a/doc/changes/2416.feature b/doc/changes/2416.feature new file mode 100644 index 0000000000..30dffbec0b --- /dev/null +++ b/doc/changes/2416.feature @@ -0,0 +1,2 @@ +Updated `qutip.core.metrics.dnorm` to have an efficient speedup when finding the difference of two unitaries. We use a result on page 18 of +D. Aharonov, A. Kitaev, and N. Nisan, (1998). 
\ No newline at end of file diff --git a/doc/changes/2436.doc b/doc/changes/2436.doc new file mode 100644 index 0000000000..1066f15917 --- /dev/null +++ b/doc/changes/2436.doc @@ -0,0 +1 @@ +Tidy up formatting of type aliases in the api documentation \ No newline at end of file diff --git a/doc/changes/2437.feature b/doc/changes/2437.feature new file mode 100644 index 0000000000..4e1052b30c --- /dev/null +++ b/doc/changes/2437.feature @@ -0,0 +1 @@ +Allow mixed initial conditions for mcsolve and nm_mcsolve. \ No newline at end of file diff --git a/doc/changes/2445.bugfix b/doc/changes/2445.bugfix new file mode 100644 index 0000000000..569c5c556d --- /dev/null +++ b/doc/changes/2445.bugfix @@ -0,0 +1,3 @@ +Fix a dimension problem for the argument color of Bloch.add_states +Clean-up of the code in Bloch.add_state +Add Bloch.add_arc and Bloch.add_line in the guide on Bloch class \ No newline at end of file diff --git a/doc/changes/2453.feature b/doc/changes/2453.feature new file mode 100644 index 0000000000..80636634d2 --- /dev/null +++ b/doc/changes/2453.feature @@ -0,0 +1 @@ +Add dispatcher for sqrtm \ No newline at end of file diff --git a/doc/changes/2466.bugfix b/doc/changes/2466.bugfix new file mode 100644 index 0000000000..7f04e25dbf --- /dev/null +++ b/doc/changes/2466.bugfix @@ -0,0 +1 @@ +Fixed rounding error in dicke_trace_function that resulted in negative eigenvalues. diff --git a/doc/changes/2473.misc b/doc/changes/2473.misc new file mode 100644 index 0000000000..103b22aeb3 --- /dev/null +++ b/doc/changes/2473.misc @@ -0,0 +1 @@ +Add type hints for Qobj creation functions. diff --git a/doc/changes/2474.feature b/doc/changes/2474.feature new file mode 100644 index 0000000000..a082c289e3 --- /dev/null +++ b/doc/changes/2474.feature @@ -0,0 +1 @@ +Allow merging results from stochastic solvers. 
diff --git a/doc/changes/2484.bugfix b/doc/changes/2484.bugfix new file mode 100644 index 0000000000..bb2559b969 --- /dev/null +++ b/doc/changes/2484.bugfix @@ -0,0 +1 @@ +This change makes expm, cosm, sinm work with jax. diff --git a/doc/changes/2491.bugfix b/doc/changes/2491.bugfix new file mode 100644 index 0000000000..2abe5945d4 --- /dev/null +++ b/doc/changes/2491.bugfix @@ -0,0 +1 @@ +Fix stochastic solver step method diff --git a/doc/changes/2493.feature b/doc/changes/2493.feature new file mode 100644 index 0000000000..b7bb95f78e --- /dev/null +++ b/doc/changes/2493.feature @@ -0,0 +1 @@ +Support measurement statistics for `jax` and `jaxdia` dtypes \ No newline at end of file diff --git a/doc/conf.py b/doc/conf.py index 818b5c5821..724a1efa1d 100755 --- a/doc/conf.py +++ b/doc/conf.py @@ -62,10 +62,13 @@ 'B. Li', 'J. Lishman', 'S. Cross', + 'A. Galicia', + 'P. Menczel', + 'P. Hopf', 'and E. Giguère' ]) -copyright = '2011 to 2021 inclusive, QuTiP developers and contributors' +copyright = '2011 to 2024 inclusive, QuTiP developers and contributors' def _check_source_folder_and_imported_qutip_match(): @@ -360,6 +363,17 @@ def qutip_version(): autodoc_member_order = 'alphabetical' +# Makes the following types appear as their alias in the apidoc +# instead of expanding the alias +autodoc_type_aliases = { + 'CoefficientLike': 'CoefficientLike', + 'ElementType': 'ElementType', + 'QobjEvoLike': 'QobjEvoLike', + 'EopsLike': 'EopsLike', + 'LayerType': 'LayerType', + 'ArrayLike': 'ArrayLike' +} + ## EXTLINKS CONFIGURATION ###################################################### extlinks = { diff --git a/doc/development/contributing.rst b/doc/development/contributing.rst index 083754475e..ecdbae1d18 100644 --- a/doc/development/contributing.rst +++ b/doc/development/contributing.rst @@ -113,6 +113,20 @@ This includes using the same variable names, especially if they are function arg Other than this, general "good-practice" Python standards apply: try not to duplicate code; 
try to keep functions short, descriptively-named and side-effect free; provide a docstring for every new function; and so on. +Type Hints +---------- + +Adding type hints to user-facing functions is recommended. +QuTiP's approach is as follows: + +- Type hints are *hints* for the users. +- Type hints can show the preferred usage over the real implementation, for example: + - ``Qobj.__mul__`` is typed to support product with scalar, not other ``Qobj``, for which ``__matmul__`` is preferred. + - ``solver.options`` claims it returns a dict not ``_SolverOptions`` (which is a subclass of dict). +- Type aliases are added to ``qutip.typing``. +- `Any` can be used for input whose type can be extended by plugin modules (``qutip-cupy``, ``qutip-jax``, etc.) + + Documenting ----------- diff --git a/doc/development/release_distribution.rst b/doc/development/release_distribution.rst index 75c581cbf9..0f923a927b 100644 --- a/doc/development/release_distribution.rst +++ b/doc/development/release_distribution.rst @@ -16,10 +16,9 @@ In short, the steps you need to take are: 1. Prepare the release branch (see git_). 2. Run the "Build wheels, optionally deploy to PyPI" GitHub action to build binary and source packages and upload them to PyPI (see deploy_). -3. Retrieve the built documentation from GitHub (see docbuild_). -4. Create a GitHub release and uploaded the built files to it (see github_). -5. Update `qutip.org `_ with the new links and documentation (web_). -6. Update the conda feedstock, deploying the package to ``conda`` (cforge_). +3. Create a GitHub release and upload the built files to it (see github_). +4. Update `qutip.org `_ with the new links and documentation (web_). +5. Update the conda feedstock, deploying the package to ``conda`` (cforge_). @@ -120,6 +119,8 @@ You should now have a branch that you can see on the GitHub website that is call If you notice you have made a mistake, you can make additional pull requests to the release branch to fix it.
``master`` should look pretty similar, except the ``VERSION`` will be higher and have a ``.dev`` suffix, and the "Development Status" in ``setup.cfg`` will be different. +* Activate the readthedocs build for the newly created version branch and set it as the latest. + You are now ready to actually perform the release. Go to deploy_. @@ -189,7 +190,7 @@ Go to the `"Actions" tab at the top of the QuTiP code repository ..pdf`` into the folder ``downloads/..``. - -The legacy html documentation should be in a subfolder like :: - - docs/. - -For a major or minor release the previous version documentation should be moved into this folder. - -The latest version HTML documentation should be the folder :: - - docs/latest - -For any release which new documentation is included -- copy the contents ``qutip/doc/_build/html`` into this folder. **Note that the underscores at start of the subfolder names will need to be removed, otherwise Jekyll will ignore the folders**. There is a script in the ``docs`` folder for this. -https://github.com/qutip/qutip.github.io/blob/master/docs/remove_leading_underscores.py - - HTML File Updates ----------------- @@ -312,12 +276,12 @@ HTML File Updates - Edit ``_includes/sidebar.html`` - * The 'Latest release' version should be updated. The gztar and zip file links will need the micro release number updating in the traceEvent and file name. - * The link to the documentation folder and PDF file (if created) should be updated. + * Add the new version and release date. Only actively developed version should be listed. Micro replace the previous entry but the last major can be kept. + * Link to the installation instruction, documentation, source code and changelog should be updated. - Edit ``documentation.html`` - * The previous release tags should be moved (copied) to the 'Previous releases' section. 
+ * For major and minor release, the previous release tags should be moved (copied) to the 'Previous releases' section and the links to the readthedocs of the new version added the to 'Latest releases' section. .. _cforge: diff --git a/doc/frontmatter.rst b/doc/frontmatter.rst index ba45c750a1..ef5a35dbce 100644 --- a/doc/frontmatter.rst +++ b/doc/frontmatter.rst @@ -40,6 +40,12 @@ This document contains a user guide and automatically generated API documentatio :Author: Simon Cross +:Author: Asier Galicia + +:Author: Paul Menczel + +:Author: Patrick Hopf + :release: |release| :copyright: diff --git a/doc/guide/dynamics/dynamics-intro.rst b/doc/guide/dynamics/dynamics-intro.rst index cfdf0c1ce0..4429766860 100644 --- a/doc/guide/dynamics/dynamics-intro.rst +++ b/doc/guide/dynamics/dynamics-intro.rst @@ -45,14 +45,14 @@ quantum systems and indicates the type of object returned by the solver: * - Monte Carlo evolution - :func:`~qutip.solver.mcsolve.mcsolve` - :obj:`~qutip.solver.mcsolve.MCSolver` - - :obj:`~qutip.solver.result.McResult` + - :obj:`~qutip.solver.multitrajresult.McResult` * - Non-Markovian Monte Carlo - :func:`~qutip.solver.nm_mcsolve.nm_mcsolve` - :obj:`~qutip.solver.nm_mcsolve.NonMarkovianMCSolver` - - :obj:`~qutip.solver.result.NmmcResult` + - :obj:`~qutip.solver.multitrajresult.NmmcResult` * - Bloch-Redfield master equation - - :func:`~qutip.solver.mesolve.brmesolve` - - :obj:`~qutip.solver.mesolve.BRSolver` + - :func:`~qutip.solver.brmesolve.brmesolve` + - :obj:`~qutip.solver.brmesolve.BRSolver` - :obj:`~qutip.solver.result.Result` * - Floquet-Markov master equation - :func:`~qutip.solver.floquet.fmmesolve` @@ -61,11 +61,11 @@ quantum systems and indicates the type of object returned by the solver: * - Stochastic Schrödinger equation - :func:`~qutip.solver.stochastic.ssesolve` - :obj:`~qutip.solver.stochastic.SSESolver` - - :obj:`~qutip.solver.result.MultiTrajResult` + - :obj:`~qutip.solver.multitrajresult.MultiTrajResult` * - Stochastic master 
equation - :func:`~qutip.solver.stochastic.smesolve` - :obj:`~qutip.solver.stochastic.SMESolver` - - :obj:`~qutip.solver.result.MultiTrajResult` + - :obj:`~qutip.solver.multitrajresult.MultiTrajResult` * - Transfer Tensor Method time-evolution - :func:`~qutip.solver.nonmarkov.transfertensor.ttmsolve` - None diff --git a/doc/guide/dynamics/dynamics-monte.rst b/doc/guide/dynamics/dynamics-monte.rst index 4ca406bf90..e5e94cb90c 100644 --- a/doc/guide/dynamics/dynamics-monte.rst +++ b/doc/guide/dynamics/dynamics-monte.rst @@ -282,6 +282,77 @@ trajectories: plt.show() +Mixed Initial states +-------------------- + +The Monte-Carlo solver can be used for mixed initial states. For example, if a +qubit can initially be in the excited state :math:`|+\rangle` with probability +:math:`p` or in the ground state :math:`|-\rangle` with probability +:math:`(1-p)`, the initial state is described by the density matrix +:math:`\rho_0 = p | + \rangle\langle + | + (1-p) | - \rangle\langle - |`. + +In QuTiP, this initial density matrix can be created as follows: + +.. code-block:: + + ground = qutip.basis(2, 0) + excited = qutip.basis(2, 1) + density_matrix = p * excited.proj() + (1 - p) * ground.proj() + +One can then pass this density matrix directly to ``mcsolve``, as in + +.. code-block:: + + mcsolve(H, density_matrix, ...) + +Alternatively, using the class interface, if ``solver`` is an +:class:`.MCSolver` object, one can either call +``solver.run(density_matrix, ...)`` or pass the list of initial states like + +.. code-block:: + + solver.run([(excited, p), (ground, 1-p)], ...) + +The number of trajectories can still be specified as a single number ``ntraj``. +In that case, QuTiP will automatically decide how many trajectories to use for +each of the initial states, guaranteeing that the total number of trajectories +is exactly the specified number. 
When using the class interface and providing +the initial state as a list, the `ntraj` parameter may also be a list +specifying the number of trajectories to use for each state manually. In either +case, the resulting :class:`McResult` will have attributes ``initial_states`` +and ``ntraj_per_initial_state`` listing the initial states and the +corresponding numbers of trajectories that were actually used. + +Note that in general, the fraction of trajectories starting in a given initial +state will (and can) not exactly match the probability :math:`p` of that state +in the initial ensemble. In this case, QuTiP will automatically apply a +correction to the averages, weighting for example the initial states with +"too few" trajectories more strongly. Therefore, the initial state returned in +the result object will always match the provided one up to numerical +inaccuracies. Furthermore, the result returned by the `mcsolve` call above is +equivalent to the following: + +.. code-block:: + + result1 = qutip.mcsolve(H, excited, ...) + result2 = qutip.mcsolve(H, ground, ...) + result1.merge(result2, p) + +However, the single ``mcsolve`` call allows for more parallelization (see +below). + +The Monte-Carlo solver with a mixed initial state currently does not support +specifying a target tolerance. Also, in case the simulation ends early due to +timeout, it is not guaranteed that all initial states have been sampled. If +not all initial states have been sampled, the resulting states will not be +normalized, and the result should be discarded. + +Finally note that what we just discussed concerns the case of mixed initial +states where the provided Hamiltonian is an operator. If it is a superoperator +(i.e., a Liouvillian), ``mcsolve`` will generate trajectories of mixed states +(see below) and the present discussion does not apply. 
+ + Using the Improved Sampling Algorithm ------------------------------------- @@ -445,6 +516,29 @@ Open Systems ``mcsolve`` can be used to study systems which have measurement and dissipative interactions with their environment. This is done by passing a Liouvillian including the dissipative interaction to the solver instead of a Hamiltonian. +In this case the effective Liouvillian becomes: + +.. math:: + :label: Leff + + L_{\rm eff}\rho = L_{\rm sys}\rho -\frac{1}{2}\sum_{i}\left( C^{+}_{n}C_{n}\rho + \rho C^{+}_{n}C_{n}\right), + +With the collapse probability becoming: + +.. math:: + :label: L_jump + + \delta p =\delta t \sum_{n}\mathrm{tr}\left(\rho(t)C^{+}_{n}C_{n}\right), + +And a jump with the collapse operator ``n`` changing the state as: + +.. math:: + :label: L_project + + \rho(t+\delta t) = C_{n} \rho(t) C^{+}_{n} / \mathrm{tr}\left( C_{n} \rho(t) C^{+}_{n} \right), + + +We can redo the previous example for a situation where only half the emitted photons are detected. .. plot:: :context: close-figs @@ -454,8 +548,8 @@ dissipative interaction to the solver instead of a Hamiltonian. a = tensor(qeye(2), destroy(10)) sm = tensor(destroy(2), qeye(10)) H = 2*np.pi*a.dag()*a + 2*np.pi*sm.dag()*sm + 2*np.pi*0.25*(sm*a.dag() + sm.dag()*a) - L = liouvillian(H, [0.01 * sm, np.sqrt(0.1) * a]) - data = mcsolve(L, psi0, times, [np.sqrt(0.1) * a], e_ops=[a.dag() * a, sm.dag() * sm]) + L = liouvillian(H, [np.sqrt(0.05) * a]) + data = mcsolve(L, psi0, times, [np.sqrt(0.05) * a], e_ops=[a.dag() * a, sm.dag() * sm]) plt.figure() plt.plot((times[:-1] + times[1:])/2, data.photocurrent[0]) diff --git a/doc/guide/dynamics/dynamics-nmmonte.rst b/doc/guide/dynamics/dynamics-nmmonte.rst index fb8c0bcbde..ae040bdde7 100644 --- a/doc/guide/dynamics/dynamics-nmmonte.rst +++ b/doc/guide/dynamics/dynamics-nmmonte.rst @@ -79,6 +79,9 @@ associated jump rates :math:`\Gamma_n(t)\geq0` appropriate for simulation. 
We conclude with a simple example demonstrating the usage of the ``nm_mcsolve`` function. For more elaborate, physically motivated examples, we refer to the `accompanying tutorial notebook `_. +Note that the example also demonstrates the usage of the ``improved_sampling`` +option (which is explained in the guide for the +:ref:`Monte Carlo Solver`) in ``nm_mcsolve``. .. plot:: @@ -98,10 +101,11 @@ function. For more elaborate, physically motivated examples, we refer to the ops_and_rates = [] ops_and_rates.append([a0.dag(), gamma1]) ops_and_rates.append([a0, gamma2]) + nm_options = {'map': 'parallel', 'improved_sampling': True} MCSol = nm_mcsolve(H, psi0, times, ops_and_rates, args={'kappa': 1.0 / 0.129, 'nth': 0.063}, e_ops=[a0.dag() * a0, a0 * a0.dag()], - options={'map': 'parallel'}, ntraj=2500) + options=nm_options, ntraj=2500) # mesolve integration for comparison d_ops = [[lindblad_dissipator(a0.dag(), a0.dag()), gamma1], diff --git a/doc/guide/dynamics/dynamics-stochastic.rst b/doc/guide/dynamics/dynamics-stochastic.rst index 8e60bc2c44..025720cc0e 100644 --- a/doc/guide/dynamics/dynamics-stochastic.rst +++ b/doc/guide/dynamics/dynamics-stochastic.rst @@ -130,16 +130,21 @@ Example Below, we solve the dynamics for an optical cavity at 0K whose output is monitored using homodyne detection. The cavity decay rate is given by :math:`\kappa` and the :math:`\Delta` is the cavity detuning with respect to the driving field. -The measurement operators can be passed using the option ``m_ops``. The homodyne -current :math:`J_x` is calculated using +The homodyne current :math:`J_x` is calculated using .. math:: :label: measurement_result J_x = \langle x \rangle + dW / dt, -where :math:`x` is the operator passed using ``m_ops``. The results are available -in ``result.measurements``. +where :math:`x` is the operator built from the ``sc_ops`` as + +.. math:: + + x_n = S_n + S_n^\dagger + + +The results are available in ``result.measurement``. .. 
plot:: :context: reset @@ -178,21 +183,56 @@ in ``result.measurements``. ax.set_xlabel('Time') ax.legend() + +Run from known measurements +=========================== + +Sometimes, instead of running multiple trajectories, we want to reproduce a single trajectory from known noise or from measurements obtained in the lab. +In these cases, we can use :meth:`~qutip.solver.stochastic.SMESolver.run_from_experiment`. + +Let us use the measurement output ``J_x`` of the first trajectory of the previous simulation as the input to recompute a trajectory: + +.. code-block:: + + # Create a stochastic solver instance with the same Hamiltonian as the + # previous evolution. + solver = SMESolver( + H, sc_ops=[np.sqrt(KAPPA) * a], + options={"dt": 0.00125, "store_measurement": True,} + ) + + # Run the evolution from the recorded measurements + recreated_solution = solver.run_from_experiment( + rho_0, tlist, stoc_solution.measurements[0], + e_ops=[H], + # The third parameter is the measurement, not the Wiener increment + measurement=True, + ) + +This will recompute the states, expectation values and Wiener increments for that trajectory. + +.. note:: + + The measurement in the result is by default computed from the state at the end of the time step. + However, when using ``run_from_experiment`` with measurement input, the state at the start of the time step is used. + To obtain the measurement at the start of the time step in the output of ``smesolve``, one may use the option ``{'store_measurement': 'start'}``. + + +For other examples on :func:`qutip.solver.stochastic.smesolve`, see the +notebooks available on the `QuTiP Tutorials page `_: + +* `Heterodyne detection `_ +* `Inefficient detection `_ + .. - TODO merge qutip-tutorials#61 - For other examples on :func:`qutip.solver.stochastic.smesolve`, see the - `following notebook <...>`_, as well as these notebooks available at - `QuTiP Tutorials page `_: - `heterodyne detection <...>`_, - `inefficient detection <...>`_, and - `feedback control `_. 
+ TODO: Add back when the notebook is migrated + * `Feedback control `_ The stochastic solvers share many features with :func:`.mcsolve`, such as end conditions, seed control and running in parallel. See the sections :ref:`monte-ntraj`, :ref:`monte-seeds` and :ref:`monte-parallel` for details. - .. plot:: :context: reset :include-source: false diff --git a/doc/guide/guide-basics.rst b/doc/guide/guide-basics.rst index 1d17cf0976..ef00ff37ac 100644 --- a/doc/guide/guide-basics.rst +++ b/doc/guide/guide-basics.rst @@ -349,7 +349,7 @@ Conversion between storage type is done using the :meth:`.Qobj.to` method. >>> q.to("CSR").data CSR(shape=(4, 4), nnz=3) - >>> q.to("CSR").data_as("CSR_matrix") + >>> q.to("CSR").data_as("csr_matrix") <4x4 sparse matrix of type '' with 3 stored elements in Compressed Sparse Row format> @@ -427,6 +427,7 @@ Of course, like matrices, multiplying two objects of incompatible shape throws a In addition, the logic operators "is equal" `==` and "is not equal" `!=` are also supported. + .. _basics-functions: Functions operating on Qobj class diff --git a/doc/guide/guide-bloch.rst b/doc/guide/guide-bloch.rst index ad351a5f51..c6750e0ea7 100644 --- a/doc/guide/guide-bloch.rst +++ b/doc/guide/guide-bloch.rst @@ -29,7 +29,7 @@ Before getting into the details of these objects, we can simply plot the blank B .. plot:: :context: - b.make_sphere() + b.render() In addition to the ``show`` command, see the API documentation for :class:`~qutip.bloch.Bloch` for a full list of other available functions. As an example, we can add a single data point: @@ -109,6 +109,15 @@ a similar method works for adding vectors: b.add_vectors(vec) b.render() +You can also add lines and arcs: + +.. plot:: + :context: close-figs + + b.add_line(x, y) + b.add_arc(y, z) + b.render() + Adding multiple points to the Bloch sphere works slightly differently than adding multiple states or vectors. For example, lets add a set of 20 points around the equator (after calling `clear()`): .. 
plot:: @@ -323,25 +332,19 @@ Directly Generating an Animation The code to directly generate an mp4 movie of the Qubit decay is as follows :: from matplotlib import pyplot, animation - from mpl_toolkits.mplot3d import Axes3D fig = pyplot.figure() - ax = Axes3D(fig, azim=-40, elev=30) + ax = fig.add_subplot(azim=-40, elev=30, projection="3d") sphere = qutip.Bloch(axes=ax) def animate(i): sphere.clear() - sphere.add_vectors([np.sin(theta), 0, np.cos(theta)]) + sphere.add_vectors([np.sin(theta), 0, np.cos(theta)], ["r"]) sphere.add_points([sx[:i+1], sy[:i+1], sz[:i+1]]) - sphere.make_sphere() - return ax - - def init(): - sphere.vector_color = ['r'] + sphere.render() return ax - ani = animation.FuncAnimation(fig, animate, np.arange(len(sx)), - init_func=init, blit=False, repeat=False) + ani = animation.FuncAnimation(fig, animate, np.arange(len(sx)), blit=False, repeat=False) ani.save('bloch_sphere.mp4', fps=20) The resulting movie may be viewed here: `bloch_decay.mp4 `_ diff --git a/doc/guide/guide-settings.rst b/doc/guide/guide-settings.rst index 7314ff3484..8411a718a8 100644 --- a/doc/guide/guide-settings.rst +++ b/doc/guide/guide-settings.rst @@ -1,5 +1,70 @@ .. _settings: +************** +QuTiP settings +************** + +QuTiP has multiple settings that control its behaviour: + +* ``qutip.settings`` contains installation and runtime information. + Most of these parameters are read-only, but system paths used by QuTiP are + also included here and may need updating in non-standard environments. +* ``qutip.settings.core`` contains options for operations with ``Qobj`` and + other QuTiP classes. All options are writable. +* ``qutip.settings.compile`` has options that control compilation of string + coefficients to Cython modules. All options are writable. + +.. _settings-install: + +******************** +Environment settings +******************** + +``qutip.settings`` has information about the runtime environment: + +.. 
tabularcolumns:: | p{3cm} | p{2cm} | p{10cm} | + +.. cssclass:: table-striped + ++-------------------+-----------+----------------------------------------------------------+ +| Setting | Read Only | Description | ++===================+===========+==========================================================+ +| `has_mkl` | True | Whether qutip can find mkl libraries. | +| | | mkl sparse linear equation solver can be used when True. | ++-------------------+-----------+----------------------------------------------------------+ +| `mkl_lib` | True | Path of the mkl libraries found. | ++-------------------+-----------+----------------------------------------------------------+ +| `ipython` | True | Whether running in IPython. | ++-------------------+-----------+----------------------------------------------------------+ +| `eigh_unsafe` | True | When true, SciPy's `eigh` and `eigvalsh` are replaced | +| | | with custom implementations that call `eig` and | +| | | `eigvals` instead. This setting exists because in some | +| | | environments SciPy's `eigh` segfaults or gives invalid | +| | | results. | ++-------------------+-----------+----------------------------------------------------------+ +| `coeffroot` | False | Directory in which QuTiP creates cython modules for | +| | | string coefficient. | ++-------------------+-----------+----------------------------------------------------------+ +| `coeff_write_ok` | True | Whether QuTiP has write permission for `coeffroot`. | ++-------------------+-----------+----------------------------------------------------------+ +| `idxint_size` | True | Whether QuTiP's sparse matrix indices use 32 or 64 bits. | +| | | Sparse matrices' size are limited to 2**(idxint_size-1) | +| | | rows and columns. | ++-------------------+-----------+----------------------------------------------------------+ +| `num_cpus` | True | Detected number of cpus. 
| ++-------------------+-----------+----------------------------------------------------------+ +| `colorblind_safe` | False | Control the default cmap in visualization functions. | ++-------------------+-----------+----------------------------------------------------------+ + + +It may be needed to update ``coeffroot`` if the default HOME is not writable. It can be done with: + +>>> qutip.settings.coeffroot = "path/to/string/coeff/directory" + +In QuTiP version 5 and later, strings compiled in a session are kept for future sessions. +As long as the same ``coeffroot`` is used, each string will only be compiled once. + + ********************************* Modifying Internal QuTiP Settings ********************************* @@ -9,33 +74,42 @@ Modifying Internal QuTiP Settings User Accessible Parameters ========================== -In this section we show how to modify a few of the internal parameters used by QuTiP. +In this section we show how to modify a few of the internal parameters used by ``Qobj``. The settings that can be modified are given in the following table: .. tabularcolumns:: | p{3cm} | p{5cm} | p{5cm} | .. cssclass:: table-striped -+------------------------------+----------------------------------------------+------------------------------+ -| Setting | Description | Options | -+==============================+==============================================+==============================+ -| `auto_tidyup` | Automatically tidyup sparse quantum objects. | True / False | -+------------------------------+----------------------------------------------+------------------------------+ -| `auto_tidyup_atol` | Tolerance used by tidyup. (sparse only) | float {1e-14} | -+------------------------------+----------------------------------------------+------------------------------+ -| `atol` | General absolute tolerance. 
| float {1e-12} | -+------------------------------+----------------------------------------------+------------------------------+ -| `rtol` | General relative tolerance. | float {1e-12} | -+------------------------------+----------------------------------------------+------------------------------+ -| `function_coefficient_style` | Signature expected by function coefficients. | {"auto", "pythonic", "dict"} | -+------------------------------+----------------------------------------------+------------------------------+ ++------------------------------+----------------------------------------------+--------------------------------+ +| Options | Description | type [default] | ++==============================+==============================================+================================+ +| `auto_tidyup` | Automatically tidyup sparse quantum objects. | bool [True] | ++------------------------------+----------------------------------------------+--------------------------------+ +| `auto_tidyup_atol` | Tolerance used by tidyup. (sparse only) | float [1e-14] | ++------------------------------+----------------------------------------------+--------------------------------+ +| `auto_tidyup_dims` | Whether the scalar dimension are contracted | bool [False] | ++------------------------------+----------------------------------------------+--------------------------------+ +| `atol` | General absolute tolerance. | float [1e-12] | ++------------------------------+----------------------------------------------+--------------------------------+ +| `rtol` | General relative tolerance. | float [1e-12] | ++------------------------------+----------------------------------------------+--------------------------------+ +| `function_coefficient_style` | Signature expected by function coefficients. 
| {["auto"], "pythonic", "dict"} | ++------------------------------+----------------------------------------------+--------------------------------+ +| `default_dtype` | Data format used when creating Qobj from | {[None], "CSR", "Dense", | +| | QuTiP functions, such as ``qeye``. | "Dia"} + other from plugins | ++------------------------------+----------------------------------------------+--------------------------------+ + +See also :class:`.CoreOptions`. .. _settings-usage: Example: Changing Settings ========================== -The two most important settings are ``auto_tidyup`` and ``auto_tidyup_atol`` as they control whether the small elements of a quantum object should be removed, and what number should be considered as the cut-off tolerance. +The two most important settings are ``auto_tidyup`` and ``auto_tidyup_atol`` as +they control whether the small elements of a quantum object should be removed, +and what number should be considered as the cut-off tolerance. Modifying these, or any other parameters, is quite simple:: >>> qutip.settings.core["auto_tidyup"] = False @@ -44,3 +118,82 @@ The settings can also be changed for a code block:: >>> with qutip.CoreOptions(atol=1e-5): >>> assert qutip.qeye(2) * 1e-9 == qutip.qzero(2) + + + +.. _settings-compile: + +String Coefficient Parameters +============================= + +String based coefficient used for time dependent system are compiled using Cython when available. +Speeding the simulations, it tries to set c types to passed variables. +``qutip.settings.compile`` has multiple options for compilation. + +There are options are about to whether to compile. + +.. tabularcolumns:: | p{3cm} | p{10cm} | + +.. cssclass:: table-striped + ++--------------------------+-----------------------------------------------------------+ +| Options | Description | ++==========================+===========================================================+ +| `use_cython` | Whether to compile string using cython or using ``eval``. 
| ++--------------------------+-----------------------------------------------------------+ +| `recompile` | Whether to force recompilation or use a previously | +| | constructed coefficient if available. | ++--------------------------+-----------------------------------------------------------+ + + +Some options passed to cython and the compiler (for advanced user). + +.. tabularcolumns:: | p{3cm} | p{10cm} | + +.. cssclass:: table-striped + ++--------------------------+-----------------------------------------------------------+ +| Options | Description | ++==========================+===========================================================+ +| `compiler_flags` | C++ compiler flags. | ++--------------------------+-----------------------------------------------------------+ +| `link_flags` | C++ linker flags. | ++--------------------------+-----------------------------------------------------------+ +| `build_dir` | cythonize's build_dir. | ++--------------------------+-----------------------------------------------------------+ +| `extra_import` | import or cimport line of code to add to the cython file. | ++--------------------------+-----------------------------------------------------------+ +| `clean_on_error` | Whether to erase the created file if compilation failed. | ++--------------------------+-----------------------------------------------------------+ + + +Lastly some options control how qutip tries to detect C types (for advanced user). + +.. tabularcolumns:: | p{3cm} | p{10cm} | + +.. cssclass:: table-striped + ++--------------------------+-----------------------------------------------------------------------------------------+ +| Options | Description | ++==========================+=========================================================================================+ +| `try_parse` | Whether QuTiP parses the string to detect common patterns. | +| | | +| | When True, "cos(w * t)" and "cos(a * t)" will use the same compiled coefficient. 
| ++--------------------------+-----------------------------------------------------------------------------------------+ +| `static_types` | If False, every variable will be typed as ``object``, (except ``t`` which is double). | +| | | +| | If True, scalar (int, float, complex), string and Data types are detected. | ++--------------------------+-----------------------------------------------------------------------------------------+ +| `accept_int` | Whether to type ``args`` values which are Python ints as int or float/complex. | +| | | +| | Per default it is True when subscription (``a[i]``) is used. | ++--------------------------+-----------------------------------------------------------------------------------------+ +| `accept_float` | Whether to type ``args`` values which are Python floats as int or float/complex. | +| | | +| | Per default it is True when comparison (``a > b``) is used. | ++--------------------------+-----------------------------------------------------------------------------------------+ + + +These options can be set at a global level in ``qutip.settings.compile`` or by passing a :class:`.CompilationOptions` instance to the :func:`.coefficient` functions. + +>>> qutip.coefficient("cos(t)", compile_opt=CompilationOptions(recompile=True)) diff --git a/doc/guide/guide-states.rst b/doc/guide/guide-states.rst index 4d4efc3c53..28859a3492 100644 --- a/doc/guide/guide-states.rst +++ b/doc/guide/guide-states.rst @@ -696,6 +696,101 @@ the non-zero component is the zeroth-element of the underlying matrix (remember If one wants to create spin operators for higher spin systems, then the :func:`.jmat` function comes in handy. +.. _quantum_gates: + +Gates +===== + +The pre-defined gates are shown in the table below: + + +.. 
cssclass:: table-striped + ++------------------------------------------------+-------------------------------------------------------+ +| Gate function | Description | ++================================================+=======================================================+ +| :func:`~qutip.core.gates.rx` | Rotation around x axis | ++------------------------------------------------+-------------------------------------------------------+ +| :func:`~qutip.core.gates.ry` | Rotation around y axis | ++------------------------------------------------+-------------------------------------------------------+ +| :func:`~qutip.core.gates.rz` | Rotation around z axis | ++------------------------------------------------+-------------------------------------------------------+ +| :func:`~qutip.core.gates.sqrtnot` | Square root of not gate | ++------------------------------------------------+-------------------------------------------------------+ +| :func:`~qutip.core.gates.sqrtnot` | Square root of not gate | ++------------------------------------------------+-------------------------------------------------------+ +| :func:`~qutip.core.gates.snot` | Hadamard gate | ++------------------------------------------------+-------------------------------------------------------+ +| :func:`~qutip.core.gates.phasegate` | Phase shift gate | ++------------------------------------------------+-------------------------------------------------------+ +| :func:`~qutip.core.gates.qrot` | A qubit rotation under a Rabi pulse | ++------------------------------------------------+-------------------------------------------------------+ +| :func:`~qutip.core.gates.cy_gate` | Controlled y gate | ++------------------------------------------------+-------------------------------------------------------+ +| :func:`~qutip.core.gates.cz_gate` | Controlled z gate | ++------------------------------------------------+-------------------------------------------------------+ +| 
:func:`~qutip.core.gates.s_gate` | Single-qubit rotation | ++------------------------------------------------+-------------------------------------------------------+ +| :func:`~qutip.core.gates.t_gate` | Square root of s gate | ++------------------------------------------------+-------------------------------------------------------+ +| :func:`~qutip.core.gates.cs_gate` | Controlled s gate | ++------------------------------------------------+-------------------------------------------------------+ +| :func:`~qutip.core.gates.ct_gate` | Controlled t gate | ++------------------------------------------------+-------------------------------------------------------+ +| :func:`~qutip.core.gates.cphase` | Controlled phase gate | ++------------------------------------------------+-------------------------------------------------------+ +| :func:`~qutip.core.gates.cnot` | Controlled not gate | ++------------------------------------------------+-------------------------------------------------------+ +| :func:`~qutip.core.gates.csign` | Same as cphase | ++------------------------------------------------+-------------------------------------------------------+ +| :func:`~qutip.core.gates.berkeley` | Berkeley gate | ++------------------------------------------------+-------------------------------------------------------+ +| :func:`~qutip.core.gates.swapalpha` | Swapalpha gate | ++------------------------------------------------+-------------------------------------------------------+ +| :func:`~qutip.core.gates.swap` | Swap the states of two qubits | ++------------------------------------------------+-------------------------------------------------------+ +| :func:`~qutip.core.gates.iswap` | Swap gate with additional phase for 01 and 10 states | ++------------------------------------------------+-------------------------------------------------------+ +| :func:`~qutip.core.gates.sqrtswap` | Square root of the swap gate | 
++------------------------------------------------+-------------------------------------------------------+ +| :func:`~qutip.core.gates.sqrtiswap` | Square root of the iswap gate | ++------------------------------------------------+-------------------------------------------------------+ +| :func:`~qutip.core.gates.fredkin` | Fredkin gate | ++------------------------------------------------+-------------------------------------------------------+ +| :func:`~qutip.core.gates.molmer_sorensen` | Molmer Sorensen gate | ++------------------------------------------------+-------------------------------------------------------+ +| :func:`~qutip.core.gates.toffoli` | Toffoli gate | ++------------------------------------------------+-------------------------------------------------------+ +| :func:`~qutip.core.gates.hadamard_transform` | Hadamard gate | ++------------------------------------------------+-------------------------------------------------------+ +| :func:`~qutip.core.gates.qubit_clifford_group` | Generates the Clifford group on a single qubit | ++------------------------------------------------+-------------------------------------------------------+ +| :func:`~qutip.core.gates.globalphase` | Global phase gate | ++------------------------------------------------+-------------------------------------------------------+ + +To load this qutip module, first you have to import gates: + +.. code-block:: Python + + from qutip import gates + +For example to use the Hadamard Gate: + +.. testcode:: [basics] + + H = gates.hadamard_transform() + print(H) + +**Output**: + +.. testoutput:: [basics] + :options: +NORMALIZE_WHITESPACE + + Quantum object: dims=[[2], [2]], shape=(2, 2), type='oper', dtype=Dense, isherm=True + Qobj data = + [[ 0.70710678 0.70710678] + [0.70710678 -0.70710678]] + .. 
_states-expect: Expectation values @@ -788,6 +883,7 @@ as well as the composite objects discussed in the next section :ref:`tensor`: np.testing.assert_almost_equal(expect(sz2, two_spins), -1) + .. _states-super: Superoperators and Vectorized Operators diff --git a/doc/index.rst b/doc/index.rst index e49d7f6208..2e01d21075 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -6,6 +6,12 @@ QuTiP: Quantum Toolbox in Python ================================ + +This documentation contains a user guide and automatically generated API documentation for QuTiP. +For more information see the `QuTiP project web page `_. +Here, you can also find a collection of `tutorials for QuTiP `_. + + .. toctree:: :maxdepth: 3 diff --git a/doc/installation.rst b/doc/installation.rst index fbed3cea04..7a91d984ff 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -36,11 +36,11 @@ The following packages are currently required: +----------------+--------------+-----------------------------------------------------+ | Package | Version | Details | +================+==============+=====================================================+ -| **Python** | 3.6+ | | +| **Python** | 3.9+ | 3.6+ for version 4.7 | +----------------+--------------+-----------------------------------------------------+ -| **NumPy** | 1.16+ | | +| **NumPy** | 1.22+ <2.0 | 1.16+ for version 4.7 | +----------------+--------------+-----------------------------------------------------+ -| **SciPy** | 1.0+ | Lower versions may have missing features. | +| **SciPy** | 1.8+ | 1.0+ for version 4.7 | +----------------+--------------+-----------------------------------------------------+ @@ -54,19 +54,21 @@ In addition, there are several optional packages that provide additional functio | ``matplotlib`` | 1.2.1+ | Needed for all visualisation tasks. 
| +--------------------------+--------------+-----------------------------------------------------+ | ``cython`` | 0.29.20+ | Needed for compiling some time-dependent | -| | | Hamiltonians. | +| ``setuptools`` | | Hamiltonians. Cython needs a working C++ compiler. | +| ``filelock`` | | | +--------------------------+--------------+-----------------------------------------------------+ | ``cvxpy`` | 1.0+ | Needed to calculate diamond norms. | +--------------------------+--------------+-----------------------------------------------------+ -| C++ | GCC 4.7+, | Needed for compiling Cython files, made when | -| Compiler | MS VS 2015 | using string-format time-dependence. | -+--------------------------+--------------+-----------------------------------------------------+ | ``pytest``, | 5.3+ | For running the test suite. | | ``pytest-rerunfailures`` | | | +--------------------------+--------------+-----------------------------------------------------+ | LaTeX | TeXLive 2009+| Needed if using LaTeX in matplotlib figures, or for | | | | nice circuit drawings in IPython. | +--------------------------+--------------+-----------------------------------------------------+ +| ``loky``, ``mpi4py`` | | Extra parallel map back-ends. | ++--------------------------+--------------+-----------------------------------------------------+ +| ``tqdm`` | | Extra progress bars back-end. | ++--------------------------+--------------+-----------------------------------------------------+ In addition, there are several additional packages that are not dependencies, but may give you a better programming experience. `IPython `_ provides an improved text-based Python interpreter that is far more full-featured that the default interpreter, and runs in a terminal. @@ -126,23 +128,6 @@ You activate the new environment by running You can also install any more optional packages you want with ``conda install``, for example ``matplotlib``, ``ipython`` or ``jupyter``. 
-Installation of the pre-release of version 5 -============================================ - -QuTiP version 5 has been in development for some time and brings many new features, heavily reworks the core functionalities of QuTiP. -It is available as a pre-release on PyPI. Anyone wanting to try the new features can install it with: - -.. code-block:: bash - - pip install --pre qutip - -We expect the pre-release to fully work. -If you find any bugs, confusing documentation or missing features, please tell create an issue on `github `_. - -This version breaks compatibility with QuTiP 4.7 in many small ways. -Please see the :doc:`changelog` for a list of changes, new features and deprecations. - - .. _install-from-source: Installing from Source @@ -182,11 +167,11 @@ Direct Setuptools Source Builds This is the method to have the greatest amount of control over the installation, but it the most error-prone and not recommended unless you know what you are doing. You first need to have all the runtime dependencies installed. The most up-to-date requirements will be listed in ``pyproject.toml`` file, in the ``build-system.requires`` key. -As of the 4.6.0 release, the build requirements can be installed with +As of the 5.0.0 release, the build requirements can be installed with .. code-block:: bash - pip install setuptools wheel packaging 'cython>=0.29.20' 'numpy>=1.16.6,<1.20' 'scipy>=1.0' + pip install setuptools wheel packaging cython 'numpy<2.0.0' scipy or similar with ``conda`` if you prefer. You will also need to have a functional C++ compiler installed on your system. @@ -196,17 +181,7 @@ To install QuTiP from the source code run: .. code-block:: bash - python setup.py install - -To install OpenMP support, if available, run: - -.. code-block:: bash - - python setup.py install --with-openmp - -This will attempt to load up OpenMP libraries during the compilation process, which depends on you having suitable C++ compiler and library support. 
-If you are on Linux this is probably already done, but the compiler macOS ships with does not have OpenMP support. -You will likely need to refer to external operating-system-specific guides for more detail here, as it may be very non-trivial to correctly configure. + pip install . If you wish to contribute to the QuTiP project, then you will want to create your own fork of `the QuTiP git repository `_, clone this to a local folder, and install it into your Python environment using: @@ -252,12 +227,11 @@ Verifying the Installation QuTiP includes a collection of built-in test scripts to verify that an installation was successful. To run the suite of tests scripts you must also have the ``pytest`` testing library. -After installing QuTiP, leave the installation directory, run Python (or IPython), and call: +After installing QuTiP, leave the installation directory and call: -.. code-block:: python +.. code-block:: bash - import qutip.testing - qutip.testing.run() + pytest qutip/qutip/tests This will take between 10 and 30 minutes, depending on your computer. At the end, the testing report should report a success; it is normal for some tests to be skipped, and for some to be marked "xfail" in yellow. 
diff --git a/doc/requirements.txt b/doc/requirements.txt index 6cb3ffacf5..4c64df4724 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -1,17 +1,17 @@ alabaster==0.7.13 Babel==2.12.1 backcall==0.2.0 -certifi==2023.7.22 +certifi==2024.7.4 chardet==4.0.0 cycler==0.10.0 -Cython==0.29.33 +Cython==3.0.8 decorator==5.1.1 docutils==0.18.1 -idna==3.4 +idna==3.7 imagesize==1.4.1 ipython==8.11.0 jedi==0.18.2 -Jinja2==3.1.3 +Jinja2==3.1.4 kiwisolver==1.4.4 MarkupSafe==2.1.2 matplotlib==3.7.1 @@ -21,20 +21,20 @@ packaging==23.0 parso==0.8.3 pexpect==4.8.0 pickleshare==0.7.5 -Pillow==10.2.0 +Pillow==10.3.0 prompt-toolkit==3.0.38 ptyprocess==0.7.0 Pygments==2.15.0 pyparsing==3.0.9 python-dateutil==2.8.2 pytz==2023.3 -requests==2.31.0 -scipy==1.10.1 +requests==2.32.0 +scipy==1.11.4 six==1.16.0 snowballstemmer==2.2.0 Sphinx==6.1.3 sphinx-gallery==0.12.2 -sphinx-rtd-theme==1.2.0 +sphinx-rtd-theme==1.2.1 sphinxcontrib-applehelp==1.0.3 sphinxcontrib-bibtex==2.5.0 sphinxcontrib-devhelp==1.0.2 @@ -43,6 +43,6 @@ sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 traitlets==5.9.0 -urllib3==1.26.18 +urllib3==1.26.19 wcwidth==0.2.6 wheel==0.38.4 diff --git a/doc/rtd-environment.yml b/doc/rtd-environment.yml index 7cafb0adc0..ff209b4a7e 100644 --- a/doc/rtd-environment.yml +++ b/doc/rtd-environment.yml @@ -38,7 +38,7 @@ dependencies: - snowballstemmer==2.2.0 - Sphinx==6.1.3 - sphinx-gallery==0.12.2 -- sphinx-rtd-theme==1.2.0 +- sphinx-rtd-theme==1.2.1 - sphinxcontrib-applehelp==1.0.4 - sphinxcontrib-bibtex==2.5.0 - sphinxcontrib-devhelp==1.0.2 diff --git a/pyproject.toml b/pyproject.toml index 896cd3f115..888bbb161e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,11 +4,11 @@ requires = [ "packaging", "wheel", "cython>=0.29.20; python_version>='3.10'", - "cython>=0.29.20,<3.0.3; python_version<='3.9'", + "cython>=0.29.20,<3.0.0; python_version<='3.9'", # See https://numpy.org/doc/stable/user/depending_on_numpy.html for # the 
recommended way to build against numpy's C API: - "oldest-supported-numpy", - "scipy>=1.8", + "numpy>=2.0.0", + "scipy>=1.9", ] build-backend = "setuptools.build_meta" diff --git a/qutip/about.py b/qutip/about.py index 0e5eb1662a..918ed77768 100644 --- a/qutip/about.py +++ b/qutip/about.py @@ -25,7 +25,8 @@ def about(): print( "Current admin team: Alexander Pitchford, " "Nathan Shammah, Shahnawaz Ahmed, Neill Lambert, Eric Giguère, " - "Boxi Li, Jake Lishman, Simon Cross and Asier Galicia." + "Boxi Li, Jake Lishman, Simon Cross, Asier Galicia, Paul Menczel, " + "and Patrick Hopf." ) print( "Board members: Daniel Burgarth, Robert Johansson, Anton F. Kockum, " diff --git a/qutip/bloch.py b/qutip/bloch.py index 6a7e38a687..7c1822609c 100644 --- a/qutip/bloch.py +++ b/qutip/bloch.py @@ -1,6 +1,7 @@ __all__ = ['Bloch'] import os +from typing import Literal import numpy as np from numpy import (outer, cos, sin, ones) @@ -314,7 +315,8 @@ def clear(self): self._lines = [] self._arcs = [] - def add_points(self, points, meth='s', colors=None, alpha=1.0): + def add_points(self, points, meth: Literal['s', 'm', 'l'] = 's', + colors=None, alpha=1.0): """Add a list of data points to bloch sphere. Parameters @@ -364,13 +366,14 @@ def add_points(self, points, meth='s', colors=None, alpha=1.0): self.point_alpha.append(alpha) self._inner_point_color.append(colors) - def add_states(self, state, kind='vector', colors=None, alpha=1.0): + def add_states(self, state, kind: Literal['vector', 'point'] = 'vector', + colors=None, alpha=1.0): """Add a state vector Qobj to Bloch sphere. Parameters ---------- - state : :obj:`.Qobj` - Input state vector. + state : :obj:`.Qobj` or array_like + Input state vector or list. kind : {'vector', 'point'} Type of object to plot. @@ -381,10 +384,27 @@ def add_states(self, state, kind='vector', colors=None, alpha=1.0): alpha : float, default=1. Transparency value for the vectors. Values between 0 and 1. 
""" - if isinstance(state, Qobj): - state = [state] - if not isinstance(colors, (list, np.ndarray)) and colors is not None: - colors = [colors] + state = np.asarray(state) + + if state.ndim == 0: + state = state[np.newaxis] + + if state.ndim != 1: + raise ValueError("The included states are not valid. " + "State should be a Qobj or a list of Qobj.") + + if colors is not None: + colors = np.asarray(colors) + + if colors.ndim == 0: + colors = colors[np.newaxis] + + if colors.shape != state.shape: + raise ValueError("The included colors are not valid. " + "colors must be equivalent to a 1D array " + "with the same size as the number of states.") + else: + colors = np.array([None] * state.size) for k, st in enumerate(state): vec = [expect(sigmax(), st), @@ -392,15 +412,9 @@ def add_states(self, state, kind='vector', colors=None, alpha=1.0): expect(sigmaz(), st)] if kind == 'vector': - if colors is not None: - self.add_vectors(vec, colors=colors[k], alpha=alpha) - else: - self.add_vectors(vec) + self.add_vectors(vec, colors=[colors[k]], alpha=alpha) elif kind == 'point': - if colors is not None: - self.add_points(vec, colors=colors[k], alpha=alpha) - else: - self.add_points(vec) + self.add_points(vec, colors=[colors[k]], alpha=alpha) def add_vectors(self, vectors, colors=None, alpha=1.0): """Add a list of vectors to Bloch sphere. diff --git a/qutip/core/__init__.py b/qutip/core/__init__.py index 4574f20454..296f26fa40 100644 --- a/qutip/core/__init__.py +++ b/qutip/core/__init__.py @@ -12,6 +12,7 @@ from .subsystem_apply import * from .blochredfield import * from .energy_restricted import * +from .properties import * from . 
import gates del cy # File in cy are not public facing diff --git a/qutip/core/_brtensor.pyx b/qutip/core/_brtensor.pyx index 984e016ede..344562dc68 100644 --- a/qutip/core/_brtensor.pyx +++ b/qutip/core/_brtensor.pyx @@ -35,7 +35,7 @@ cpdef Data _br_term_data(Data A, double[:, ::1] spectrum, cdef Data S, I, AS, AST, out, C cdef type cls = type(A) - S = _data.to(cls, _data.mul(_data.Dense(spectrum, copy=False), 0.5)) + S = _data.to(cls, _data.mul(_data.Dense(spectrum), 0.5)) I = _data.identity[cls](nrows) AS = _data.multiply(A, S) AST = _data.multiply(A, _data.transpose(S)) diff --git a/qutip/core/blochredfield.py b/qutip/core/blochredfield.py index b3c0509572..0d76c635c7 100644 --- a/qutip/core/blochredfield.py +++ b/qutip/core/blochredfield.py @@ -1,20 +1,49 @@ import os import inspect import numpy as np -from qutip.settings import settings as qset +from typing import overload +from qutip.settings import settings as qset from . import Qobj, QobjEvo, liouvillian, coefficient, sprepost from ._brtools import SpectraCoefficient, _EigenBasisTransform from .cy.coefficient import InterCoefficient, Coefficient from ._brtensor import _BlochRedfieldElement - +from ..typing import CoeffProtocol __all__ = ['bloch_redfield_tensor', 'brterm'] -def bloch_redfield_tensor(H, a_ops, c_ops=[], sec_cutoff=0.1, - fock_basis=False, sparse_eigensolver=False, - br_dtype='sparse'): +@overload +def bloch_redfield_tensor( + H: Qobj, + a_ops: list[tuple[Qobj, Coefficient | str | CoeffProtocol]], + c_ops: list[Qobj] = None, + sec_cutoff: float = 0.1, + fock_basis: bool = False, + sparse_eigensolver: bool = False, + br_dtype: str = 'sparse', +) -> Qobj: ... + +@overload +def bloch_redfield_tensor( + H: Qobj | QobjEvo, + a_ops: list[tuple[Qobj | QobjEvo, Coefficient | str | CoeffProtocol]], + c_ops: list[Qobj | QobjEvo] = None, + sec_cutoff: float = 0.1, + fock_basis: bool = False, + sparse_eigensolver: bool = False, + br_dtype: str = 'sparse', +) -> QobjEvo: ... 
+ +def bloch_redfield_tensor( + H: Qobj | QobjEvo, + a_ops: list[tuple[Qobj | QobjEvo, Coefficient | str | CoeffProtocol]], + c_ops: list[Qobj | QobjEvo] = None, + sec_cutoff: float = 0.1, + fock_basis: bool = False, + sparse_eigensolver: bool = False, + br_dtype: str = 'sparse', +) -> Qobj | QobjEvo: """ Calculates the Bloch-Redfield tensor for a system given a set of operators and corresponding spectral functions that describes the @@ -46,10 +75,10 @@ def bloch_redfield_tensor(H, a_ops, c_ops=[], sec_cutoff=0.1, .. code-block:: a_ops = [ - (a+a.dag(), ('w>0', args={"w": 0})), + (a+a.dag(), coefficient('w>0', args={"w": 0})), (QobjEvo(a+a.dag()), 'w > exp(-t)'), (QobjEvo([b+b.dag(), lambda t: ...]), lambda w: ...)), - (c+c.dag(), SpectraCoefficient(coefficient(array, tlist=ws))), + (c+c.dag(), SpectraCoefficient(coefficient(ws, tlist=ts))), ] @@ -101,9 +130,37 @@ def bloch_redfield_tensor(H, a_ops, c_ops=[], sec_cutoff=0.1, False, br_dtype=br_dtype)[0] return R, H_transform.as_Qobj() - -def brterm(H, a_op, spectra, sec_cutoff=0.1, - fock_basis=False, sparse_eigensolver=False, br_dtype='sparse'): +@overload +def brterm( + H: Qobj, + a_op: Qobj, + spectra: Coefficient | CoeffProtocol | str, + sec_cutoff: float = 0.1, + fock_basis: bool = False, + sparse_eigensolver: bool = False, + br_dtype: str = 'sparse', +) -> Qobj: ... + +@overload +def brterm( + H: Qobj | QobjEvo, + a_op: Qobj | QobjEvo, + spectra: Coefficient | CoeffProtocol | str, + sec_cutoff: float = 0.1, + fock_basis: bool = False, + sparse_eigensolver: bool = False, + br_dtype: str = 'sparse', +) -> QobjEvo: ... + +def brterm( + H: Qobj | QobjEvo, + a_op: Qobj | QobjEvo, + spectra: Coefficient | CoeffProtocol | str, + sec_cutoff: float = 0.1, + fock_basis: bool = False, + sparse_eigensolver: bool = False, + br_dtype: str = 'sparse', +) -> Qobj | QobjEvo: """ Calculates the contribution of one coupling operator to the Bloch-Redfield tensor. 
diff --git a/qutip/core/coefficient.py b/qutip/core/coefficient.py index cf30326685..3d5698de11 100644 --- a/qutip/core/coefficient.py +++ b/qutip/core/coefficient.py @@ -1,4 +1,8 @@ +# Required for Sphinx to follow autodoc_type_aliases +from __future__ import annotations + import numpy as np +from numpy.typing import ArrayLike import scipy import scipy.interpolate import os @@ -11,8 +15,8 @@ import warnings import numbers from collections import defaultdict -from setuptools import setup, Extension try: + from setuptools import setup, Extension from Cython.Build import cythonize import filelock except ImportError: @@ -25,6 +29,7 @@ Coefficient, InterCoefficient, FunctionCoefficient, StrFunctionCoefficient, ConjCoefficient, NormCoefficient, ConstantCoefficient ) +from qutip.typing import CoefficientLike __all__ = ["coefficient", "CompilationOptions", "Coefficient", @@ -48,12 +53,22 @@ def _return(base, **kwargs): np.ndarray: InterCoefficient, scipy.interpolate.PPoly: InterCoefficient.from_PPoly, scipy.interpolate.BSpline: InterCoefficient.from_Bspline, + numbers.Number: ConstantCoefficient, } -def coefficient(base, *, tlist=None, args={}, args_ctypes={}, - order=3, compile_opt=None, function_style=None, - boundary_conditions=None, **kwargs): +def coefficient( + base: CoefficientLike, + *, + tlist: ArrayLike = None, + args: dict = {}, + args_ctypes: dict = {}, + order: int = 3, + compile_opt: dict = None, + function_style: str = None, + boundary_conditions: tuple | str = None, + **kwargs +): """Build ``Coefficient`` for time dependent systems: ``` @@ -209,11 +224,30 @@ def const(value): class CompilationOptions(QutipOptions): """ + Options that control compilation of string based coefficient to Cython. 
+ + These options can be set globaly: + + ``settings.compile["compiler_flags"] = "-O1"`` + + In a ``with`` block: + + ``with CompilationOptions(use_cython=False):`` + + Or as an instance: + + ``coefficient(coeff, compile_opt=CompilationOptions(recompile=True))`` + + ******************** Compilation options: + ******************** use_cython: bool Whether to compile strings as cython code or use python's ``exec``. + recompile : bool + Do not use previously made files but build a new one. + try_parse: bool [True] Whether to try parsing the string for reuse and static typing. @@ -229,9 +263,6 @@ class CompilationOptions(QutipOptions): accept_float : bool Whether to use the type ``float`` or upgrade them to ``complex``. - recompile : bool - Do not use previously made files but build a new one. - compiler_flags : str Flags to pass to the compiler, ex: "-Wall -O3"... Flags not matching your comiler and OS may cause compilation to fail. @@ -267,6 +298,7 @@ class CompilationOptions(QutipOptions): try: import cython import filelock + import setuptools _use_cython = True except ImportError: _use_cython = False @@ -277,7 +309,7 @@ class CompilationOptions(QutipOptions): "try_parse": True, "static_types": True, "accept_int": None, - "accept_float": True, + "accept_float": None, "recompile": False, "compiler_flags": _compiler_flags, "link_flags": _link_flags, @@ -293,7 +325,7 @@ class CompilationOptions(QutipOptions): # Version number of the Coefficient -COEFF_VERSION = "1.1" +COEFF_VERSION = "1.2" try: root = os.path.join(qset.tmproot, f"qutip_coeffs_{COEFF_VERSION}") @@ -309,15 +341,17 @@ def clean_compiled_coefficient(all=False): Parameter: ---------- all: bool - If not `all` will remove only previous version. + If not `all`, it will remove only previous version. 
""" import glob import shutil tmproot = qset.tmproot - root = os.path.join(tmproot, f'qutip_coeffs_{COEFF_VERSION}') + active = qset.coeffroot folders = glob.glob(os.path.join(tmproot, 'qutip_coeffs_') + "*") + if all: + shutil.rmtree(active) for folder in folders: - if all or folder != root: + if folder != active: shutil.rmtree(folder) # Recreate the empty folder. qset.coeffroot = qset.coeffroot @@ -384,8 +418,8 @@ def coeff_from_str(base, args, args_ctypes, compile_opt=None, **_): if not compile_opt['use_cython']: if WARN_MISSING_MODULE[0]: warnings.warn( - "Both `cython` and `filelock` are required for compilation of " - "string coefficents. Falling back on `eval`.") + "`cython`, `setuptools` and `filelock` are required for " + "compilation of string coefficents. Falling back on `eval`.") # Only warns once. WARN_MISSING_MODULE[0] = 0 return StrFunctionCoefficient(base, args) @@ -711,8 +745,11 @@ def parse(code, args, compile_opt): accept_float = compile_opt['accept_float'] if accept_int is None: # If there is a subscript: a[b] int are always accepted to be safe - # with TypeError + # with TypeError. + # Also comparison is not supported for complex. 
accept_int = "SUBSCR" in dis.Bytecode(code).dis() + if accept_float is None: + accept_float = "COMPARE_OP" in dis.Bytecode(code).dis() for word in code.split(): if word not in names: # syntax diff --git a/qutip/core/cy/coefficient.pyx b/qutip/core/cy/coefficient.pyx index 42a84cfca6..119a0fae00 100644 --- a/qutip/core/cy/coefficient.pyx +++ b/qutip/core/cy/coefficient.pyx @@ -517,7 +517,7 @@ cdef class InterCoefficient(Coefficient): @classmethod def from_PPoly(cls, ppoly, **_): - return cls.restore(ppoly.x, ppoly.c) + return cls.restore(ppoly.x, np.asarray(ppoly.c, complex)) @classmethod def from_Bspline(cls, spline, **_): @@ -528,7 +528,7 @@ cdef class InterCoefficient(Coefficient): poly = np.concatenate([ spline(tlist, i) / fact[i] for i in range(spline.k, -1, -1) - ]).reshape((spline.k+1, -1)) + ]).reshape((spline.k+1, -1)).astype(complex, copy=False) return cls.restore(tlist, poly) cpdef Coefficient copy(self): @@ -745,7 +745,7 @@ cdef class ConstantCoefficient(Coefficient): """ cdef complex value - def __init__(self, complex value): + def __init__(self, complex value, **_): self.value = value def replace_arguments(self, _args=None, **kwargs): diff --git a/qutip/core/cy/qobjevo.pyi b/qutip/core/cy/qobjevo.pyi new file mode 100644 index 0000000000..a9efa87e78 --- /dev/null +++ b/qutip/core/cy/qobjevo.pyi @@ -0,0 +1,73 @@ +# Required for Sphinx to follow autodoc_type_aliases +from __future__ import annotations + +from qutip.typing import LayerType, ElementType, QobjEvoLike +from qutip.core.qobj import Qobj +from qutip.core.data import Data +from qutip.core.coefficient import Coefficient +from numbers import Number +from numpy.typing import ArrayLike +from typing import Any, overload, Callable + + +class QobjEvo: + dims: list + isbra: bool + isconstant: bool + isket: bool + isoper: bool + isoperbra: bool + isoperket: bool + issuper: bool + num_elements: int + shape: tuple[int, int] + superrep: str + type: str + def __init__( + self, + Q_object: QobjEvoLike, + 
args: dict[str, Any] = None, + *, + copy: bool = True, + compress: bool = True, + function_style: str = None, + tlist: ArrayLike = None, + order: int = 3, + boundary_conditions: tuple | str = None, + ) -> None: ... + @overload + def arguments(self, new_args: dict[str, Any]) -> None: ... + @overload + def arguments(self, **new_args) -> None: ... + def compress(self) -> QobjEvo: ... + def tidyup(self, atol: Number) -> QobjEvo: ... + def copy(self) -> QobjEvo: ... + def conj(self) -> QobjEvo: ... + def dag(self) -> QobjEvo: ... + def trans(self) -> QobjEvo: ... + def to(self, data_type: LayerType) -> QobjEvo: ... + def linear_map(self, op_mapping: Callable[[Qobj], Qobj]) -> QobjEvo: ... + def expect(self, t: Number, state: Qobj, check_real: bool = True) -> Number: ... + def expect_data(self, t: Number, state: Data) -> Number: ... + def matmul(self, t: Number, state: Qobj) -> Qobj: ... + def matmul_data(self, t: Number, state: Data, out: Data = None) -> Data: ... + def to_list(self) -> list[ElementType]: ... + def __add__(self, other: QobjEvo | Qobj | Number) -> QobjEvo: ... + def __iadd__(self, other: QobjEvo | Qobj | Number) -> QobjEvo: ... + def __radd__(self, other: QobjEvo | Qobj | Number) -> QobjEvo: ... + def __sub__(self, other: QobjEvo | Qobj | Number) -> QobjEvo: ... + def __isub__(self, other: QobjEvo | Qobj | Number) -> QobjEvo: ... + def __rsub__(self, other: QobjEvo | Qobj | Number) -> QobjEvo: ... + def __and__(self, other: Qobj | QobjEvo) -> QobjEvo: ... + def __rand__(self, other: Qobj | QobjEvo) -> QobjEvo: ... + def __call__(self, t: float, **new_args) -> Qobj: ... + def __matmul__(self, other: Qobj | QobjEvo) -> QobjEvo: ... + def __imatmul__(self, other: Qobj | QobjEvo) -> QobjEvo: ... + def __rmatmul__(self, other: Qobj | QobjEvo) -> QobjEvo: ... + def __mul__(self, other: Number | Coefficient) -> QobjEvo: ... + def __imul__(self, other: Number | Coefficient) -> QobjEvo: ... + def __rmul__(self, other: Number | Coefficient) -> QobjEvo: ... 
+ def __truediv__(self, other : Number) -> QobjEvo: ... + def __idiv__(self, other : Number) -> QobjEvo: ... + def __neg__(self) -> QobjEvo: ... + def __reduce__(self): ... diff --git a/qutip/core/cy/qobjevo.pyx b/qutip/core/cy/qobjevo.pyx index d4037b72c1..a65a99ebad 100644 --- a/qutip/core/cy/qobjevo.pyx +++ b/qutip/core/cy/qobjevo.pyx @@ -482,7 +482,13 @@ cdef class QobjEvo: f"Desired feedback {key} is not available for the {solver}." ) new_args[key] = solvers_feeds[feed] - self.arguments(**new_args) + + if new_args: + cache = [] + self.elements = [ + element.replace_arguments(new_args, cache=cache) + for element in self.elements + ] def _update_feedback(QobjEvo self, QobjEvo other=None): """ diff --git a/qutip/core/data/csr.pyx b/qutip/core/data/csr.pyx index 5bbce551cc..5e017f8b5f 100644 --- a/qutip/core/data/csr.pyx +++ b/qutip/core/data/csr.pyx @@ -12,16 +12,23 @@ from cpython cimport mem import numbers import warnings - +import builtins import numpy as np cimport numpy as cnp import scipy.sparse from scipy.sparse import csr_matrix as scipy_csr_matrix -try: - from scipy.sparse.data import _data_matrix as scipy_data_matrix -except ImportError: +from functools import partial +from packaging.version import parse as parse_version +if parse_version(scipy.version.version) >= parse_version("1.14.0"): + from scipy.sparse._data import _data_matrix as scipy_data_matrix + # From scipy 1.14.0, a check that the input is not scalar was added for + # sparse arrays. 
+ scipy_data_matrix = partial(scipy_data_matrix, arg1=(0,)) +elif parse_version(scipy.version.version) >= parse_version("1.8.0"): # The file data was renamed to _data from scipy 1.8.0 from scipy.sparse._data import _data_matrix as scipy_data_matrix +else: + from scipy.sparse.data import _data_matrix as scipy_data_matrix from scipy.linalg cimport cython_blas as blas from qutip.core.data cimport base, Dense, Dia @@ -78,7 +85,7 @@ cdef class CSR(base.Data): # single flag that is set as soon as the pointers are assigned. self._deallocate = True - def __init__(self, arg=None, shape=None, bint copy=True, bint tidyup=False): + def __init__(self, arg=None, shape=None, copy=True, bint tidyup=False): # This is the Python __init__ method, so we do not care that it is not # super-fast C access. Typically Cython code will not call this, but # will use a factory method in this module or at worst, call @@ -100,6 +107,9 @@ cdef class CSR(base.Data): raise TypeError("arg must be a scipy matrix or tuple") if len(arg) != 3: raise ValueError("arg must be a (data, col_index, row_index) tuple") + if np.lib.NumpyVersion(np.__version__) < '2.0.0b1': + # np2 accept None which act as np1's False + copy = builtins.bool(copy) data = np.array(arg[0], dtype=np.complex128, copy=copy, order='C') col_index = np.array(arg[1], dtype=idxint_dtype, copy=copy, order='C') row_index = np.array(arg[2], dtype=idxint_dtype, copy=copy, order='C') diff --git a/qutip/core/data/dense.pyx b/qutip/core/data/dense.pyx index 0db9034fa0..22c4360aa1 100644 --- a/qutip/core/data/dense.pyx +++ b/qutip/core/data/dense.pyx @@ -5,7 +5,7 @@ from libc.string cimport memcpy cimport cython import numbers - +import builtins import numpy as np cimport numpy as cnp from scipy.linalg cimport cython_blas as blas @@ -40,6 +40,9 @@ class OrderEfficiencyWarning(EfficiencyWarning): cdef class Dense(base.Data): def __init__(self, data, shape=None, copy=True): + if np.lib.NumpyVersion(np.__version__) < '2.0.0b1': + # np2 accept None 
which act as np1's False + copy = builtins.bool(copy) base = np.array(data, dtype=np.complex128, order='K', copy=copy) # Ensure that the array is contiguous. # Non contiguous array with copy=False would otherwise slip through @@ -135,8 +138,8 @@ cdef class Dense(base.Data): cdef void _fix_flags(self, object array, bint make_owner=False): cdef int enable = cnp.NPY_ARRAY_OWNDATA if make_owner else 0 cdef int disable = 0 - cdef cnp.Py_intptr_t *dims = cnp.PyArray_DIMS(array) - cdef cnp.Py_intptr_t *strides = cnp.PyArray_STRIDES(array) + cdef cnp.npy_intp *dims = cnp.PyArray_DIMS(array) + cdef cnp.npy_intp *strides = cnp.PyArray_STRIDES(array) # Not necessary when creating a new array because this will already # have been done, but needed for as_ndarray() if we have been mutated. dims[0] = self.shape[0] diff --git a/qutip/core/data/dia.pyx b/qutip/core/data/dia.pyx index 9414298402..e7b2f0c6d6 100644 --- a/qutip/core/data/dia.pyx +++ b/qutip/core/data/dia.pyx @@ -12,16 +12,23 @@ from cpython cimport mem import numbers import warnings - +import builtins import numpy as np cimport numpy as cnp import scipy.sparse from scipy.sparse import dia_matrix as scipy_dia_matrix -try: - from scipy.sparse.data import _data_matrix as scipy_data_matrix -except ImportError: +from packaging.version import parse as parse_version +from functools import partial +if parse_version(scipy.version.version) >= parse_version("1.14.0"): + from scipy.sparse._data import _data_matrix as scipy_data_matrix + # From scipy 1.14.0, a check that the input is not scalar was added for + # sparse arrays. 
+ scipy_data_matrix = partial(scipy_data_matrix, arg1=(0,)) +elif parse_version(scipy.version.version) >= parse_version("1.8.0"): # The file data was renamed to _data from scipy 1.8.0 from scipy.sparse._data import _data_matrix as scipy_data_matrix +else: + from scipy.sparse.data import _data_matrix as scipy_data_matrix from scipy.linalg cimport cython_blas as blas from qutip.core.data cimport base, Dense, CSR @@ -69,7 +76,7 @@ cdef class Dia(base.Data): def __cinit__(self, *args, **kwargs): self._deallocate = True - def __init__(self, arg=None, shape=None, bint copy=True, bint tidyup=False): + def __init__(self, arg=None, shape=None, copy=True, bint tidyup=False): cdef size_t ptr cdef base.idxint col cdef object data, offsets @@ -81,12 +88,14 @@ cdef class Dia(base.Data): "shapes do not match: ", str(shape), " and ", str(arg.shape), ])) shape = arg.shape - # arg = (arg.data, arg.offsets) if not isinstance(arg, tuple): raise TypeError("arg must be a scipy matrix or tuple") if len(arg) != 2: raise ValueError("arg must be a (data, offsets) tuple") + if np.lib.NumpyVersion(np.__version__) < '2.0.0b1': + # np2 accept None which act as np1's False + copy = builtins.bool(copy) data = np.array(arg[0], dtype=np.complex128, copy=copy, order='C') offsets = np.array(arg[1], dtype=idxint_dtype, copy=copy, order='C') diff --git a/qutip/core/data/expm.py b/qutip/core/data/expm.py index 22e69ba5f2..fb0422326e 100644 --- a/qutip/core/data/expm.py +++ b/qutip/core/data/expm.py @@ -12,7 +12,7 @@ __all__ = [ 'expm', 'expm_csr', 'expm_csr_dense', 'expm_dense', 'expm_dia', - 'logm', 'logm_dense', + 'logm', 'logm_dense', 'sqrtm', 'sqrtm_dense' ] @@ -129,4 +129,25 @@ def logm_dense(matrix: Dense) -> Dense: (Dense, Dense, logm_dense), ], _defer=True) + +def sqrtm_dense(matrix) -> Dense: + if matrix.shape[0] != matrix.shape[1]: + raise ValueError("can only compute logarithm square matrix") + return Dense(scipy.linalg.sqrtm(matrix.as_ndarray())) + + +sqrtm = _Dispatcher( + 
_inspect.Signature([ + _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_ONLY), + ]), + name='sqrtm', + module=__name__, + inputs=('matrix',), + out=True, +) +sqrtm.__doc__ = """Matrix square root `sqrt(A)` for a matrix `A`.""" +sqrtm.add_specialisations([ + (Dense, Dense, sqrtm_dense), +], _defer=True) + del _inspect, _Dispatcher diff --git a/qutip/core/data/make.py b/qutip/core/data/make.py index 39c5247169..c7bfa8f681 100644 --- a/qutip/core/data/make.py +++ b/qutip/core/data/make.py @@ -119,7 +119,7 @@ def one_element_dia(shape, position, value=1.0): data = np.zeros((1, shape[1]), dtype=complex) data[0, position[1]] = value offsets = np.array([position[1]-position[0]]) - return Dia((data, offsets), copy=False, shape=shape) + return Dia((data, offsets), copy=None, shape=shape) one_element = _Dispatcher(one_element_dense, name='one_element', diff --git a/qutip/core/data/matmul.pyx b/qutip/core/data/matmul.pyx index 5f092b0cb7..efaa1047a6 100644 --- a/qutip/core/data/matmul.pyx +++ b/qutip/core/data/matmul.pyx @@ -527,6 +527,10 @@ cpdef CSR multiply_csr(CSR left, CSR right): + " and " + str(right.shape) ) + + left = left.sort_indices() + right = right.sort_indices() + cdef idxint col_left, left_nnz = csr.nnz(left) cdef idxint col_right, right_nnz = csr.nnz(right) cdef idxint ptr_left, ptr_right, ptr_left_max, ptr_right_max diff --git a/qutip/core/data/properties.pyx b/qutip/core/data/properties.pyx index 1a6e25d35b..2ab25ca543 100644 --- a/qutip/core/data/properties.pyx +++ b/qutip/core/data/properties.pyx @@ -7,8 +7,9 @@ from cpython cimport mem from qutip.settings import settings from qutip.core.data.base cimport idxint -from qutip.core.data cimport csr, dense, CSR, Dense, Dia +from qutip.core.data cimport csr, dense, dia, CSR, Dense, Dia from qutip.core.data.adjoint cimport transpose_csr +import numpy as np cdef extern from *: # Not defined in cpython.mem for some reason, but is in pymem.h. 
@@ -18,6 +19,7 @@ __all__ = [ 'isherm', 'isherm_csr', 'isherm_dense', 'isherm_dia', 'isdiag', 'isdiag_csr', 'isdiag_dense', 'isdiag_dia', 'iszero', 'iszero_csr', 'iszero_dense', 'iszero_dia', + 'isequal', 'isequal_csr', 'isequal_dense', 'isequal_dia', ] cdef inline bint _conj_feq(double complex a, double complex b, double tol) nogil: @@ -36,6 +38,30 @@ cdef inline bint _feq_zero(double complex a, double tol) nogil: cdef inline double _abssq(double complex x) nogil: return x.real*x.real + x.imag*x.imag +cdef inline bint _feq(double complex a, double complex b, double atol, double rtol) nogil: + """ + Follow numpy.allclose tolerance equation: + |a - b| <= (atol + rtol * |b|) + Avoid slow sqrt. + """ + cdef double diff = (a.real - b.real)**2 + (a.imag - b.imag)**2 - atol * atol + if diff <= 0: + # Early exit if under atol. + # |a - b|**2 <= atol**2 + return True + cdef double normb_sq = b.real * b.real + b.imag * b.imag + if normb_sq == 0. or rtol == 0.: + # No rtol term, the previous computation was final. + return False + diff -= rtol * rtol * normb_sq + if diff <= 0: + # Early exit if under atol + rtol without cross term. + # |a - b|**2 <= atol**2 + (rtol * |b|)**2 + return True + # Full computation + # (|a - b|**2 - atol**2 * (rtol * |b|)**2)**2 <= (2* atol * rtol * |b|)**2 + return diff**2 <= 4 * atol * atol * rtol * rtol * normb_sq + cdef bint _isherm_csr_full(CSR matrix, double tol) except 2: """ @@ -300,6 +326,116 @@ cpdef bint iszero_dense(Dense matrix, double tol=-1) nogil: return True +cpdef bint isequal_dia(Dia A, Dia B, double atol=-1, double rtol=-1): + if A.shape[0] != B.shape[0] or A.shape[1] != B.shape[1]: + return False + if atol < 0: + atol = settings.core["atol"] + if rtol < 0: + rtol = settings.core["rtol"] + + cdef idxint diag_a=0, diag_b=0 + cdef double complex *ptr_a + cdef double complex *ptr_b + cdef idxint size=A.shape[1] + + # TODO: + # Works only for a sorted offsets list. 
+ # We don't have a check for whether it's already sorted, but it should be + # in most cases. Could be improved by tracking whether it is or not. + A = dia.clean_dia(A) + B = dia.clean_dia(B) + + ptr_a = A.data + ptr_b = B.data + + with nogil: + while diag_a < A.num_diag and diag_b < B.num_diag: + if A.offsets[diag_a] == B.offsets[diag_b]: + for i in range(size): + if not _feq(ptr_a[i], ptr_b[i], atol, rtol): + return False + ptr_a += size + diag_a += 1 + ptr_b += size + diag_b += 1 + elif A.offsets[diag_a] <= B.offsets[diag_b]: + for i in range(size): + if not _feq(ptr_a[i], 0., atol, rtol): + return False + ptr_a += size + diag_a += 1 + else: + for i in range(size): + if not _feq(0., ptr_b[i], atol, rtol): + return False + ptr_b += size + diag_b += 1 + return True + + +cpdef bint isequal_dense(Dense A, Dense B, double atol=-1, double rtol=-1): + if A.shape[0] != B.shape[0] or A.shape[1] != B.shape[1]: + return False + if atol < 0: + atol = settings.core["atol"] + if rtol < 0: + rtol = settings.core["rtol"] + return np.allclose(A.as_ndarray(), B.as_ndarray(), rtol, atol) + + +cpdef bint isequal_csr(CSR A, CSR B, double atol=-1, double rtol=-1): + if A.shape[0] != B.shape[0] or A.shape[1] != B.shape[1]: + return False + if atol < 0: + atol = settings.core["atol"] + if rtol < 0: + rtol = settings.core["rtol"] + + cdef idxint row, ptr_a, ptr_b, ptr_a_max, ptr_b_max, col_a, col_b + cdef idxint ncols = A.shape[1], prev_col_a, prev_col_b + + # TODO: + # Works only for sorted indices. + # We don't have a check for whether it's already sorted, but it should be + # in most cases. 
+ A = A.sort_indices() + B = B.sort_indices() + + with nogil: + ptr_a_max = ptr_b_max = 0 + for row in range(A.shape[0]): + ptr_a = ptr_a_max + ptr_a_max = A.row_index[row + 1] + ptr_b = ptr_b_max + ptr_b_max = B.row_index[row + 1] + col_a = A.col_index[ptr_a] if ptr_a < ptr_a_max else ncols + 1 + col_b = B.col_index[ptr_b] if ptr_b < ptr_b_max else ncols + 1 + prev_col_a = -1 + prev_col_b = -1 + while ptr_a < ptr_a_max or ptr_b < ptr_b_max: + + if col_a == col_b: + if not _feq(A.data[ptr_a], B.data[ptr_b], atol, rtol): + return False + ptr_a += 1 + ptr_b += 1 + col_a = A.col_index[ptr_a] if ptr_a < ptr_a_max else ncols + 1 + col_b = B.col_index[ptr_b] if ptr_b < ptr_b_max else ncols + 1 + elif col_a < col_b: + if not _feq(A.data[ptr_a], 0., atol, rtol): + return False + ptr_a += 1 + col_a = A.col_index[ptr_a] if ptr_a < ptr_a_max else ncols + 1 + else: + if not _feq(0., B.data[ptr_b], atol, rtol): + return False + ptr_b += 1 + col_b = B.col_index[ptr_b] if ptr_b < ptr_b_max else ncols + 1 + + return True + + from .dispatch import Dispatcher as _Dispatcher import inspect as _inspect @@ -397,4 +533,48 @@ iszero.add_specialisations([ (Dense, iszero_dense), ], _defer=True) +isequal = _Dispatcher( + _inspect.Signature([ + _inspect.Parameter('A', _inspect.Parameter.POSITIONAL_ONLY), + _inspect.Parameter('B', _inspect.Parameter.POSITIONAL_ONLY), + _inspect.Parameter('atol', _inspect.Parameter.POSITIONAL_OR_KEYWORD, + default=-1), + _inspect.Parameter('rtol', _inspect.Parameter.POSITIONAL_OR_KEYWORD, + default=-1), + ]), + name='isequal', + module=__name__, + inputs=('A', 'B',), + out=False, +) +isequal.__doc__ =\ + """ + Test if two matrices are equal up to absolute and relative tolerance: + + |A - B| <= atol + rtol * |b| + + Similar to ``numpy.allclose``. + + Parameters + ---------- + A, B : Data + Matrices to compare. + atol : real, optional + The absolute tolerance to use. If not given, or + less than 0, use the core setting `atol`. 
+ rtol : real, optional + The relative tolerance to use. If not given, or + less than 0, use the core setting `atol`. + + Returns + ------- + bool + Whether the matrix are equal. + """ +isequal.add_specialisations([ + (CSR, CSR, isequal_csr), + (Dia, Dia, isequal_dia), + (Dense, Dense, isequal_dense), +], _defer=True) + del _inspect, _Dispatcher diff --git a/qutip/core/data/reshape.pyx b/qutip/core/data/reshape.pyx index 4b9683ac60..daec008272 100644 --- a/qutip/core/data/reshape.pyx +++ b/qutip/core/data/reshape.pyx @@ -2,7 +2,7 @@ #cython: boundscheck=False, wraparound=False, initializedcheck=False, cdivision=True from libc.string cimport memcpy, memset - +from scipy.linalg cimport cython_blas as blas cimport cython import warnings @@ -52,7 +52,7 @@ cpdef CSR reshape_csr(CSR matrix, idxint n_rows_out, idxint n_cols_out): return out -cdef inline idxint _reshape_dense_reindex(idxint idx, idxint size): +cdef inline size_t _reshape_dense_reindex(size_t idx, size_t size): return (idx // size) + (idx % size) @@ -66,8 +66,9 @@ cpdef Dense reshape_dense(Dense matrix, idxint n_rows_out, idxint n_cols_out): out = dense.zeros(n_rows_out, n_cols_out) cdef size_t idx_in=0, idx_out=0 cdef size_t size = n_rows_out * n_cols_out + cdef size_t tmp = ( matrix.shape[1]) * ( n_rows_out) # TODO: improve the algorithm here. 
- cdef size_t stride = _reshape_dense_reindex(matrix.shape[1]*n_rows_out, size) + cdef size_t stride = _reshape_dense_reindex(tmp, size) for idx_in in range(size): out.data[idx_out] = matrix.data[idx_in] idx_out = _reshape_dense_reindex(idx_out + stride, size) @@ -99,7 +100,16 @@ cpdef Dense column_stack_dense(Dense matrix, bint inplace=False): return out if inplace: warnings.warn("cannot stack columns inplace for C-ordered matrix") - return reshape_dense(matrix.transpose(), matrix.shape[0]*matrix.shape[1], 1) + out = dense.zeros(matrix.shape[0] * matrix.shape[1], 1) + cdef idxint col + cdef int ONE=1 + for col in range(matrix.shape[1]): + blas.zcopy( + &matrix.shape[0], + &matrix.data[col], &matrix.shape[1], + &out.data[col * matrix.shape[0]], &ONE + ) + return out cpdef Dia column_stack_dia(Dia matrix): diff --git a/qutip/core/dimensions.py b/qutip/core/dimensions.py index f19e3b2dee..e1834e9f2b 100644 --- a/qutip/core/dimensions.py +++ b/qutip/core/dimensions.py @@ -8,7 +8,9 @@ import numbers from operator import getitem from functools import partial +from typing import Literal from qutip.settings import settings +from qutip.typing import SpaceLike, DimensionLike __all__ = ["to_tensor_rep", "from_tensor_rep", "Space", "Dimensions"] @@ -351,13 +353,13 @@ def _frozen(*args, **kwargs): class MetaSpace(type): - def __call__(cls, *args, rep=None): + def __call__(cls, *args: SpaceLike, rep: str = None) -> "Space": """ Select which subclass is instantiated. """ if cls is Space and len(args) == 1 and isinstance(args[0], list): # From a list of int. 
- return cls.from_list(*args, rep=rep) + return cls.from_list(args[0], rep=rep) elif len(args) == 1 and isinstance(args[0], Space): # Already a Space return args[0] @@ -399,7 +401,11 @@ def __call__(cls, *args, rep=None): cls._stored_dims[args] = instance return cls._stored_dims[args] - def from_list(cls, list_dims, rep=None): + def from_list( + cls, + list_dims: list[int] | list[list[int]], + rep: str = None + ) -> "Space": if len(list_dims) == 0: raise ValueError("Empty list can't be used as dims.") elif ( @@ -449,7 +455,7 @@ def __init__(self, dims): self._pure_dims = True self.__setitem__ = _frozen - def __eq__(self, other): + def __eq__(self, other) -> bool: return self is other or ( type(other) is type(self) and other.size == self.size @@ -458,16 +464,16 @@ def __eq__(self, other): def __hash__(self): return hash(self.size) - def __repr__(self): + def __repr__(self) -> str: return f"Space({self.size})" - def as_list(self): + def as_list(self) -> list[int]: return [self.size] - def __str__(self): + def __str__(self) -> str: return str(self.as_list()) - def dims2idx(self, dims): + def dims2idx(self, dims: list[int]) -> int: """ Transform dimensions indices to full array indices. """ @@ -479,7 +485,7 @@ def dims2idx(self, dims): raise TypeError("Dimensions must be integers") return dims[0] - def idx2dims(self, idx): + def idx2dims(self, idx: int) -> list[int]: """ Transform full array indices to dimensions indices. """ @@ -487,7 +493,7 @@ def idx2dims(self, idx): raise IndexError("Index out of range") return [idx] - def step(self): + def step(self) -> list[int]: """ Get the step in the array between for each dimensions index. @@ -496,11 +502,11 @@ def step(self): """ return [1] - def flat(self): + def flat(self) -> list[int]: """ Dimensions as a flat list. """ return [self.size] - def remove(self, idx): + def remove(self, idx: int): """ Remove a Space from a Dimensons or complex Space. 
@@ -508,7 +514,7 @@ def remove(self, idx): """ raise RuntimeError("Cannot delete a flat space.") - def replace(self, idx, new): + def replace(self, idx: int, new: int) -> "Space": """ Reshape a Space from a Dimensons or complex Space. @@ -520,10 +526,10 @@ def replace(self, idx, new): ) return Space(new) - def replace_superrep(self, super_rep): + def replace_superrep(self, super_rep: str) -> "Space": return self - def scalar_like(self): + def scalar_like(self) -> "Space": return Field() @@ -537,28 +543,28 @@ def __init__(self): self._pure_dims = True self.__setitem__ = _frozen - def __eq__(self, other): + def __eq__(self, other) -> bool: return type(other) is Field def __hash__(self): return hash(0) - def __repr__(self): + def __repr__(self) -> str: return "Field()" - def as_list(self): + def as_list(self) -> list[int]: return [1] - def step(self): + def step(self) -> list[int]: return [1] - def flat(self): + def flat(self) -> list[int]: return [1] - def remove(self, idx): + def remove(self, idx: int) -> Space: return self - def replace(self, idx, new): + def replace(self, idx: int, new: int) -> Space: return Space(new) @@ -569,16 +575,16 @@ def replace(self, idx, new): class Compound(Space): _stored_dims = {} - def __init__(self, *spaces): - self.spaces = [] + def __init__(self, *spaces: Space): + spaces_ = [] if len(spaces) <= 1: raise ValueError("Compound need multiple space to join.") for space in spaces: if isinstance(space, Compound): - self.spaces += space.spaces + spaces_ += space.spaces else: - self.spaces += [space] - self.spaces = tuple(self.spaces) + spaces_ += [space] + self.spaces = tuple(spaces_) self.size = np.prod([space.size for space in self.spaces]) self.issuper = all(space.issuper for space in self.spaces) if not self.issuper and any(space.issuper for space in self.spaces): @@ -596,7 +602,7 @@ def __init__(self, *spaces): ) self.__setitem__ = _frozen - def __eq__(self, other): + def __eq__(self, other) -> bool: return self is other or ( 
type(other) is type(self) and self.spaces == other.spaces @@ -605,14 +611,14 @@ def __eq__(self, other): def __hash__(self): return hash(self.spaces) - def __repr__(self): + def __repr__(self) -> str: parts_rep = ", ".join(repr(space) for space in self.spaces) return f"Compound({parts_rep})" - def as_list(self): + def as_list(self) -> list[int]: return sum([space.as_list() for space in self.spaces], []) - def dims2idx(self, dims): + def dims2idx(self, dims: list[int]) -> int: if len(dims) != len(self.spaces): raise ValueError("Length of supplied dims does not match the number of subspaces.") pos = 0 @@ -622,14 +628,14 @@ def dims2idx(self, dims): step *= space.size return pos - def idx2dims(self, idx): + def idx2dims(self, idx: int) -> list[int]: dims = [] for space in self.spaces[::-1]: idx, dim = divmod(idx, space.size) dims = space.idx2dims(dim) + dims return dims - def step(self): + def step(self) -> list[int]: steps = [] step = 1 for space in self.spaces[::-1]: @@ -637,10 +643,10 @@ def step(self): step *= space.size return steps - def flat(self): + def flat(self) -> list[int]: return sum([space.flat() for space in self.spaces], []) - def remove(self, idx): + def remove(self, idx: int) -> Space: new_spaces = [] for space in self.spaces: n_indices = len(space.flat()) @@ -651,7 +657,7 @@ def remove(self, idx): return Compound(*new_spaces) return Field() - def replace(self, idx, new): + def replace(self, idx: int, new: int) -> Space: new_spaces = [] for space in self.spaces: n_indices = len(space.flat()) @@ -662,19 +668,19 @@ def replace(self, idx, new): idx -= n_indices return Compound(*new_spaces) - def replace_superrep(self, super_rep): + def replace_superrep(self, super_rep: str) -> Space: return Compound( *[space.replace_superrep(super_rep) for space in self.spaces] ) - def scalar_like(self): - return [space.scalar_like() for space in self.spaces] + def scalar_like(self) -> Space: + return Space([space.scalar_like() for space in self.spaces]) class 
SuperSpace(Space): _stored_dims = {} - def __init__(self, oper, rep='super'): + def __init__(self, oper: "Dimensions", rep: str = 'super'): self.oper = oper self.superrep = rep self.size = oper.shape[0] * oper.shape[1] @@ -682,7 +688,7 @@ def __init__(self, oper, rep='super'): self._pure_dims = oper._pure_dims self.__setitem__ = _frozen - def __eq__(self, other): + def __eq__(self, other) -> bool: return ( self is other or self.oper == other @@ -696,47 +702,47 @@ def __eq__(self, other): def __hash__(self): return hash((self.oper, self.superrep)) - def __repr__(self): + def __repr__(self) -> str: return f"Super({repr(self.oper)}, rep={self.superrep})" - def as_list(self): + def as_list(self) -> list[list[int]]: return self.oper.as_list() - def dims2idx(self, dims): + def dims2idx(self, dims: list[int]) -> int: posl, posr = self.oper.dims2idx(dims) return posl + posr * self.oper.shape[0] - def idx2dims(self, idx): + def idx2dims(self, idx: int) -> list[int]: posl = idx % self.oper.shape[0] posr = idx // self.oper.shape[0] return self.oper.idx2dims(posl, posr) - def step(self): + def step(self) -> list[int]: stepl, stepr = self.oper.step() step = self.oper.shape[0] return stepl + [step * N for N in stepr] - def flat(self): + def flat(self) -> list[int]: return sum(self.oper.flat(), []) - def remove(self, idx): + def remove(self, idx: int) -> Space: new_dims = self.oper.remove(idx) if new_dims.type == 'scalar': return Field() return SuperSpace(new_dims, rep=self.superrep) - def replace(self, idx, new): + def replace(self, idx: int, new: int) -> Space: return SuperSpace(self.oper.replace(idx, new), rep=self.superrep) - def replace_superrep(self, super_rep): + def replace_superrep(self, super_rep: str) -> Space: return SuperSpace(self.oper, rep=super_rep) - def scalar_like(self): - return self.oper.scalar_like() + def scalar_like(self) -> Space: + return SuperSpace(self.oper.scalar_like(), rep=self.superrep) class MetaDims(type): - def __call__(cls, *args, rep=None): + 
def __call__(cls, *args: DimensionLike, rep: str = None) -> "Dimensions": if len(args) == 1 and isinstance(args[0], Dimensions): return args[0] elif len(args) == 1 and len(args[0]) == 2: @@ -761,9 +767,9 @@ def __call__(cls, *args, rep=None): class Dimensions(metaclass=MetaDims): _stored_dims = {} - _type = None + _type: str = None - def __init__(self, from_, to_): + def __init__(self, from_: Space, to_: Space): self.from_ = from_ self.to_ = to_ self.shape = to_.size, from_.size @@ -801,7 +807,7 @@ def __init__(self, from_, to_): self.superrep = 'mixed' self.__setitem__ = _frozen - def __eq__(self, other): + def __eq__(self, other: "Dimensions") -> bool: if isinstance(other, Dimensions): return ( self is other @@ -812,7 +818,7 @@ def __eq__(self, other): ) return NotImplemented - def __ne__(self, other): + def __ne__(self, other: "Dimensions") -> bool: if isinstance(other, Dimensions): return not ( self is other @@ -823,7 +829,7 @@ def __ne__(self, other): ) return NotImplemented - def __matmul__(self, other): + def __matmul__(self, other: "Dimensions") -> "Dimensions": if self.from_ != other.to_: raise TypeError(f"incompatible dimensions {self} and {other}") args = other.from_, self.to_ @@ -834,19 +840,19 @@ def __matmul__(self, other): def __hash__(self): return hash((self.to_, self.from_)) - def __repr__(self): + def __repr__(self) -> str: return f"Dimensions({repr(self.from_)}, {repr(self.to_)})" - def __str__(self): + def __str__(self) -> str: return str(self.as_list()) - def as_list(self): + def as_list(self) -> list: """ Return the list representation of the Dimensions object. 
""" return [self.to_.as_list(), self.from_.as_list()] - def __getitem__(self, key): + def __getitem__(self, key: Literal[0, 1]) -> Space: if key == 0: return self.to_ elif key == 1: @@ -865,7 +871,7 @@ def idx2dims(self, idxl, idxr): """ return [self.to_.idx2dims(idxl), self.from_.idx2dims(idxr)] - def step(self): + def step(self) -> list[list[int]]: """ Get the step in the array between for each dimensions index. @@ -874,7 +880,7 @@ def step(self): """ return [self.to_.step(), self.from_.step()] - def flat(self): + def flat(self) -> list[list[int]]: """ Dimensions as a flat list. """ return [self.to_.flat(), self.from_.flat()] @@ -907,7 +913,7 @@ def _get_tensor_perm(self): np.argsort(stepr)[::-1] + len(stepl) ])) - def remove(self, idx): + def remove(self, idx: int | list[int]) -> "Dimensions": """ Remove a Space from a Dimensons or complex Space. @@ -926,7 +932,7 @@ def remove(self, idx): self.to_.remove(idx_to), ) - def replace(self, idx, new): + def replace(self, idx: int, new: int) -> "Dimensions": """ Reshape a Space from a Dimensons or complex Space. 
@@ -942,7 +948,7 @@ def replace(self, idx, new): return Dimensions(new_from, new_to) - def replace_superrep(self, super_rep): + def replace_superrep(self, super_rep: str) -> "Dimensions": if not self.issuper and super_rep is not None: raise TypeError("Can't set a superrep of a non super object.") return Dimensions( @@ -950,5 +956,5 @@ def replace_superrep(self, super_rep): self.to_.replace_superrep(super_rep) ) - def scalar_like(self): - return [self.to_.scalar_like(), self.from_.scalar_like()] + def scalar_like(self) -> "Dimensions": + return Dimensions([self.to_.scalar_like(), self.from_.scalar_like()]) diff --git a/qutip/core/energy_restricted.py b/qutip/core/energy_restricted.py index f790b09140..1a12f93b3e 100644 --- a/qutip/core/energy_restricted.py +++ b/qutip/core/energy_restricted.py @@ -247,7 +247,10 @@ def enr_destroy(dims, excitations, *, dtype=None): n2 = state2idx[state2] a_ops[idx][n2, n1] = np.sqrt(s) - return [Qobj(a, dims=enr_dims).to(dtype) for a in a_ops] + return [ + Qobj(a, dims=enr_dims, isunitary=False, isherm=False).to(dtype) + for a in a_ops + ] def enr_identity(dims, excitations, *, dtype=None): diff --git a/qutip/core/expect.py b/qutip/core/expect.py index bb72fccd95..647aba3c69 100644 --- a/qutip/core/expect.py +++ b/qutip/core/expect.py @@ -1,11 +1,34 @@ __all__ = ['expect', 'variance'] import numpy as np +from typing import overload, Sequence from .qobj import Qobj from . import data as _data +from ..settings import settings +@overload +def expect(oper: Qobj, state: Qobj) -> complex: ... + +@overload +def expect( + oper: Qobj, + state: Qobj | Sequence[Qobj], +) -> np.typing.NDArray[complex]: ... + +@overload +def expect( + oper: Qobj | Sequence[Qobj], + state: Qobj, +) -> list[complex]: ... + +@overload +def expect( + oper: Qobj | Sequence[Qobj], + state: Qobj | Sequence[Qobj] +) -> list[np.typing.NDArray[complex]]: ... + def expect(oper, state): """ Calculate the expectation value for operator(s) and state(s). 
The @@ -15,17 +38,18 @@ def expect(oper, state): Parameters ---------- - oper : qobj/array-like + oper : qobj / list of Qobj A single or a `list` of operators for expectation value. - state : qobj/array-like + state : qobj / list of Qobj A single or a `list` of quantum states or density matrices. Returns ------- - expt : float/complex/array-like - Expectation value. ``real`` if ``oper`` is Hermitian, ``complex`` - otherwise. A (nested) array of expectaction values if ``state`` or + expt : float / complex / list / array + Expectation value(s). ``real`` if ``oper`` is Hermitian, ``complex`` + otherwise. If multiple ``oper`` are passed, a list of array. + A (nested) array of expectaction values if ``state`` or ``oper`` are arrays. Examples @@ -37,16 +61,10 @@ def expect(oper, state): if isinstance(state, Qobj) and isinstance(oper, Qobj): return _single_qobj_expect(oper, state) - elif isinstance(oper, (list, np.ndarray)): - if isinstance(state, Qobj): - dtype = np.complex128 - if all(op.isherm for op in oper) and (state.isket or state.isherm): - dtype = np.float64 - return np.array([_single_qobj_expect(op, state) for op in oper], - dtype=dtype) + elif isinstance(oper, Sequence): return [expect(op, state) for op in oper] - elif isinstance(state, (list, np.ndarray)): + elif isinstance(state, Sequence): dtype = np.complex128 if oper.isherm and all(op.isherm or op.isket for op in state): dtype = np.float64 @@ -71,11 +89,20 @@ def _single_qobj_expect(oper, state): # This ensures that expect can return something that is not a number such # as a `tensorflow.Tensor` in qutip-tensorflow. - return out.real if (oper.isherm - and (state.isket or state.isherm) - and hasattr(out, "real") - ) else out + if ( + settings.core["auto_real_casting"] + and oper.isherm + and (state.isket or state.isherm) + ): + out = out.real + return out + + +@overload +def variance(oper: Qobj, state: Qobj) -> complex: ... 
+@overload +def variance(oper: Qobj, state: list[Qobj]) -> np.typing.NDArray[complex]: ... def variance(oper, state): """ @@ -83,10 +110,10 @@ def variance(oper, state): Parameters ---------- - oper : qobj + oper : Qobj Operator for expectation value. - state : qobj/list + state : Qobj / list of Qobj A single or ``list`` of quantum states or density matrices.. Returns diff --git a/qutip/core/gates.py b/qutip/core/gates.py index ee97c819f6..05e15bff3e 100644 --- a/qutip/core/gates.py +++ b/qutip/core/gates.py @@ -5,6 +5,10 @@ import numpy as np import scipy.sparse as sp from . import Qobj, qeye, sigmax, fock_dm, qdiags, qeye_like +from .dimensions import Dimensions +from .. import settings +from . import data as _data +from ..typing import LayerType __all__ = [ @@ -35,10 +39,15 @@ "toffoli", "hadamard_transform", "qubit_clifford_group", + "globalphase", ] -def cy_gate(*, dtype="csr"): +_DIMS_2_QB = Dimensions([[2, 2], [2, 2]]) +_DIMS_3_QB = Dimensions([[2, 2, 2], [2, 2, 2]]) + + +def cy_gate(*, dtype: LayerType = None) -> Qobj: """Controlled Y gate. Parameters @@ -52,13 +61,16 @@ def cy_gate(*, dtype="csr"): result : :class:`.Qobj` Quantum object for operator describing the rotation. """ + dtype = dtype or settings.core["default_dtype"] or _data.CSR return Qobj( [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, -1j], [0, 0, 1j, 0]], - dims=[[2, 2], [2, 2]], + dims=_DIMS_2_QB, + isherm=True, + isunitary=True, ).to(dtype) -def cz_gate(*, dtype="csr"): +def cz_gate(*, dtype: LayerType = None) -> Qobj: """Controlled Z gate. Parameters @@ -72,10 +84,11 @@ def cz_gate(*, dtype="csr"): result : :class:`.Qobj` Quantum object for operator describing the rotation. 
""" - return qdiags([1, 1, 1, -1], dims=[[2, 2], [2, 2]], dtype=dtype) + dtype = dtype or settings.core["default_dtype"] or _data.CSR + return qdiags([1, 1, 1, -1], dims=_DIMS_2_QB, dtype=dtype) -def s_gate(*, dtype="csr"): +def s_gate(*, dtype: LayerType = None) -> Qobj: """Single-qubit rotation also called Phase gate or the Z90 gate. Parameters @@ -91,10 +104,11 @@ def s_gate(*, dtype="csr"): a 90 degree rotation around the z-axis. """ + dtype = dtype or settings.core["default_dtype"] or _data.CSR return qdiags([1, 1j], dtype=dtype) -def cs_gate(*, dtype="csr"): +def cs_gate(*, dtype: LayerType = None) -> Qobj: """Controlled S gate. Parameters @@ -109,10 +123,11 @@ def cs_gate(*, dtype="csr"): Quantum object for operator describing the rotation. """ - return qdiags([1, 1, 1, 1j], dims=[[2, 2], [2, 2]], dtype=dtype) + dtype = dtype or settings.core["default_dtype"] or _data.CSR + return qdiags([1, 1, 1, 1j], dims=_DIMS_2_QB, dtype=dtype) -def t_gate(*, dtype="csr"): +def t_gate(*, dtype: LayerType = None) -> Qobj: """Single-qubit rotation related to the S gate by the relationship S=T*T. Parameters @@ -127,10 +142,11 @@ def t_gate(*, dtype="csr"): Quantum object for operator describing a phase shift of pi/4. """ + dtype = dtype or settings.core["default_dtype"] or _data.CSR return qdiags([1, np.exp(1j * np.pi / 4)], dtype=dtype) -def ct_gate(*, dtype="csr"): +def ct_gate(*, dtype: LayerType = None) -> Qobj: """Controlled T gate. Parameters @@ -145,18 +161,22 @@ def ct_gate(*, dtype="csr"): Quantum object for operator describing the rotation. """ + dtype = dtype or settings.core["default_dtype"] or _data.CSR return qdiags( [1, 1, 1, np.exp(1j * np.pi / 4)], - dims=[[2, 2], [2, 2]], + dims=_DIMS_2_QB, dtype=dtype, ) -def rx(phi, *, dtype="dense"): +def rx(phi: float, *, dtype: LayerType = None) -> Qobj: """Single-qubit rotation for operator sigmax with angle phi. 
Parameters ---------- + phi : float + Rotation angle + dtype : str or type, [keyword only] [optional] Storage representation. Any data-layer known to `qutip.data.to` is accepted. @@ -167,19 +187,25 @@ def rx(phi, *, dtype="dense"): Quantum object for operator describing the rotation. """ + dtype = dtype or settings.core["default_dtype"] or _data.Dense return Qobj( [ [np.cos(phi / 2), -1j * np.sin(phi / 2)], [-1j * np.sin(phi / 2), np.cos(phi / 2)], - ] + ], + isherm=(phi % (2 * np.pi) <= settings.core["atol"]), + isunitary=True, ).to(dtype) -def ry(phi, *, dtype="dense"): +def ry(phi: float, *, dtype: LayerType = None) -> Qobj: """Single-qubit rotation for operator sigmay with angle phi. Parameters ---------- + phi : float + Rotation angle + dtype : str or type, [keyword only] [optional] Storage representation. Any data-layer known to `qutip.data.to` is accepted. @@ -190,19 +216,25 @@ def ry(phi, *, dtype="dense"): Quantum object for operator describing the rotation. """ + dtype = dtype or settings.core["default_dtype"] or _data.Dense return Qobj( [ [np.cos(phi / 2), -np.sin(phi / 2)], [np.sin(phi / 2), np.cos(phi / 2)], - ] + ], + isherm=(phi % (2 * np.pi) <= settings.core["atol"]), + isunitary=True, ).to(dtype) -def rz(phi, *, dtype="csr"): +def rz(phi: float, *, dtype: LayerType = None) -> Qobj: """Single-qubit rotation for operator sigmaz with angle phi. Parameters ---------- + phi : float + Rotation angle + dtype : str or type, [keyword only] [optional] Storage representation. Any data-layer known to `qutip.data.to` is accepted. @@ -213,10 +245,11 @@ def rz(phi, *, dtype="csr"): Quantum object for operator describing the rotation. """ + dtype = dtype or settings.core["default_dtype"] or _data.CSR return qdiags([np.exp(-1j * phi / 2), np.exp(1j * phi / 2)], dtype=dtype) -def sqrtnot(*, dtype="dense"): +def sqrtnot(*, dtype: LayerType = None) -> Qobj: """Single-qubit square root NOT gate. 
Parameters @@ -231,10 +264,15 @@ def sqrtnot(*, dtype="dense"): Quantum object for operator describing the square root NOT gate. """ - return Qobj([[0.5 + 0.5j, 0.5 - 0.5j], [0.5 - 0.5j, 0.5 + 0.5j]]).to(dtype) + dtype = dtype or settings.core["default_dtype"] or _data.Dense + return Qobj( + [[0.5 + 0.5j, 0.5 - 0.5j], [0.5 - 0.5j, 0.5 + 0.5j]], + isherm=False, + isunitary=True, + ).to(dtype) -def snot(*, dtype="dense"): +def snot(*, dtype: LayerType = None) -> Qobj: """Quantum object representing the SNOT (Hadamard) gate. Parameters @@ -249,10 +287,15 @@ def snot(*, dtype="dense"): Quantum object representation of SNOT gate. """ - return Qobj([[1, 1], [1, -1]]).to(dtype) / np.sqrt(2.0) + dtype = dtype or settings.core["default_dtype"] or _data.CSR + return Qobj( + [[np.sqrt(0.5), np.sqrt(0.5)], [np.sqrt(0.5), -np.sqrt(0.5)]], + isherm=True, + isunitary=True, + ).to(dtype) -def phasegate(theta, *, dtype="csr"): +def phasegate(theta: float, *, dtype: LayerType = None) -> Qobj: """ Returns quantum object representing the phase shift gate. @@ -270,10 +313,11 @@ def phasegate(theta, *, dtype="csr"): Quantum object representation of phase shift gate. """ + dtype = dtype or settings.core["default_dtype"] or _data.CSR return qdiags([1, np.exp(1.0j * theta)], dtype=dtype) -def qrot(theta, phi, *, dtype="dense"): +def qrot(theta: float, phi: float, *, dtype: LayerType = None) -> Qobj: """ Single qubit rotation driving by Rabi oscillation with 0 detune. @@ -293,11 +337,14 @@ def qrot(theta, phi, *, dtype="dense"): Quantum object representation of physical qubit rotation under a rabi pulse. 
""" + dtype = dtype or settings.core["default_dtype"] or _data.Dense return Qobj( [ [np.cos(theta / 2), -1j * np.exp(-1j * phi) * np.sin(theta / 2)], [-1j * np.exp(1j * phi) * np.sin(theta / 2), np.cos(theta / 2)], - ] + ], + isherm=(theta % (2 * np.pi) <= settings.core["atol"]), + isunitary=True, ).to(dtype) @@ -306,7 +353,7 @@ def qrot(theta, phi, *, dtype="dense"): # -def cphase(theta, *, dtype="csr"): +def cphase(theta: float, *, dtype: LayerType = None) -> Qobj: """ Returns quantum object representing the controlled phase shift gate. @@ -323,12 +370,13 @@ def cphase(theta, *, dtype="csr"): U : qobj Quantum object representation of controlled phase gate. """ + dtype = dtype or settings.core["default_dtype"] or _data.CSR return qdiags( - [1, 1, 1, np.exp(1.0j * theta)], dims=[[2, 2], [2, 2]], dtype=dtype + [1, 1, 1, np.exp(1.0j * theta)], dims=_DIMS_2_QB, dtype=dtype ) -def cnot(*, dtype="csr"): +def cnot(*, dtype: LayerType = None) -> Qobj: """ Quantum object representing the CNOT gate. @@ -344,13 +392,16 @@ def cnot(*, dtype="csr"): Quantum object representation of CNOT gate """ + dtype = dtype or settings.core["default_dtype"] or _data.CSR return Qobj( [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]], - dims=[[2, 2], [2, 2]], + dims=_DIMS_2_QB, + isherm=True, + isunitary=True, ).to(dtype) -def csign(*, dtype="csr"): +def csign(*, dtype: LayerType = None) -> Qobj: """ Quantum object representing the CSIGN gate. @@ -369,7 +420,7 @@ def csign(*, dtype="csr"): return cz_gate(dtype=dtype) -def berkeley(*, dtype="dense"): +def berkeley(*, dtype: LayerType = None) -> Qobj: """ Quantum object representing the Berkeley gate. 
@@ -385,6 +436,7 @@ def berkeley(*, dtype="dense"): Quantum object representation of Berkeley gate """ + dtype = dtype or settings.core["default_dtype"] or _data.Dense return Qobj( [ [np.cos(np.pi / 8), 0, 0, 1.0j * np.sin(np.pi / 8)], @@ -392,16 +444,21 @@ def berkeley(*, dtype="dense"): [0, 1.0j * np.sin(3 * np.pi / 8), np.cos(3 * np.pi / 8), 0], [1.0j * np.sin(np.pi / 8), 0, 0, np.cos(np.pi / 8)], ], - dims=[[2, 2], [2, 2]], + dims=_DIMS_2_QB, + isherm=False, + isunitary=True, ).to(dtype) -def swapalpha(alpha, *, dtype="csr"): +def swapalpha(alpha: float, *, dtype: LayerType = None) -> Qobj: """ Quantum object representing the SWAPalpha gate. Parameters ---------- + alpha : float + Angle of the SWAPalpha gate. + dtype : str or type, [keyword only] [optional] Storage representation. Any data-layer known to `qutip.data.to` is accepted. @@ -411,6 +468,7 @@ def swapalpha(alpha, *, dtype="csr"): swapalpha_gate : qobj Quantum object representation of SWAPalpha gate """ + dtype = dtype or settings.core["default_dtype"] or _data.CSR phase = np.exp(1.0j * np.pi * alpha) return Qobj( [ @@ -419,11 +477,13 @@ def swapalpha(alpha, *, dtype="csr"): [0, 0.5 * (1 - phase), 0.5 * (1 + phase), 0], [0, 0, 0, 1], ], - dims=[[2, 2], [2, 2]], + dims=_DIMS_2_QB, + isherm=(np.abs(phase.imag) <= settings.core["atol"]), + isunitary=True, ).to(dtype) -def swap(*, dtype="csr"): +def swap(*, dtype: LayerType = None) -> Qobj: """Quantum object representing the SWAP gate. Parameters @@ -438,13 +498,16 @@ def swap(*, dtype="csr"): Quantum object representation of SWAP gate """ + dtype = dtype or settings.core["default_dtype"] or _data.CSR return Qobj( [[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]], - dims=[[2, 2], [2, 2]], + dims=_DIMS_2_QB, + isherm=True, + isunitary=True, ).to(dtype) -def iswap(*, dtype="csr"): +def iswap(*, dtype: LayerType = None) -> Qobj: """Quantum object representing the iSWAP gate. 
Parameters @@ -458,13 +521,16 @@ def iswap(*, dtype="csr"): iswap_gate : qobj Quantum object representation of iSWAP gate """ + dtype = dtype or settings.core["default_dtype"] or _data.CSR return Qobj( [[1, 0, 0, 0], [0, 0, 1j, 0], [0, 1j, 0, 0], [0, 0, 0, 1]], - dims=[[2, 2], [2, 2]], + dims=_DIMS_2_QB, + isherm=False, + isunitary=True, ).to(dtype) -def sqrtswap(*, dtype="dense"): +def sqrtswap(*, dtype: LayerType = None) -> Qobj: """Quantum object representing the square root SWAP gate. Parameters @@ -479,6 +545,7 @@ def sqrtswap(*, dtype="dense"): Quantum object representation of square root SWAP gate """ + dtype = dtype or settings.core["default_dtype"] or _data.CSR return Qobj( np.array( [ @@ -488,11 +555,13 @@ def sqrtswap(*, dtype="dense"): [0, 0, 0, 1], ] ), - dims=[[2, 2], [2, 2]], + dims=_DIMS_2_QB, + isherm=False, + isunitary=True, ).to(dtype) -def sqrtiswap(*, dtype="dense"): +def sqrtiswap(*, dtype: LayerType = None) -> Qobj: """Quantum object representing the square root iSWAP gate. Parameters @@ -506,6 +575,7 @@ def sqrtiswap(*, dtype="dense"): sqrtiswap_gate : qobj Quantum object representation of square root iSWAP gate """ + dtype = dtype or settings.core["default_dtype"] or _data.CSR return Qobj( np.array( [ @@ -515,11 +585,13 @@ def sqrtiswap(*, dtype="dense"): [0, 0, 0, 1], ] ), - dims=[[2, 2], [2, 2]], + dims=_DIMS_2_QB, + isherm=False, + isunitary=True, ).to(dtype) -def molmer_sorensen(theta, *, dtype="dense"): +def molmer_sorensen(theta: float, *, dtype: LayerType = None) -> Qobj: """ Quantum object of a Mølmer–Sørensen gate. @@ -527,8 +599,6 @@ def molmer_sorensen(theta, *, dtype="dense"): ---------- theta: float The duration of the interaction pulse. - N: int - Number of qubits in the system. target: int The indices of the target qubits. 
dtype : str or type, [keyword only] [optional] @@ -540,6 +610,7 @@ def molmer_sorensen(theta, *, dtype="dense"): molmer_sorensen_gate: :class:`.Qobj` Quantum object representation of the Mølmer–Sørensen gate. """ + dtype = dtype or settings.core["default_dtype"] or _data.CSR return Qobj( [ [np.cos(theta / 2.0), 0, 0, -1.0j * np.sin(theta / 2.0)], @@ -547,7 +618,9 @@ def molmer_sorensen(theta, *, dtype="dense"): [0, -1.0j * np.sin(theta / 2.0), np.cos(theta / 2.0), 0], [-1.0j * np.sin(theta / 2.0), 0, 0, np.cos(theta / 2.0)], ], - dims=[[2, 2], [2, 2]], + dims=_DIMS_2_QB, + isherm=(theta % (2 * np.pi) <= settings.core["atol"]), + isunitary=True, ).to(dtype) @@ -556,7 +629,7 @@ def molmer_sorensen(theta, *, dtype="dense"): # -def fredkin(*, dtype="csr"): +def fredkin(*, dtype: LayerType = None) -> Qobj: """Quantum object representing the Fredkin gate. Parameters @@ -571,6 +644,7 @@ def fredkin(*, dtype="csr"): Quantum object representation of Fredkin gate. """ + dtype = dtype or settings.core["default_dtype"] or _data.CSR return Qobj( [ [1, 0, 0, 0, 0, 0, 0, 0], @@ -582,11 +656,13 @@ def fredkin(*, dtype="csr"): [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1], ], - dims=[[2, 2, 2], [2, 2, 2]], + dims=_DIMS_3_QB, + isherm=True, + isunitary=True, ).to(dtype) -def toffoli(*, dtype="csr"): +def toffoli(*, dtype: LayerType = None) -> Qobj: """Quantum object representing the Toffoli gate. Parameters @@ -601,6 +677,7 @@ def toffoli(*, dtype="csr"): Quantum object representation of Toffoli gate. 
""" + dtype = dtype or settings.core["default_dtype"] or _data.CSR return Qobj( [ [1, 0, 0, 0, 0, 0, 0, 0], @@ -612,7 +689,9 @@ def toffoli(*, dtype="csr"): [0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 1, 0], ], - dims=[[2, 2, 2], [2, 2, 2]], + dims=_DIMS_3_QB, + isherm=True, + isunitary=True, ).to(dtype) @@ -621,7 +700,7 @@ def toffoli(*, dtype="csr"): # -def globalphase(theta, N=1, *, dtype="csr"): +def globalphase(theta: float, N: int = 1, *, dtype: LayerType = None) -> Qobj: """ Returns quantum object representing the global phase shift gate. @@ -630,6 +709,9 @@ def globalphase(theta, N=1, *, dtype="csr"): theta : float Phase rotation angle. + N : int: + Number of qubits + dtype : str or type, [keyword only] [optional] Storage representation. Any data-layer known to `qutip.data.to` is accepted. @@ -640,6 +722,7 @@ def globalphase(theta, N=1, *, dtype="csr"): Quantum object representation of global phase shift gate. """ + dtype = dtype or settings.core["default_dtype"] or _data.CSR return qeye([2] * N, dtype=dtype) * np.exp(1.0j * theta) @@ -660,11 +743,14 @@ def _hamming_distance(x): return tot -def hadamard_transform(N=1, *, dtype="dense"): +def hadamard_transform(N: int = 1, *, dtype: LayerType = None) -> Qobj: """Quantum object representing the N-qubit Hadamard gate. Parameters ---------- + N : int: + Number of qubits + dtype : str or type, [keyword only] [optional] Storage representation. Any data-layer known to `qutip.data.to` is accepted. @@ -675,6 +761,7 @@ def hadamard_transform(N=1, *, dtype="dense"): Quantum object representation of the N-qubit Hadamard gate. 
""" + dtype = dtype or settings.core["default_dtype"] or _data.Dense data = 2 ** (-N / 2) * np.array( [ [(-1) ** _hamming_distance(i & j) for i in range(2**N)] @@ -682,7 +769,9 @@ def hadamard_transform(N=1, *, dtype="dense"): ] ) - return Qobj(data, dims=[[2] * N, [2] * N]).to(dtype) + return Qobj(data, dims=[[2] * N, [2] * N], isherm=True, isunitary=True).to( + dtype + ) def _powers(op, N): @@ -698,7 +787,7 @@ def _powers(op, N): yield acc -def qubit_clifford_group(*, dtype="dense"): +def qubit_clifford_group(*, dtype: LayerType = None) -> list[Qobj]: """ Generates the Clifford group on a single qubit, using the presentation of the group given by Ross and Selinger @@ -716,6 +805,7 @@ def qubit_clifford_group(*, dtype="dense"): Clifford operators, represented as Qobj instances. """ + dtype = dtype or settings.core["default_dtype"] or _data.Dense # The Ross-Selinger presentation of the single-qubit Clifford # group expresses each element in the form C_{ijk} = E^i X^j S^k @@ -729,7 +819,7 @@ def qubit_clifford_group(*, dtype="dense"): X = sigmax() S = phasegate(np.pi / 2) - E = H * (S**3) * w**3 + E = H @ (S**3) * w**3 # partial(reduce, mul) returns a function that takes products # of its argument, by analogy to sum. Note that by analogy, @@ -738,10 +828,14 @@ def qubit_clifford_group(*, dtype="dense"): # product(...) yields the Cartesian product of its arguments. # Here, each element is a tuple (E**i, X**j, S**k) such that # partial(reduce, mul) acting on the tuple yields E**i * X**j * S**k. 
- return [ + gates = [ op.to(dtype) for op in map( partial(reduce, mul), product(_powers(E, 3), _powers(X, 2), _powers(S, 4)), ) ] + for gate in gates: + gate.isherm + gate._isunitary = True + return gates diff --git a/qutip/core/metrics.py b/qutip/core/metrics.py index 0df7b19f36..432f693f00 100644 --- a/qutip/core/metrics.py +++ b/qutip/core/metrics.py @@ -434,12 +434,21 @@ def hellinger_dist(A, B, sparse=False, tol=0): def dnorm(A, B=None, solver="CVXOPT", verbose=False, force_solve=False, sparse=True): - """ + r""" Calculates the diamond norm of the quantum map q_oper, using the simplified semidefinite program of [Wat13]_. The diamond norm SDP is solved by using `CVXPY `_. + If B is provided and both A and B are unitary, then the diamond norm + of the difference is calculated more efficiently using the following + geometric interpretation: + :math:`\|A - B\|_{\diamond}` equals :math:`2 \sqrt(1 - d^2)`, where + :math:`d`is the distance between the origin and the convex hull of the + eigenvalues of :math:`A B^{\dagger}`. + See [AKN98]_ page 18, in the paragraph immediately below the proof of 12.6, + as a reference. + Parameters ---------- A : Qobj @@ -472,59 +481,40 @@ def dnorm(A, B=None, solver="CVXOPT", verbose=False, force_solve=False, if cvxpy is None: # pragma: no cover raise ImportError("dnorm() requires CVXPY to be installed.") + if B is not None and A.dims != B.dims: + raise TypeError("A and B do not have the same dimensions.") + # We follow the strategy of using Watrous' simpler semidefinite # program in its primal form. This is the same strategy used, # for instance, by both pyGSTi and SchattenNorms.jl. (By contrast, # QETLAB uses the dual problem.) - # Check if A and B are both unitaries. If so, then we can without - # loss of generality choose B to be the identity by using the - # unitary invariance of the diamond norm, - # || A - B ||_♢ = || A B⁺ - I ||_♢. 
- # Then, using the technique mentioned by each of Johnston and - # da Silva, - # || A B⁺ - I ||_♢ = max_{i, j} | \lambda_i(A B⁺) - \lambda_j(A B⁺) |, - # where \lambda_i(U) is the ith eigenvalue of U. - - # There's a lot of conditions to check for this path. Only check if they - # aren't superoperators. The difference of unitaries optimization is - # currently only implemented for d == 2. Much of the code below is more - # general, though, in anticipation of generalizing the optimization. + # Check if A and B are both unitaries. If so we can use the geometric + # interpretation mentioned in D. Aharonov, A. Kitaev, and N. Nisan. (1998). + # We find the eigenvalues of AB⁺ and the distance d between the origin + # and the complex hull of these. Plugging this into 2√1-d² gives the + # diamond norm. + if ( not force_solve + and A.isunitary and B is not None - and A.isoper and B.isoper - and A.shape[0] == 2 - ): - # Make an identity the same size as A and B to - # compare against. - I = qeye_like(A) - # Compare to B first, so that an error is raised - # as soon as possible. - Bd = B.dag() - if ( - (B * Bd - I).norm() < 1e-6 and - (A * A.dag() - I).norm() < 1e-6 - ): - # Now we are on the fast path, so let's compute the - # eigenvalues, then find the diameter of the smallest circle - # containing all of them. - # - # For now, this is only implemented for dim = 2, such that - # generalizing here will allow for generalizing the optimization. - # A reasonable approach would probably be to use Welzl's algorithm - # (https://en.wikipedia.org/wiki/Smallest-circle_problem). - U = A * B.dag() - eigs = U.eigenenergies() - eig_distances = np.abs(eigs[:, None] - eigs[None, :]) - return np.max(eig_distances) - - # Force the input superoperator to be a Choi matrix. + and B.isunitary + ): # Special optimisation for a difference of unitaries. 
+ U = A * B.dag() + eigs = U.eigenenergies() + d = _find_poly_distance(eigs) + return 2 * np.sqrt(1 - d**2) # plug d into formula + J = to_choi(A) - if B is not None: + if B is not None: # If B is provided, calculate difference J -= to_choi(B) + if not force_solve and J.iscptp: + # diamond norm of a CPTP map is 1 (Prop 3.44 Watrous 2018) + return 1.0 + # Watrous 2012 also points out that the diamond norm of Lambda # is the same as the completely-bounded operator-norm (∞-norm) # of the dual map of Lambda. We can evaluate that norm much more @@ -583,3 +573,34 @@ def unitarity(oper): """ Eu = _to_superpauli(oper).full()[1:, 1:] return np.linalg.norm(Eu, 'fro')**2 / len(Eu) + + +def _find_poly_distance(eigenvals: np.ndarray) -> float: + """ + Returns the distance between the origin and the convex hull of eigenvalues. + + The complex eigenvalues must have unit length (i.e. lie on the circle + about the origin). + """ + phases = np.angle(eigenvals) + phase_max = phases.max() + phase_min = phases.min() + + if phase_min > 0: # all eigenvals have pos phase: hull is above x axis + return np.cos((phase_max - phase_min) / 2) + + if phase_max <= 0: # all eigenvals have neg phase: hull is below x axis + return np.cos((np.abs(phase_min) - np.abs(phase_max)) / 2) + + pos_phase_min = np.where(phases > 0, phases, np.inf).min() + neg_phase_max = np.where(phases <= 0, phases, -np.inf).max() + + big_angle = phase_max - phase_min + small_angle = pos_phase_min - neg_phase_max + if big_angle >= np.pi: + if small_angle <= np.pi: # hull contains the origin + return 0 + else: # hull is left of y axis + return np.cos((2 * np.pi - small_angle) / 2) + else: # hull is right of y axis + return np.cos(big_angle / 2) diff --git a/qutip/core/operators.py b/qutip/core/operators.py index c89cde6104..2bba17e386 100644 --- a/qutip/core/operators.py +++ b/qutip/core/operators.py @@ -12,28 +12,31 @@ 'tunneling', 'qft', 'qzero_like', 'qeye_like', 'swap', ] -import numbers - import numpy as np -import 
scipy.sparse - +from typing import Literal, overload from . import data as _data from .qobj import Qobj -from .dimensions import flatten, Space +from .dimensions import Space from .. import settings - - -def qdiags(diagonals, offsets=None, dims=None, shape=None, *, - dtype=None): +from ..typing import DimensionLike, SpaceLike, LayerType + +def qdiags( + diagonals: np.typing.ArrayLike | list[np.typing.ArrayLike], + offsets: int | list[int] = None, + dims: DimensionLike = None, + shape: tuple[int, int] = None, + *, + dtype: LayerType = None, +) -> Qobj: """ Constructs an operator from an array of diagonals. Parameters ---------- - diagonals : sequence of array_like + diagonals : array_like or sequence of array_like Array of elements to place along the selected diagonals. - offsets : sequence of ints, optional + offsets : int or sequence of ints, optional Sequence for diagonals to be set: - k=0 main diagonal - k>0 kth upper diagonal @@ -64,11 +67,46 @@ def qdiags(diagonals, offsets=None, dims=None, shape=None, *, """ dtype = dtype or settings.core["default_dtype"] or _data.Dia offsets = [0] if offsets is None else offsets + if not isinstance(offsets, list): + offsets = [offsets] + if len(offsets) == 1 and offsets[0] != 0: + isherm = False + isunitary = False + elif offsets == [0]: + isherm = np.all(np.imag(diagonals) <= settings.core["atol"]) + isunitary = np.all(np.abs(diagonals) - 1 <= settings.core["atol"]) + else: + isherm = None + isunitary = None data = _data.diag[dtype](diagonals, offsets, shape) - return Qobj(data, dims=dims, copy=False) + return Qobj( + data, copy=False, + dims=dims, isherm=isherm, isunitary=isunitary + ) -def jmat(j, which=None, *, dtype=None): +@overload +def jmat( + j: float, + which: Literal[None], + *, + dtype: LayerType = None +) -> tuple[Qobj]: ... + +@overload +def jmat( + j: float, + which: Literal["x", "y", "z", "+", "-"], + *, + dtype: LayerType = None +) -> Qobj: ... 
+ +def jmat( + j: float, + which: Literal["x", "y", "z", "+", "-", None] = None, + *, + dtype: LayerType = None +) -> Qobj | tuple[Qobj]: """Higher-order spin operators: Parameters @@ -132,8 +170,8 @@ def jmat(j, which=None, *, dtype=None): isherm=False, isunitary=False, copy=False) if which == 'x': A = _jplus(j, dtype=dtype) - return Qobj(_data.add(A, A.adjoint()), dims=dims, - isherm=True, isunitary=False, copy=False) * 0.5 + return Qobj(_data.add(A, A.adjoint()) * 0.5, dims=dims, + isherm=True, isunitary=False, copy=False) if which == 'y': A = _data.mul(_jplus(j, dtype=dtype), -0.5j) return Qobj(_data.add(A, A.adjoint()), dims=dims, @@ -167,7 +205,7 @@ def _jz(j, *, dtype=None): # # Spin j operators: # -def spin_Jx(j, *, dtype=None): +def spin_Jx(j: float, *, dtype: LayerType = None) -> Qobj: """Spin-j x operator Parameters @@ -188,7 +226,7 @@ def spin_Jx(j, *, dtype=None): return jmat(j, 'x', dtype=dtype) -def spin_Jy(j, *, dtype=None): +def spin_Jy(j: float, *, dtype: LayerType = None) -> Qobj: """Spin-j y operator Parameters @@ -209,7 +247,7 @@ def spin_Jy(j, *, dtype=None): return jmat(j, 'y', dtype=dtype) -def spin_Jz(j, *, dtype=None): +def spin_Jz(j: float, *, dtype: LayerType = None) -> Qobj: """Spin-j z operator Parameters @@ -230,7 +268,7 @@ def spin_Jz(j, *, dtype=None): return jmat(j, 'z', dtype=dtype) -def spin_Jm(j, *, dtype=None): +def spin_Jm(j: float, *, dtype: LayerType = None) -> Qobj: """Spin-j annihilation operator Parameters @@ -251,7 +289,7 @@ def spin_Jm(j, *, dtype=None): return jmat(j, '-', dtype=dtype) -def spin_Jp(j, *, dtype=None): +def spin_Jp(j: float, *, dtype: LayerType = None) -> Qobj: """Spin-j creation operator Parameters @@ -272,7 +310,7 @@ def spin_Jp(j, *, dtype=None): return jmat(j, '+', dtype=dtype) -def spin_J_set(j, *, dtype=None): +def spin_J_set(j: float, *, dtype: LayerType = None) -> tuple[Qobj]: """Set of spin-j operators (x, y, z) Parameters @@ -286,7 +324,7 @@ def spin_J_set(j, *, dtype=None): Returns ------- - 
list : list of Qobj + list : tuple of Qobj list of ``qobj`` representating of the spin operator. """ @@ -301,13 +339,22 @@ def spin_J_set(j, *, dtype=None): _SIGMAP = jmat(0.5, '+') _SIGMAM = jmat(0.5, '-') _SIGMAX = 2 * jmat(0.5, 'x') +_SIGMAX._isunitary = True _SIGMAY = 2 * jmat(0.5, 'y') +_SIGMAY._isunitary = True _SIGMAZ = 2 * jmat(0.5, 'z') +_SIGMAZ._isunitary = True -def sigmap(): +def sigmap(*, dtype: LayerType = None) -> Qobj: """Creation operator for Pauli spins. + Parameters + ---------- + dtype : type or str, optional + Storage representation. Any data-layer known to ``qutip.data.to`` is + accepted. + Examples -------- >>> sigmap() # doctest: +SKIP @@ -318,12 +365,19 @@ def sigmap(): [ 0. 0.]] """ - return _SIGMAP.copy() + dtype = dtype or settings.core["default_dtype"] or _data.CSR + return _SIGMAP.to(dtype, True) -def sigmam(): +def sigmam(*, dtype: LayerType = None) -> Qobj: """Annihilation operator for Pauli spins. + Parameters + ---------- + dtype : type or str, optional + Storage representation. Any data-layer known to ``qutip.data.to`` is + accepted. + Examples -------- >>> sigmam() # doctest: +SKIP @@ -334,12 +388,19 @@ def sigmam(): [ 1. 0.]] """ - return _SIGMAM.copy() + dtype = dtype or settings.core["default_dtype"] or _data.CSR + return _SIGMAM.to(dtype, True) -def sigmax(): +def sigmax(*, dtype: LayerType = None) -> Qobj: """Pauli spin 1/2 sigma-x operator + Parameters + ---------- + dtype : type or str, optional + Storage representation. Any data-layer known to ``qutip.data.to`` is + accepted. + Examples -------- >>> sigmax() # doctest: +SKIP @@ -350,12 +411,19 @@ def sigmax(): [ 1. 0.]] """ - return _SIGMAX.copy() + dtype = dtype or settings.core["default_dtype"] or _data.CSR + return _SIGMAX.to(dtype, True) -def sigmay(): +def sigmay(*, dtype: LayerType = None) -> Qobj: """Pauli spin 1/2 sigma-y operator. + Parameters + ---------- + dtype : type or str, optional + Storage representation. 
Any data-layer known to ``qutip.data.to`` is + accepted. + Examples -------- >>> sigmay() # doctest: +SKIP @@ -366,12 +434,19 @@ def sigmay(): [ 0.+1.j 0.+0.j]] """ - return _SIGMAY.copy() + dtype = dtype or settings.core["default_dtype"] or _data.CSR + return _SIGMAY.to(dtype, True) -def sigmaz(): +def sigmaz(*, dtype: LayerType = None) -> Qobj: """Pauli spin 1/2 sigma-z operator. + Parameters + ---------- + dtype : type or str, optional + Storage representation. Any data-layer known to ``qutip.data.to`` is + accepted. + Examples -------- >>> sigmaz() # doctest: +SKIP @@ -382,10 +457,11 @@ def sigmaz(): [ 0. -1.]] """ - return _SIGMAZ.copy() + dtype = dtype or settings.core["default_dtype"] or _data.CSR + return _SIGMAZ.to(dtype, True) -def destroy(N, offset=0, *, dtype=None): +def destroy(N: int, offset: int = 0, *, dtype: LayerType = None) -> Qobj: """ Destruction (lowering) operator. @@ -424,7 +500,7 @@ def destroy(N, offset=0, *, dtype=None): return qdiags(data, 1, dtype=dtype) -def create(N, offset=0, *, dtype=None): +def create(N: int, offset: int = 0, *, dtype: LayerType = None) -> Qobj: """ Creation (raising) operator. @@ -463,7 +539,7 @@ def create(N, offset=0, *, dtype=None): return qdiags(data, -1, dtype=dtype) -def fdestroy(n_sites, site, dtype=None): +def fdestroy(n_sites: int, site, dtype: LayerType = None) -> Qobj: """ Fermionic destruction operator. We use the Jordan-Wigner transformation, @@ -481,7 +557,7 @@ def fdestroy(n_sites, site, dtype=None): n_sites : int Number of sites in Fock space. - site : int, default: 0 + site : int The site in Fock space to add a fermion to. Corresponds to j in the above JW transform. @@ -508,7 +584,7 @@ def fdestroy(n_sites, site, dtype=None): return _f_op(n_sites, site, 'destruction', dtype=dtype) -def fcreate(n_sites, site, dtype=None): +def fcreate(n_sites: int, site, dtype: LayerType = None) -> Qobj: """ Fermionic creation operator. 
We use the Jordan-Wigner transformation, @@ -554,7 +630,7 @@ def fcreate(n_sites, site, dtype=None): return _f_op(n_sites, site, 'creation', dtype=dtype) -def _f_op(n_sites, site, action, dtype=None): +def _f_op(n_sites, site, action, dtype: LayerType = None,): """ Makes fermionic creation and destruction operators. We use the Jordan-Wigner transformation, making use of the Jordan-Wigner ZZ..Z strings, @@ -615,10 +691,18 @@ def _f_op(n_sites, site, action, dtype=None): eye = identity(2, dtype=dtype) opers = [s_z] * site + [operator] + [eye] * (n_sites - site - 1) - return tensor(opers).to(dtype) + out = tensor(opers).to(dtype) + out.isherm = False + out._isunitary = False + return out -def qzero(dimensions, dims_right=None, *, dtype=None): +def qzero( + dimensions: SpaceLike, + dims_right: SpaceLike = None, + *, + dtype: LayerType = None +) -> Qobj: """ Zero operator. @@ -659,7 +743,7 @@ def qzero(dimensions, dims_right=None, *, dtype=None): isherm=True, isunitary=False, copy=False) -def qzero_like(qobj): +def qzero_like(qobj: Qobj) -> Qobj: """ Zero operator of the same dims and type as the reference. @@ -681,7 +765,7 @@ def qzero_like(qobj): ) -def qeye(dimensions, *, dtype=None): +def qeye(dimensions: SpaceLike, *, dtype: LayerType = None) -> Qobj: """ Identity operator. @@ -731,7 +815,7 @@ def qeye(dimensions, *, dtype=None): identity = qeye -def qeye_like(qobj): +def qeye_like(qobj: Qobj) -> Qobj: """ Identity operator with the same dims and type as the reference quantum object. 
@@ -757,7 +841,7 @@ def qeye_like(qobj): ) -def position(N, offset=0, *, dtype=None): +def position(N: int, offset: int = 0, *, dtype: LayerType = None) -> Qobj: """ Position operator :math:`x = 1 / sqrt(2) * (a + a.dag())` @@ -783,10 +867,11 @@ def position(N, offset=0, *, dtype=None): a = destroy(N, offset=offset, dtype=dtype) position = np.sqrt(0.5) * (a + a.dag()) position.isherm = True + position._isunitary = False return position.to(dtype) -def momentum(N, offset=0, *, dtype=None): +def momentum(N: int, offset: int = 0, *, dtype: LayerType = None) -> Qobj: """ Momentum operator p=-1j/sqrt(2)*(a-a.dag()) @@ -812,10 +897,11 @@ def momentum(N, offset=0, *, dtype=None): a = destroy(N, offset=offset, dtype=dtype) momentum = -1j * np.sqrt(0.5) * (a - a.dag()) momentum.isherm = True + momentum._isunitary = False return momentum.to(dtype) -def num(N, offset=0, *, dtype=None): +def num(N: int, offset: int = 0, *, dtype: LayerType = None) -> Qobj: """ Quantum object for number operator. @@ -852,7 +938,13 @@ def num(N, offset=0, *, dtype=None): return qdiags(data, 0, dtype=dtype) -def squeeze(N, z, offset=0, *, dtype=None): +def squeeze( + N: int, + z: float, + offset: int = 0, + *, + dtype: LayerType = None, +) -> Qobj: """Single-mode squeezing operator. Parameters @@ -889,12 +981,16 @@ def squeeze(N, z, offset=0, *, dtype=None): [ 0.00000000+0.j -0.30142443+0.j 0.00000000+0.j 0.95349007+0.j]] """ + dtype = dtype or settings.core["default_dtype"] or _data.Dense asq = destroy(N, offset=offset, dtype=dtype) ** 2 op = 0.5*np.conj(z)*asq - 0.5*z*asq.dag() - return op.expm(dtype=dtype) + out = op.expm(dtype=dtype) + out.isherm = (N == 2) or (z == 0.) + out._isunitary = True + return out -def squeezing(a1, a2, z): +def squeezing(a1: Qobj, a2: Qobj, z: float) -> Qobj: """Generalized squeezing operator. .. 
math:: @@ -923,7 +1019,13 @@ def squeezing(a1, a2, z): return b.expm() -def displace(N, alpha, offset=0, *, dtype=None): +def displace( + N: int, + alpha: float, + offset: int = 0, + *, + dtype: LayerType = None, +) -> Qobj: """Single-mode displacement operator. Parameters @@ -961,10 +1063,17 @@ def displace(N, alpha, offset=0, *, dtype=None): """ dtype = dtype or settings.core["default_dtype"] or _data.Dense a = destroy(N, offset=offset) - return (alpha * a.dag() - np.conj(alpha) * a).expm(dtype=dtype) + out = (alpha * a.dag() - np.conj(alpha) * a).expm(dtype=dtype) + out.isherm = (alpha == 0.) + out._isunitary = True + return out -def commutator(A, B, kind="normal"): +def commutator( + A: Qobj, + B: Qobj, + kind: Literal["normal", "anti"] = "normal" +) -> Qobj: """ Return the commutator of kind `kind` (normal, anti) of the two operators A and B. @@ -987,7 +1096,7 @@ def commutator(A, B, kind="normal"): raise TypeError("Unknown commutator kind '%s'" % kind) -def qutrit_ops(*, dtype=None): +def qutrit_ops(*, dtype: LayerType = None) -> list[Qobj]: """ Operators for a three level system (qutrit). @@ -1006,18 +1115,22 @@ def qutrit_ops(*, dtype=None): from .states import qutrit_basis dtype = dtype or settings.core["default_dtype"] or _data.CSR - out = np.empty((6,), dtype=object) - one, two, three = qutrit_basis(dtype=dtype) - out[0] = one * one.dag() - out[1] = two * two.dag() - out[2] = three * three.dag() - out[3] = one * two.dag() - out[4] = two * three.dag() - out[5] = three * one.dag() + out = [] + basis = qutrit_basis(dtype=dtype) + for i in range(3): + op = basis[i] @ basis[i].dag() + op.isherm = True + op._isunitary = False + out.append(op) + for i in range(3): + op = basis[i] @ basis[(i+1)%3].dag() + op.isherm = False + op._isunitary = False + out.append(op) return out -def phase(N, phi0=0, *, dtype=None): +def phase(N: int, phi0: float = 0, *, dtype: LayerType = None) -> Qobj: """ Single-mode Pegg-Barnett phase operator. 
@@ -1049,10 +1162,22 @@ def phase(N, phi0=0, *, dtype=None): states = np.array([np.sqrt(kk) / np.sqrt(N) * np.exp(1j * n * kk) for kk in phim]) ops = np.sum([np.outer(st, st.conj()) for st in states], axis=0) - return Qobj(ops, dims=[[N], [N]], copy=False).to(dtype) + return Qobj( + ops, + isherm=True, + isunitary=False, + dims=[[N], [N]], + copy=False + ).to(dtype) -def charge(Nmax, Nmin=None, frac=1, *, dtype=None): +def charge( + Nmax: int, + Nmin: int = None, + frac: float = 1, + *, + dtype: LayerType = None +) -> Qobj: """ Generate the diagonal charge operator over charge states from Nmin to Nmax. @@ -1087,11 +1212,11 @@ def charge(Nmax, Nmin=None, frac=1, *, dtype=None): Nmin = -Nmax diag = frac * np.arange(Nmin, Nmax+1, dtype=float) out = qdiags(diag, 0, dtype=dtype) - out.isherm = True + out._isunitary = (len(diag) <= 2) and np.all(np.abs(diag) == 1.) return out -def tunneling(N, m=1, *, dtype=None): +def tunneling(N: int, m: int = 1, *, dtype: LayerType = None) -> Qobj: r""" Tunneling operator with elements of the form :math:`\\sum |N> Qobj: """ Quantum Fourier Transform operator. @@ -1130,7 +1256,7 @@ def qft(dimensions, *, dtype="dense"): ints, then the dimension is the product over this list, but the ``dims`` property of the new Qobj are set to this list. - dtype : str or type, [keyword only] [optional] + dtype : type or str, optional Storage representation. Any data-layer known to ``qutip.data.to`` is accepted. @@ -1140,17 +1266,19 @@ def qft(dimensions, *, dtype="dense"): Quantum Fourier transform operator. 
""" + dtype = dtype or settings.core["default_dtype"] or _data.Dense dimensions = Space(dimensions) + dims = [dimensions]*2 N2 = dimensions.size phase = 2.0j * np.pi / N2 arr = np.arange(N2) L, M = np.meshgrid(arr, arr) data = np.exp(phase * (L * M)) / np.sqrt(N2) - return Qobj(data, dims=[dimensions]*2).to(dtype) + return Qobj(data, isherm=False, isunitary=True, dims=dims).to(dtype) -def swap(N, M, *, dtype=None): +def swap(N: int, M: int, *, dtype: LayerType = None) -> Qobj: """ Operator that exchanges the order of tensored spaces: @@ -1174,5 +1302,7 @@ def swap(N, M, *, dtype=None): cols = np.ravel(M * np.arange(N)[None, :] + np.arange(M)[:, None]) return Qobj( _data.CSR((data, cols, rows), (N * M, N * M)), - dims=[[M, N], [N, M]] + dims=[[M, N], [N, M]], + isherm=(N == M), + isunitary=True, ).to(dtype) diff --git a/qutip/core/options.py b/qutip/core/options.py index 4748dd3612..640d4f7f16 100644 --- a/qutip/core/options.py +++ b/qutip/core/options.py @@ -1,4 +1,6 @@ from ..settings import settings +from typing import overload, Literal, Any +import types __all__ = ["CoreOptions"] @@ -10,7 +12,8 @@ class QutipOptions: Define basic method to wrap an ``options`` dict. Default options are in a class _options dict. 
""" - _options = {} + + _options: dict[str, Any] = {} _settings_name = None # Where the default is in settings def __init__(self, **options): @@ -20,24 +23,24 @@ def __init__(self, **options): if options: raise KeyError(f"Options {set(options)} are not supported.") - def __contains__(self, key): + def __contains__(self, key: str) -> bool: return key in self.options - def __getitem__(self, key): + def __getitem__(self, key: str) -> Any: # Let the dict catch the KeyError return self.options[key] - def __setitem__(self, key, value): + def __setitem__(self, key: str, value: Any) -> None: # Let the dict catch the KeyError self.options[key] = value - def __repr__(self, full=True): + def __repr__(self, full: bool = True) -> str: out = [f"<{self.__class__.__name__}("] for key, value in self.options.items(): if full or value != self._options[key]: out += [f" '{key}': {repr(value)},"] out += [")>"] - if len(out)-2: + if len(out) - 2: return "\n".join(out) else: return "".join(out) @@ -46,7 +49,12 @@ def __enter__(self): self._backup = getattr(settings, self._settings_name) setattr(settings, self._settings_name, self) - def __exit__(self, exc_type, exc_value, exc_traceback): + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + exc_traceback: types.TracebackType | None, + ) -> None: setattr(settings, self._settings_name, self._backup) @@ -56,22 +64,25 @@ class CoreOptions(QutipOptions): comparison or coefficient's format. Values can be changed in ``qutip.settings.core`` or by using context: - ``with CoreOptions(atol=1e-6): ...``. - Options - ------- + ``with CoreOptions(atol=1e-6): ...`` + + ******** + Options: + ******** + auto_tidyup : bool Whether to tidyup during sparse operations. auto_tidyup_dims : bool [False] Use auto tidyup dims on multiplication, tensor, etc. 
Without auto_tidyup_dims: + ``basis([2, 2]).dims == [[2, 2], [1, 1]]`` + With auto_tidyup_dims: - ``basis([2, 2]).dims == [[2, 2], [1]]`` - auto_herm : boolTrue - detect hermiticity + ``basis([2, 2]).dims == [[2, 2], [1]]`` atol : float {1e-12} General absolute tolerance @@ -107,13 +118,12 @@ class CoreOptions(QutipOptions): known to ``qutip.data.to`` is accepted. When ``None``, these functions will default to a sensible data type. """ + _options = { # use auto tidyup "auto_tidyup": True, # use auto tidyup dims on multiplication "auto_tidyup_dims": False, - # detect hermiticity - "auto_herm": True, # general absolute tolerance "atol": 1e-12, # general relative tolerance @@ -124,9 +134,61 @@ class CoreOptions(QutipOptions): "function_coefficient_style": "auto", # Default Qobj dtype for Qobj create function "default_dtype": None, + # Expect, trace, etc. will return real for hermitian matrices. + # Hermiticity checks can be slow, stop jitting, etc. + "auto_real_casting": True, } _settings_name = "core" + @overload + def __getitem__( + self, + key: Literal["auto_tidyup", "auto_tidyup_dims", "auto_real_casting"], + ) -> bool: ... + + @overload + def __getitem__( + self, key: Literal["atol", "rtol", "auto_tidyup_atol"] + ) -> float: ... + + @overload + def __getitem__( + self, key: Literal["function_coefficient_style"] + ) -> str: ... + + @overload + def __getitem__(self, key: Literal["default_dtype"]) -> str | None: ... + + def __getitem__(self, key: str) -> Any: + # Let the dict catch the KeyError + return self.options[key] + + @overload + def __setitem__( + self, + key: Literal["auto_tidyup", "auto_tidyup_dims", "auto_real_casting"], + value: bool, + ) -> None: ... + + @overload + def __setitem__( + self, key: Literal["atol", "rtol", "auto_tidyup_atol"], value: float + ) -> None: ... + + @overload + def __setitem__( + self, key: Literal["function_coefficient_style"], value: str + ) -> None: ... 
+ + @overload + def __setitem__( + self, key: Literal["default_dtype"], value: str | None + ) -> None: ... + + def __setitem__(self, key: str, value: Any) -> None: + # Let the dict catch the KeyError + self.options[key] = value + # Creating the instance of core options to use everywhere. settings.core = CoreOptions() diff --git a/qutip/core/properties.py b/qutip/core/properties.py new file mode 100644 index 0000000000..8e9ade0270 --- /dev/null +++ b/qutip/core/properties.py @@ -0,0 +1,35 @@ +from . import Qobj, QobjEvo + +__all__ = [ + 'isbra', 'isket', 'isoper', 'issuper', 'isoperbra', 'isoperket', 'isherm' +] + + +def isbra(x: Qobj | QobjEvo) -> bool: + return isinstance(x, (Qobj, QobjEvo)) and x.type in ['bra', 'scalar'] + + +def isket(x: Qobj | QobjEvo) -> bool: + return isinstance(x, (Qobj, QobjEvo)) and x.type in ['ket', 'scalar'] + + +def isoper(x: Qobj | QobjEvo) -> bool: + return isinstance(x, (Qobj, QobjEvo)) and x.type in ['oper', 'scalar'] + + +def isoperbra(x: Qobj | QobjEvo) -> bool: + return isinstance(x, (Qobj, QobjEvo)) and x.type in ['operator-bra'] + + +def isoperket(x: Qobj | QobjEvo) -> bool: + return isinstance(x, (Qobj, QobjEvo)) and x.type in ['operator-ket'] + + +def issuper(x: Qobj | QobjEvo) -> bool: + return isinstance(x, (Qobj, QobjEvo)) and x.type in ['super'] + + +def isherm(x: Qobj) -> bool: + if not isinstance(x, Qobj): + raise TypeError(f"Invalid input type, got {type(x)}, expected Qobj") + return x.isherm diff --git a/qutip/core/qobj.py b/qutip/core/qobj.py index 0b52ad6480..811477120e 100644 --- a/qutip/core/qobj.py +++ b/qutip/core/qobj.py @@ -1,26 +1,27 @@ """The Quantum Object (Qobj) class, for representing quantum states and operators, and related functions. 
""" - -__all__ = [ - 'Qobj', 'isbra', 'isket', 'isoper', 'issuper', 'isoperbra', 'isoperket', - 'isherm', 'ptrace', -] +from __future__ import annotations import functools import numbers import warnings - +from typing import Any, Literal import numpy as np +from numpy.typing import ArrayLike import scipy.sparse from .. import __version__ from ..settings import settings from . import data as _data +from qutip.typing import LayerType, DimensionLike +import qutip from .dimensions import ( enumerate_flat, collapse_dims_super, flatten, unflatten, Dimensions ) +__all__ = ['Qobj', 'ptrace'] + _NORM_FUNCTION_LOOKUP = { 'tr': _data.norm.trace, @@ -39,40 +40,6 @@ } -def isbra(x): - from .cy.qobjevo import QobjEvo - return isinstance(x, (Qobj, QobjEvo)) and x.type in ['bra', 'scalar'] - - -def isket(x): - from .cy.qobjevo import QobjEvo - return isinstance(x, (Qobj, QobjEvo)) and x.type in ['ket', 'scalar'] - - -def isoper(x): - from .cy.qobjevo import QobjEvo - return isinstance(x, (Qobj, QobjEvo)) and x.type in ['oper', 'scalar'] - - -def isoperbra(x): - from .cy.qobjevo import QobjEvo - return isinstance(x, (Qobj, QobjEvo)) and x.type in ['operator-bra'] - - -def isoperket(x): - from .cy.qobjevo import QobjEvo - return isinstance(x, (Qobj, QobjEvo)) and x.type in ['operator-ket'] - - -def issuper(x): - from .cy.qobjevo import QobjEvo - return isinstance(x, (Qobj, QobjEvo)) and x.type in ['super'] - - -def isherm(x): - return isinstance(x, Qobj) and x.isherm - - def _require_equal_type(method): """ Decorate a binary Qobj method to ensure both operands are Qobj and of the @@ -147,12 +114,10 @@ class Qobj: Parameters ---------- - inpt: array_like, data object or :obj:`.Qobj` + arg: array_like, data object or :obj:`.Qobj` Data for vector/matrix representation of the quantum object. dims: list Dimensions of object used for tensor products. - shape: list - Shape of underlying data structure (matrix shape). 
copy: bool Flag specifying whether Qobj should get a copy of the input data, or use the original. @@ -300,8 +265,15 @@ def _initialize_data(self, arg, dims, copy): raise ValueError('Provided dimensions do not match the data: ' + f"{self._dims.shape} vs {self._data.shape}") - def __init__(self, arg=None, dims=None, - copy=True, superrep=None, isherm=None, isunitary=None): + def __init__( + self, + arg: ArrayLike | Any = None, + dims: DimensionLike = None, + copy: bool = True, + superrep: str = None, + isherm: bool = None, + isunitary: bool = None + ): self._isherm = isherm self._isunitary = isunitary self._initialize_data(arg, dims, copy) @@ -309,7 +281,7 @@ def __init__(self, arg=None, dims=None, if superrep is not None: self.superrep = superrep - def copy(self): + def copy(self) -> Qobj: """Create identical copy""" return Qobj(arg=self._data, dims=self._dims, @@ -318,11 +290,11 @@ def copy(self): copy=True) @property - def dims(self): + def dims(self) -> list[list[int]] | list[list[list[int]]]: return self._dims.as_list() @dims.setter - def dims(self, dims): + def dims(self, dims: list[list[int]] | list[list[list[int]]] | Dimensions): dims = Dimensions(dims, rep=self.superrep) if dims.shape != self._data.shape: raise ValueError('Provided dimensions do not match the data: ' + @@ -330,27 +302,23 @@ def dims(self, dims): self._dims = dims @property - def type(self): + def type(self) -> str: return self._dims.type @property - def superrep(self): + def superrep(self) -> str: return self._dims.superrep @superrep.setter - def superrep(self, super_rep): + def superrep(self, super_rep: str): self._dims = self._dims.replace_superrep(super_rep) @property - def data(self): + def data(self) -> _data.Data: return self._data - @property - def dtype(self): - return type(self._data) - @data.setter - def data(self, data): + def data(self, data: _data.Data): if not isinstance(data, _data.Data): raise TypeError('Qobj data must be a data-layer format.') if self._dims.shape != 
data.shape: @@ -358,7 +326,11 @@ def data(self, data: _data.Data): f"{self._dims.shape} vs {data.shape}") self._data = data - def to(self, data_type): + @property + def dtype(self): + return type(self._data) + + def to(self, data_type: LayerType, copy: bool=False) -> Qobj: """ Convert the underlying data store of this `Qobj` into a different storage representation. @@ -371,36 +343,36 @@ def to(self, data_type): algorithms and operations may be faster or more accurate when using a more appropriate data store. - If the data store is already in the format requested, the function - returns `self`. Otherwise, it returns a copy of itself with the data - store in the new type. - Parameters ---------- - data_type : type - The data-layer type that the data of this :class:`Qobj` should be - converted to. + data_type : type, str + The data-layer type or its string alias that the data of this + :class:`Qobj` should be converted to. + + copy : bool + If the data store is already in the format requested, whether the + function should return `self` or a copy. Returns ------- Qobj - A new :class:`Qobj` if a type conversion took place with the data - stored in the requested format, or `self` if not. + A :class:`Qobj` with the data stored in the requested format. 
""" - try: - converter = _data.to[data_type] - except (KeyError, TypeError): - raise ValueError("Unknown conversion type: " + str(data_type)) - if type(self._data) is data_type: + data_type = _data.to.parse(data_type) + if type(self._data) is data_type and copy: + return self.copy() + elif type(self._data) is data_type: return self - return Qobj(converter(self._data), - dims=self._dims, - isherm=self._isherm, - isunitary=self._isunitary, - copy=False) + return Qobj( + _data.to(data_type, self._data), + dims=self._dims, + isherm=self._isherm, + isunitary=self._isunitary, + copy=False + ) @_require_equal_type - def __add__(self, other): + def __add__(self, other: Qobj | complex) -> Qobj: if other == 0: return self.copy() return Qobj(_data.add(self._data, other._data), @@ -408,11 +380,11 @@ def __add__(self, other): isherm=(self._isherm and other._isherm) or None, copy=False) - def __radd__(self, other): + def __radd__(self, other: Qobj | complex) -> Qobj: return self.__add__(other) @_require_equal_type - def __sub__(self, other): + def __sub__(self, other: Qobj | complex) -> Qobj: if other == 0: return self.copy() return Qobj(_data.sub(self._data, other._data), @@ -420,10 +392,10 @@ def __sub__(self, other): isherm=(self._isherm and other._isherm) or None, copy=False) - def __rsub__(self, other): + def __rsub__(self, other: Qobj | complex) -> Qobj: return self.__neg__().__add__(other) - def __mul__(self, other): + def __mul__(self, other: complex) -> Qobj: """ If other is a Qobj, we dispatch to __matmul__. If not, we check that other is a valid complex scalar, i.e., we can do @@ -457,12 +429,12 @@ def __mul__(self, other): isunitary=isunitary, copy=False) - def __rmul__(self, other): + def __rmul__(self, other: complex) -> Qobj: # Shouldn't be here unless `other.__mul__` has already been tried, so # we _shouldn't_ check that `other` is `Qobj`. 
return self.__mul__(other) - def __matmul__(self, other): + def __matmul__(self, other: Qobj) -> Qobj: if not isinstance(other, Qobj): try: other = Qobj(other) @@ -479,10 +451,10 @@ def __matmul__(self, other): copy=False ) - def __truediv__(self, other): + def __truediv__(self, other: complex) -> Qobj: return self.__mul__(1 / other) - def __neg__(self): + def __neg__(self) -> Qobj: return Qobj(_data.neg(self._data), dims=self._dims, isherm=self._isherm, @@ -505,15 +477,15 @@ def __getitem__(self, ind): pass return data.to_array()[ind] - def __eq__(self, other): + def __eq__(self, other) -> bool: if self is other: return True if not isinstance(other, Qobj) or self._dims != other._dims: return False - return _data.iszero(_data.sub(self._data, other._data), - tol=settings.core['atol']) + # isequal uses both atol and rtol from settings.core + return _data.isequal(self._data, other._data) - def __pow__(self, n, m=None): # calculates powers of Qobj + def __pow__(self, n: int, m=None) -> Qobj: # calculates powers of Qobj if ( self.type not in ('oper', 'super') or self._dims[0] != self._dims[1] @@ -533,6 +505,7 @@ def _str_header(self): "Quantum object: dims=" + str(self.dims), "shape=" + str(self._data.shape), "type=" + repr(self.type), + "dtype=" + self.dtype.__name__, ]) if self.type in ('oper', 'super'): out += ", isherm=" + str(self.isherm) @@ -559,7 +532,7 @@ def __repr__(self): # so we simply return the informal __str__ representation instead.) 
return self.__str__() - def __call__(self, other): + def __call__(self, other: Qobj) -> Qobj: """ Acts this Qobj on another Qobj either by left-multiplication, or by vectorization and devectorization, as @@ -572,7 +545,7 @@ def __call__(self, other): if self.issuper: if other.isket: other = other.proj() - return vector_to_operator(self @ operator_to_vector(other)) + return qutip.vector_to_operator(self @ qutip.operator_to_vector(other)) return self.__matmul__(other) def __getstate__(self): @@ -614,14 +587,14 @@ def _repr_latex_(self): data += r'\end{array}\right)$$' return self._str_header() + data - def __and__(self, other): + def __and__(self, other: Qobj) -> Qobj: """ Syntax shortcut for tensor: A & B ==> tensor(A, B) """ - return tensor(self, other) + return qutip.tensor(self, other) - def dag(self): + def dag(self) -> Qobj: """Get the Hermitian adjoint of the quantum object.""" if self._isherm: return self.copy() @@ -631,7 +604,7 @@ def dag(self): isunitary=self._isunitary, copy=False) - def conj(self): + def conj(self) -> Qobj: """Get the element-wise conjugation of the quantum object.""" return Qobj(_data.conj(self._data), dims=self._dims, @@ -639,7 +612,7 @@ def conj(self): isunitary=self._isunitary, copy=False) - def trans(self): + def trans(self) -> Qobj: """Get the matrix transpose of the quantum operator. Returns @@ -653,7 +626,7 @@ def trans(self): isunitary=self._isunitary, copy=False) - def dual_chan(self): + def dual_chan(self) -> Qobj: """Dual channel of quantum object representing a completely positive map. """ @@ -661,16 +634,20 @@ def dual_chan(self): # is only valid for completely positive maps. 
if not self.iscp: raise ValueError("Dual channels are only implemented for CP maps.") - J = to_choi(self) + J = qutip.to_choi(self) tensor_idxs = enumerate_flat(J.dims) - J_dual = tensor_swap(J, *( + J_dual = qutip.tensor_swap(J, *( list(zip(tensor_idxs[0][1], tensor_idxs[0][0])) + list(zip(tensor_idxs[1][1], tensor_idxs[1][0])) )).trans() J_dual.superrep = 'choi' return J_dual - def norm(self, norm=None, kwargs=None): + def norm( + self, + norm: Literal["l2", "max", "fro", "tr", "one"] = None, + kwargs: dict[str, Any] = None + ) -> float: """ Norm of a quantum object. @@ -708,7 +685,7 @@ def norm(self, norm=None, kwargs=None): kwargs = kwargs or {} return _NORM_FUNCTION_LOOKUP[norm](self._data, **kwargs) - def proj(self): + def proj(self) -> Qobj: """Form the projector from a given ket or bra vector. Parameters @@ -730,7 +707,7 @@ def proj(self): isherm=True, copy=False) - def tr(self): + def tr(self) -> complex: """Trace of a quantum object. Returns @@ -742,11 +719,11 @@ def tr(self): out = _data.trace(self._data) # This ensures that trace can return something that is not a number such # as a `tensorflow.Tensor` in qutip-tensorflow. - return out.real if (self.isherm - and hasattr(out, "real") - ) else out + if settings.core["auto_real_casting"] and self.isherm: + out = out.real + return out - def purity(self): + def purity(self) -> complex: """Calculate purity of a quantum object. Returns @@ -763,7 +740,11 @@ def purity(self): return _data.norm.l2(self._data)**2 return _data.trace(_data.matmul(self._data, self._data)).real - def full(self, order='C', squeeze=False): + def full( + self, + order: Literal['C', 'F'] = 'C', + squeeze: bool = False + ) -> np.ndarray: """Dense array from quantum object. 
Parameters @@ -781,7 +762,7 @@ def full(self, order='C', squeeze=False): out = np.asarray(self.data.to_array(), order=order) return out.squeeze() if squeeze else out - def data_as(self, format=None, copy=True): + def data_as(self, format: str = None, copy: bool = True) -> Any: """Matrix from quantum object. Parameters @@ -801,7 +782,7 @@ def data_as(self, format=None, copy=True): """ return _data.extract(self._data, format, copy) - def diag(self): + def diag(self) -> np.ndarray: """Diagonal elements of quantum object. Returns @@ -812,12 +793,11 @@ def diag(self): """ # TODO: add a `diagonal` method to the data layer? out = _data.to(_data.CSR, self.data).as_scipy().diagonal() - if np.any(np.imag(out) > settings.core['atol']) or not self.isherm: - return out - else: - return np.real(out) + if settings.core["auto_real_casting"] and self.isherm: + out = np.real(out) + return out - def expm(self, dtype=_data.Dense): + def expm(self, dtype: LayerType = None) -> Qobj: """Matrix exponential of quantum operator. Input operator must be square. @@ -825,9 +805,7 @@ def expm(self, dtype=_data.Dense): Parameters ---------- dtype : type - The data-layer type that should be output. As the matrix - exponential is almost dense, this defaults to outputting dense - matrices. + The data-layer type that should be output. Returns ------- @@ -841,12 +819,14 @@ def expm(self, dtype=_data.Dense): """ if not self._dims.issquare: raise TypeError("expm is only valid for square operators") + if dtype is None and isinstance(self.data, (_data.CSR, _data.Dia)): + dtype = _data.Dense return Qobj(_data.expm(self._data, dtype=dtype), dims=self._dims, isherm=self._isherm, copy=False) - def logm(self): + def logm(self) -> Qobj: """Matrix logarithm of quantum operator. Input operator must be square. @@ -868,7 +848,7 @@ def logm(self): isherm=self._isherm, copy=False) - def check_herm(self): + def check_herm(self) -> bool: """Check if the quantum object is hermitian. 
Returns @@ -879,7 +859,12 @@ def check_herm(self): self._isherm = None return self.isherm - def sqrtm(self, sparse=False, tol=0, maxiter=100000): + def sqrtm( + self, + sparse: bool = False, + tol: float = 0, + maxiter: int = 100000 + ) -> Qobj: """ Sqrt of a quantum operator. Operator must be square. @@ -909,26 +894,11 @@ def sqrtm(self, sparse=False, tol=0, maxiter=100000): """ if self._dims[0] != self._dims[1]: raise TypeError('sqrt only valid on square matrices') - if isinstance(self.data, _data.CSR) and sparse: - evals, evecs = _data.eigs_csr(self.data, - isherm=self._isherm, - tol=tol, maxiter=maxiter) - elif isinstance(self.data, _data.CSR): - evals, evecs = _data.eigs(_data.to(_data.Dense, self.data), - isherm=self._isherm) - else: - evals, evecs = _data.eigs(self.data, isherm=self._isherm) - - dV = _data.diag([np.sqrt(evals, dtype=complex)], 0) - if self.isherm: - spDv = _data.matmul(dV, evecs.conj().transpose()) - else: - spDv = _data.matmul(dV, _data.inv(evecs)) - return Qobj(_data.matmul(evecs, spDv), + return Qobj(_data.sqrtm(self._data), dims=self._dims, copy=False) - def cosm(self): + def cosm(self) -> Qobj: """Cosine of a quantum operator. Operator must be square. @@ -952,7 +922,7 @@ def cosm(self): raise TypeError('invalid operand for matrix cosine') return 0.5 * ((1j * self).expm() + (-1j * self).expm()) - def sinm(self): + def sinm(self) -> Qobj: """Sine of a quantum operator. Operator must be square. @@ -975,7 +945,7 @@ def sinm(self): raise TypeError('invalid operand for matrix sine') return -0.5j * ((1j * self).expm() - (-1j * self).expm()) - def inv(self, sparse=False): + def inv(self, sparse: bool = False) -> Qobj: """Matrix inverse of a quantum operator Operator must be square. 
@@ -1001,7 +971,12 @@ def inv(self, sparse=False): dims=[self._dims[1], self._dims[0]], copy=False) - def unit(self, inplace=False, norm=None, kwargs=None): + def unit( + self, + inplace: bool = False, + norm: Literal["l2", "max", "fro", "tr", "one"] = None, + kwargs: dict[str, Any] = None + ) -> Qobj: """ Operator or state normalized to unity. Uses norm from Qobj.norm(). @@ -1020,19 +995,19 @@ def unit(self, inplace=False, norm=None, kwargs=None): obj : :class:`.Qobj` Normalized quantum object. Will be the `self` object if in place. """ - norm = self.norm(norm=norm, kwargs=kwargs) + norm_ = self.norm(norm=norm, kwargs=kwargs) if inplace: - self.data = _data.mul(self.data, 1 / norm) - self._isherm = self._isherm if norm.imag == 0 else None + self.data = _data.mul(self.data, 1 / norm_) + self._isherm = self._isherm if norm_.imag == 0 else None self._isunitary = (self._isunitary - if abs(norm) - 1 < settings.core['atol'] + if abs(norm_) - 1 < settings.core['atol'] else None) out = self else: - out = self / norm + out = self / norm_ return out - def ptrace(self, sel, dtype=None): + def ptrace(self, sel: int | list[int], dtype: LayerType = None) -> Qobj: """ Take the partial trace of the quantum object leaving the selected subspaces. 
In other words, trace out all subspaces which are _not_ @@ -1081,10 +1056,10 @@ def ptrace(self, sel, dtype=None): sel = [sel] if self.isoperket: dims = self.dims[0] - data = vector_to_operator(self).data + data = qutip.vector_to_operator(self).data elif self.isoperbra: dims = self.dims[1] - data = vector_to_operator(self.dag()).data + data = qutip.vector_to_operator(self.dag()).data elif self.issuper or self.isoper: dims = self.dims data = self.data @@ -1098,12 +1073,12 @@ def ptrace(self, sel, dtype=None): new_dims = [[dims[x] for x in sel]] * 2 if sel else None out = Qobj(new_data, dims=new_dims, copy=False) if self.isoperket: - return operator_to_vector(out) + return qutip.operator_to_vector(out) if self.isoperbra: - return operator_to_vector(out).dag() + return qutip.operator_to_vector(out).dag() return out - def contract(self, inplace=False): + def contract(self, inplace: bool = False) -> Qobj: """ Contract subspaces of the tensor structure which are 1D. Not defined on superoperators. If all dimensions are scalar, a Qobj of dimension @@ -1154,7 +1129,7 @@ def contract(self, inplace=False): return self return Qobj(self.data.copy(), dims=dims, copy=False) - def permute(self, order): + def permute(self, order: list) -> Qobj: """ Permute the tensor structure of a quantum object. For example, @@ -1242,7 +1217,7 @@ def permute(self, order): superrep=self.superrep, copy=False) - def tidyup(self, atol=None): + def tidyup(self, atol: float = None) -> Qobj: """ Removes small elements from the quantum object. @@ -1261,7 +1236,11 @@ def tidyup(self, atol=None): self.data = _data.tidyup(self.data, atol) return self - def transform(self, inpt, inverse=False): + def transform( + self, + inpt: list[Qobj] | ArrayLike, + inverse: bool = False + ) -> Qobj: """Basis transform defined by input array. 
Input array can be a ``matrix`` defining the transformation, @@ -1318,7 +1297,7 @@ def transform(self, inpt, inverse=False): superrep=self.superrep, copy=False) - def trunc_neg(self, method="clip"): + def trunc_neg(self, method: Literal["clip", "sgs"] = "clip") -> Qobj: """Truncates negative eigenvalues and renormalizes. Returns a new Qobj by removing the negative eigenvalues @@ -1375,7 +1354,7 @@ def trunc_neg(self, method="clip"): out_data = _data.mul(out_data, 1/_data.norm.trace(out_data)) return Qobj(out_data, dims=self._dims, isherm=True, copy=False) - def matrix_element(self, bra, ket): + def matrix_element(self, bra: Qobj, ket: Qobj) -> Qobj: """Calculates a matrix element. Gives the matrix element for the quantum object sandwiched between a @@ -1410,7 +1389,7 @@ def matrix_element(self, bra, ket): right = right.adjoint() return _data.inner_op(left, op, right, bra.isket) - def overlap(self, other): + def overlap(self, other: Qobj) -> complex: """ Overlap between two state vectors or two operators. @@ -1463,8 +1442,15 @@ def overlap(self, other): out = np.conj(out) return out - def eigenstates(self, sparse=False, sort='low', eigvals=0, - tol=0, maxiter=100000, phase_fix=None): + def eigenstates( + self, + sparse: bool = False, + sort: Literal["low", "high"] = 'low', + eigvals: int = 0, + tol: float = 0, + maxiter: int = 100000, + phase_fix: int = None + ) -> tuple[np.ndarray, list[Qobj]]: """Eigenstates and eigenenergies. Eigenstates and eigenenergies are defined for operators and @@ -1536,8 +1522,14 @@ def eigenstates(self, sparse=False, sort='low', eigvals=0, for ket in ekets]) return evals, ekets / norms * phase - def eigenenergies(self, sparse=False, sort='low', - eigvals=0, tol=0, maxiter=100000): + def eigenenergies( + self, + sparse: bool = False, + sort: Literal["low", "high"] = 'low', + eigvals: int = 0, + tol: float = 0, + maxiter: int = 100000, + ) -> np.ndarray: """Eigenenergies of a quantum object. 
Eigenenergies (eigenvalues) are defined for operators or superoperators @@ -1584,7 +1576,13 @@ def eigenenergies(self, sparse=False, sort='low', vecs=False, isherm=self._isherm, sort=sort, eigvals=eigvals) - def groundstate(self, sparse=False, tol=0, maxiter=100000, safe=True): + def groundstate( + self, + sparse: bool = False, + tol: float = 0, + maxiter: int = 100000, + safe: bool = True + ) -> tuple[float, Qobj]: """Ground state Eigenvalue and Eigenvector. Defined for quantum operators or superoperators only. @@ -1625,7 +1623,7 @@ def groundstate(self, sparse=False, tol=0, maxiter=100000, safe=True): warnings.warn("Ground state may be degenerate.", UserWarning) return evals[0], evecs[0] - def dnorm(self, B=None): + def dnorm(self, B: Qobj = None) -> float: """Calculates the diamond norm, or the diamond distance to another operator. @@ -1642,14 +1640,14 @@ def dnorm(self, B=None): from this operator to B. """ - return mts.dnorm(self, B) + return qutip.dnorm(self, B) @property - def ishp(self): + def ishp(self) -> bool: # FIXME: this needs to be cached in the same ways as isherm. if self.type in ["super", "oper"]: try: - J = to_choi(self) + J = qutip.to_choi(self) return J.isherm except: return False @@ -1657,14 +1655,14 @@ def ishp(self): return False @property - def iscp(self): + def iscp(self) -> bool: # FIXME: this needs to be cached in the same ways as isherm. if self.type not in ["super", "oper"]: return False # We can test with either Choi or chi, since the basis # transformation between them is unitary and hence preserves # the CP and TP conditions. - J = self if self.superrep in ('choi', 'chi') else to_choi(self) + J = self if self.superrep in ('choi', 'chi') else qutip.to_choi(self) # If J isn't hermitian, then that could indicate either that J is not # normal, or is normal, but has complex eigenvalues. 
In either case, # it makes no sense to then demand that the eigenvalues be @@ -1672,7 +1670,7 @@ def iscp(self): return J.isherm and np.all(J.eigenenergies() >= -settings.core['atol']) @property - def istp(self): + def istp(self) -> bool: if self.type not in ['super', 'oper']: return False # Normalize to a super of type choi or chi. @@ -1682,7 +1680,7 @@ def istp(self): if self.issuper and self.superrep in ('choi', 'chi'): qobj = self else: - qobj = to_choi(self) + qobj = qutip.to_choi(self) # Possibly collapse dims. if any([len(index) > 1 for super_index in qobj.dims @@ -1700,22 +1698,22 @@ def istp(self): atol=settings.core['atol']) @property - def iscptp(self): + def iscptp(self) -> bool: if not (self.issuper or self.isoper): return False reps = ('choi', 'chi') - q_oper = to_choi(self) if self.superrep not in reps else self + q_oper = qutip.to_choi(self) if self.superrep not in reps else self return q_oper.iscp and q_oper.istp @property - def isherm(self): + def isherm(self) -> bool: if self._isherm is not None: return self._isherm self._isherm = _data.isherm(self._data) return self._isherm @isherm.setter - def isherm(self, isherm): + def isherm(self, isherm: bool): self._isherm = isherm def _calculate_isunitary(self): @@ -1730,49 +1728,49 @@ def _calculate_isunitary(self): tol=settings.core['atol']) @property - def isunitary(self): + def isunitary(self) -> bool: if self._isunitary is not None: return self._isunitary self._isunitary = self._calculate_isunitary() return self._isunitary @property - def shape(self): + def shape(self) -> tuple[int, int]: """Return the shape of the Qobj data.""" return self._data.shape @property - def isoper(self): + def isoper(self) -> bool: """Indicates if the Qobj represents an operator.""" return self._dims.type in ['oper', 'scalar'] @property - def isbra(self): + def isbra(self) -> bool: """Indicates if the Qobj represents a bra state.""" return self._dims.type in ['bra', 'scalar'] @property - def isket(self): + def isket(self) 
-> bool: """Indicates if the Qobj represents a ket state.""" return self._dims.type in ['ket', 'scalar'] @property - def issuper(self): + def issuper(self) -> bool: """Indicates if the Qobj represents a superoperator.""" return self._dims.type == 'super' @property - def isoperket(self): + def isoperket(self) -> bool: """Indicates if the Qobj represents a operator-ket state.""" return self._dims.type == 'operator-ket' @property - def isoperbra(self): + def isoperbra(self) -> bool: """Indicates if the Qobj represents a operator-bra state.""" return self._dims.type == 'operator-bra' -def ptrace(Q, sel): +def ptrace(Q: Qobj, sel: int | list[int]) -> Qobj: """ Partial trace of the Qobj with selected components remaining. @@ -1797,11 +1795,3 @@ def ptrace(Q, sel): if not isinstance(Q, Qobj): raise TypeError("Input is not a quantum object") return Q.ptrace(sel) - - -# TRAILING IMPORTS -# We do a few imports here to avoid circular dependencies. -from qutip.core.superop_reps import to_choi -from qutip.core.superoperator import vector_to_operator, operator_to_vector -from qutip.core.tensor import tensor_swap, tensor -from qutip.core import metrics as mts diff --git a/qutip/core/states.py b/qutip/core/states.py index 2075dfe7b6..99b305b5e3 100644 --- a/qutip/core/states.py +++ b/qutip/core/states.py @@ -9,10 +9,10 @@ import itertools import numbers import warnings - +from collections.abc import Iterator +from typing import Literal import numpy as np import scipy.sparse as sp -import itertools from . import data as _data from .qobj import Qobj @@ -20,6 +20,7 @@ from .tensor import tensor from .dimensions import Space from .. 
import settings +from ..typing import SpaceLike, LayerType def _promote_to_zero_list(arg, length): @@ -60,7 +61,13 @@ def _to_space(dimensions): return Space([dimensions]) -def basis(dimensions, n=None, offset=None, *, dtype=None): +def basis( + dimensions: SpaceLike, + n: int | list[int] = None, + offset: int | list[int] = None, + *, + dtype: LayerType = None, +) -> Qobj: """Generates the vector representation of a Fock state. Parameters @@ -162,7 +169,7 @@ def basis(dimensions, n=None, offset=None, *, dtype=None): copy=False) -def qutrit_basis(*, dtype=None): +def qutrit_basis(*, dtype: LayerType = None) -> list[Qobj]: """Basis states for a three level system (qutrit) dtype : type or str, optional @@ -176,8 +183,7 @@ def qutrit_basis(*, dtype=None): """ dtype = dtype or settings.core["default_dtype"] or _data.Dense - out = np.empty((3,), dtype=object) - out[:] = [ + out = [ basis(3, 0, dtype=dtype), basis(3, 1, dtype=dtype), basis(3, 2, dtype=dtype), @@ -188,7 +194,14 @@ def qutrit_basis(*, dtype=None): _COHERENT_METHODS = ('operator', 'analytic') -def coherent(N, alpha, offset=0, method=None, *, dtype=None): +def coherent( + N: int, + alpha: float, + offset: int = 0, + method: str = None, + *, + dtype: LayerType = None, +) -> Qobj: """Generates a coherent state with eigenvalue alpha. Constructed using displacement operator on vacuum state. @@ -255,7 +268,7 @@ def coherent(N, alpha, offset=0, method=None, *, dtype=None): "The method 'operator' does not support offset != 0. Please" " select another method or set the offset to zero." 
) - return (displace(N, alpha, dtype=dtype) * basis(N, 0)).to(dtype) + return (displace(N, alpha, dtype=dtype) @ basis(N, 0)).to(dtype) elif method == "analytic": sqrtn = np.sqrt(np.arange(offset, offset+N, dtype=complex)) @@ -273,7 +286,14 @@ def coherent(N, alpha, offset=0, method=None, *, dtype=None): ) -def coherent_dm(N, alpha, offset=0, method='operator', *, dtype=None): +def coherent_dm( + N: int, + alpha: float, + offset: int = 0, + method: str = None, + *, + dtype: LayerType = None, +) -> Qobj: """Density matrix representation of a coherent state. Constructed via outer product of :func:`coherent` @@ -332,7 +352,13 @@ def coherent_dm(N, alpha, offset=0, method='operator', *, dtype=None): ).proj().to(dtype) -def fock_dm(dimensions, n=None, offset=None, *, dtype=None): +def fock_dm( + dimensions: int | list[int] | Space, + n: int | list[int] = None, + offset: int | list[int] = None, + *, + dtype: LayerType = None, +) -> Qobj: """Density matrix representation of a Fock state Constructed via outer product of :func:`basis`. @@ -377,7 +403,13 @@ def fock_dm(dimensions, n=None, offset=None, *, dtype=None): return basis(dimensions, n, offset=offset, dtype=dtype).proj().to(dtype) -def fock(dimensions, n=None, offset=None, *, dtype=None): +def fock( + dimensions: SpaceLike, + n: int | list[int] = None, + offset: int | list[int] = None, + *, + dtype: LayerType = None, +) -> Qobj: """Bosonic Fock (number) state. Same as :func:`basis`. 
@@ -420,7 +452,13 @@ def fock(dimensions, n=None, offset=None, *, dtype=None): return basis(dimensions, n, offset=offset, dtype=dtype) -def thermal_dm(N, n, method='operator', *, dtype=None): +def thermal_dm( + N: int, + n: float, + method: Literal['operator', 'analytic'] = 'operator', + *, + dtype: LayerType = None, +) -> Qobj: """Density matrix for a thermal state of n particles Parameters @@ -499,7 +537,11 @@ def thermal_dm(N, n, method='operator', *, dtype=None): return out -def maximally_mixed_dm(dimensions, *, dtype=None): +def maximally_mixed_dm( + dimensions: SpaceLike, + *, + dtype: LayerType = None +) -> Qobj: """ Returns the maximally mixed density matrix for a Hilbert space of dimension N. @@ -528,7 +570,7 @@ def maximally_mixed_dm(dimensions, *, dtype=None): isherm=True, isunitary=(N == 1), copy=False) -def ket2dm(Q): +def ket2dm(Q: Qobj) -> Qobj: """ Takes input ket or bra vector and returns density matrix formed by outer product. This is completely identical to calling ``Q.proj()``. @@ -560,7 +602,14 @@ def ket2dm(Q): raise TypeError("Input is not a ket or bra vector.") -def projection(dimensions, n, m, offset=None, *, dtype=None): +def projection( + dimensions: int | list[int], + n: int | list[int], + m: int | list[int], + offset: int | list[int] = None, + *, + dtype: LayerType = None, +) -> Qobj: r""" The projection operator that projects state :math:`\lvert m\rangle` on state :math:`\lvert n\rangle`. @@ -571,7 +620,7 @@ def projection(dimensions, n, m, offset=None, *, dtype=None): Number of basis states in Hilbert space. If a list, then the resultant object will be a tensor product over spaces with those dimensions. - n, m : float + n, m : int The number states in the projection. 
offset : int, default: 0 @@ -594,7 +643,7 @@ def projection(dimensions, n, m, offset=None, *, dtype=None): ).to(dtype) -def qstate(string, *, dtype=None): +def qstate(string: str, *, dtype: LayerType = None) -> Qobj: r"""Creates a tensor product for a set of qubits in either the 'up' :math:`\lvert0\rangle` or 'down' :math:`\lvert1\rangle` state. @@ -652,14 +701,19 @@ def qstate(string, *, dtype=None): } -def _character_to_qudit(x): +def _character_to_qudit(x: int | str) -> int: """ Converts a character representing a one-particle state into int. """ return _qubit_dict[x] if x in _qubit_dict else int(x) -def ket(seq, dim=2, *, dtype=None): +def ket( + seq: list[int | str] | str, + dim: int | list[int] = 2, + *, + dtype: LayerType = None, +) -> Qobj: """ Produces a multiparticle ket state for a list or string, where each element stands for state of the respective particle. @@ -743,7 +797,12 @@ def ket(seq, dim=2, *, dtype=None): return basis(dim, ns, dtype=dtype) -def bra(seq, dim=2, *, dtype=None): +def bra( + seq: list[int | str] | str, + dim: int | list[int] = 2, + *, + dtype: LayerType = None, +) -> Qobj: """ Produces a multiparticle bra state for a list or string, where each element stands for state of the respective particle. @@ -801,7 +860,10 @@ def bra(seq, dim=2, *, dtype=None): return ket(seq, dim=dim, dtype=dtype).dag() -def state_number_enumerate(dims, excitations=None): +def state_number_enumerate( + dims: list[int], + excitations: int = None +) -> Iterator[tuple]: """ An iterator that enumerates all the state number tuples (quantum numbers of the form (n1, n2, n3, ...)) for a system with dimensions given by dims. @@ -817,7 +879,7 @@ def state_number_enumerate(dims, excitations=None): Parameters ---------- - dims : list or array + dims : list The quantum state dimensions array, as it would appear in a Qobj. 
excitations : integer, optional @@ -859,7 +921,10 @@ def state_number_enumerate(dims, excitations=None): state = state[:idx] + (state[idx]+1, 0) + state[idx+2:] -def state_number_index(dims, state): +def state_number_index( + dims: list[int], + state: list[int], +) -> int: """ Return the index of a quantum state corresponding to state, given a system with dimensions given by dims. @@ -871,7 +936,7 @@ def state_number_index(dims, state): Parameters ---------- - dims : list or array + dims : list The quantum state dimensions array, as it would appear in a Qobj. state : list @@ -887,7 +952,10 @@ def state_number_index(dims, state): return np.ravel_multi_index(state, dims) -def state_index_number(dims, index): +def state_index_number( + dims: list[int], + index: int, +) -> tuple: """ Return a quantum number representation given a state index, for a system of composite structure defined by dims. @@ -915,7 +983,12 @@ def state_index_number(dims, index): return np.unravel_index(index, dims) -def state_number_qobj(dims, state, *, dtype=None): +def state_number_qobj( + dims: SpaceLike, + state: int | list[int] = None, + *, + dtype: LayerType = None, +) -> Qobj: """ Return a Qobj representation of a quantum state specified by the state array `state`. @@ -961,7 +1034,13 @@ def state_number_qobj(dims, state, *, dtype=None): return basis(dims, state, dtype=dtype) -def phase_basis(N, m, phi0=0, *, dtype=None): +def phase_basis( + N: int, + m: int, + phi0: float = 0, + *, + dtype: LayerType = None, +) -> Qobj: """ Basis vector for the mth phase of the Pegg-Barnett phase operator. @@ -999,7 +1078,7 @@ def phase_basis(N, m, phi0=0, *, dtype=None): return Qobj(data, dims=[[N], [1]], copy=False).to(dtype) -def zero_ket(dimensions, *, dtype=None): +def zero_ket(dimensions: SpaceLike, *, dtype: LayerType = None) -> Qobj: """ Creates the zero ket vector with shape Nx1 and dimensions `dims`. 
@@ -1026,7 +1105,13 @@ def zero_ket(dimensions, *, dtype=None): dims=[dimensions, dimensions.scalar_like()], copy=False) -def spin_state(j, m, type='ket', *, dtype=None): +def spin_state( + j: float, + m: float, + type: Literal["ket", "bra", "dm"] = "ket", + *, + dtype: LayerType = None, +) -> Qobj: r"""Generates the spin state :math:`\lvert j, m\rangle`, i.e. the eigenstate of the spin-j Sz operator with eigenvalue m. @@ -1035,7 +1120,7 @@ def spin_state(j, m, type='ket', *, dtype=None): j : float The spin of the state (). - m : int + m : float Eigenvalue of the spin-j Sz operator. type : string {'ket', 'bra', 'dm'}, default: 'ket' @@ -1063,7 +1148,14 @@ def spin_state(j, m, type='ket', *, dtype=None): raise ValueError(f"Invalid value keyword argument type='{type}'") -def spin_coherent(j, theta, phi, type='ket', *, dtype=None): +def spin_coherent( + j: float, + theta: float, + phi: float, + type: Literal["ket", "bra", "dm"] = "ket", + *, + dtype: LayerType = None, +) -> Qobj: r"""Generate the coherent spin state :math:`\lvert \theta, \phi\rangle`. 
Parameters @@ -1112,7 +1204,11 @@ def spin_coherent(j, theta, phi, type='ket', *, dtype=None): '11': np.sqrt(0.5) * (basis([2, 2], [0, 1]) - basis([2, 2], [1, 0])), } -def bell_state(state='00', *, dtype=None): +def bell_state( + state: Literal["00", "01", "10", "11"] = "00", + *, + dtype: LayerType = None, +) -> Qobj: r""" Returns the selected Bell state: @@ -1148,7 +1244,7 @@ def bell_state(state='00', *, dtype=None): return _BELL_STATES[state].copy().to(dtype) -def singlet_state(*, dtype=None): +def singlet_state(*, dtype: LayerType = None) -> Qobj: r""" Returns the two particle singlet-state: @@ -1173,7 +1269,7 @@ def singlet_state(*, dtype=None): return bell_state('11').to(dtype) -def triplet_states(*, dtype=None): +def triplet_states(*, dtype: LayerType = None) -> list[Qobj]: r""" Returns a list of the two particle triplet-states: @@ -1207,7 +1303,7 @@ def triplet_states(*, dtype=None): ] -def w_state(N_qubit, *, dtype=None): +def w_state(N_qubit: int, *, dtype: LayerType = None) -> Qobj: """ Returns the N-qubit W-state: ``[ |100..0> + |010..0> + |001..0> + ... |000..1> ] / sqrt(n)`` @@ -1236,7 +1332,7 @@ def w_state(N_qubit, *, dtype=None): return (np.sqrt(1 / N_qubit) * state).to(dtype) -def ghz_state(N_qubit, *, dtype=None): +def ghz_state(N_qubit: int, *, dtype: LayerType = None) -> Qobj: """ Returns the N-qubit GHZ-state: ``[ |00...00> + |11...11> ] / sqrt(2)`` diff --git a/qutip/core/subsystem_apply.py b/qutip/core/subsystem_apply.py index e7bea30a2f..123b6ea2d8 100644 --- a/qutip/core/subsystem_apply.py +++ b/qutip/core/subsystem_apply.py @@ -13,7 +13,12 @@ from . 
import data as _data -def subsystem_apply(state, channel, mask, reference=False): +def subsystem_apply( + state: Qobj, + channel: Qobj, + mask: list[bool], + reference: bool=False +)-> Qobj: """ Returns the result of applying the propagator `channel` to the subsystems indicated in `mask`, which comprise the density operator diff --git a/qutip/core/superop_reps.py b/qutip/core/superop_reps.py index 9a5d7222c3..4413cfafb5 100644 --- a/qutip/core/superop_reps.py +++ b/qutip/core/superop_reps.py @@ -81,7 +81,7 @@ def _nq(dims): return nq -def isqubitdims(dims): +def isqubitdims(dims: list[list[int]] | list[list[list[int]]]) -> bool: """ Checks whether all entries in a dims list are integer powers of 2. @@ -138,7 +138,7 @@ def _choi_to_kraus(q_oper, tol=1e-9): # Individual conversions from Kraus operators are public because the output # list of Kraus operators is not itself a quantum object. -def kraus_to_choi(kraus_ops): +def kraus_to_choi(kraus_ops: list[Qobj]) -> Qobj: r""" Convert a list of Kraus operators into Choi representation of the channel. @@ -176,7 +176,7 @@ def kraus_to_choi(kraus_ops): return Qobj(choi_array, choi_dims, superrep="choi", copy=False) -def kraus_to_super(kraus_list): +def kraus_to_super(kraus_list: list[Qobj]) -> Qobj: """ Convert a list of Kraus operators to a superoperator. 
@@ -346,7 +346,7 @@ def _choi_to_stinespring(q_oper, threshold=1e-10): return A, B -def to_choi(q_oper): +def to_choi(q_oper: Qobj) -> Qobj: """ Converts a Qobj representing a quantum map to the Choi representation, such that the trace of the returned operator is equal to the dimension @@ -389,7 +389,7 @@ def to_choi(q_oper): ) -def to_chi(q_oper): +def to_chi(q_oper: Qobj) -> Qobj: """ Converts a Qobj representing a quantum map to a representation as a chi (process) matrix in the Pauli basis, such that the trace of the returned @@ -432,7 +432,7 @@ def to_chi(q_oper): ) -def to_super(q_oper): +def to_super(q_oper: Qobj) -> Qobj: """ Converts a Qobj representing a quantum map to the supermatrix (Liouville) representation. @@ -476,7 +476,7 @@ def to_super(q_oper): ) -def to_kraus(q_oper, tol=1e-9): +def to_kraus(q_oper: Qobj, tol: float=1e-9) -> list[Qobj]: """ Converts a Qobj representing a quantum map to a list of quantum objects, each representing an operator in the Kraus decomposition of the given map. @@ -515,7 +515,7 @@ def to_kraus(q_oper, tol=1e-9): ) -def to_stinespring(q_oper, threshold=1e-10): +def to_stinespring(q_oper: Qobj, threshold: float=1e-10) -> tuple[Qobj, Qobj]: r""" Converts a Qobj representing a quantum map :math:`\Lambda` to a pair of partial isometries ``A`` and ``B`` such that diff --git a/qutip/core/superoperator.py b/qutip/core/superoperator.py index 68488a8cd9..706c2826b7 100644 --- a/qutip/core/superoperator.py +++ b/qutip/core/superoperator.py @@ -5,10 +5,11 @@ ] import functools - +from typing import TypeVar, overload import numpy as np from .qobj import Qobj +from .cy.qobjevo import QobjEvo from . import data as _data from .dimensions import Compound, SuperSpace, Space @@ -30,7 +31,28 @@ def out(qobj): return out -def liouvillian(H=None, c_ops=None, data_only=False, chi=None): +@overload +def liouvillian( + H: Qobj, + c_ops: list[Qobj], + data_only: bool, + chi: list[float] +) -> Qobj: ... 
+ +@overload +def liouvillian( + H: Qobj | QobjEvo, + c_ops: list[Qobj | QobjEvo], + data_only: bool, + chi: list[float] +) -> QobjEvo: ... + +def liouvillian( + H: Qobj | QobjEvo = None, + c_ops: list[Qobj | QobjEvo] = None, + data_only: bool = False, + chi: list[float] = None, +) -> Qobj | QobjEvo: """Assembles the Liouvillian superoperator from a Hamiltonian and a ``list`` of collapse operators. @@ -118,7 +140,28 @@ def liouvillian(H=None, c_ops=None, data_only=False, chi=None): copy=False) -def lindblad_dissipator(a, b=None, data_only=False, chi=None): +@overload +def lindblad_dissipator( + a: Qobj, + b: Qobj, + data_only: bool, + chi: list[float] +) -> Qobj: ... + +@overload +def lindblad_dissipator( + a: Qobj | QobjEvo, + b: Qobj | QobjEvo, + data_only: bool, + chi: list[float] +) -> QobjEvo: ... + +def lindblad_dissipator( + a: Qobj | QobjEvo, + b: Qobj | QobjEvo = None, + data_only: bool = False, + chi: list[float] = None, +) -> Qobj | QobjEvo: """ Lindblad dissipator (generalized) for a single pair of collapse operators (a, b), or for a single collapse operator (a) when b is not specified: @@ -180,7 +223,7 @@ def lindblad_dissipator(a, b=None, data_only=False, chi=None): @_map_over_compound_operators -def operator_to_vector(op): +def operator_to_vector(op: Qobj) -> Qobj: """ Create a vector representation given a quantum operator in matrix form. The passed object should have a ``Qobj.type`` of 'oper' or 'super'; this @@ -208,7 +251,7 @@ def operator_to_vector(op): @_map_over_compound_operators -def vector_to_operator(op): +def vector_to_operator(op: Qobj) -> Qobj: """ Create a matrix representation given a quantum operator in vector form. 
The passed object should have a ``Qobj.type`` of 'operator-ket'; this @@ -236,7 +279,10 @@ def vector_to_operator(op): copy=False) -def stack_columns(matrix): +QobjOrArray = TypeVar("QobjOrArray", Qobj, np.ndarray) + + +def stack_columns(matrix: QobjOrArray) -> QobjOrArray: """ Stack the columns in a data-layer type, useful for converting an operator into a superoperator representation. @@ -250,7 +296,10 @@ def stack_columns(matrix): return _data.column_stack(matrix) -def unstack_columns(vector, shape=None): +def unstack_columns( + vector: QobjOrArray, + shape: tuple[int, int] = None, +) -> QobjOrArray: """ Unstack the columns in a data-layer type back into a 2D shape, useful for converting an operator in vector form back into a regular operator. If @@ -295,8 +344,11 @@ def stacked_index(size, row, col): return row + size*col +AnyQobj = TypeVar("AnyQobj", Qobj, QobjEvo) + + @_map_over_compound_operators -def spost(A): +def spost(A: AnyQobj) -> AnyQobj: """ Superoperator formed from post-multiplication by operator A @@ -321,7 +373,7 @@ def spost(A): @_map_over_compound_operators -def spre(A): +def spre(A: AnyQobj) -> AnyQobj: """Superoperator formed from pre-multiplication by operator A. Parameters @@ -352,6 +404,12 @@ def _drop_projected_dims(dims): return [d for d in dims if d != 1] +@overload +def sprepost(A: Qobj, B: Qobj) -> Qobj: ... + +@overload +def sprepost(A: Qobj | QobjEvo, B: Qobj | QobjEvo) -> QobjEvo: ... + def sprepost(A, B): """ Superoperator formed from pre-multiplication by A and post-multiplication @@ -468,7 +526,7 @@ def _to_tensor_of_super(q_oper): return q_oper.permute(perm_idxs) -def reshuffle(q_oper): +def reshuffle(q_oper: Qobj) -> Qobj: """ Column-reshuffles a super operator or a operator-ket Qobj. 
""" diff --git a/qutip/core/tensor.py b/qutip/core/tensor.py index b9fde85fb9..013fb75589 100644 --- a/qutip/core/tensor.py +++ b/qutip/core/tensor.py @@ -9,14 +9,19 @@ import numpy as np from functools import partial +from typing import TypeVar, overload + from .operators import qeye from .qobj import Qobj +from .cy.qobjevo import QobjEvo from .superoperator import operator_to_vector, reshuffle from .dimensions import ( flatten, enumerate_flat, unflatten, deep_remove, dims_to_tensor_shape, dims_idxs_to_tensor_idxs ) from . import data as _data +from .. import settings +from ..typing import LayerType class _reverse_partial_tensor: @@ -28,7 +33,13 @@ def __call__(self, op): return tensor(op, self.right) -def tensor(*args): +@overload +def tensor(*args: Qobj) -> Qobj: ... + +@overload +def tensor(*args: Qobj | QobjEvo) -> QobjEvo: ... + +def tensor(*args: Qobj | QobjEvo) -> Qobj | QobjEvo: """Calculates the tensor product of input operators. Parameters @@ -105,7 +116,13 @@ def tensor(*args): copy=False) -def super_tensor(*args): +@overload +def super_tensor(*args: Qobj) -> Qobj: ... + +@overload +def super_tensor(*args: Qobj | QobjEvo) -> QobjEvo: ... + +def super_tensor(*args: Qobj | QobjEvo) -> Qobj | QobjEvo: """ Calculate the tensor product of input superoperators, by tensoring together the underlying Hilbert spaces on which each vectorized operator acts. @@ -173,6 +190,12 @@ def _isbralike(q): return q.isbra or q.isoperbra +@overload +def composite(*args: Qobj) -> Qobj: ... + +@overload +def composite(*args: Qobj | QobjEvo) -> QobjEvo: ... + def composite(*args): """ Given two or more operators, kets or bras, returns the Qobj @@ -244,13 +267,18 @@ def _tensor_contract_dense(arr, *pairs): return arr -def tensor_swap(q_oper, *pairs): +def tensor_swap(q_oper: Qobj, *pairs: tuple[int, int]) -> Qobj: """Transposes one or more pairs of indices of a Qobj. - Note that this uses dense representations and thus - should *not* be used for very large Qobjs. + + .. 
note:: + + Note that this uses dense representations and thus + should *not* be used for very large Qobjs. Parameters ---------- + q_oper : Qobj + Operator to swap dims. pairs : tuple One or more tuples ``(i, j)`` indicating that the @@ -283,10 +311,13 @@ def tensor_swap(q_oper, *pairs): return Qobj(data, dims=dims, superrep=q_oper.superrep, copy=False) -def tensor_contract(qobj, *pairs): +def tensor_contract(qobj: Qobj, *pairs: tuple[int, int]) -> Qobj: """Contracts a qobj along one or more index pairs. - Note that this uses dense representations and thus - should *not* be used for very large Qobjs. + + .. note:: + + Note that this uses dense representations and thus + should *not* be used for very large Qobjs. Parameters ---------- @@ -413,7 +444,15 @@ def _targets_to_list(targets, oper=None, N=None): return targets -def expand_operator(oper, dims, targets): +QobjOrQobjEvo = TypeVar("QobjOrQobjEvo", Qobj, QobjEvo) + + +def expand_operator( + oper: QobjOrQobjEvo, + dims: list[int], + targets: int, + dtype: LayerType = None +) -> QobjOrQobjEvo: """ Expand an operator to one that acts on a system with desired dimensions. e.g. @@ -435,13 +474,19 @@ def expand_operator(oper, dims, targets): E.g ``[2, 3, 2, 3, 4]``. targets : int or list of int The indices of subspace that are acted on. + dtype : str, optional + Data type of the output :class:`.Qobj`. By default it uses the data + type specified in settings. If no data type is specified + in settings it uses the ``CSR`` data type. Returns ------- expanded_oper : :class:`.Qobj` - The expanded operator acting on a system with desired dimension. + The expanded operator acting on a system with the desired dimension. 
""" from .operators import identity + dtype = dtype or settings.core["default_dtype"] or _data.CSR + oper = oper.to(dtype) N = len(dims) targets = _targets_to_list(targets, oper=oper, N=N) _check_oper_dims(oper, dims=dims, targets=targets) diff --git a/qutip/entropy.py b/qutip/entropy.py index 6f3f5cc7a6..7a5d8a7dc0 100644 --- a/qutip/entropy.py +++ b/qutip/entropy.py @@ -4,7 +4,8 @@ from numpy import conj, e, inf, imag, inner, real, sort, sqrt from numpy.lib.scimath import log, log2 -from . import (ptrace, ket2dm, tensor, sigmay, partial_transpose, +from .partial_transpose import partial_transpose +from . import (ptrace, tensor, sigmay, ket2dm, expand_operator) from .core import data as _data @@ -129,6 +130,8 @@ def negativity(rho, subsys, method='tracenorm', logarithmic=False): Experimental. """ + if rho.isket or rho.isbra: + rho = ket2dm(rho) mask = [idx == subsys for idx, n in enumerate(rho.dims[0])] rho_pt = partial_transpose(rho, mask) @@ -140,6 +143,7 @@ def negativity(rho, subsys, method='tracenorm', logarithmic=False): else: raise ValueError("Unknown method %s" % method) +# Return the negativity value (or its logarithm if specified) if logarithmic: return log2(2 * N + 1) else: diff --git a/qutip/measurement.py b/qutip/measurement.py index 5b29c40836..570c55bc2e 100644 --- a/qutip/measurement.py +++ b/qutip/measurement.py @@ -239,7 +239,7 @@ def measurement_statistics_observable(state, op, tol=None): if probability >= tol: probabilities.append(probability) - values.append(np.mean(eigenvalues[present_group])) + values.append(np.mean(eigenvalues[np.array(present_group)])) projectors.append(projector) present_group = [] diff --git a/qutip/piqs/piqs.py b/qutip/piqs/piqs.py index 73b3995581..c06ec2b145 100644 --- a/qutip/piqs/piqs.py +++ b/qutip/piqs/piqs.py @@ -268,7 +268,7 @@ def dicke_function_trace(f, rho): normalized_block = block / dj eigenvals_block = eigvalsh(normalized_block) for val in eigenvals_block: - eigenvals_degeneracy.append(val) + 
eigenvals_degeneracy.append(abs(val)) deg.append(dj) eigenvalue = np.array(eigenvals_degeneracy) diff --git a/qutip/random_objects.py b/qutip/random_objects.py index 545c9639b8..83f1082476 100644 --- a/qutip/random_objects.py +++ b/qutip/random_objects.py @@ -19,12 +19,14 @@ from numpy.random import Generator, SeedSequence, default_rng import scipy.linalg import scipy.sparse as sp +from typing import Literal, Sequence from . import (Qobj, create, destroy, jmat, basis, to_super, to_choi, to_chi, to_kraus, to_stinespring) from .core import data as _data -from .core.dimensions import flatten, Dimensions +from .core.dimensions import flatten, Dimensions, Space from . import settings +from .typing import SpaceLike, LayerType _RAND = default_rng() @@ -52,6 +54,8 @@ def _implicit_tensor_dimensions(dimensions, superoper=False): dimensions : list Dimension list in the form required by ``Qobj`` creation. """ + if isinstance(dimensions, Space): + dimensions = dimensions.as_list() if not isinstance(dimensions, list): dimensions = [dimensions] flat = flatten(dimensions) @@ -210,8 +214,15 @@ def _merge_shuffle_blocks(blocks, generator): return _data.create(matrix, copy=False) -def rand_herm(dimensions, density=0.30, distribution="fill", *, - eigenvalues=(), seed=None, dtype=None): +def rand_herm( + dimensions: SpaceLike, + density: float = 0.30, + distribution: Literal["fill", "pos_def", "eigen"] = "fill", + *, + eigenvalues: Sequence[float] = (), + seed: int | SeedSequence | Generator = None, + dtype: LayerType = None, +): """Creates a random sparse Hermitian quantum object. 
Parameters @@ -335,8 +346,14 @@ def _rand_herm_dense(N, density, pos_def, generator): return _data.create(M) -def rand_unitary(dimensions, density=1, distribution="haar", *, - seed=None, dtype=None): +def rand_unitary( + dimensions: SpaceLike, + density: float = 1, + distribution: Literal["haar", "exp"] = "haar", + *, + seed: int | SeedSequence | Generator = None, + dtype: LayerType = None, +): r"""Creates a random sparse unitary quantum object. Parameters @@ -438,8 +455,14 @@ def _rand_unitary_haar(N, generator): return Q * Lambda -def rand_ket(dimensions, density=1, distribution="haar", *, - seed=None, dtype=None): +def rand_ket( + dimensions: SpaceLike, + density: float = 1, + distribution: Literal["haar", "fill"] = "haar", + *, + seed: int | SeedSequence | Generator = None, + dtype: LayerType = None, +): """Creates a random ket vector. Parameters @@ -501,9 +524,17 @@ def rand_ket(dimensions, density=1, distribution="haar", *, return ket.to(dtype) -def rand_dm(dimensions, density=0.75, distribution="ginibre", *, - eigenvalues=(), rank=None, seed=None, - dtype=None): +def rand_dm( + dimensions: SpaceLike, + density: float = 0.75, + distribution: Literal["ginibre", "hs", "pure", "eigen", "uniform"] \ + = "ginibre", + *, + eigenvalues: Sequence[float] = (), + rank: int = None, + seed: int | SeedSequence | Generator = None, + dtype: LayerType = None, +): r"""Creates a random density matrix of the desired dimensions. Parameters @@ -631,7 +662,12 @@ def _rand_dm_ginibre(N, rank, generator): return rho -def rand_kraus_map(dimensions, *, seed=None, dtype=None): +def rand_kraus_map( + dimensions: SpaceLike, + *, + seed: int | SeedSequence | Generator = None, + dtype: LayerType = None, +): """ Creates a random CPTP map on an N-dimensional Hilbert space in Kraus form. 
@@ -671,7 +707,13 @@ def rand_kraus_map(dimensions, *, seed=None, dtype=None): return [Qobj(x, dims=dims, copy=False).to(dtype) for x in oper_list] -def rand_super(dimensions, *, superrep="super", seed=None, dtype=None): +def rand_super( + dimensions: SpaceLike, + *, + superrep: Literal["super", "choi", "chi"] = "super", + seed: int | SeedSequence | Generator = None, + dtype: LayerType = None, +): """ Returns a randomly drawn superoperator acting on operators acting on N dimensions. @@ -712,9 +754,15 @@ def rand_super(dimensions, *, superrep="super", seed=None, dtype=None): return out -def rand_super_bcsz(dimensions, enforce_tp=True, rank=None, *, - superrep="super", seed=None, - dtype=None): +def rand_super_bcsz( + dimensions: SpaceLike, + enforce_tp: bool = True, + rank: int = None, + *, + superrep: Literal["super", "choi", "chi"] = "super", + seed: int | SeedSequence | Generator = None, + dtype: LayerType = None, +): """ Returns a random superoperator drawn from the Bruzda et al ensemble for CPTP maps [BCSZ08]_. Note that due to @@ -816,8 +864,14 @@ def rand_super_bcsz(dimensions, enforce_tp=True, rank=None, *, return out -def rand_stochastic(dimensions, density=0.75, kind='left', - *, seed=None, dtype=None): +def rand_stochastic( + dimensions: SpaceLike, + density: float = 0.75, + kind: Literal["left", "right"] = "left", + *, + seed: int | SeedSequence | Generator = None, + dtype: LayerType = None, +): """Generates a random stochastic matrix. Parameters diff --git a/qutip/settings.py b/qutip/settings.py index 29ee053b75..884c9f3de0 100644 --- a/qutip/settings.py +++ b/qutip/settings.py @@ -34,7 +34,7 @@ def _in_libaries(name): return blas -def available_cpu_count(): +def available_cpu_count() -> int: """ Get the number of cpus. It tries to only get the number available to qutip. @@ -135,19 +135,19 @@ def __init__(self): self._colorblind_safe = False @property - def has_mkl(self): + def has_mkl(self) -> bool: """ Whether qutip found an mkl installation. 
""" return self.mkl_lib is not None @property - def mkl_lib(self): + def mkl_lib(self) -> str | None: """ Location of the mkl installation. """ if self._mkl_lib == "": self._mkl_lib = _find_mkl() return _find_mkl() @property - def ipython(self): + def ipython(self) -> bool: """ Whether qutip is running in ipython. """ try: __IPYTHON__ @@ -156,7 +156,7 @@ def ipython(self): return False @property - def eigh_unsafe(self): + def eigh_unsafe(self) -> bool: """ Whether `eigh` call is reliable. Some implementation of blas have some issues on some OS. @@ -175,7 +175,7 @@ def eigh_unsafe(self): ) @property - def tmproot(self): + def tmproot(self) -> str: """ Location in which qutip place cython string coefficient folders. The default is "$HOME/.qutip". @@ -184,13 +184,13 @@ def tmproot(self): return self._tmproot @tmproot.setter - def tmproot(self, root): + def tmproot(self, root: str) -> None: if not os.path.exists(root): os.mkdir(root) self._tmproot = root @property - def coeffroot(self): + def coeffroot(self) -> str: """ Location in which qutip save cython string coefficient files. Usually "{qutip.settings.tmproot}/qutip_coeffs_X.X". @@ -199,7 +199,7 @@ def coeffroot(self): return self._coeffroot @coeffroot.setter - def coeffroot(self, root): + def coeffroot(self, root: str) -> None: if not os.path.exists(root): os.mkdir(root) if root not in sys.path: @@ -207,18 +207,18 @@ def coeffroot(self, root): self._coeffroot = root @property - def coeff_write_ok(self): + def coeff_write_ok(self) -> bool: """ Whether qutip has write acces to ``qutip.settings.coeffroot``.""" return os.access(self.coeffroot, os.W_OK) @property - def has_openmp(self): + def _has_openmp(self) -> bool: return False # We keep this as a reminder for when openmp is restored: see Pull #652 # os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' @property - def idxint_size(self): + def idxint_size(self) -> int: """ Integer type used by ``CSR`` data. 
Sparse ``CSR`` matrices can contain at most ``2**idxint_size`` @@ -228,7 +228,7 @@ def idxint_size(self): return data.base.idxint_size @property - def num_cpus(self): + def num_cpus(self) -> int: """ Number of cpu detected. Use the solver options to control the number of cpus used. @@ -241,33 +241,7 @@ def num_cpus(self): return num_cpus @property - def debug(self): - """ - Debug mode for development. - """ - return self._debug - - @debug.setter - def debug(self, value): - self._debug = value - - @property - def log_handler(self): - """ - Define whether log handler should be: - - default: switch based on IPython detection - - stream: set up non-propagating StreamHandler - - basic: call basicConfig - - null: leave logging to the user - """ - return self._log_handler - - @log_handler.setter - def log_handler(self, value): - self._log_handler = value - - @property - def colorblind_safe(self): + def colorblind_safe(self) -> bool: """ Allow for a colorblind mode that uses different colormaps and plotting options by default. 
@@ -275,10 +249,10 @@ def colorblind_safe(self): return self._colorblind_safe @colorblind_safe.setter - def colorblind_safe(self, value): + def colorblind_safe(self, value: bool) -> None: self._colorblind_safe = value - def __str__(self): + def __str__(self) -> str: lines = ["Qutip settings:"] for attr in self.__dir__(): if not attr.startswith('_') and attr not in ["core", "compile"]: @@ -286,7 +260,7 @@ def __str__(self): lines.append(f" compile: {self.compile.__repr__(full=False)}") return '\n'.join(lines) - def __repr__(self): + def __repr__(self) -> str: return self.__str__() diff --git a/qutip/simdiag.py b/qutip/simdiag.py index 63d2a6e107..9277a38ccc 100644 --- a/qutip/simdiag.py +++ b/qutip/simdiag.py @@ -79,15 +79,15 @@ def simdiag(ops, evals: bool = True, *, A = ops[jj] shape = A.shape if shape[0] != shape[1]: - raise TypeError('Matricies must be square.') + raise TypeError('Matrices must be square.') if shape[0] != N: raise TypeError('All matrices. must be the same shape') if not A.isherm: - raise TypeError('Matricies must be Hermitian') + raise TypeError('Matrices must be Hermitian') for kk in range(jj): B = ops[kk] if (A * B - B * A).norm() / (A * B).norm() > tol: - raise TypeError('Matricies must commute.') + raise TypeError('Matrices must commute.') # TODO: rewrite using Data object eigvals, eigvecs = _data.eigs(ops[0].data, True, True) diff --git a/qutip/solver/__init__.py b/qutip/solver/__init__.py index 6714cae70b..9725b3855c 100644 --- a/qutip/solver/__init__.py +++ b/qutip/solver/__init__.py @@ -1,4 +1,5 @@ from .result import * +from .multitrajresult import * from .options import * import qutip.solver.integrator as integrator from .integrator import IntegratorException diff --git a/qutip/solver/_feedback.py b/qutip/solver/_feedback.py index 79d82259cc..2d80d9ff16 100644 --- a/qutip/solver/_feedback.py +++ b/qutip/solver/_feedback.py @@ -133,18 +133,18 @@ def __repr__(self): return "CollapseFeedback" -def _default_weiner(t): +def 
_default_wiener(t): return np.zeros(1) -class _WeinerFeedback(_Feedback): - code = "WeinerFeedback" +class _WienerFeedback(_Feedback): + code = "WienerFeedback" def __init__(self, default=None): - self.default = default or _default_weiner + self.default = default or _default_wiener def check_consistency(self, dims): pass def __repr__(self): - return "WeinerFeedback" + return "WienerFeedback" diff --git a/qutip/solver/brmesolve.py b/qutip/solver/brmesolve.py index 4b54a70e8c..7fc5982041 100644 --- a/qutip/solver/brmesolve.py +++ b/qutip/solver/brmesolve.py @@ -5,7 +5,9 @@ __all__ = ['brmesolve', 'BRSolver'] +from typing import Any import numpy as np +from numpy.typing import ArrayLike import inspect from time import time from .. import Qobj, QobjEvo, coefficient, Coefficient @@ -15,10 +17,21 @@ from .solver_base import Solver, _solver_deprecation from .options import _SolverOptions from ._feedback import _QobjFeedback, _DataFeedback - - -def brmesolve(H, psi0, tlist, a_ops=(), e_ops=(), c_ops=(), - args=None, sec_cutoff=0.1, options=None, **kwargs): +from ..typing import EopsLike, QobjEvoLike, CoefficientLike + + +def brmesolve( + H: QobjEvoLike, + psi0: Qobj, + tlist: ArrayLike, + a_ops: list[tuple[QobjEvoLike, CoefficientLike]] = None, + e_ops: EopsLike | list[EopsLike] | dict[Any, EopsLike] = None, + c_ops: list[QobjEvoLike] = None, + args: dict[str, Any] = None, + sec_cutoff: float = 0.1, + options: dict[str, Any] = None, + **kwargs +): """ Solves for the dynamics of a system using the Bloch-Redfield master equation, given an input Hamiltonian, Hermitian bath-coupling terms and @@ -42,7 +55,7 @@ def brmesolve(H, psi0, tlist, a_ops=(), e_ops=(), c_ops=(), Nested list of system operators that couple to the environment, and the corresponding bath spectra. - a_op : :obj:`.Qobj`, :obj:`.QobjEvo` + a_op : :obj:`.Qobj`, :obj:`.QobjEvo`, :obj:`.QobjEvo` compatible format The operator coupling to the environment. Must be hermitian. 
spectra : :obj:`.Coefficient`, str, func @@ -62,7 +75,7 @@ def brmesolve(H, psi0, tlist, a_ops=(), e_ops=(), c_ops=(), a_ops = [ (a+a.dag(), ('w>0', args={"w": 0})), (QobjEvo(a+a.dag()), 'w > exp(-t)'), - (QobjEvo([b+b.dag(), lambda t: ...]), lambda w: ...)), + ([[b+b.dag(), lambda t: ...]], lambda w: ...)), (c+c.dag(), SpectraCoefficient(coefficient(array, tlist=ws))), ] @@ -74,11 +87,11 @@ def brmesolve(H, psi0, tlist, a_ops=(), e_ops=(), c_ops=(), the operator: :obj:`.Qobj` vs :obj:`.QobjEvo` instead of the type of the spectra. - e_ops : list of :obj:`.Qobj` / callback function, optional - Single operator or list of operators for which to evaluate - expectation values or callable or list of callable. + e_ops : list, dict, :obj:`.Qobj` or callback function, optional + Single operator, or list or dict of operators, for which to evaluate + expectation values. Operator can be Qobj, QobjEvo or callables with the + signature `f(t: float, state: Qobj) -> Any`. Callable signature must be, `f(t: float, state: Qobj)`. - See :func:`expect` for more detail of operator expectation c_ops : list of (:obj:`.QobjEvo`, :obj:`.QobjEvo` compatible format), optional List of collapse operators. @@ -102,7 +115,8 @@ def brmesolve(H, psi0, tlist, a_ops=(), e_ops=(), c_ops=(), On `None` the states will be saved if no expectation operators are given. - | normalize_output : bool - | Normalize output state to hide ODE numerical errors. + | Normalize output state to hide ODE numerical errors. Only normalize + the state if the initial state is already normalized. - | progress_bar : str {'text', 'enhanced', 'tqdm', ''} | How to present the solver progress. 
'tqdm' uses the python module of the same name and raise an error @@ -146,6 +160,7 @@ def brmesolve(H, psi0, tlist, a_ops=(), e_ops=(), c_ops=(), c_ops = [QobjEvo(c_op, args=args, tlist=tlist) for c_op in c_ops] new_a_ops = [] + a_ops = a_ops or [] for (a_op, spectra) in a_ops: aop = QobjEvo(a_op, args=args, tlist=tlist) if isinstance(spectra, str): @@ -236,7 +251,15 @@ class BRSolver(Solver): } _avail_integrators = {} - def __init__(self, H, a_ops, c_ops=None, sec_cutoff=0.1, *, options=None): + def __init__( + self, + H: Qobj | QobjEvo, + a_ops: list[tuple[Qobj | QobjEvo, Coefficient]], + c_ops: Qobj | QobjEvo | list[Qobj | QobjEvo] = None, + sec_cutoff: float = 0.1, + *, + options: dict[str, Any] = None, + ): _time_start = time() self.rhs = None diff --git a/qutip/solver/floquet.py b/qutip/solver/floquet.py index d5d1d20b60..6deb8e499f 100644 --- a/qutip/solver/floquet.py +++ b/qutip/solver/floquet.py @@ -6,7 +6,9 @@ "FMESolver", ] +from typing import Any, overload, TypeVar, Literal, Callable import numpy as np +from numpy.typing import ArrayLike from qutip.core import data as _data from qutip.core.data import Data from qutip import Qobj, QobjEvo @@ -17,6 +19,7 @@ from .result import Result from time import time from ..ui.progressbar import progress_bars +from ..typing import EopsLike, QobjEvoLike, QobjOrData class FloquetBasis: @@ -37,13 +40,14 @@ class FloquetBasis: def __init__( self, - H, - T, - args=None, - options=None, - sparse=False, - sort=True, - precompute=None, + H: QobjEvoLike, + T: float, + args: dict[str, Any] = None, + options: dict[str, Any] = None, + sparse: bool = False, + sort: bool = True, + precompute: ArrayLike = None, + times: ArrayLike = None, ): """ Parameters @@ -73,10 +77,14 @@ def __init__( for later use when computing modes and states. Default is ``linspace(0, T, 101)`` corresponding to the default integration steps used for the floquet tensor computation. 
+ + times : ArrayLike [None] + Time for array """ if not T > 0: raise ValueError("The period need to be a positive number.") self.T = T + H = QobjEvo(H, args=args, tlist=times) if precompute is not None: tlist = np.unique(np.atleast_1d(precompute) % self.T) memoize = len(tlist) @@ -88,12 +96,9 @@ def __init__( # Default computation tlist = np.linspace(0, T, 101) memoize = 101 - if ( - isinstance(H, QobjEvo) - and (H._feedback_functions or H._solver_only_feedback) - ): + if H._feedback_functions or H._solver_only_feedback: raise NotImplementedError("FloquetBasis does not support feedback") - self.U = Propagator(H, args=args, options=options, memoize=memoize) + self.U = Propagator(H, options=options, memoize=memoize) for t in tlist: # Do the evolution by steps to save the intermediate results. self.U(t) @@ -120,7 +125,13 @@ def _as_ketlist(self, kets_mat): for ket in _data.split_columns(kets_mat) ] - def mode(self, t, data=False): + @overload + def mode(self, t: float, data: Literal[False]) -> Qobj: ... + + @overload + def mode(self, t: float, data: Literal[True]) -> Data: ... + + def mode(self, t: float, data: bool = False): """ Calculate the Floquet modes at time ``t``. @@ -151,7 +162,13 @@ def mode(self, t, data=False): else: return self._as_ketlist(kets_mat) - def state(self, t, data=False): + @overload + def state(self, t: float, data: Literal[False]) -> Qobj: ... + + @overload + def state(self, t: float, data: Literal[True]) -> Data: ... + + def state(self, t: float, data: bool = False): """ Evaluate the floquet states at time t. @@ -180,7 +197,11 @@ def state(self, t, data=False): else: return self._as_ketlist(states_mat) - def from_floquet_basis(self, floquet_basis, t=0): + def from_floquet_basis( + self, + floquet_basis: QobjOrData, + t: float = 0 + ) -> QobjOrData: """ Transform a ket or density matrix from the Floquet basis at time ``t`` to the lab basis. 
@@ -218,7 +239,11 @@ def from_floquet_basis(self, floquet_basis, t=0): return Qobj(lab_basis, dims=dims) return lab_basis - def to_floquet_basis(self, lab_basis, t=0): + def to_floquet_basis( + self, + lab_basis: QobjOrData, + t: float = 0 + ) -> QobjOrData: """ Transform a ket or density matrix in the lab basis to the Floquet basis at time ``t``. @@ -444,7 +469,15 @@ def _floquet_master_equation_tensor(A): return _data.add(R, S) -def floquet_tensor(H, c_ops, spectra_cb, T=0, w_th=0.0, kmax=5, nT=100): +def floquet_tensor( + H: QobjEvo | FloquetBasis, + c_ops: list[Qobj], + spectra_cb: list[Callable[[float], complex]], + T: float = 0, + w_th: float = 0.0, + kmax: int = 5, + nT: int = 100, +) -> Qobj: """ Construct a tensor that represents the master equation in the floquet basis. @@ -456,10 +489,6 @@ def floquet_tensor(H, c_ops, spectra_cb, T=0, w_th=0.0, kmax=5, nT=100): H : :obj:`.QobjEvo`, :obj:`.FloquetBasis` Periodic Hamiltonian a floquet basis system. - T : float, optional - The period of the time-dependence of the hamiltonian. Optional if ``H`` - is a ``FloquetBasis`` object. - c_ops : list of :class:`.Qobj` list of collapse operators. @@ -467,6 +496,10 @@ def floquet_tensor(H, c_ops, spectra_cb, T=0, w_th=0.0, kmax=5, nT=100): List of callback functions that compute the noise power spectrum as a function of frequency for the collapse operators in `c_ops`. + T : float, optional + The period of the time-dependence of the hamiltonian. Optional if ``H`` + is a ``FloquetBasis`` object. + w_th : float, default: 0.0 The temperature in units of frequency. 
@@ -496,7 +529,15 @@ def floquet_tensor(H, c_ops, spectra_cb, T=0, w_th=0.0, kmax=5, nT=100): return Qobj(r, dims=[dims, dims], superrep="super", copy=False) -def fsesolve(H, psi0, tlist, e_ops=None, T=0.0, args=None, options=None): +def fsesolve( + H: QobjEvoLike | FloquetBasis, + psi0: Qobj, + tlist: ArrayLike, + e_ops: EopsLike | list[EopsLike] | dict[Any, EopsLike] = None, + T: float = 0.0, + args: dict[str, Any] = None, + options: dict[str, Any] = None, +) -> Result: """ Solve the Schrodinger equation using the Floquet formalism. @@ -513,10 +554,12 @@ def fsesolve(H, psi0, tlist, e_ops=None, T=0.0, args=None, options=None): tlist : *list* / *array* List of times for :math:`t`. - e_ops : list of :class:`.Qobj` / callback function, optional - List of operators for which to evaluate expectation values. If this - list is empty, the state vectors for each time in `tlist` will be - returned instead of expectation values. + e_ops : list or dict of :class:`.Qobj` / callback function, optional + Single operator, or list or dict of operators, for which to evaluate + expectation values. Operator can be Qobj, QobjEvo or callables with the + signature `f(t: float, state: Qobj) -> Any`. + See :func:`~qutip.core.expect.expect` for more detail of operator + expectation. T : float, default=tlist[-1] The period of the time-dependence of the hamiltonian. @@ -535,7 +578,8 @@ def fsesolve(H, psi0, tlist, e_ops=None, T=0.0, args=None, options=None): On `None` the states will be saved if no expectation operators are given. - | normalize_output : bool - | Normalize output state to hide ODE numerical errors. + | Normalize output state to hide ODE numerical errors. Only normalize + the state if the initial state is already normalized. Returns ------- @@ -550,7 +594,8 @@ def fsesolve(H, psi0, tlist, e_ops=None, T=0.0, args=None, options=None): T = T or tlist[-1] # `fsesolve` is a fallback from `fmmesolve`, for the later, options # are for the open system evolution. 
- floquet_basis = FloquetBasis(H, T, args, precompute=tlist) + H = QobjEvo(H, args=args, tlist=tlist, copy=False) + floquet_basis = FloquetBasis(H, T, precompute=tlist) f_coeff = floquet_basis.to_floquet_basis(psi0) result_options = { @@ -568,17 +613,17 @@ def fsesolve(H, psi0, tlist, e_ops=None, T=0.0, args=None, options=None): def fmmesolve( - H, - rho0, - tlist, - c_ops=None, - e_ops=None, - spectra_cb=None, - T=0, - w_th=0.0, - args=None, - options=None, -): + H: QobjEvoLike | FloquetBasis, + rho0: Qobj, + tlist: ArrayLike, + c_ops: list[Qobj] = None, + e_ops: EopsLike | list[EopsLike] | dict[Any, EopsLike] = None, + spectra_cb: list[Callable[[float], complex]] = None, + T: float = 0.0, + w_th: float = 0.0, + args: dict[str, Any] = None, + options: dict[str, Any] = None, + ) -> "FloquetResult": """ Solve the dynamics for the system using the Floquet-Markov master equation. @@ -600,8 +645,13 @@ def fmmesolve( supported. Fall back on :func:`fsesolve` if not provided. e_ops : list of :class:`.Qobj` / callback function, optional - List of operators for which to evaluate expectation values. - The states are reverted to the lab basis before applying the + Single operator, or list or dict of operators, for which to evaluate + expectation values. Operator can be Qobj, QobjEvo or callables with the + signature `f(t: float, state: Qobj) -> Any`. + See :func:`~qutip.core.expect.expect` for more detail of operator + expectation. + The states are reverted to the lab basis before computing the + expectation values. spectra_cb : list callback functions, default: ``lambda w: (w > 0)`` List of callback functions that compute the noise power spectrum as @@ -638,7 +688,8 @@ def fmmesolve( | Whether or not to store the density matrices in the floquet basis in ``result.floquet_states``. - | normalize_output : bool - | Normalize output state to hide ODE numerical errors. + | Normalize output state to hide ODE numerical errors. 
Only normalize + the state if the initial state is already normalized. - | progress_bar : str {'text', 'enhanced', 'tqdm', ''} | How to present the solver progress. 'tqdm' uses the python module of the same name and raise an error @@ -685,7 +736,8 @@ def fmmesolve( t_precompute = np.concatenate([tlist, np.linspace(0, T, 101)]) # `fsesolve` is a fallback from `fmmesolve`, for the later, options # are for the open system evolution. - floquet_basis = FloquetBasis(H, T, args, precompute=t_precompute) + H = QobjEvo(H, args=args, tlist=tlist, copy=False) + floquet_basis = FloquetBasis(H, T, precompute=t_precompute) if not w_th and args: w_th = args.get("w_th", 0.0) @@ -771,7 +823,14 @@ class FMESolver(MESolver): } def __init__( - self, floquet_basis, a_ops, w_th=0.0, *, kmax=5, nT=None, options=None + self, + floquet_basis: FloquetBasis, + a_ops: list[tuple[Qobj, Callable[[float], float]]], + w_th: float = 0.0, + *, + kmax: int = 5, + nT: int = None, + options: dict[str, Any] = None, ): self.options = options if isinstance(floquet_basis, FloquetBasis): @@ -816,7 +875,7 @@ def _argument(self, args): if args: raise ValueError("FMESolver cannot update arguments") - def start(self, state0, t0, *, floquet=False): + def start(self, state0: Qobj, t0: float, *, floquet: bool = False) -> None: """ Set the initial state and time for a step evolution. ``options`` for the evolutions are read at this step. @@ -837,7 +896,14 @@ def start(self, state0, t0, *, floquet=False): state0 = self.floquet_basis.to_floquet_basis(state0, t0) super().start(state0, t0) - def step(self, t, *, args=None, copy=True, floquet=False): + def step( + self, + t: float, + *, + args: dict[str, Any] = None, + copy: bool = True, + floquet: bool = False, + ) -> Qobj: """ Evolve the state to ``t`` and return the state as a :obj:`.Qobj`. 
@@ -871,7 +937,15 @@ def step(self, t, *, args=None, copy=True, floquet=False): state = state.copy() return state - def run(self, state0, tlist, *, floquet=False, args=None, e_ops=None): + def run( + self, + state0: Qobj, + tlist: ArrayLike, + *, + floquet: bool = False, + args: dict[str, Any] = None, + e_ops: EopsLike | list[EopsLike] | dict[Any, EopsLike] = None, + ) -> FloquetResult: """ Calculate the evolution of the quantum system. @@ -894,13 +968,13 @@ def run(self, state0, tlist, *, floquet=False, args=None, e_ops=None): floquet : bool, optional {False} Whether the initial state in the floquet basis or laboratory basis. - args : dict, optional {None} + args : dict, optional Not supported - e_ops : list {None} - List of Qobj, QobjEvo or callable to compute the expectation - values. Function[s] must have the signature - f(t : float, state : Qobj) -> expect. + e_ops : list or dict, optional + List or dict of Qobj, QobjEvo or callable to compute the + expectation values. Function[s] must have the signature + ``f(t : float, state : Qobj) -> expect``. Returns ------- diff --git a/qutip/solver/heom/bofin_solvers.py b/qutip/solver/heom/bofin_solvers.py index 1061c1c492..a6ecfc321d 100644 --- a/qutip/solver/heom/bofin_solvers.py +++ b/qutip/solver/heom/bofin_solvers.py @@ -498,7 +498,8 @@ def heomsolve( - | store_ados : bool | Whether or not to store the HEOM ADOs. - | normalize_output : bool - | Normalize output state to hide ODE numerical errors. + | Normalize output state to hide ODE numerical errors. Only normalize + the state if the initial state is already normalized. - | progress_bar : str {'text', 'enhanced', 'tqdm', ''} | How to present the solver progress. 
'tqdm' uses the python module of the same name and raise an error diff --git a/qutip/solver/integrator/explicit_rk.pyx b/qutip/solver/integrator/explicit_rk.pyx index a643de4a44..3417fede39 100644 --- a/qutip/solver/integrator/explicit_rk.pyx +++ b/qutip/solver/integrator/explicit_rk.pyx @@ -199,6 +199,15 @@ cdef class Explicit_RungeKutta: self.b_factor_np = np.empty(self.rk_extra_step, dtype=np.float64) self.b_factor = self.b_factor_np + def __reduce__(self): + """ + Helper for pickle to serialize the object + """ + return (self.__class__, ( + self.qevo, self.rtol, self.atol, self.max_numsteps, self.first_step, + self.min_step, self.max_step, self.interpolate, self.method + )) + cpdef void set_initial_value(self, Data y0, double t) except *: """ Set the initial state and time of the integration. diff --git a/qutip/solver/integrator/krylov.py b/qutip/solver/integrator/krylov.py index 5559020982..7e2d9e1d79 100644 --- a/qutip/solver/integrator/krylov.py +++ b/qutip/solver/integrator/krylov.py @@ -51,7 +51,7 @@ def _prepare(self): krylov_tridiag, krylov_basis = \ self._lanczos_algorithm(rand_ket(N).data) if ( - krylov_tridiag.shape[0] < self.options["krylov_dim"] + krylov_tridiag.shape[0] < krylov_dim or krylov_tridiag.shape[0] == N ): self._max_step = np.inf @@ -138,20 +138,22 @@ def krylov_error(t): self._compute_psi(t, *reduced_state) ) / self.options["atol"]) - dt = self.options["min_step"] + # Under 0 will cause an infinite loop in the while loop bellow. + dt = max(self.options["min_step"], 1e-14) + max_step = max(self.options["max_step"], dt) err = krylov_error(dt) if err > 0: - ValueError( + raise ValueError( f"With the krylov dim of {self.options['krylov_dim']}, the " f"error with the minimum step {dt} is {err}, higher than the " f"desired tolerance of {self.options['atol']}." 
) - while krylov_error(dt * 10) < 0 and dt < self.options["max_step"]: + while krylov_error(dt * 10) < 0 and dt < max_step: dt *= 10 - if dt > self.options["max_step"]: - return self.options["max_step"] + if dt > max_step: + return max_step sol = root_scalar(f=krylov_error, bracket=[dt, dt * 10], method="brentq", xtol=self.options['atol']) diff --git a/qutip/solver/krylovsolve.py b/qutip/solver/krylovsolve.py index 6143aae06c..9b94ec3773 100644 --- a/qutip/solver/krylovsolve.py +++ b/qutip/solver/krylovsolve.py @@ -1,12 +1,24 @@ +# Required for Sphinx to follow autodoc_type_aliases +from __future__ import annotations + __all__ = ['krylovsolve'] -from .. import QobjEvo +from .. import QobjEvo, Qobj from .sesolve import SESolver +from .result import Result +from numpy.typing import ArrayLike +from typing import Any, Callable def krylovsolve( - H, psi0, tlist, krylov_dim, e_ops=None, args=None, options=None -): + H: Qobj, + psi0: Qobj, + tlist: ArrayLike, + krylov_dim: int, + e_ops: dict[Any, Qobj | QobjEvo | Callable[[float, Qobj], Any]] = None, + args: dict[str, Any] = None, + options: dict[str, Any] = None, +) -> Result: """ Schrodinger equation evolution of a state vector for time independent Hamiltonians using Krylov method. @@ -61,7 +73,8 @@ def krylovsolve( On `None` the states will be saved if no expectation operators are given. - | normalize_output : bool - | Normalize output state to hide ODE numerical errors. + | Normalize output state to hide ODE numerical errors. Only normalize + the state if the initial state is already normalized. - | progress_bar : str {'text', 'enhanced', 'tqdm', ''} | How to present the solver progress. 
'tqdm' uses the python module of the same name and raise an error diff --git a/qutip/solver/mcsolve.py b/qutip/solver/mcsolve.py index 3c100906d4..0f0fd23b99 100644 --- a/qutip/solver/mcsolve.py +++ b/qutip/solver/mcsolve.py @@ -1,19 +1,40 @@ +# Required for Sphinx to follow autodoc_type_aliases +from __future__ import annotations + __all__ = ['mcsolve', "MCSolver"] import numpy as np -from ..core import QobjEvo, spre, spost, Qobj, unstack_columns -from .multitraj import MultiTrajSolver, _MTSystem +from numpy.typing import ArrayLike +from numpy.random import SeedSequence +from time import time +from typing import Any +import warnings + +from ..core import QobjEvo, spre, spost, Qobj, unstack_columns, qzero_like +from ..typing import QobjEvoLike, EopsLike +from .multitraj import MultiTrajSolver, _MultiTrajRHS, _InitialConditions from .solver_base import Solver, Integrator, _solver_deprecation -from .result import McResult, McTrajectoryResult, McResultImprovedSampling +from .multitrajresult import McResult from .mesolve import mesolve, MESolver from ._feedback import _QobjFeedback, _DataFeedback, _CollapseFeedback import qutip.core.data as _data -from time import time -def mcsolve(H, state, tlist, c_ops=(), e_ops=None, ntraj=500, *, - args=None, options=None, seeds=None, target_tol=None, timeout=None, - **kwargs): +def mcsolve( + H: QobjEvoLike, + state: Qobj, + tlist: ArrayLike, + c_ops: QobjEvoLike | list[QobjEvoLike] = (), + e_ops: EopsLike | list[EopsLike] | dict[Any, EopsLike] = None, + ntraj: int = 500, + *, + args: dict[str, Any] = None, + options: dict[str, Any] = None, + seeds: int | SeedSequence | list[int | SeedSequence] = None, + target_tol: float | tuple[float, float] | list[tuple[float, float]] = None, + timeout: float = None, + **kwargs, +) -> McResult: r""" Monte Carlo evolution of a state vector :math:`|\psi \rangle` for a given Hamiltonian and sets of collapse operators. 
Options for the @@ -28,7 +49,7 @@ def mcsolve(H, state, tlist, c_ops=(), e_ops=None, ntraj=500, *, operators are to be treated deterministically. state : :class:`.Qobj` - Initial state vector. + Initial state vector or density matrix. tlist : array_like Times at which results are recorded. @@ -39,10 +60,10 @@ def mcsolve(H, state, tlist, c_ops=(), e_ops=None, ntraj=500, *, even if ``H`` is a superoperator. If none are given, the solver will defer to ``sesolve`` or ``mesolve``. - e_ops : list, optional - A ``list`` of operator as Qobj, QobjEvo or callable with signature of - (t, state: Qobj) for calculating expectation values. When no ``e_ops`` - are given, the solver will default to save the states. + e_ops : :obj:`.Qobj`, callable, list or dict, optional + Single operator, or list or dict of operators, for which to evaluate + expectation values. Operator can be Qobj, QobjEvo or callables with the + signature `f(t: float, state: Qobj) -> Any`. ntraj : int, default: 500 Maximum number of trajectories to run. Can be cut short if a time limit @@ -132,11 +153,16 @@ def mcsolve(H, state, tlist, c_ops=(), e_ops=None, ntraj=500, *, Object storing all results from the simulation. Which results is saved depends on the presence of ``e_ops`` and the options used. ``collapse`` and ``photocurrent`` is available to Monte Carlo simulation results. + If the initial condition is mixed, the result has additional attributes + ``initial_states`` and ``ntraj_per_initial_state``. Notes ----- The simulation will end when the first end condition is reached between - ``ntraj``, ``timeout`` and ``target_tol``. + ``ntraj``, ``timeout`` and ``target_tol``. If the initial condition is + mixed, ``target_tol`` is not supported. If the initial condition is mixed, + and the end condition is not ``ntraj``, the results returned by this + function should be considered invalid. 
""" options = _solver_deprecation(kwargs, options, "mc") H = QobjEvo(H, args=args, tlist=tlist) @@ -155,6 +181,11 @@ def mcsolve(H, state, tlist, c_ops=(), e_ops=None, ntraj=500, *, return mesolve(H, state, tlist, e_ops=e_ops, args=args, options=options) + if not isinstance(state, Qobj): + raise TypeError( + "The initial state for mcsolve must be a Qobj. Use the MCSolver " + "class for more options of specifying mixed initial states." + ) if isinstance(ntraj, (list, tuple)): raise TypeError( "ntraj must be an integer. " @@ -167,27 +198,19 @@ def mcsolve(H, state, tlist, c_ops=(), e_ops=None, ntraj=500, *, return result -class _MCSystem(_MTSystem): +class _MCRHS(_MultiTrajRHS): """ Container for the operators of the solver. """ - def __init__(self, rhs, c_ops, n_ops): - self.rhs = rhs + def __init__(self, H, c_ops, n_ops): + self.rhs = H self.c_ops = c_ops self.n_ops = n_ops - self._collapse_key = "" def __call__(self): return self.rhs - def __getattr__(self, attr): - if attr == "rhs": - raise AttributeError - if hasattr(self.rhs, attr): - return getattr(self.rhs, attr) - raise AttributeError - def arguments(self, args): self.rhs.arguments(args) for c_op in self.c_ops: @@ -409,7 +432,7 @@ class MCSolver(MultiTrajSolver): Options for the evolution. 
""" name = "mcsolve" - _trajectory_resultclass = McTrajectoryResult + _resultclass = McResult _mc_integrator_class = MCIntegrator solver_options = { "progress_bar": "text", @@ -429,7 +452,13 @@ class MCSolver(MultiTrajSolver): "improved_sampling": False, } - def __init__(self, H, c_ops, *, options=None): + def __init__( + self, + H: Qobj | QobjEvo, + c_ops: Qobj | QobjEvo | list[Qobj | QobjEvo], + *, + options: dict[str, Any] = None, + ): _time_start = time() if isinstance(c_ops, (Qobj, QobjEvo)): @@ -456,13 +485,15 @@ def __init__(self, H, c_ops, *, options=None): self._num_collapse = len(self._c_ops) self.options = options - system = _MCSystem(rhs, self._c_ops, self._n_ops) + system = _MCRHS(rhs, self._c_ops, self._n_ops) super().__init__(system, options=options) def _restore_state(self, data, *, copy=True): """ Retore the Qobj state from its data. """ + # Duplicated from the Solver class, but removed the check for the + # normalize_output option, since MCSolver doesn't have that option. if self._state_metadata['dims'] == self.rhs._dims[1]: state = Qobj(unstack_columns(data), **self._state_metadata, copy=False) @@ -480,58 +511,204 @@ def _initialize_stats(self): }) return stats - def _initialize_run_one_traj(self, seed, state, tlist, e_ops, - no_jump=False, jump_prob_floor=0.0): - result = self._trajectory_resultclass(e_ops, self.options) - generator = self._get_generator(seed) - self._integrator.set_state(tlist[0], state, generator, - no_jump=no_jump, - jump_prob_floor=jump_prob_floor) - result.add(tlist[0], self._restore_state(state, copy=False)) - return result + def _no_jump_simulation(self, state, tlist, e_ops, seed=None): + """ + Simulates the no-jump trajectory from the initial state `state0`. + Returns a tuple containing the seed, the `TrajectoryResult` describing + this trajectory, and the trajectory's probability. + Note that a seed for the integrator may be provided, but is expected to + be ignored in the no-jump simulation. 
+ """ + seed, no_jump_result = self._run_one_traj( + seed, state, tlist, e_ops, no_jump=True) + _, state, _ = self._integrator.get_state(copy=False) + no_jump_prob = self._integrator._prob_func(state) - def _run_one_traj(self, seed, state, tlist, e_ops, no_jump=False, - jump_prob_floor=0.0): + no_jump_result.add_absolute_weight(no_jump_prob) + + return seed, no_jump_result, no_jump_prob + + def _run_one_traj(self, seed, state, tlist, e_ops, **integrator_kwargs): """ Run one trajectory and return the result. """ - result = self._initialize_run_one_traj(seed, state, tlist, e_ops, - no_jump=no_jump, - jump_prob_floor=jump_prob_floor) - seed, result = self._integrate_one_traj(seed, tlist, result) + jump_prob_floor = integrator_kwargs.get('jump_prob_floor', 0) + if jump_prob_floor >= 1 - self.options["norm_tol"]: + # The no-jump probability is one, but we are asked to generate + # a trajectory with at least one jump. + # This can happen when a user uses "improved sampling" with a dark + # initial state, or a mixed initial state containing a dark state. + # Our best option is to return a trajectory result containing only + # zeroes. This also ensures that the final multi-trajectory + # result will contain the requested number of trajectories. 
+ zero = qzero_like(self._restore_state(state, copy=False)) + result = self._trajectory_resultclass(e_ops, self.options) + result.collapse = [] + result.add_relative_weight(0) + for t in tlist: + result.add(t, zero) + return seed, result + + seed, result = super()._run_one_traj(seed, state, tlist, e_ops, + **integrator_kwargs) + if jump_prob_floor > 0: + result.add_relative_weight(1 - jump_prob_floor) result.collapse = self._integrator.collapses return seed, result - def run(self, state, tlist, ntraj=1, *, - args=None, e_ops=(), timeout=None, target_tol=None, seeds=None): + def run( + self, + state: Qobj | list[tuple[Qobj, float]], + tlist: ArrayLike, + ntraj: int | list[int] = None, + *, + args: dict[str, Any] = None, + e_ops: EopsLike | list[EopsLike] | dict[Any, EopsLike] = None, + target_tol: float | tuple[float, float] | list[tuple[float, float]] = None, + timeout: float = None, + seeds: int | SeedSequence | list[int | SeedSequence] = None, + ) -> McResult: """ Do the evolution of the Quantum system. - See the overridden method for further details. The modification - here is to sample the no-jump trajectory first. Then, the no-jump - probability is used as a lower-bound for random numbers in future - monte carlo runs + + For a ``state`` at time ``tlist[0]``, do up to ``ntraj`` simulations of + the Monte-Carlo evolution. For each time in ``tlist`` store the state + and/or expectation values in a :class:`.MultiTrajResult`. The evolution + method and stored results are determined by ``options``. + + Parameters + ---------- + state : {:obj:`.Qobj`, list of (:obj:`.Qobj`, float)} + Initial state of the evolution. May be either a pure state or a + statistical ensemble. An ensemble can be provided either as a + density matrix, or as a list of tuples. In the latter case, the + first element of each tuple is a pure state, and the second element + is its weight, i.e., a number between 0 and 1 describing the + fraction of the ensemble in that state. 
The sum of all weights must + be one. + + tlist : list of double + Time for which to save the results (state and/or expect) of the + evolution. The first element of the list is the initial time of the + evolution. Time in the list must be in increasing order, but does + not need to be uniformly distributed. + + ntraj : {int, list of int} + Number of trajectories to add. If the initial state is pure, this + must be single number. If the initial state is a mixed ensemble, + specified as a list of pure states, this parameter may also be a + list of numbers with the same number of entries. It then specifies + the number of trajectories for each pure state. If the initial + state is mixed and this parameter is a single number, it specifies + the total number of trajectories, which are distributed over the + initial ensemble automatically. + + args : dict, optional + Change the ``args`` of the rhs for the evolution. + + e_ops : :obj:`.Qobj`, callable, list or dict, optional + Single operator, or list or dict of operators, for which to + evaluate expectation values. Operator can be Qobj, QobjEvo or + callables with the signature `f(t: float, state: Qobj) -> Any`. + + timeout : float, optional + Maximum time in seconds for the trajectories to run. Once this time + is reached, the simulation will end even if the number + of trajectories is less than ``ntraj``. The map function, set in + options, can interupt the running trajectory or wait for it to + finish. Set to an arbitrary high number to disable. + + target_tol : {float, tuple, list}, optional + Target tolerance of the evolution. The evolution will compute + trajectories until the error on the expectation values is lower + than this tolerance. The maximum number of trajectories employed is + given by ``ntraj``. The error is computed using jackknife + resampling. ``target_tol`` can be an absolute tolerance or a pair + of absolute and relative tolerance, in that order. 
Lastly, it can + be a list of pairs of (atol, rtol) for each e_ops. + + seeds : {int, SeedSequence, list}, optional + Seed or list of seeds for each trajectories. + + Returns + ------- + results : :class:`.McResult` + Results of the evolution. States and/or expect will be saved. You + can control the saved data in the options. If the initial condition + is mixed, the result has additional attributes ``initial_states`` + and ``ntraj_per_initial_state``. + + .. note: + The simulation will end when the first end condition is reached + between ``ntraj``, ``timeout`` and ``target_tol``. If the initial + condition is mixed, ``target_tol`` is not supported. If the initial + condition is mixed, and the end condition is not ``ntraj``, the + results returned by this function should be considered invalid. """ - if not self.options.get("improved_sampling", False): - return super().run(state, tlist, ntraj=ntraj, args=args, - e_ops=e_ops, timeout=timeout, - target_tol=target_tol, seeds=seeds) - stats, seeds, result, map_func, map_kw, state0 = self._initialize_run( - state, - ntraj, - args=args, - e_ops=e_ops, - timeout=timeout, - target_tol=target_tol, - seeds=seeds, + # We process the arguments and pass on to other functions depending on + # whether "improved sampling" is turned on, and whether the initial + # state is mixed. + if isinstance(state, (list, tuple)): + is_mixed = True + else: # state is Qobj, either pure state or dm + if isinstance(ntraj, (list, tuple)): + raise ValueError('The ntraj parameter can only be a list if ' + 'the initial conditions are mixed and given ' + 'in the form of a list of pure states') + is_mixed = state.isoper and not self.rhs.issuper + if is_mixed: + # Mixed state given as density matrix. 
Decompose into list + # format, i.e., into eigenstates and eigenvalues + eigenvalues, eigenstates = state.eigenstates() + state = [(psi, p) for psi, p + in zip(eigenstates, eigenvalues) if p > 0] + + if is_mixed and target_tol is not None: + warnings.warn('Monte Carlo simulations with mixed initial ' + 'state do not support target tolerance') + + # Default value for ntraj: as small as possible + # (2 per init. state for improved sampling, 1 per state otherwise) + if ntraj is None: + if is_mixed: + ntraj = len(state) + else: + ntraj = 1 + if self.options["improved_sampling"]: + ntraj *= 2 + + if not self.options["improved_sampling"]: + if is_mixed: + return super()._run_mixed( + state, tlist, ntraj, args=args, e_ops=e_ops, + timeout=timeout, seeds=seeds) + else: + return super().run( + state, tlist, ntraj, args=args, e_ops=e_ops, + target_tol=target_tol, timeout=timeout, seeds=seeds) + if is_mixed: + return self._run_improved_sampling_mixed( + state, tlist, ntraj, args=args, e_ops=e_ops, + timeout=timeout, seeds=seeds) + return self._run_improved_sampling( + state, tlist, ntraj, args=args, e_ops=e_ops, + target_tol=target_tol, timeout=timeout, seeds=seeds) + + def _run_improved_sampling( + self, state, tlist, ntraj, *, + args, e_ops, target_tol, timeout, seeds): + # Sample the no-jump trajectory first. 
Then, the no-jump probability + # is used as a lower-bound for random numbers in future MC runs + seeds, result, map_func, map_kw, state0 = self._initialize_run( + state, ntraj, args=args, e_ops=e_ops, + timeout=timeout, target_tol=target_tol, seeds=seeds ) + # first run the no-jump trajectory start_time = time() - seed0, no_jump_result = self._run_one_traj(seeds[0], state0, tlist, - e_ops, no_jump=True) - _, state, _ = self._integrator.get_state(copy=False) - no_jump_prob = self._integrator._prob_func(state) - result.no_jump_prob = no_jump_prob - result.add((seed0, no_jump_result)) + seed0, no_jump_traj, no_jump_prob = ( + self._no_jump_simulation(state0, tlist, e_ops, seeds[0])) + result.add((seed0, no_jump_traj)) result.stats['no jump run time'] = time() - start_time # run the remaining trajectories with the random number floor @@ -540,7 +717,8 @@ def run(self, state, tlist, ntraj=1, *, start_time = time() map_func( self._run_one_traj, seeds[1:], - (state0, tlist, e_ops, False, no_jump_prob), + task_args=(state0, tlist, e_ops), + task_kwargs={'no_jump': False, 'jump_prob_floor': no_jump_prob}, reduce_func=result.add, map_kw=map_kw, progress_bar=self.options["progress_bar"], progress_bar_kwargs=self.options["progress_kwargs"] @@ -548,6 +726,74 @@ def run(self, state, tlist, ntraj=1, *, result.stats['run time'] = time() - start_time return result + def _run_improved_sampling_mixed( + self, initial_conditions, tlist, ntraj, *, + args, e_ops, timeout, seeds): + seeds, result, map_func, map_kw, prepared_ics = self._initialize_run( + initial_conditions, np.sum(ntraj), args=args, e_ops=e_ops, + timeout=timeout, seeds=seeds) + + # For improved sampling, we need to run at least 2 trajectories + # per initial state (the no-jump trajectory and one other). 
+ # We reduce `ntraj` by one for each initial state to account for the + # no-jump trajectories + num_states = len(prepared_ics) + if isinstance(ntraj, (list, tuple)): + if len(ntraj) != num_states: + raise ValueError('The length of the `ntraj` list must equal ' + 'the number of states in the initial mixture') + if np.any(np.less(ntraj, 2)): + raise ValueError('For the improved sampling algorithm, at ' + 'least 2 trajectories for each member of the ' + 'initial mixture are required') + ntraj = [n - 1 for n in ntraj] + else: + if ntraj < 2 * num_states: + raise ValueError('For the improved sampling algorithm, at ' + 'least 2 trajectories for each member of the ' + 'initial mixture are required') + ntraj -= num_states + + # Run the no-jump trajectories + start_time = time() + no_jump_results = map_func( + _unpack_arguments(self._no_jump_simulation, ('state', 'seed')), + [(state, seed) for seed, (state, _) in zip(seeds, prepared_ics)], + task_kwargs={'tlist': tlist, 'e_ops': e_ops}, map_kw=map_kw) + if None in no_jump_results: # timeout reached + return result + + # Process results of no-traj runs + no_jump_probs = [] + for (seed, res, prob), (_, weight) in ( + zip(no_jump_results, prepared_ics)): + res.add_relative_weight(weight) + result.add((seed, res)) + no_jump_probs.append(prob) + result.stats['no jump run time'] = time() - start_time + + # Run the remaining trajectories + start_time = time() + ics_info = _InitialConditions(prepared_ics, ntraj) + arguments = [(id, no_jump_probs[ics_info.get_state_index(id)]) + for id in range(ics_info.ntraj_total)] + map_func( + _unpack_arguments(self._run_one_traj_mixed, + ('id', 'jump_prob_floor')), + arguments, + task_kwargs={'seeds': seeds[num_states:], 'ics': ics_info, + 'tlist': tlist, 'e_ops': e_ops, 'no_jump': False}, + reduce_func=result.add, map_kw=map_kw, + progress_bar=self.options["progress_bar"], + progress_bar_kwargs=self.options["progress_kwargs"] + ) + result.stats['run time'] = time() - start_time + 
result.initial_states = [self._restore_state(state, copy=False) + for state, _ in ics_info.state_list] + # add back +1 for the no-jump trajectories: + result.ntraj_per_initial_state = [(n+1) for n in ics_info.ntraj] + return result + def _get_integrator(self): _time_start = time() method = self.options["method"] @@ -557,22 +803,15 @@ def _get_integrator(self): integrator = method else: raise ValueError("Integrator method not supported.") - integrator_instance = integrator(self.system(), self.options) + integrator_instance = integrator(self.rhs(), self.options) mc_integrator = self._mc_integrator_class( - integrator_instance, self.system, self.options + integrator_instance, self.rhs, self.options ) self._init_integrator_time = time() - _time_start return mc_integrator @property - def _resultclass(self): - if self.options.get("improved_sampling", False): - return McResultImprovedSampling - else: - return McResult - - @property - def options(self): + def options(self) -> dict: """ Options for monte carlo solver: @@ -640,7 +879,7 @@ def options(self): return self._options @options.setter - def options(self, new_options): + def options(self, new_options: dict[str, Any]): MultiTrajSolver.options.fset(self, new_options) @classmethod @@ -653,7 +892,7 @@ def avail_integrators(cls): } @classmethod - def CollapseFeedback(cls, default=None): + def CollapseFeedback(cls, default: list = None): """ Collapse of the trajectory argument for time dependent systems. @@ -671,14 +910,19 @@ def CollapseFeedback(cls, default=None): Parameters ---------- - default : callable, default : [] - Default function used outside the solver. + default : list, default : [] + Argument value to use outside of solver. 
""" return _CollapseFeedback(default) @classmethod - def StateFeedback(cls, default=None, raw_data=False, open=False): + def StateFeedback( + cls, + default: Qobj | _data.Data = None, + raw_data: bool = False, + prop: bool = False + ): """ State of the evolution to be used in a time-dependent operator. @@ -705,3 +949,20 @@ def StateFeedback(cls, default=None, raw_data=False, open=False): if raw_data: return _DataFeedback(default, open=open) return _QobjFeedback(default, open=open) + + +class _unpack_arguments: + """ + If `f = _unpack_arguments(func, ('a', 'b'))` + then calling `f((3, 4), ...)` is equivalent to `func(a=3, b=4, ...)`. + + Useful since the map functions in `qutip.parallel` only allow one + of the parameters of the task to be variable. + """ + def __init__(self, func, argument_names): + self.func = func + self.argument_names = argument_names + + def __call__(self, args, **kwargs): + rearranged = dict(zip(self.argument_names, args)) + return self.func(**rearranged, **kwargs) diff --git a/qutip/solver/mesolve.py b/qutip/solver/mesolve.py index caafc5ef1c..43a7c5fa13 100644 --- a/qutip/solver/mesolve.py +++ b/qutip/solver/mesolve.py @@ -3,20 +3,33 @@ equation. """ +# Required for Sphinx to follow autodoc_type_aliases +from __future__ import annotations + __all__ = ['mesolve', 'MESolver'] -import numpy as np +from numpy.typing import ArrayLike +from typing import Any, Callable from time import time -from .. import (Qobj, QobjEvo, isket, liouvillian, ket2dm, lindblad_dissipator) -from ..core import stack_columns, unstack_columns -from ..core.data import to +from .. import (Qobj, QobjEvo, liouvillian, lindblad_dissipator) +from ..typing import EopsLike, QobjEvoLike +from ..core import data as _data from .solver_base import Solver, _solver_deprecation from .sesolve import sesolve, SESolver from ._feedback import _QobjFeedback, _DataFeedback - - -def mesolve(H, rho0, tlist, c_ops=None, e_ops=None, args=None, options=None, - **kwargs): +from . 
import Result + + +def mesolve( + H: QobjEvoLike, + rho0: Qobj, + tlist: ArrayLike, + c_ops: Qobj | QobjEvo | list[QobjEvoLike] = None, + e_ops: EopsLike | list[EopsLike] | dict[Any, EopsLike] = None, + args: dict[str, Any] = None, + options: dict[str, Any] = None, + **kwargs +) -> Result: """ Master equation evolution of a density matrix for a given Hamiltonian and set of collapse operators, or a Liouvillian. @@ -74,11 +87,10 @@ def mesolve(H, rho0, tlist, c_ops=None, e_ops=None, args=None, options=None, Single collapse operator, or list of collapse operators, or a list of Liouvillian superoperators. None is equivalent to an empty list. - e_ops : list of :obj:`.Qobj` / callback function, optional - Single operator or list of operators for which to evaluate - expectation values or callable or list of callable. - Callable signature must be, `f(t: float, state: Qobj)`. - See :func:`expect` for more detail of operator expectation. + e_ops : :obj:`.Qobj`, callable, list or dict, optional + Single operator, or list or dict of operators, for which to evaluate + expectation values. Operator can be Qobj, QobjEvo or callables with the + signature `f(t: float, state: Qobj) -> Any`. args : dict, optional dictionary of parameters for time-dependent Hamiltonians and @@ -95,7 +107,8 @@ def mesolve(H, rho0, tlist, c_ops=None, e_ops=None, args=None, options=None, On `None` the states will be saved if no expectation operators are given. - | normalize_output : bool - | Normalize output state to hide ODE numerical errors. + | Normalize output state to hide ODE numerical errors. Only normalize + the state if the initial state is already normalized. - | progress_bar : str {'text', 'enhanced', 'tqdm', ''} | How to present the solver progress. 'tqdm' uses the python module of the same name and raise an error @@ -182,7 +195,7 @@ class MESolver(SESolver): Diverse diagnostic statistics of the evolution. 
""" name = "mesolve" - _avail_integrators = {} + _avail_integrators: dict[str, object] = {} solver_options = { "progress_bar": "", "progress_kwargs": {"chunk_size":10}, @@ -192,7 +205,13 @@ class MESolver(SESolver): 'method': 'adams', } - def __init__(self, H, c_ops=None, *, options=None): + def __init__( + self, + H: Qobj | QobjEvo, + c_ops: Qobj | QobjEvo | list[Qobj | QobjEvo] = None, + *, + options: dict = None, + ): _time_start = time() if not isinstance(H, (Qobj, QobjEvo)): @@ -220,7 +239,12 @@ def _initialize_stats(self): return stats @classmethod - def StateFeedback(cls, default=None, raw_data=False, prop=False): + def StateFeedback( + cls, + default: Qobj | _data.Data = None, + raw_data: bool = False, + prop: bool = False + ): """ State of the evolution to be used in a time-dependent operator. diff --git a/qutip/solver/multitraj.py b/qutip/solver/multitraj.py index 3ca7930a96..2c4fe80d1a 100644 --- a/qutip/solver/multitraj.py +++ b/qutip/solver/multitraj.py @@ -1,23 +1,31 @@ -from .result import Result, MultiTrajResult +# Required for Sphinx to follow autodoc_type_aliases +from __future__ import annotations + +from .result import TrajectoryResult +from .multitrajresult import MultiTrajResult from .parallel import _get_map from time import time from .solver_base import Solver -from ..core import QobjEvo +from ..core import QobjEvo, Qobj import numpy as np +from numpy.typing import ArrayLike +from numpy.random import SeedSequence +from numbers import Number +from typing import Any, Callable +import bisect +from operator import itemgetter + __all__ = ["MultiTrajSolver"] -class _MTSystem: +class _MultiTrajRHS: """ Container for the operators of the solver. 
""" def __init__(self, rhs): self.rhs = rhs - def __call__(self): - return self.rhs - def arguments(self, args): self.rhs.arguments(args) @@ -25,6 +33,8 @@ def _register_feedback(self, type, val): pass def __getattr__(self, attr): + if attr == "rhs": + raise AttributeError if hasattr(self.rhs, attr): return getattr(self.rhs, attr) raise AttributeError @@ -51,7 +61,7 @@ class MultiTrajSolver(Solver): """ name = "generic multi trajectory" _resultclass = MultiTrajResult - _trajectory_resultclass = Result + _trajectory_resultclass = TrajectoryResult _avail_integrators = {} # Class of option used by the solver @@ -71,19 +81,18 @@ class MultiTrajSolver(Solver): def __init__(self, rhs, *, options=None): if isinstance(rhs, QobjEvo): - self.system = _MTSystem(rhs) - elif isinstance(rhs, _MTSystem): - self.system = rhs + self.rhs = _MultiTrajRHS(rhs) + elif isinstance(rhs, _MultiTrajRHS): + self.rhs = rhs else: raise TypeError("The system should be a QobjEvo") - self.rhs = self.system() self.options = options self.seed_sequence = np.random.SeedSequence() self._integrator = self._get_integrator() self._state_metadata = {} self.stats = self._initialize_stats() - def start(self, state, t0, seed=None): + def start(self, state0: Qobj, t0: float, seed: int | SeedSequence = None): """ Set the initial state and time for a step evolution. @@ -106,9 +115,11 @@ def start(self, state, t0, seed=None): """ seeds = self._read_seed(seed, 1) generator = self._get_generator(seeds[0]) - self._integrator.set_state(t0, self._prepare_state(state), generator) + self._integrator.set_state(t0, self._prepare_state(state0), generator) - def step(self, t, *, args=None, copy=True): + def step( + self, t: float, *, args: dict[str, Any] = None, copy: bool = True + ) -> Qobj: """ Evolve the state to ``t`` and return the state as a :obj:`.Qobj`. 
@@ -148,19 +159,32 @@ def _initialize_run(self, state, ntraj=1, args=None, e_ops=(), 'timeout': timeout, 'num_cpus': self.options['num_cpus'], }) - state0 = self._prepare_state(state) + if isinstance(state, (list, tuple)): # mixed initial conditions + state0 = [(self._prepare_state(psi), p) for psi, p in state] + else: + state0 = self._prepare_state(state) stats['preparation time'] += time() - start_time - return stats, seeds, result, map_func, map_kw, state0 - - def run(self, state, tlist, ntraj=1, *, - args=None, e_ops=(), timeout=None, target_tol=None, seeds=None): + return seeds, result, map_func, map_kw, state0 + + def run( + self, + state: Qobj, + tlist: ArrayLike, + ntraj: int = 1, + *, + args: dict[str, Any] = None, + e_ops: dict[Any, Qobj | QobjEvo | Callable[[float, Qobj], Any]] = None, + target_tol: float | tuple[float, float] | list[tuple[float, float]] = None, + timeout: float = None, + seeds: int | SeedSequence | list[int | SeedSequence] = None, + ) -> MultiTrajResult: """ Do the evolution of the Quantum system. For a ``state`` at time ``tlist[0]`` do the evolution as directed by ``rhs`` and for each time in ``tlist`` store the state and/or - expectation values in a :class:`.Result`. The evolution method and - stored results are determined by ``options``. + expectation values in a :class:`.MultiTrajResult`. The evolution method + and stored results are determined by ``options``. Parameters ---------- @@ -213,7 +237,7 @@ def run(self, state, tlist, ntraj=1, *, The simulation will end when the first end condition is reached between ``ntraj``, ``timeout`` and ``target_tol``. 
""" - stats, seeds, result, map_func, map_kw, state0 = self._initialize_run( + seeds, result, map_func, map_kw, state0 = self._initialize_run( state, ntraj, args=args, @@ -233,26 +257,102 @@ def run(self, state, tlist, ntraj=1, *, result.stats['run time'] = time() - start_time return result - def _initialize_run_one_traj(self, seed, state, tlist, e_ops): + def _initialize_run_one_traj(self, seed, state, tlist, e_ops, + **integrator_kwargs): result = self._trajectory_resultclass(e_ops, self.options) - generator = self._get_generator(seed) - self._integrator.set_state(tlist[0], state, generator) + if "generator" in integrator_kwargs: + generator = integrator_kwargs.pop("generator") + else: + generator = self._get_generator(seed) + self._integrator.set_state(tlist[0], state, generator, + **integrator_kwargs) result.add(tlist[0], self._restore_state(state, copy=False)) return result - def _run_one_traj(self, seed, state, tlist, e_ops): + def _run_one_traj(self, seed, state, tlist, e_ops, **integrator_kwargs): """ Run one trajectory and return the result. """ - result = self._initialize_run_one_traj(seed, state, tlist, e_ops) + result = self._initialize_run_one_traj(seed, state, tlist, e_ops, + **integrator_kwargs) return self._integrate_one_traj(seed, tlist, result) def _integrate_one_traj(self, seed, tlist, result): - for t in tlist[1:]: - t, state = self._integrator.integrate(t, copy=False) + for t, state in self._integrator.run(tlist): result.add(t, self._restore_state(state, copy=False)) return seed, result + def _run_one_traj_mixed(self, id, seeds, ics, + tlist, e_ops, **integrator_kwargs): + """ + The serial number `id` identifies which seed and which initial state to + use for running one trajectory. 
+ """ + seed = seeds[id] + state, weight = ics.get_state_and_weight(id) + + seed, result = self._run_one_traj(seed, state, tlist, e_ops, + **integrator_kwargs) + + if weight != 1: + result.add_relative_weight(weight) + return seed, result + + def _run_mixed( + self, + initial_conditions: list[tuple[Qobj, float]], + tlist: ArrayLike, + ntraj: int | list[int], + *, + args: dict[str, Any] = None, + e_ops: dict[Any, Qobj | QobjEvo | Callable[[float, Qobj], Any]] = None, + timeout: float = None, + seeds: int | SeedSequence | list[int | SeedSequence] = None, + ) -> MultiTrajResult: + """ + Subclasses can use this method to allow simulations with a mixed + initial state. The following parameters differ from the `run` method: + + Parameters + ---------- + initial_conditions : list of (:obj:`.Qobj`, float) + Statistical ensemble at the beginning of the evolution. The first + element of each tuple is a state contributing to the mixture, and + the second element is its weight, i.e., a number between 0 and 1 + describing the fraction of the ensemble in that state. The sum of + all weights is assumed to be one. + + ntraj : {int, list of int} + Number of trajectories to add. If a single number is provided, this + will be the total number of trajectories, which are distributed + over the initial ensemble automatically. This parameter may also be + a list of numbers with the same number of entries as in + `initial_conditions`, specifying the number of trajectories for + each initial state explicitly. + + .. note: + The simulation will end when the first end condition is reached + between ``ntraj`` and ``timeout``. Setting a target tolerance is + not supported with mixed initial conditions. 
+ """ + seeds, result, map_func, map_kw, prepared_ics = self._initialize_run( + initial_conditions, np.sum(ntraj), args=args, e_ops=e_ops, + timeout=timeout, seeds=seeds) + ics_info = _InitialConditions(prepared_ics, ntraj) + start_time = time() + map_func( + self._run_one_traj_mixed, range(len(seeds)), + (seeds, ics_info, tlist, e_ops), + reduce_func=result.add, map_kw=map_kw, + progress_bar=self.options["progress_bar"], + progress_bar_kwargs=self.options["progress_kwargs"] + ) + result.stats['run time'] = time() - start_time + result.initial_states = [self._restore_state(state, copy=False) + for state, _ in ics_info.state_list] + result.ntraj_per_initial_state = list(ics_info.ntraj) + return result + def _read_seed(self, seed, ntraj): """ Read user provided seed(s) and produce one for each trajectory. @@ -278,7 +378,8 @@ def _read_seed(self, seed, ntraj): def _argument(self, args): """Update the args, for the `rhs` and `c_ops` and other operators.""" if args: - self.system.arguments(args) + self.rhs.arguments(args) + self._integrator.arguments(args) def _get_generator(self, seed): """ @@ -286,14 +387,131 @@ def _get_generator(self, seed): If the ``seed`` has a ``random`` method, it will be used as the generator. """ - if hasattr(seed, 'random'): - # We check for the method, not the type to accept pseudo non-random - # generator for debug/testing purpose. - return seed - if self.options['bitgenerator']: bit_gen = getattr(np.random, self.options['bitgenerator']) generator = np.random.Generator(bit_gen(seed)) else: generator = np.random.default_rng(seed) return generator + + +class _InitialConditions: + """ + Information about mixed initial conditions, and the number of trajectories + to be used for for each state in the mixed ensemble. + + Parameters + ---------- + state_list : list of (:obj:`.Qobj`, float) + A list of tuples (state, weight). We assume that all weights add up to + one. 
+ ntraj : {int, list of int} + This parameter may be either the total number of trajectories, or a + list specifying the number of trajectories to be used per state. In the + former case, a list of trajectory numbers is generated such that the + fraction of trajectories for a given state approximates its weight as + well as possible, under the following constraints: + 1. the total number of trajectories is exactly `ntraj` + 2. there is at least one trajectory per initial state + + Attributes + ---------- + state_list : list of (:obj:`.Qobj`, float) + The provided list of states + ntraj : list of int + The number of trajectories to be used per state + ntraj_total : int + The total number of trajectories + """ + def __init__(self, + state_list: list[tuple[Qobj, float]], + ntraj: int | list[int]): + if not isinstance(ntraj, (list, tuple)): + ntraj = self._minimum_roundoff_ensemble(state_list, ntraj) + + self.state_list = state_list + self.ntraj = ntraj + self._state_selector = np.cumsum(ntraj) + self.ntraj_total = self._state_selector[-1] + + def _minimum_roundoff_ensemble(self, state_list, ntraj_total): + """ + Calculate a list ntraj from the given total number, under contraints + explained above. Algorithm based on https://stackoverflow.com/a/792490 + """ + # First we throw out zero-weight states + filtered_states = [(index, weight) + for index, (_, weight) in enumerate(state_list) + if weight > 0] + if len(filtered_states) > ntraj_total: + raise ValueError(f'{ntraj_total} trajectories is not enough for ' + f'initial mixture of {len(filtered_states)} ' + 'states') + + # If the trajectory count of a state reaches one, that is final. + # Here we store the indices of the states with only one trajectory. + one_traj_states = [] + + # All other states are kept here. This is a list of + # (state index, target weight = w, + # current traj number = n, n / (w * ntraj_total) = r) + # sorted by the last entry. 
We first make a too large guess for n, + # then take away trajectories from the states with largest r + under_consideration = [] + + current_total = 0 + for index, weight in filtered_states: + guess = int(np.ceil(weight * ntraj_total)) + current_total += guess + if guess == 1: + one_traj_states.append(index) + else: + ratio = guess / (weight * ntraj_total) + bisect.insort(under_consideration, + (index, weight, guess, ratio), + key=itemgetter(3)) + + while current_total > ntraj_total: + index, weight, guess, ratio = under_consideration.pop() + guess -= 1 + current_total -= 1 + if guess == 1: + one_traj_states.append(index) + else: + ratio = guess / (weight * ntraj_total) + bisect.insort(under_consideration, + (index, weight, guess, ratio), + key=itemgetter(3)) + + # Finally we arrange the results in a list of ntraj + ntraj = [0] * len(state_list) + for index in one_traj_states: + ntraj[index] = 1 + for index, _, count, _ in under_consideration: + ntraj[index] = count + return ntraj + + def get_state_index(self, id): + """ + For the trajectory id (0 <= id < total_ntraj), returns the index of the + corresponding initial state in the `state_list`. + """ + state_index = bisect.bisect(self._state_selector, id) + if id < 0 or state_index >= len(self.state_list): + raise IndexError(f'State id {id} must be smaller than number of ' + f'trajectories {self.ntraj_total}') + return state_index + + def get_state_and_weight(self, id): + """ + For the trajectory id (0 <= id < total_ntraj), returns the + corresponding initial state and a correction weight such that + correction_weight * (ntraj / ntraj_total) = weight + where ntraj is the number of trajectories used with this initial state + and weight the initially provided weight of the state in the ensemble. 
+ """ + state_index = self.get_state_index(id) + state, target_weight = self.state_list[state_index] + state_frequency = self.ntraj[state_index] / self.ntraj_total + correction_weight = target_weight / state_frequency + return state, correction_weight diff --git a/qutip/solver/multitrajresult.py b/qutip/solver/multitrajresult.py new file mode 100644 index 0000000000..96ea4e8fdd --- /dev/null +++ b/qutip/solver/multitrajresult.py @@ -0,0 +1,1206 @@ +""" +This module provides result classes for multi-trajectory solvers. +Note that single trajectories are described by regular `Result` objects from the +`qutip.solver.result` module. +""" + +from typing import TypedDict +import numpy as np + +from copy import copy + +from .result import _BaseResult, TrajectoryResult +from ..core import qzero_like + +__all__ = [ + "MultiTrajResult", + "McResult", + "NmmcResult", +] + + +class MultiTrajResultOptions(TypedDict): + store_states: bool + store_final_state: bool + keep_runs_results: bool + + +class MultiTrajResult(_BaseResult): + """ + Base class for storing results for solver using multiple trajectories. + + Parameters + ---------- + e_ops : :obj:`.Qobj`, :obj:`.QobjEvo`, function or list or dict of these + The ``e_ops`` parameter defines the set of values to record at + each time step ``t``. If an element is a :obj:`.Qobj` or + :obj:`.QobjEvo` the value recorded is the expectation value of that + operator given the state at ``t``. If the element is a function, ``f``, + the value recorded is ``f(t, state)``. + + The values are recorded in the ``.expect`` attribute of this result + object. ``.expect`` is a list, where each item contains the values + of the corresponding ``e_op``. + + Function ``e_ops`` must return a number so the average can be computed. + + options : dict + The options for this result class. + + solver : str or None + The name of the solver generating these results. + + stats : dict or None + The stats generated by the solver while producing these results. 
Note + that the solver may update the stats directly while producing results. + + kw : dict + Additional parameters specific to a result sub-class. + + Attributes + ---------- + times : list + A list of the times at which the expectation values and states were + recorded. + + average_states : list of :obj:`.Qobj` + The state at each time ``t`` (if the recording of the state was + requested) averaged over all trajectories as a density matrix. + + runs_states : list of list of :obj:`.Qobj` + The state for each trajectory and each time ``t`` (if the recording of + the states and trajectories was requested) + + final_state : :obj:`.Qobj`: + The final state (if the recording of the final state was requested) + averaged over all trajectories as a density matrix. + + runs_final_state : list of :obj:`.Qobj` + The final state for each trajectory (if the recording of the final + state and trajectories was requested). + + average_expect : list of array of expectation values + A list containing the values of each ``e_op`` averaged over each + trajectories. The list is in the same order in which the ``e_ops`` were + supplied and empty if no ``e_ops`` were given. + + Each element is itself an array and contains the values of the + corresponding ``e_op``, with one value for each time in ``.times``. + + std_expect : list of array of expectation values + A list containing the standard derivation of each ``e_op`` over each + trajectories. The list is in the same order in which the ``e_ops`` were + supplied and empty if no ``e_ops`` were given. + + Each element is itself an array and contains the values of the + corresponding ``e_op``, with one value for each time in ``.times``. + + runs_expect : list of array of expectation values + A list containing the values of each ``e_op`` for each trajectories. + The list is in the same order in which the ``e_ops`` were + supplied and empty if no ``e_ops`` were given. Only available if the + storing of trajectories was requested. 
+ + The order of the elements is ``runs_expect[e_ops][trajectory][time]``. + + Each element is itself an array and contains the values of the + corresponding ``e_op``, with one value for each time in ``.times``. + + average_e_data : dict + A dictionary containing the values of each ``e_op`` averaged over each + trajectories. If the ``e_ops`` were supplied as a dictionary, the keys + are the same as in that dictionary. Otherwise the keys are the index of + the ``e_op`` in the ``.expect`` list. + + The lists of expectation values returned are the *same* lists as + those returned by ``.expect``. + + average_e_data : dict + A dictionary containing the standard derivation of each ``e_op`` over + each trajectories. If the ``e_ops`` were supplied as a dictionary, the + keys are the same as in that dictionary. Otherwise the keys are the + index of the ``e_op`` in the ``.expect`` list. + + The lists of expectation values returned are the *same* lists as + those returned by ``.expect``. + + runs_e_data : dict + A dictionary containing the values of each ``e_op`` for each + trajectories. If the ``e_ops`` were supplied as a dictionary, the keys + are the same as in that dictionary. Otherwise the keys are the index of + the ``e_op`` in the ``.expect`` list. Only available if the storing + of trajectories was requested. + + The order of the elements is ``runs_expect[e_ops][trajectory][time]``. + + The lists of expectation values returned are the *same* lists as + those returned by ``.expect``. + + runs_weights : list + For each trajectory, the weight with which that trajectory enters + averages. + + solver : str or None + The name of the solver generating these results. + + stats : dict or None + The stats generated by the solver while producing these results. + + options : :obj:`~SolverResultsOptions` + The options for this result class. 
+ """ + + options: MultiTrajResultOptions + + def __init__( + self, e_ops, options: MultiTrajResultOptions, *, + solver=None, stats=None, **kw, + ): + super().__init__(options, solver=solver, stats=stats) + self._raw_ops = self._e_ops_to_dict(e_ops) + + self.trajectories = [] + self.num_trajectories = 0 + self.seeds = [] + + self.average_e_data = {} + self.std_e_data = {} + if self.options["keep_runs_results"]: + self.runs_e_data = {k: [] for k in self._raw_ops} + else: + self.runs_e_data = {} + + # Will be initialized at the first trajectory + self.times = None + self.e_ops = None + + # We separate all sums into terms of trajectories with specified + # absolute weight (_abs) or without (_rel). They will be initialized + # when the first trajectory of the respective type is added. + self._sum_rel = None + self._sum_abs = None + # Number of trajectories without specified absolute weight + self._num_rel_trajectories = 0 + # Needed for merging results + self._weight_info = [] + # Needed for target tolerance computation + self._total_abs_weight = np.array(0) + + self._post_init(**kw) + + @property + def _store_average_density_matrices(self) -> bool: + return ( + self.options["store_states"] + or (self.options["store_states"] is None and self._raw_ops == {}) + ) and not self.options["keep_runs_results"] + + @property + def _store_final_density_matrix(self) -> bool: + return ( + self.options["store_final_state"] + and not self._store_average_density_matrices + and not self.options["keep_runs_results"] + ) + + def _add_first_traj(self, trajectory): + """ + Read the first trajectory, intitializing needed data. 
+ """ + self.times = trajectory.times + self.e_ops = trajectory.e_ops + + def _store_trajectory(self, trajectory): + self.trajectories.append(trajectory) + + def _store_weight_info(self, trajectory): + if trajectory.has_absolute_weight: + self._total_abs_weight = ( + self._total_abs_weight + trajectory.total_weight) + if len(self.trajectories) == 0: + # store weight info only if trajectories are not stored + self._weight_info.append( + (trajectory.total_weight, trajectory.has_absolute_weight)) + + def _reduce_states(self, trajectory): + if trajectory.has_absolute_weight: + self._sum_abs.reduce_states(trajectory) + else: + self._sum_rel.reduce_states(trajectory) + + def _reduce_final_state(self, trajectory): + if trajectory.has_absolute_weight: + self._sum_abs.reduce_final_state(trajectory) + else: + self._sum_rel.reduce_final_state(trajectory) + + def _reduce_expect(self, trajectory): + """ + Compute the average of the expectation values and store it in it's + multiple formats. + """ + if trajectory.has_absolute_weight: + self._sum_abs.reduce_expect(trajectory) + else: + self._sum_rel.reduce_expect(trajectory) + + self._create_e_data() + + if self.runs_e_data: + for k in self._raw_ops: + self.runs_e_data[k].append(trajectory.e_data[k]) + + def _create_e_data(self): + for i, k in enumerate(self._raw_ops): + avg = 0 + avg2 = 0 + if self._sum_abs: + avg += self._sum_abs.sum_expect[i] + avg2 += self._sum_abs.sum2_expect[i] + if self._sum_rel: + avg += ( + self._sum_rel.sum_expect[i] / self._num_rel_trajectories + ) + avg2 += ( + self._sum_rel.sum2_expect[i] / self._num_rel_trajectories + ) + + self.average_e_data[k] = list(avg) + # mean(expect**2) - mean(expect)**2 can something be very small + # negative (-1e-15) which raise an error for float sqrt. 
+ self.std_e_data[k] = list(np.sqrt(np.abs(avg2 - np.abs(avg**2)))) + + def _increment_traj(self, trajectory): + if self.num_trajectories == 0: + self._add_first_traj(trajectory) + + if trajectory.has_absolute_weight: + if self._sum_abs is None: + self._sum_abs = _TrajectorySum( + trajectory, + self._store_average_density_matrices, + self._store_final_density_matrix) + else: + self._num_rel_trajectories += 1 + if self._sum_rel is None: + self._sum_rel = _TrajectorySum( + trajectory, + self._store_average_density_matrices, + self._store_final_density_matrix) + + self.num_trajectories += 1 + + def _no_end(self): + """ + Remaining number of trajectories needed to finish cannot be determined + by this object. + """ + return np.inf + + def _fixed_end(self): + """ + Finish at a known number of trajectories. + """ + ntraj_left = self._target_ntraj - self.num_trajectories + if ntraj_left == 0: + self.stats["end_condition"] = "ntraj reached" + return ntraj_left + + def _average_computer(self): + avg = np.array(self._sum_rel.sum_expect) / self._num_rel_trajectories + avg2 = np.array(self._sum_rel.sum2_expect) / self._num_rel_trajectories + return avg, avg2 + + def _target_tolerance_end(self): + """ + Compute the error on the expectation values using jackknife resampling. + Return the approximate number of trajectories needed to have this + error within the tolerance fot all e_ops and times. + """ + if self.num_trajectories >= self._target_ntraj: + # First make sure that "ntraj" setting is always respected + self.stats["end_condition"] = "ntraj reached" + return 0 + + if self._num_rel_trajectories <= 1: + return np.inf + avg, avg2 = self._average_computer() + target = np.array( + [ + atol + rtol * mean + for mean, (atol, rtol) in zip(avg, self._target_tols) + ] + ) + + one = np.array(1) + if self._num_rel_trajectories < self.num_trajectories: + # We only include traj. without abs. weights in this calculation. + # Since there are traj. with abs. 
weights., the weights don't add + # up to one. We have to consider that as follows: + # err = (std * **2 / (N-1)) ** 0.5 + # avg = + # avg2 = + # std * **2 = ( - **2) * **2 + # = avg2 * - avg**2 + # and "" is one minus the sum of all absolute weights + one = one - self._total_abs_weight + + std = avg2 * one - abs(avg)**2 + target_ntraj = np.max(std / target**2) + 1 + self._estimated_ntraj = min(target_ntraj - self._num_rel_trajectories, + self._target_ntraj - self.num_trajectories) + if self._estimated_ntraj <= 0: + self.stats["end_condition"] = "target tolerance reached" + return self._estimated_ntraj + + def _post_init(self): + self._target_ntraj = None + self._target_tols = None + self._early_finish_check = self._no_end + + self.add_processor(self._increment_traj) + store_trajectory = self.options["keep_runs_results"] + if store_trajectory: + self.add_processor(self._store_trajectory) + if self._store_average_density_matrices: + self.add_processor(self._reduce_states) + if self._store_final_density_matrix: + self.add_processor(self._reduce_final_state) + if self._raw_ops: + self.add_processor(self._reduce_expect) + self.add_processor(self._store_weight_info) + + self.stats["end_condition"] = "unknown" + + def add(self, trajectory_info): + """ + Add a trajectory to the evolution. + + Trajectories can be saved or average canbe extracted depending on the + options ``keep_runs_results``. + + Parameters + ---------- + trajectory_info : tuple of seed and trajectory + - seed: int, SeedSequence + Seed used to generate the trajectory. + - trajectory : :class:`Result` + Run result for one evolution over the times. + + Returns + ------- + remaing_traj : number + Return the number of trajectories still needed to reach the target + tolerance. If no tolerance is provided, return infinity. 
+ """ + seed, trajectory = trajectory_info + self.seeds.append(seed) + + if not isinstance(trajectory, TrajectoryResult): + trajectory.has_weight = False + trajectory.has_absolute_weight = False + trajectory.has_time_dependent_weight = False + trajectory.total_weight = 1 + + for op in self._state_processors: + op(trajectory) + + return self._early_finish_check() + + def add_end_condition(self, ntraj, target_tol=None): + """ + Set the condition to stop the computing trajectories when the certain + condition are fullfilled. + Supported end condition for multi trajectories computation are: + + - Reaching a number of trajectories. + - Error bar on the expectation values reach smaller than a given + tolerance. + + Parameters + ---------- + ntraj : int + Number of trajectories expected. + + target_tol : float, array_like, [optional] + Target tolerance of the evolution. The evolution will compute + trajectories until the error on the expectation values is lower + than this tolerance. The error is computed using jackknife + resampling. ``target_tol`` can be an absolute tolerance, a pair of + absolute and relative tolerance, in that order. Lastly, it can be a + list of pairs of (atol, rtol) for each e_ops. + + Error estimation is done with jackknife resampling. 
+ """ + self._target_ntraj = ntraj + self.stats["end_condition"] = "timeout" + + if target_tol is None: + self._early_finish_check = self._fixed_end + return + + num_e_ops = len(self._raw_ops) + + if not num_e_ops: + raise ValueError("Cannot target a tolerance without e_ops") + + self._estimated_ntraj = ntraj + + targets = np.array(target_tol) + if targets.ndim == 0: + self._target_tols = np.array([(target_tol, 0.0)] * num_e_ops) + elif targets.shape == (2,): + self._target_tols = np.ones((num_e_ops, 2)) * targets + elif targets.shape == (num_e_ops, 2): + self._target_tols = targets + else: + raise ValueError( + "target_tol must be a number, a pair of (atol, " + "rtol) or a list of (atol, rtol) for each e_ops" + ) + + self._early_finish_check = self._target_tolerance_end + + @property + def runs_states(self): + """ + States of every runs as ``states[run][t]``. + """ + if self.trajectories and self.trajectories[0].states: + return [traj.states for traj in self.trajectories] + else: + return None + + @property + def average_states(self): + """ + States averages as density matrices. 
+ """ + + trajectory_states_available = (self.trajectories and + self.trajectories[0].states) + need_to_reduce_states = False + if self._sum_abs and not self._sum_abs.sum_states: + if not trajectory_states_available: + return None + self._sum_abs._initialize_sum_states(self.trajectories[0]) + need_to_reduce_states = True + if self._sum_rel and not self._sum_rel.sum_states: + if not trajectory_states_available: + return None + self._sum_rel._initialize_sum_states(self.trajectories[0]) + need_to_reduce_states = True + if need_to_reduce_states: + for trajectory in self.trajectories: + self._reduce_states(trajectory) + + if self._sum_abs and self._sum_rel: + return [a + r / self._num_rel_trajectories for a, r in zip( + self._sum_abs.sum_states, self._sum_rel.sum_states) + ] + if self._sum_rel: + return [r / self._num_rel_trajectories + for r in self._sum_rel.sum_states] + return self._sum_abs.sum_states + + @property + def states(self): + """ + Runs final states if available, average otherwise. + """ + return self.runs_states or self.average_states + + @property + def runs_final_states(self): + """ + Last states of each trajectories. + """ + if self.trajectories and self.trajectories[0].final_state: + return [traj.final_state for traj in self.trajectories] + else: + return None + + @property + def average_final_state(self): + """ + Last states of each trajectories averaged into a density matrix. 
+ """ + trajectory_states_available = (self.trajectories and + self.trajectories[0].final_state) + states = self.average_states + need_to_reduce_states = False + if self._sum_abs and not self._sum_abs.sum_final_state: + if not (trajectory_states_available or states): + return None + need_to_reduce_states = True + + if self._sum_rel and not self._sum_rel.sum_final_state: + if not (trajectory_states_available or states): + return None + need_to_reduce_states = True + + if need_to_reduce_states and states: + return states[-1] + elif need_to_reduce_states: + if self._sum_abs: + self._sum_abs._initialize_sum_finalstate(self.trajectories[0]) + if self._sum_rel: + self._sum_rel._initialize_sum_finalstate(self.trajectories[0]) + for trajectory in self.trajectories: + self._reduce_final_state(trajectory) + + if self._sum_abs and self._sum_rel: + return (self._sum_abs.sum_final_state + + self._sum_rel.sum_final_state / self._num_rel_trajectories) + if self._sum_rel: + return self._sum_rel.sum_final_state / self._num_rel_trajectories + return self._sum_abs.sum_final_state + + @property + def final_state(self): + """ + Runs final states if available, average otherwise. 
+ """ + return self.runs_final_states or self.average_final_state + + @property + def average_expect(self): + return [np.array(val) for val in self.average_e_data.values()] + + @property + def std_expect(self): + return [np.array(val) for val in self.std_e_data.values()] + + @property + def runs_expect(self): + return [np.array(val) for val in self.runs_e_data.values()] + + @property + def expect(self): + return [np.array(val) for val in self.e_data.values()] + + @property + def e_data(self): + return self.runs_e_data or self.average_e_data + + @property + def runs_weights(self): + result = [] + if self._weight_info: + for w, isabs in self._weight_info: + result.append(w if isabs else w / self._num_rel_trajectories) + else: + for traj in self.trajectories: + w = traj.total_weight + isabs = traj.has_absolute_weight + result.append(w if isabs else w / self._num_rel_trajectories) + return result + + def steady_state(self, N=0): + """ + Average the states of the last ``N`` times of every runs as a density + matrix. Should converge to the steady state in the right circumstances. + + Parameters + ---------- + N : int [optional] + Number of states from the end of ``tlist`` to average. Per default + all states will be averaged. 
+ """ + N = int(N) or len(self.times) + N = len(self.times) if N > len(self.times) else N + states = self.average_states + if states is not None: + return sum(states[-N:]) / N + else: + return None + + def __repr__(self): + lines = [ + f"<{self.__class__.__name__}", + f" Solver: {self.solver}", + ] + if self.stats: + lines.append(" Solver stats:") + lines.extend(f" {k}: {v!r}" for k, v in self.stats.items()) + if self.times: + lines.append( + f" Time interval: [{self.times[0]}, {self.times[-1]}]" + f" ({len(self.times)} steps)" + ) + lines.append(f" Number of e_ops: {len(self.e_data)}") + if self.states: + lines.append(" States saved.") + elif self.final_state is not None: + lines.append(" Final state saved.") + else: + lines.append(" State not saved.") + lines.append(f" Number of trajectories: {self.num_trajectories}") + if self.trajectories: + lines.append(" Trajectories saved.") + else: + lines.append(" Trajectories not saved.") + lines.append(">") + return "\n".join(lines) + + def merge(self, other, p=None): + r""" + Merges two multi-trajectory results. + + If this result represent an ensemble :math:`\rho`, and `other` + represents an ensemble :math:`\rho'`, then the merged result + represents the ensemble + + .. math:: + \rho_{\mathrm{merge}} = p \rho + (1 - p) \rho' + + where p is a parameter between 0 and 1. Its default value is + :math:`p_{\textrm{def}} = N / (N + N')`, N and N' being the number of + trajectories in the two result objects. + + Parameters + ---------- + other : MultiTrajResult + The multi-trajectory result to merge with this one + p : float [optional] + The relative weight of this result in the combination. By default, + will be chosen such that all trajectories contribute equally + to the merged result. 
+ """ + if not isinstance(other, MultiTrajResult): + return NotImplemented + if self._raw_ops != other._raw_ops: + raise ValueError("Shared `e_ops` is required to merge results") + if self.times != other.times: + raise ValueError("Shared `times` are is required to merge results") + + new = self.__class__( + self._raw_ops, self.options, solver=self.solver, stats=self.stats + ) + new.times = self.times + new.e_ops = self.e_ops + + if bool(self.trajectories) != bool(other.trajectories): + # ensure the states are reduced. + if self.trajectories: + self.average_states + self.average_final_state + else: + other.average_states + other.average_final_state + + new.num_trajectories = self.num_trajectories + other.num_trajectories + new._num_rel_trajectories = (self._num_rel_trajectories + + other._num_rel_trajectories) + new.seeds = self.seeds + other.seeds + + p_equal = self._num_rel_trajectories / new._num_rel_trajectories + if p is None: + p = self.num_trajectories / new.num_trajectories + + if self.trajectories and other.trajectories: + new.trajectories = self._merge_trajectories(other, p, p_equal) + else: + new._weight_info = self._merge_weight_info(other, p, p_equal) + new.trajectories = [] + new.options["keep_runs_results"] = False + new.runs_e_data = {} + + self_states = self.options["store_states"] + self_fstate = self.options["store_final_state"] + other_states = other.options["store_states"] + other_fstate = other.options["store_final_state"] + + new.options["store_states"] = self_states and other_states + + new.options["store_final_state"] = ( + (self_fstate or self_states) and (other_fstate or other_states) + ) + + new._sum_abs = _TrajectorySum.merge( + self._sum_abs, p, other._sum_abs, 1 - p) + new._sum_rel = _TrajectorySum.merge( + self._sum_rel, p / p_equal, + other._sum_rel, (1 - p) / (1 - p_equal)) + + new._create_e_data() + + if self.runs_e_data and other.runs_e_data: + for k in self._raw_ops: + new.runs_e_data[k] = self.runs_e_data[k] + 
other.runs_e_data[k] + + new.stats["run time"] += other.stats["run time"] + new.stats["end_condition"] = "Merged results" + + return new + + def _merge_weight(self, p, p_equal, isabs): + """ + Merging two result objects can make the trajectories pick up + merge weights. In order to have + rho_merge = p * rho1 + (1-p) * rho2, + the merge weights must be as defined here. The merge weight depends on + whether that trajectory has an absolute weight (`isabs`). The parameter + `p_equal` is the value of p where all trajectories contribute equally. + """ + if isabs: + return p + return p / p_equal + + def _merge_weight_info(self, other, p, p_equal): + new_weight_info = [] + + if self._weight_info: + for w, isabs in self._weight_info: + new_weight_info.append( + (w * self._merge_weight(p, p_equal, isabs), isabs) + ) + else: + for traj in self.trajectories: + w = traj.total_weight + isabs = traj.has_absolute_weight + new_weight_info.append( + (w * self._merge_weight(p, p_equal, isabs), isabs) + ) + + if other._weight_info: + for w, isabs in other._weight_info: + new_weight_info.append( + (w * self._merge_weight(1 - p, 1 - p_equal, isabs), isabs) + ) + else: + for traj in other.trajectories: + w = traj.total_weight + isabs = traj.has_absolute_weight + new_weight_info.append( + (w * self._merge_weight(1 - p, 1 - p_equal, isabs), isabs) + ) + + return new_weight_info + + def _merge_trajectories(self, other, p, p_equal): + if (p == p_equal and + self.num_trajectories == self._num_rel_trajectories and + other.num_trajectories == other._num_rel_trajectories): + return self.trajectories + other.trajectories + + result = [] + for traj in self.trajectories: + if (mweight := self._merge_weight( + p, p_equal, traj.has_absolute_weight)) != 1: + traj = copy(traj) + traj.add_relative_weight(mweight) + result.append(traj) + for traj in other.trajectories: + if (mweight := self._merge_weight( + 1 - p, 1 - p_equal, traj.has_absolute_weight)) != 1: + traj = copy(traj) + 
traj.add_relative_weight(mweight) + result.append(traj) + return result + + def __add__(self, other): + return self.merge(other, p=None) + + +class _TrajectorySum: + """ + Keeps running sums of expectation values, and (if requested) states and + final states, over a set of trajectories as they are added one-by-one. + This is used in the `MultiTrajResult` class, which needs to keep track of + several sums of this type. + + Parameters + ---------- + example_trajectory : :obj:`.Result` + An example trajectory with expectation values and states of the same + shape like for the trajectories that will be added later. The data is + only used for initializing arrays in the correct shape and otherwise + ignored. + + store_states : bool + Whether the states of the trajectories will be summed. + + store_final_state : bool + Whether the final states of the trajectories will be summed. + """ + def __init__(self, example_trajectory, store_states, store_final_state): + if example_trajectory.states and store_states: + self._initialize_sum_states(example_trajectory) + else: + self.sum_states = None + + if example_trajectory.final_state and store_final_state: + self._initialize_sum_finalstate(example_trajectory) + else: + self.sum_final_state = None + + self.sum_expect = [ + np.zeros_like(expect) for expect in example_trajectory.expect + ] + self.sum2_expect = [ + np.zeros_like(expect) for expect in example_trajectory.expect + ] + + def _initialize_sum_states(self, example_trajectory): + self.sum_states = [ + qzero_like(_to_dm(state)) for state in example_trajectory.states] + + def _initialize_sum_finalstate(self, example_trajectory): + self.sum_final_state = qzero_like( + _to_dm(example_trajectory.final_state) + ) + + def reduce_states(self, trajectory): + """ + Adds the states stored in the given trajectory to the running sum + `sum_states`. Takes account of the trajectory's total weight if + present. 
+ """ + if trajectory.has_weight: + self.sum_states = [ + accu + weight * _to_dm(state) + for accu, state, weight in zip(self.sum_states, + trajectory.states, + trajectory._total_weight_tlist) + ] + else: + self.sum_states = [ + accu + _to_dm(state) + for accu, state in zip(self.sum_states, trajectory.states) + ] + + def reduce_final_state(self, trajectory): + """ + Adds the final state stored in the given trajectory to the running sum + `sum_final_state`. Takes account of the trajectory's total weight if + present. + """ + if trajectory.has_weight: + self.sum_final_state += (trajectory._final_weight * + _to_dm(trajectory.final_state)) + else: + self.sum_final_state += _to_dm(trajectory.final_state) + + def reduce_expect(self, trajectory): + """ + Adds the expectation values, and their squares, that are stored in the + given trajectory to the running sums `sum_expect` and `sum2_expect`. + Takes account of the trajectory's total weight if present. + """ + weight = trajectory.total_weight + for i, expect_traj in enumerate(trajectory.expect): + self.sum_expect[i] += weight * expect_traj + self.sum2_expect[i] += weight * expect_traj**2 + + @staticmethod + def merge(sum1, weight1, sum2, weight2): + """ + Merges the sums of expectation values, states and final states with + the given weights, i.e., `result = weight1 * sum1 + weight2 * sum2`. 
+ """ + if sum1 is None and sum2 is None: + return None + if sum1 is None: + return _TrajectorySum.merge(sum2, weight2, sum1, weight1) + + new = copy(sum1) + + if sum2 is None: + if sum1.sum_states: + new.sum_states = [ + weight1 * state1 for state1 in sum1.sum_states + ] + if sum1.sum_final_state: + new.sum_final_state = weight1 * sum1.sum_final_state + new.sum_expect = [weight1 * e1 for e1 in sum1.sum_expect] + new.sum2_expect = [weight1 * e1 for e1 in sum1.sum2_expect] + return new + + if sum1.sum_states and sum2.sum_states: + new.sum_states = [ + weight1 * state1 + weight2 * state2 for state1, state2 in zip( + sum1.sum_states, sum2.sum_states + ) + ] + else: + new.sum_states = None + + if sum1.sum_final_state and sum2.sum_final_state: + new.sum_final_state = ( + weight1 * sum1.sum_final_state + + weight2 * sum2.sum_final_state) + else: + new.sum_final_state = None + + new.sum_expect = [weight1 * e1 + weight2 * e2 for e1, e2 in zip( + sum1.sum_expect, sum2.sum_expect) + ] + new.sum2_expect = [weight1 * e1 + weight2 * e2 for e1, e2 in zip( + sum1.sum2_expect, sum2.sum2_expect) + ] + + return new + + +class McResult(MultiTrajResult): + """ + Class for storing Monte-Carlo solver results. + + Parameters + ---------- + e_ops : :obj:`.Qobj`, :obj:`.QobjEvo`, function or list or dict of these + The ``e_ops`` parameter defines the set of values to record at + each time step ``t``. If an element is a :obj:`.Qobj` or + :obj:`.QobjEvo` the value recorded is the expectation value of that + operator given the state at ``t``. If the element is a function, ``f``, + the value recorded is ``f(t, state)``. + + The values are recorded in the ``.expect`` attribute of this result + object. ``.expect`` is a list, where each item contains the values + of the corresponding ``e_op``. + + options : :obj:`~SolverResultsOptions` + The options for this result class. + + solver : str or None + The name of the solver generating these results. 
+ + stats : dict + The stats generated by the solver while producing these results. Note + that the solver may update the stats directly while producing results. + Must include a value for "num_collapse". + + kw : dict + Additional parameters specific to a result sub-class. + + Attributes + ---------- + collapse : list + For each run, a list of every collapse as a tuple of the time it + happened and the corresponding ``c_ops`` index. + """ + + # Collapse are only produced by mcsolve. + def _add_collapse(self, trajectory): + self.collapse.append(trajectory.collapse) + if trajectory.has_time_dependent_weight: + self._time_dependent_weights = True + + def _post_init(self): + super()._post_init() + self.num_c_ops = self.stats["num_collapse"] + self._time_dependent_weights = False + self.collapse = [] + self.add_processor(self._add_collapse) + + @property + def col_times(self): + """ + List of the times of the collapses for each runs. + """ + out = [] + for col_ in self.collapse: + col = list(zip(*col_)) + col = [] if len(col) == 0 else col[0] + out.append(col) + return out + + @property + def col_which(self): + """ + List of the indexes of the collapses for each runs. + """ + out = [] + for col_ in self.collapse: + col = list(zip(*col_)) + col = [] if len(col) == 0 else col[1] + out.append(col) + return out + + @property + def photocurrent(self): + """ + Average photocurrent or measurement of the evolution. 
+ """ + if self._time_dependent_weights: + raise NotImplementedError("photocurrent is not implemented " + "for this solver.") + + collapse_times = [[] for _ in range(self.num_c_ops)] + collapse_weights = [[] for _ in range(self.num_c_ops)] + tlist = self.times + for collapses, weight in zip(self.collapse, self.runs_weights): + for t, which in collapses: + collapse_times[which].append(t) + collapse_weights[which].append(weight) + + mesurement = [ + np.histogram(times, bins=tlist, weights=weights)[0] + / np.diff(tlist) + for times, weights in zip(collapse_times, collapse_weights) + ] + return mesurement + + @property + def runs_photocurrent(self): + """ + Photocurrent or measurement of each runs. + """ + if self._time_dependent_weights: + raise NotImplementedError("runs_photocurrent is not implemented " + "for this solver.") + + tlist = self.times + measurements = [] + for collapses in self.collapse: + collapse_times = [[] for _ in range(self.num_c_ops)] + for t, which in collapses: + collapse_times[which].append(t) + measurements.append( + [ + np.histogram(times, tlist)[0] / np.diff(tlist) + for times in collapse_times + ] + ) + return measurements + + def merge(self, other, p=None): + new = super().merge(other, p) + new.collapse = self.collapse + other.collapse + new._time_dependent_weights = ( + self._time_dependent_weights or other._time_dependent_weights) + return new + + +class NmmcResult(McResult): + """ + Class for storing the results of the non-Markovian Monte-Carlo solver. + + Parameters + ---------- + e_ops : :obj:`.Qobj`, :obj:`.QobjEvo`, function or list or dict of these + The ``e_ops`` parameter defines the set of values to record at + each time step ``t``. If an element is a :obj:`.Qobj` or + :obj:`.QobjEvo` the value recorded is the expectation value of that + operator given the state at ``t``. If the element is a function, ``f``, + the value recorded is ``f(t, state)``. + + The values are recorded in the ``.expect`` attribute of this result + object. 
``.expect`` is a list, where each item contains the values + of the corresponding ``e_op``. + + options : :obj:`~SolverResultsOptions` + The options for this result class. + + solver : str or None + The name of the solver generating these results. + + stats : dict + The stats generated by the solver while producing these results. Note + that the solver may update the stats directly while producing results. + Must include a value for "num_collapse". + + kw : dict + Additional parameters specific to a result sub-class. + + Attributes + ---------- + average_trace : list + The average trace (i.e., averaged over all trajectories) at each time. + + std_trace : list + The standard deviation of the trace at each time. + + runs_trace : list of lists + For each recorded trajectory, the trace at each time. + Only present if ``keep_runs_results`` is set in the options. + """ + + def _post_init(self): + super()._post_init() + + self._sum_trace_abs = None + self._sum_trace_rel = None + self._sum2_trace_abs = None + self._sum2_trace_rel = None + + self.average_trace = [] + self.std_trace = [] + self.runs_trace = [] + + self.add_processor(self._add_trace) + + def _add_first_traj(self, trajectory): + super()._add_first_traj(trajectory) + self._sum_trace_abs = np.zeros_like(trajectory.trace) + self._sum_trace_rel = np.zeros_like(trajectory.trace) + self._sum2_trace_abs = np.zeros_like(trajectory.trace) + self._sum2_trace_rel = np.zeros_like(trajectory.trace) + + def _add_trace(self, trajectory): + if trajectory.has_absolute_weight: + self._sum_trace_abs += trajectory._total_weight_tlist + self._sum2_trace_abs += np.abs(trajectory._total_weight_tlist) ** 2 + else: + self._sum_trace_rel += trajectory._total_weight_tlist + self._sum2_trace_rel += np.abs(trajectory._total_weight_tlist) ** 2 + + self._compute_avg_trace() + if self.options["keep_runs_results"]: + self.runs_trace.append(trajectory.trace) + + def _compute_avg_trace(self): + avg = self._sum_trace_abs + if 
self._num_rel_trajectories > 0: + avg = avg + self._sum_trace_rel / self._num_rel_trajectories + avg2 = self._sum2_trace_abs + if self._num_rel_trajectories > 0: + avg2 = avg2 + self._sum2_trace_rel / self._num_rel_trajectories + + self.average_trace = avg + self.std_trace = np.sqrt(np.abs(avg2 - np.abs(avg) ** 2)) + + @property + def trace(self): + """ + Refers to ``average_trace`` or ``runs_trace``, depending on whether + ``keep_runs_results`` is set in the options. + """ + return self.runs_trace or self.average_trace + + def merge(self, other, p=None): + new = super().merge(other, p) + + p_eq = self._num_rel_trajectories / new._num_rel_trajectories + if p is None: + p = self.num_trajectories / new.num_trajectories + + new._sum_trace_abs = ( + self._merge_weight(p, p_eq, True) * self._sum_trace_abs + + self._merge_weight(1 - p, 1 - p_eq, True) * other._sum_trace_abs + ) + new._sum2_trace_abs = ( + self._merge_weight(p, p_eq, True) * self._sum2_trace_abs + + self._merge_weight(1 - p, 1 - p_eq, True) * other._sum2_trace_abs + ) + new._sum_trace_rel = ( + self._merge_weight(p, p_eq, False) * self._sum_trace_rel + + self._merge_weight(1 - p, 1 - p_eq, False) * other._sum_trace_rel + ) + new._sum2_trace_rel = ( + self._merge_weight(p, p_eq, False) * self._sum2_trace_rel + + self._merge_weight(1 - p, 1 - p_eq, False) * other._sum2_trace_rel + ) + new._compute_avg_trace() + + if self.runs_trace and other.runs_trace: + new.runs_trace = self.runs_trace + other.runs_trace + + return new + + +def _to_dm(state): + if state.type == "ket": + state = state.proj() + return state diff --git a/qutip/solver/nm_mcsolve.py b/qutip/solver/nm_mcsolve.py index 6c064bb975..1931a513f3 100644 --- a/qutip/solver/nm_mcsolve.py +++ b/qutip/solver/nm_mcsolve.py @@ -1,20 +1,23 @@ __all__ = ['nm_mcsolve', 'NonMarkovianMCSolver'] -import functools import numbers - +from typing import Any +from collections.abc import Sequence import numpy as np +from numpy.typing import ArrayLike +from 
numpy.random import SeedSequence import scipy from .multitraj import MultiTrajSolver +from .multitrajresult import NmmcResult from .mcsolve import MCSolver, MCIntegrator from .mesolve import MESolver, mesolve -from .result import NmmcResult, NmmcTrajectoryResult from .cy.nm_mcsolve import RateShiftCoefficient, SqrtRealCoefficient -from ..core.coefficient import ConstantCoefficient +from ..core.coefficient import ConstantCoefficient, Coefficient from ..core import ( CoreOptions, Qobj, QobjEvo, isket, ket2dm, qeye, coefficient, ) +from ..typing import QobjEvoLike, EopsLike, CoefficientLike # The algorithm implemented here is based on the influence martingale approach @@ -26,9 +29,21 @@ # https://arxiv.org/abs/2209.08958 -def nm_mcsolve(H, state, tlist, ops_and_rates=(), e_ops=None, ntraj=500, *, - args=None, options=None, seeds=None, target_tol=None, - timeout=None): +def nm_mcsolve( + H: QobjEvoLike, + state: Qobj, + tlist: ArrayLike, + ops_and_rates: list[tuple[Qobj, CoefficientLike]] = (), + e_ops: EopsLike | list[EopsLike] | dict[Any, EopsLike] = None, + ntraj: int = 500, + *, + args: dict[str, Any] = None, + options: dict[str, Any] = None, + seeds: int | SeedSequence | list[int | SeedSequence] = None, + target_tol: float | tuple[float, float] | list[tuple[float, float]] = None, + timeout: float = None, + **kwargs, +) -> NmmcResult: """ Monte-Carlo evolution corresponding to a Lindblad equation with "rates" that may be negative. Usage of this function is analogous to ``mcsolve``, @@ -45,7 +60,7 @@ def nm_mcsolve(H, state, tlist, ops_and_rates=(), e_ops=None, ntraj=500, *, operators are to be treated deterministically. state : :class:`.Qobj` - Initial state vector. + Initial state vector or density matrix. tlist : array_like Times at which results are recorded. @@ -60,10 +75,10 @@ def nm_mcsolve(H, state, tlist, ops_and_rates=(), e_ops=None, ntraj=500, *, specified using any format accepted by :func:`~qutip.core.coefficient.coefficient`. 
- e_ops : list, optional - A ``list`` of operator as Qobj, QobjEvo or callable with signature of - (t, state: Qobj) for calculating expectation values. When no ``e_ops`` - are given, the solver will default to save the states. + e_ops : :obj:`.Qobj`, callable, list or dict, optional + Single operator, or list or dict of operators, for which to evaluate + expectation values. Operator can be Qobj, QobjEvo or callables with the + signature `f(t: float, state: Qobj) -> Any`. ntraj : int, default: 500 Maximum number of trajectories to run. Can be cut short if a time limit @@ -114,6 +129,9 @@ def nm_mcsolve(H, state, tlist, ops_and_rates=(), e_ops=None, ntraj=500, *, ``norm_tol`` are the tolerance in time and norm respectively. An error will be raised if the collapse could not be found within ``norm_steps`` tries. + - | improved_sampling : Bool + | Whether to use the improved sampling algorithm from Abdelhafez et + al. PRA (2019) - | mc_corr_eps : float | Small number used to detect non-physical collapse caused by numerical imprecision. @@ -161,7 +179,9 @@ def nm_mcsolve(H, state, tlist, ops_and_rates=(), e_ops=None, ntraj=500, *, ``trace`` (and ``runs_trace`` if ``store_final_state`` is set). Note that the states on the individual trajectories are not normalized. This field contains the average of their trace, which will converge to one - in the limit of sufficiently many trajectories. + in the limit of sufficiently many trajectories. If the initial + condition is mixed, the result has additional attributes + ``initial_states`` and ``ntraj_per_initial_state``. """ H = QobjEvo(H, args=args, tlist=tlist) @@ -192,10 +212,7 @@ def _parse_op_and_rate(op, rate, **kw): """ Sanity check the op and convert rates to coefficients. 
""" if not isinstance(op, Qobj): raise ValueError("NonMarkovianMCSolver ops must be of type Qobj") - if isinstance(rate, numbers.Number): - rate = ConstantCoefficient(rate) - else: - rate = coefficient(rate, **kw) + rate = coefficient(rate, **kw) return op, rate @@ -217,7 +234,10 @@ def initialize(self, t0, cache='clear'): # to pre-compute the continuous contribution to the martingale self._t_prev = t0 self._continuous_martingale_at_t_prev = 1 - self._discrete_martingale = 1 + + # _discrete_martingale is a list of (time, factor) such that + # mu_d(t) is the product of all factors with time < t + self._discrete_martingale = [] if np.array_equal(cache, 'clear'): self._precomputed_continuous_martingale = {} @@ -239,7 +259,7 @@ def add_collapse(self, collapse_time, collapse_channel): rate = self._nm_solver.rate(collapse_time, collapse_channel) shift = self._nm_solver.rate_shift(collapse_time) factor = rate / (rate + shift) - self._discrete_martingale *= factor + self._discrete_martingale.append((collapse_time, factor)) def value(self, t): if self._t_prev is None: @@ -256,7 +276,13 @@ def value(self, t): self._t_prev = t self._continuous_martingale_at_t_prev = mu_c - return self._discrete_martingale * mu_c + # find value of discrete martingale at given time + mu_d = 1 + for time, factor in self._discrete_martingale: + if t > time: + mu_d *= factor + + return mu_d * mu_c def _compute_continuous_martingale(self, t1, t2): if t1 == t2: @@ -313,11 +339,8 @@ class NonMarkovianMCSolver(MCSolver): is a :class:`.Qobj` and ``Gamma`` represents the corresponding rate, which is allowed to be negative. The Lindblad operators must be operators even if ``H`` is a superoperator. Each rate ``Gamma`` may be - just a number (in the case of a constant rate) or, otherwise, specified - using any format accepted by :func:`qutip.coefficient`. - - args : None / dict - Arguments for time-dependent Hamiltonian and collapse operator terms. 
+ just a number (in the case of a constant rate) or, otherwise, a + :class:`~qutip.core.cy.Coefficient`. options : SolverOptions, [optional] Options for the evolution. @@ -339,36 +362,47 @@ class NonMarkovianMCSolver(MCSolver): "norm_steps": 5, "norm_t_tol": 1e-6, "norm_tol": 1e-4, + "improved_sampling": False, "completeness_rtol": 1e-5, "completeness_atol": 1e-8, "martingale_quad_limit": 100, } - # both classes will be partially initialized in constructor - _trajectory_resultclass = NmmcTrajectoryResult - _mc_integrator_class = NmMCIntegrator - def __init__( - self, H, ops_and_rates, args=None, options=None, + self, + H: Qobj | QobjEvo, + ops_and_rates: Sequence[tuple[Qobj, float | Coefficient]], + *, + options: dict[str, Any] = None, ): self.options = options - ops_and_rates = [ - _parse_op_and_rate(op, rate, args=args or {}) - for op, rate in ops_and_rates - ] - a_parameter, L = self._check_completeness(ops_and_rates) + self.ops = [] + self._rates = [] + + for op, rate in ops_and_rates: + if not isinstance(op, Qobj): + raise ValueError("ops_and_rates' ops must be Qobj") + if isinstance(rate, numbers.Number): + rate = ConstantCoefficient(rate) + if not isinstance(rate, Coefficient): + raise ValueError( + "ops_and_rates' rates must be scalar or Coefficient" + ) + self.ops.append(op) + self._rates.append(rate) + + a_parameter, L = self._check_completeness(self.ops) if L is not None: - ops_and_rates.append((L, ConstantCoefficient(0))) + self.ops.append(L) + self._rates.append(ConstantCoefficient(0)) - self.ops = [op for op, _ in ops_and_rates] self._martingale = InfluenceMartingale( self, a_parameter, self.options["martingale_quad_limit"] ) # Many coefficients. 
These should not be publicly exposed # and will all need to be updated in _arguments(): - self._rates = [rate for _, rate in ops_and_rates] self._rate_shift = RateShiftCoefficient(self._rates) self._sqrt_shifted_rates = [ SqrtRealCoefficient(rate + self._rate_shift) @@ -380,15 +414,12 @@ def __init__( for op, sqrt_shifted_rate in zip(self.ops, self._sqrt_shifted_rates) ] - self._trajectory_resultclass = functools.partial( - NmmcTrajectoryResult, __nm_solver=self, - ) - self._mc_integrator_class = functools.partial( - NmMCIntegrator, __martingale=self._martingale, - ) super().__init__(H, c_ops, options=options) - def _check_completeness(self, ops_and_rates): + def _mc_integrator_class(self, *args): + return NmMCIntegrator(*args, __martingale=self._martingale) + + def _check_completeness(self, ops): """ Checks whether ``sum(Li.dag() * Li)`` is proportional to the identity operator. If not, creates an extra Lindblad operator so that it is. @@ -396,7 +427,7 @@ def _check_completeness(self, ops_and_rates): Returns the proportionality factor a, and the extra Lindblad operator (or None if no extra Lindblad operator is necessary). """ - op = sum((L.dag() * L) for L, _ in ops_and_rates) + op = sum((L.dag() * L) for L in ops) a_candidate = op.tr() / op.shape[0] with CoreOptions(rtol=self.options["completeness_rtol"], @@ -509,22 +540,42 @@ def sqrt_shifted_rate(self, t, i): # the run. # # Regarding (b), in the start/step-interface we just include the martingale - # in the step method. In order to include the martingale in the - # run-interface, we use a custom trajectory-resultclass that grabs the - # martingale value from the NonMarkovianMCSolver whenever a state is added. + # in the step method. In the run-interface, the martingale is added as a + # relative weight to the trajectory result at the end of `_run_one_traj`. 
- def start(self, state, t0, seed=None): + def start(self, state: Qobj, t0: float, seed: int | SeedSequence = None): self._martingale.initialize(t0, cache='clear') return super().start(state, t0, seed=seed) # The returned state will be a density matrix with trace=mu the martingale - def step(self, t, *, args=None, copy=True): + def step( + self, t: float, *, args: dict[str, Any] = None, copy: bool = True + ) -> Qobj: state = super().step(t, args=args, copy=copy) if isket(state): state = ket2dm(state) return state * self.current_martingale() - def run(self, state, tlist, ntraj=1, *, args=None, **kwargs): + def _run_one_traj(self, seed, state, tlist, e_ops, **integrator_kwargs): + """ + Run one trajectory and return the result. + """ + seed, result = super()._run_one_traj(seed, state, tlist, e_ops, + **integrator_kwargs) + martingales = [self._martingale.value(t) for t in tlist] + result.add_relative_weight(martingales) + result.trace = martingales + return seed, result + + def run( + self, + state: Qobj, + tlist: ArrayLike, + ntraj: int = 1, + *, + args: dict[str, Any] = None, + **kwargs + ): # update `args` dictionary before precomputing martingale self._argument(args) @@ -535,7 +586,7 @@ def run(self, state, tlist, ntraj=1, *, args=None, **kwargs): return result @property - def options(self): + def options(self) -> dict[str, Any]: """ Options for non-Markovian Monte Carlo solver: @@ -596,6 +647,10 @@ def options(self): norm_steps: int, default: 5 Maximum number of tries to find the collapse. + improved_sampling: Bool, default: False + Whether to use the improved sampling algorithm + of Abdelhafez et al. PRA (2019) + completeness_rtol: float, default: 1e-5 Used in determining whether the given Lindblad operators satisfy a certain completeness relation. 
If they do not, an additional @@ -615,7 +670,7 @@ def options(self): return self._options @options.setter - def options(self, new_options): + def options(self, new_options: dict[str, Any]): MCSolver.options.fset(self, new_options) start.__doc__ = MultiTrajSolver.start.__doc__ diff --git a/qutip/solver/nonmarkov/transfertensor.py b/qutip/solver/nonmarkov/transfertensor.py index ce8bea512f..386bdd4ee8 100644 --- a/qutip/solver/nonmarkov/transfertensor.py +++ b/qutip/solver/nonmarkov/transfertensor.py @@ -71,7 +71,7 @@ def ttmsolve(dynmaps, state0, times, e_ops=(), num_learning=0, options=None): opt = { "store_final_state": False, "store_states": None, - "normalize_output": "ket", + "normalize_output": True, "threshold": 0.0, "num_learning": 0, } diff --git a/qutip/solver/propagator.py b/qutip/solver/propagator.py index f64bf377d0..2abef1680d 100644 --- a/qutip/solver/propagator.py +++ b/qutip/solver/propagator.py @@ -1,3 +1,6 @@ +# Required for Sphinx to follow autodoc_type_aliases +from __future__ import annotations + __all__ = ['Propagator', 'propagator', 'propagator_steadystate'] import numbers @@ -5,14 +8,24 @@ from .. 
import Qobj, qeye, qeye_like, unstack_columns, QobjEvo, liouvillian from ..core import data as _data +from ..typing import QobjEvoLike from .mesolve import mesolve, MESolver from .sesolve import sesolve, SESolver from .heom.bofin_solvers import HEOMSolver from .solver_base import Solver from .multitraj import MultiTrajSolver - - -def propagator(H, t, c_ops=(), args=None, options=None, **kwargs): +from numbers import Number +from typing import Any + + +def propagator( + H: QobjEvoLike, + t: Number, + c_ops: QobjEvoLike | list[QobjEvoLike] = None, + args: dict[str, Any] = None, + options: dict[str, Any] = None, + **kwargs, +) -> Qobj | list[Qobj]: r""" Calculate the propagator U(t) for the density matrix or wave function such that :math:`\psi(t) = U(t)\psi(0)` or @@ -28,7 +41,11 @@ def propagator(H, t, c_ops=(), args=None, options=None, **kwargs): that can be made into :obj:`.QobjEvo` are also accepted. t : float or array-like - Time or list of times for which to evaluate the propagator. + Time or list of times for which to evaluate the propagator. If a single + time ``t`` is passed, the propagator from ``0`` to ``t`` is computed. + When ``t`` is a list, the propagators from the first time in the list + to each elements in ``t`` is returned. In that case, the first output + will always be the identity matrix. c_ops : list, optional List of Qobj or QobjEvo collapse operators. @@ -77,7 +94,7 @@ def propagator(H, t, c_ops=(), args=None, options=None, **kwargs): return out[-1] -def propagator_steadystate(U): +def propagator_steadystate(U: Qobj) -> Qobj: r"""Find the steady state for successive applications of the propagator :math:`U`. 
@@ -154,8 +171,16 @@ class Propagator: U = QobjEvo(Propagator(H)) """ - def __init__(self, system, *, c_ops=(), args=None, options=None, - memoize=10, tol=1e-14): + def __init__( + self, + system: Qobj | QobjEvo | Solver, + *, + c_ops: QobjEvoLike | list[QobjEvoLike] = None, + args: dict[str, Any] = None, + options: dict[str, Any] = None, + memoize: int = 10, + tol: float = 1e-14, + ): if isinstance(system, MultiTrajSolver): raise TypeError("Non-deterministic solvers cannot be used " "as a propagator system") @@ -168,6 +193,7 @@ def __init__(self, system, *, c_ops=(), args=None, options=None, self.solver = system else: Hevo = QobjEvo(system, args=args) + c_ops = c_ops if c_ops is not None else [] c_ops = [QobjEvo(op, args=args) for op in c_ops] if Hevo.issuper or c_ops: self.solver = MESolver(Hevo, c_ops=c_ops, options=options) @@ -199,7 +225,7 @@ def _lookup_or_compute(self, t): self._insert(t, U, idx) return U - def __call__(self, t, t_start=0, **args): + def __call__(self, t: float, t_start: float = 0, **args): """ Get the propagator from ``t_start`` to ``t``. 
@@ -235,7 +261,7 @@ def __call__(self, t, t_start=0, **args): U = self._lookup_or_compute(t) return U - def inv(self, t, **args): + def inv(self, t: float, **args): """ Get the inverse of the propagator at ``t``, such that ``psi_0 = U.inv(t) @ psi_t`` diff --git a/qutip/solver/result.py b/qutip/solver/result.py index 2f32dddbc9..7b019381a3 100644 --- a/qutip/solver/result.py +++ b/qutip/solver/result.py @@ -1,16 +1,16 @@ """ Class for solve function results""" -from typing import TypedDict +# Required for Sphinx to follow autodoc_type_aliases +from __future__ import annotations + +from typing import TypedDict, Any, Callable import numpy as np -from ..core import Qobj, QobjEvo, expect, isket, ket2dm, qzero_like +from numpy.typing import ArrayLike +from ..core import Qobj, QobjEvo, expect __all__ = [ "Result", - "MultiTrajResult", - "McResult", - "NmmcResult", - "McTrajectoryResult", - "McResultImprovedSampling", + "TrajectoryResult", ] @@ -130,7 +130,7 @@ def add_processor(self, f, requires_copy=False): class ResultOptions(TypedDict): - store_states: bool + store_states: bool | None store_final_state: bool @@ -216,15 +216,18 @@ class Result(_BaseResult): The options for this result class. 
""" + times: list[float] + states: list[Qobj] options: ResultOptions + e_data: dict[Any, list[Any]] def __init__( self, - e_ops, + e_ops: dict[Any, Qobj | QobjEvo | Callable[[float, Qobj], Any]], options: ResultOptions, *, - solver=None, - stats=None, + solver: str = None, + stats: dict[str, Any] = None, **kw, ): super().__init__(options, solver=solver, stats=stats) @@ -355,11 +358,11 @@ def __repr__(self): return "\n".join(lines) @property - def expect(self): + def expect(self) -> list[ArrayLike]: return [np.array(e_op) for e_op in self.e_data.values()] @property - def final_state(self): + def final_state(self) -> Qobj: if self._final_state is not None: return self._final_state if self.states: @@ -367,1008 +370,104 @@ def final_state(self): return None -class MultiTrajResultOptions(TypedDict): - store_states: bool - store_final_state: bool - keep_runs_results: bool - - -class MultiTrajResult(_BaseResult): - """ - Base class for storing results for solver using multiple trajectories. - - Parameters - ---------- - e_ops : :obj:`.Qobj`, :obj:`.QobjEvo`, function or list or dict of these - The ``e_ops`` parameter defines the set of values to record at - each time step ``t``. If an element is a :obj:`.Qobj` or - :obj:`.QobjEvo` the value recorded is the expectation value of that - operator given the state at ``t``. If the element is a function, ``f``, - the value recorded is ``f(t, state)``. - - The values are recorded in the ``.expect`` attribute of this result - object. ``.expect`` is a list, where each item contains the values - of the corresponding ``e_op``. +class TrajectoryResult(Result): + r""" + Result class used for single trajectories in multi-trajectory simulations. - Function ``e_ops`` must return a number so the average can be computed. + A trajectory may come with a weight. The trajectory average of an + observable O is then performed as - options : dict - The options for this result class. + .. 
math:: + \langle O \rangle = \sum_k w(k) O(k) , - solver : str or None - The name of the solver generating these results. + where O is an observable, w(k) the weight of the k-th trajectory, and O(k) + the observable on the k-th trajectory. The weight may be time-dependent. - stats : dict or None - The stats generated by the solver while producing these results. Note - that the solver may update the stats directly while producing results. - - kw : dict - Additional parameters specific to a result sub-class. + There may be an absolute weight `wa` and / or a relative weight `wr`. + The total weight is `w = wa * wr` if the absolute weight is set, and + `w = wr / N` otherwise (where N is the number of trajectories with no + absolute weight specified). Attributes ---------- - times : list - A list of the times at which the expectation values and states were - recorded. - - average_states : list of :obj:`.Qobj` - The state at each time ``t`` (if the recording of the state was - requested) averaged over all trajectories as a density matrix. - - runs_states : list of list of :obj:`.Qobj` - The state for each trajectory and each time ``t`` (if the recording of - the states and trajectories was requested) - - final_state : :obj:`.Qobj`: - The final state (if the recording of the final state was requested) - averaged over all trajectories as a density matrix. - - runs_final_state : list of :obj:`.Qobj` - The final state for each trajectory (if the recording of the final - state and trajectories was requested). - - average_expect : list of array of expectation values - A list containing the values of each ``e_op`` averaged over each - trajectories. The list is in the same order in which the ``e_ops`` were - supplied and empty if no ``e_ops`` were given. - - Each element is itself an array and contains the values of the - corresponding ``e_op``, with one value for each time in ``.times``. 
- - std_expect : list of array of expectation values - A list containing the standard derivation of each ``e_op`` over each - trajectories. The list is in the same order in which the ``e_ops`` were - supplied and empty if no ``e_ops`` were given. - - Each element is itself an array and contains the values of the - corresponding ``e_op``, with one value for each time in ``.times``. - - runs_expect : list of array of expectation values - A list containing the values of each ``e_op`` for each trajectories. - The list is in the same order in which the ``e_ops`` were - supplied and empty if no ``e_ops`` were given. Only available if the - storing of trajectories was requested. - - The order of the elements is ``runs_expect[e_ops][trajectory][time]``. - - Each element is itself an array and contains the values of the - corresponding ``e_op``, with one value for each time in ``.times``. - - average_e_data : dict - A dictionary containing the values of each ``e_op`` averaged over each - trajectories. If the ``e_ops`` were supplied as a dictionary, the keys - are the same as in that dictionary. Otherwise the keys are the index of - the ``e_op`` in the ``.expect`` list. - - The lists of expectation values returned are the *same* lists as - those returned by ``.expect``. - - average_e_data : dict - A dictionary containing the standard derivation of each ``e_op`` over - each trajectories. If the ``e_ops`` were supplied as a dictionary, the - keys are the same as in that dictionary. Otherwise the keys are the - index of the ``e_op`` in the ``.expect`` list. - - The lists of expectation values returned are the *same* lists as - those returned by ``.expect``. - - runs_e_data : dict - A dictionary containing the values of each ``e_op`` for each - trajectories. If the ``e_ops`` were supplied as a dictionary, the keys - are the same as in that dictionary. Otherwise the keys are the index of - the ``e_op`` in the ``.expect`` list. 
Only available if the storing - of trajectories was requested. - - The order of the elements is ``runs_expect[e_ops][trajectory][time]``. + rel_weight: float or list + The relative weight, constant or time-dependent. - The lists of expectation values returned are the *same* lists as - those returned by ``.expect``. - - solver : str or None - The name of the solver generating these results. - - stats : dict or None - The stats generated by the solver while producing these results. - - options : :obj:`~SolverResultsOptions` - The options for this result class. + abs_weight: float or list or None + The absolute weight, constant or time-dependent. + None if no absolute weight has been set. """ - options: MultiTrajResultOptions - - def __init__( - self, - e_ops, - options: MultiTrajResultOptions, - *, - solver=None, - stats=None, - **kw, - ): - super().__init__(options, solver=solver, stats=stats) - self._raw_ops = self._e_ops_to_dict(e_ops) - - self.times = [] - self.trajectories = [] - self.num_trajectories = 0 - self.seeds = [] - - self._sum_states = None - self._sum_final_states = None - self._sum_expect = None - self._sum2_expect = None - self._target_tols = None - - self.average_e_data = {} - self.std_e_data = {} - self.runs_e_data = {} - - self._post_init(**kw) - - @property - def _store_average_density_matricies(self) -> bool: - return ( - self.options["store_states"] - or (self.options["store_states"] is None and self._raw_ops == {}) - ) and not self.options["keep_runs_results"] - - @property - def _store_final_density_matrix(self) -> bool: - return ( - self.options["store_final_state"] - and not self._store_average_density_matricies - and not self.options["keep_runs_results"] - ) - - @staticmethod - def _to_dm(state): - if state.type == "ket": - state = state.proj() - return state - - def _add_first_traj(self, trajectory): - """ - Read the first trajectory, intitializing needed data. 
- """ - self.times = trajectory.times - - if trajectory.states and self._store_average_density_matricies: - self._sum_states = [ - qzero_like(self._to_dm(state)) for state in trajectory.states - ] - - if trajectory.final_state and self._store_final_density_matrix: - state = trajectory.final_state - self._sum_final_states = qzero_like(self._to_dm(state)) - - self._sum_expect = [ - np.zeros_like(expect) for expect in trajectory.expect - ] - self._sum2_expect = [ - np.zeros_like(expect) for expect in trajectory.expect - ] - - self.e_ops = trajectory.e_ops - - self.average_e_data = { - k: list(avg_expect) - for k, avg_expect in zip(self._raw_ops, self._sum_expect) - } - if self.options["keep_runs_results"]: - self.runs_e_data = {k: [] for k in self._raw_ops} - - def _store_trajectory(self, trajectory): - self.trajectories.append(trajectory) - - def _reduce_states(self, trajectory): - self._sum_states = [ - accu + self._to_dm(state) - for accu, state in zip(self._sum_states, trajectory.states) - ] - - def _reduce_final_state(self, trajectory): - self._sum_final_states += self._to_dm(trajectory.final_state) - - def _reduce_expect(self, trajectory): - """ - Compute the average of the expectation values and store it in it's - multiple formats. - """ - for i, k in enumerate(self._raw_ops): - expect_traj = trajectory.expect[i] - - self._sum_expect[i] += expect_traj - self._sum2_expect[i] += expect_traj**2 - - avg = self._sum_expect[i] / self.num_trajectories - avg2 = self._sum2_expect[i] / self.num_trajectories - - self.average_e_data[k] = list(avg) - - # mean(expect**2) - mean(expect)**2 can something be very small - # negative (-1e-15) which raise an error for float sqrt. 
- self.std_e_data[k] = list(np.sqrt(np.abs(avg2 - np.abs(avg**2)))) - - if self.runs_e_data: - self.runs_e_data[k].append(trajectory.e_data[k]) - - def _increment_traj(self, trajectory): - if self.num_trajectories == 0: - self._add_first_traj(trajectory) - self.num_trajectories += 1 - - def _no_end(self): - """ - Remaining number of trajectories needed to finish cannot be determined - by this object. - """ - return np.inf - - def _fixed_end(self): - """ - Finish at a known number of trajectories. - """ - ntraj_left = self._target_ntraj - self.num_trajectories - if ntraj_left == 0: - self.stats["end_condition"] = "ntraj reached" - return ntraj_left - - def _average_computer(self): - avg = np.array(self._sum_expect) / self.num_trajectories - avg2 = np.array(self._sum2_expect) / self.num_trajectories - return avg, avg2 - - def _target_tolerance_end(self): - """ - Compute the error on the expectation values using jackknife resampling. - Return the approximate number of trajectories needed to have this - error within the tolerance fot all e_ops and times. 
- """ - if self.num_trajectories <= 1: - return np.inf - avg, avg2 = self._average_computer() - target = np.array( - [ - atol + rtol * mean - for mean, (atol, rtol) in zip(avg, self._target_tols) - ] - ) - target_ntraj = np.max((avg2 - abs(avg) ** 2) / target**2 + 1) - - self._estimated_ntraj = min(target_ntraj, self._target_ntraj) - if (self._estimated_ntraj - self.num_trajectories) <= 0: - self.stats["end_condition"] = "target tolerance reached" - return self._estimated_ntraj - self.num_trajectories - def _post_init(self): - self.num_trajectories = 0 - self._target_ntraj = None - - self.add_processor(self._increment_traj) - store_trajectory = self.options["keep_runs_results"] - if store_trajectory: - self.add_processor(self._store_trajectory) - if self._store_average_density_matricies: - self.add_processor(self._reduce_states) - if self._store_final_density_matrix: - self.add_processor(self._reduce_final_state) - if self._raw_ops: - self.add_processor(self._reduce_expect) - - self._early_finish_check = self._no_end - self.stats["end_condition"] = "unknown" - - def add(self, trajectory_info): - """ - Add a trajectory to the evolution. - - Trajectories can be saved or average canbe extracted depending on the - options ``keep_runs_results``. - - Parameters - ---------- - trajectory_info : tuple of seed and trajectory - - seed: int, SeedSequence - Seed used to generate the trajectory. - - trajectory : :class:`Result` - Run result for one evolution over the times. - - Returns - ------- - remaing_traj : number - Return the number of trajectories still needed to reach the target - tolerance. If no tolerance is provided, return infinity. - """ - seed, trajectory = trajectory_info - self.seeds.append(seed) - - for op in self._state_processors: - op(trajectory) - - return self._early_finish_check() - - def add_end_condition(self, ntraj, target_tol=None): - """ - Set the condition to stop the computing trajectories when the certain - condition are fullfilled. 
- Supported end condition for multi trajectories computation are: - - - Reaching a number of trajectories. - - Error bar on the expectation values reach smaller than a given - tolerance. - - Parameters - ---------- - ntraj : int - Number of trajectories expected. - - target_tol : float, array_like, [optional] - Target tolerance of the evolution. The evolution will compute - trajectories until the error on the expectation values is lower - than this tolerance. The error is computed using jackknife - resampling. ``target_tol`` can be an absolute tolerance, a pair of - absolute and relative tolerance, in that order. Lastly, it can be a - list of pairs of (atol, rtol) for each e_ops. - - Error estimation is done with jackknife resampling. - """ - self._target_ntraj = ntraj - self.stats["end_condition"] = "timeout" - - if target_tol is None: - self._early_finish_check = self._fixed_end - return - - num_e_ops = len(self._raw_ops) - - if not num_e_ops: - raise ValueError("Cannot target a tolerance without e_ops") - - self._estimated_ntraj = ntraj - - targets = np.array(target_tol) - if targets.ndim == 0: - self._target_tols = np.array([(target_tol, 0.0)] * num_e_ops) - elif targets.shape == (2,): - self._target_tols = np.ones((num_e_ops, 2)) * targets - elif targets.shape == (num_e_ops, 2): - self._target_tols = targets - else: - raise ValueError( - "target_tol must be a number, a pair of (atol, " - "rtol) or a list of (atol, rtol) for each e_ops" - ) - - self._early_finish_check = self._target_tolerance_end - - @property - def runs_states(self): - """ - States of every runs as ``states[run][t]``. - """ - if self.trajectories and self.trajectories[0].states: - return [traj.states for traj in self.trajectories] - else: - return None - - @property - def average_states(self): - """ - States averages as density matrices. 
- """ - if self._sum_states is None: - if not (self.trajectories and self.trajectories[0].states): - return None - self._sum_states = [ - qzero_like(self._to_dm(state)) - for state in self.trajectories[0].states - ] - for trajectory in self.trajectories: - self._reduce_states(trajectory) - - return [ - final / self.num_trajectories for final in self._sum_states - ] - - @property - def states(self): - """ - Runs final states if available, average otherwise. - """ - return self.runs_states or self.average_states - - @property - def runs_final_states(self): - """ - Last states of each trajectories. - """ - if self.trajectories and self.trajectories[0].final_state: - return [traj.final_state for traj in self.trajectories] - else: - return None - - @property - def average_final_state(self): - """ - Last states of each trajectories averaged into a density matrix. - """ - if self._sum_final_states is None: - if self.average_states is not None: - return self.average_states[-1] - return None - return self._sum_final_states / self.num_trajectories - - @property - def final_state(self): - """ - Runs final states if available, average otherwise. - """ - return self.runs_final_states or self.average_final_state - - @property - def average_expect(self): - return [np.array(val) for val in self.average_e_data.values()] - - @property - def std_expect(self): - return [np.array(val) for val in self.std_e_data.values()] - - @property - def runs_expect(self): - return [np.array(val) for val in self.runs_e_data.values()] - - @property - def expect(self): - return [np.array(val) for val in self.e_data.values()] + super()._post_init() - @property - def e_data(self): - return self.runs_e_data or self.average_e_data + self.rel_weight = np.array(1) + self.abs_weight = None + self._has_weight = False - def steady_state(self, N=0): + def add_absolute_weight(self, new_weight): """ - Average the states of the last ``N`` times of every runs as a density - matrix. 
Should converge to the steady state in the right circumstances. - - Parameters - ---------- - N : int [optional] - Number of states from the end of ``tlist`` to average. Per default - all states will be averaged. + Adds the given weight (which may be either a number or an array of the + same length as the list of times) as an absolute weight. """ - N = int(N) or len(self.times) - N = len(self.times) if N > len(self.times) else N - states = self.average_states - if states is not None: - return sum(states[-N:]) / N + new_weight = np.array(new_weight) + if self.abs_weight is None: + self.abs_weight = new_weight else: - return None - - def __repr__(self): - lines = [ - f"<{self.__class__.__name__}", - f" Solver: {self.solver}", - ] - if self.stats: - lines.append(" Solver stats:") - lines.extend(f" {k}: {v!r}" for k, v in self.stats.items()) - if self.times: - lines.append( - f" Time interval: [{self.times[0]}, {self.times[-1]}]" - f" ({len(self.times)} steps)" - ) - lines.append(f" Number of e_ops: {len(self.e_data)}") - if self.states: - lines.append(" States saved.") - elif self.final_state is not None: - lines.append(" Final state saved.") - else: - lines.append(" State not saved.") - lines.append(f" Number of trajectories: {self.num_trajectories}") - if self.trajectories: - lines.append(" Trajectories saved.") - else: - lines.append(" Trajectories not saved.") - lines.append(">") - return "\n".join(lines) - - def __add__(self, other): - if not isinstance(other, MultiTrajResult): - return NotImplemented - if self._raw_ops != other._raw_ops: - raise ValueError("Shared `e_ops` is required to merge results") - if self.times != other.times: - raise ValueError("Shared `times` are is required to merge results") - - new = self.__class__( - self._raw_ops, self.options, solver=self.solver, stats=self.stats - ) - new.e_ops = self.e_ops - - if self.trajectories and other.trajectories: - new.trajectories = self.trajectories + other.trajectories - new.num_trajectories = 
self.num_trajectories + other.num_trajectories - new.times = self.times - new.seeds = self.seeds + other.seeds - - if ( - self._sum_states is not None - and other._sum_states is not None - ): - new._sum_states = [ - state1 + state2 for state1, state2 in zip( - self._sum_states, other._sum_states - ) - ] - - if ( - self._sum_final_states is not None - and other._sum_final_states is not None - ): - new._sum_final_states = ( - self._sum_final_states - + other._sum_final_states - ) - new._target_tols = None - - new._sum_expect = [] - new._sum2_expect = [] - new.average_e_data = {} - new.std_e_data = {} - - for i, k in enumerate(self._raw_ops): - new._sum_expect.append(self._sum_expect[i] + other._sum_expect[i]) - new._sum2_expect.append( - self._sum2_expect[i] + other._sum2_expect[i] - ) - - avg = new._sum_expect[i] / new.num_trajectories - avg2 = new._sum2_expect[i] / new.num_trajectories - - new.average_e_data[k] = list(avg) - new.std_e_data[k] = np.sqrt(np.abs(avg2 - np.abs(avg**2))) - - if self.runs_e_data and other.runs_e_data: - new.runs_e_data[k] = self.runs_e_data[k] + other.runs_e_data[k] - - new.stats["run time"] += other.stats["run time"] - new.stats["end_condition"] = "Merged results" - - return new - - -class McTrajectoryResult(Result): - """ - Result class used by the :class:`.MCSolver` for single trajectories. - """ - - def __init__(self, e_ops, options, *args, **kwargs): - super().__init__( - e_ops, {**options, "normalize_output": False}, *args, **kwargs - ) - - -class McResult(MultiTrajResult): - """ - Class for storing Monte-Carlo solver results. - - Parameters - ---------- - e_ops : :obj:`.Qobj`, :obj:`.QobjEvo`, function or list or dict of these - The ``e_ops`` parameter defines the set of values to record at - each time step ``t``. If an element is a :obj:`.Qobj` or - :obj:`.QobjEvo` the value recorded is the expectation value of that - operator given the state at ``t``. If the element is a function, ``f``, - the value recorded is ``f(t, state)``. 
- - The values are recorded in the ``.expect`` attribute of this result - object. ``.expect`` is a list, where each item contains the values - of the corresponding ``e_op``. - - options : :obj:`~SolverResultsOptions` - The options for this result class. - - solver : str or None - The name of the solver generating these results. - - stats : dict - The stats generated by the solver while producing these results. Note - that the solver may update the stats directly while producing results. - Must include a value for "num_collapse". - - kw : dict - Additional parameters specific to a result sub-class. - - Attributes - ---------- - collapse : list - For each runs, a list of every collapse as a tuple of the time it - happened and the corresponding ``c_ops`` index. - """ - - # Collapse are only produced by mcsolve. - - def _add_collapse(self, trajectory): - self.collapse.append(trajectory.collapse) - - def _post_init(self): - super()._post_init() - self.num_c_ops = self.stats["num_collapse"] - self.collapse = [] - self.add_processor(self._add_collapse) + self.abs_weight = self.abs_weight * new_weight + self._has_weight = True - @property - def col_times(self): + def add_relative_weight(self, new_weight): """ - List of the times of the collapses for each runs. + Adds the given weight (which may be either a number or an array of the + same length as the list of times) as a relative weight. """ - out = [] - for col_ in self.collapse: - col = list(zip(*col_)) - col = [] if len(col) == 0 else col[0] - out.append(col) - return out + new_weight = np.array(new_weight) + self.rel_weight = self.rel_weight * new_weight + self._has_weight = True @property - def col_which(self): - """ - List of the indexes of the collapses for each runs. 
- """ - out = [] - for col_ in self.collapse: - col = list(zip(*col_)) - col = [] if len(col) == 0 else col[1] - out.append(col) - return out + def has_weight(self): + """Whether any weight has been set.""" + return self._has_weight @property - def photocurrent(self): - """ - Average photocurrent or measurement of the evolution. - """ - cols = [[] for _ in range(self.num_c_ops)] - tlist = self.times - for collapses in self.collapse: - for t, which in collapses: - cols[which].append(t) - mesurement = [ - np.histogram(cols[i], tlist)[0] - / np.diff(tlist) - / self.num_trajectories - for i in range(self.num_c_ops) - ] - return mesurement + def has_absolute_weight(self): + """Whether an absolute weight has been set.""" + return (self.abs_weight is not None) @property - def runs_photocurrent(self): - """ - Photocurrent or measurement of each runs. - """ - tlist = self.times - measurements = [] - for collapses in self.collapse: - cols = [[] for _ in range(self.num_c_ops)] - for t, which in collapses: - cols[which].append(t) - measurements.append( - [ - np.histogram(cols[i], tlist)[0] / np.diff(tlist) - for i in range(self.num_c_ops) - ] - ) - return measurements - - -class McResultImprovedSampling(McResult, MultiTrajResult): - """ - See docstring for McResult and MultiTrajResult for all relevant documentation. - This class computes expectation values and sums of states, etc - using the improved sampling algorithm, which samples the no-jump trajectory - first and then only samples jump trajectories afterwards. 
- """ - - def __init__(self, e_ops, options, **kw): - MultiTrajResult.__init__(self, e_ops=e_ops, options=options, **kw) - self._sum_expect_no_jump = None - self._sum_expect_jump = None - self._sum2_expect_no_jump = None - self._sum2_expect_jump = None - - self._sum_states_no_jump = None - self._sum_states_jump = None - self._sum_final_states_no_jump = None - self._sum_final_states_jump = None - - self.no_jump_prob = None - - def _reduce_states(self, trajectory): - if self.num_trajectories == 1: - self._sum_states_no_jump = [ - accu + self._to_dm(state) - for accu, state in zip( - self._sum_states_no_jump, trajectory.states - ) - ] - else: - self._sum_states_jump = [ - accu + self._to_dm(state) - for accu, state in zip( - self._sum_states_jump, trajectory.states - ) - ] - - def _reduce_final_state(self, trajectory): - dm_final_state = self._to_dm(trajectory.final_state) - if self.num_trajectories == 1: - self._sum_final_states_no_jump += dm_final_state - else: - self._sum_final_states_jump += dm_final_state - - def _average_computer(self): - avg = np.array(self._sum_expect_jump) / (self.num_trajectories - 1) - avg2 = np.array(self._sum2_expect_jump) / (self.num_trajectories - 1) - return avg, avg2 - - def _add_first_traj(self, trajectory): - super()._add_first_traj(trajectory) - if trajectory.states and self._store_average_density_matricies: - del self._sum_states - self._sum_states_no_jump = [ - qzero_like(self._to_dm(state)) for state in trajectory.states - ] - self._sum_states_jump = [ - qzero_like(self._to_dm(state)) for state in trajectory.states - ] - if trajectory.final_state and self._store_final_density_matrix: - state = trajectory.final_state - del self._sum_final_states - self._sum_final_states_no_jump = qzero_like(self._to_dm(state)) - self._sum_final_states_jump = qzero_like(self._to_dm(state)) - self._sum_expect_jump = [ - np.zeros_like(expect) for expect in trajectory.expect - ] - self._sum2_expect_jump = [ - np.zeros_like(expect) for expect in 
trajectory.expect - ] - self._sum_expect_no_jump = [ - np.zeros_like(expect) for expect in trajectory.expect - ] - self._sum2_expect_no_jump = [ - np.zeros_like(expect) for expect in trajectory.expect - ] - self._sum_expect_jump = [ - np.zeros_like(expect) for expect in trajectory.expect - ] - self._sum2_expect_jump = [ - np.zeros_like(expect) for expect in trajectory.expect - ] - del self._sum_expect - del self._sum2_expect - - def _reduce_expect(self, trajectory): - """ - Compute the average of the expectation values appropriately - weighting the jump and no-jump trajectories - """ - for i, k in enumerate(self._raw_ops): - expect_traj = trajectory.expect[i] - p = self.no_jump_prob - if self.num_trajectories == 1: - self._sum_expect_no_jump[i] += expect_traj * p - self._sum2_expect_no_jump[i] += expect_traj**2 * p - # no jump trajectory will always be the first one, no need - # to worry about including jump trajectories - avg = self._sum_expect_no_jump[i] - avg2 = self._sum2_expect_no_jump[i] - else: - self._sum_expect_jump[i] += expect_traj * (1 - p) - self._sum2_expect_jump[i] += expect_traj**2 * (1 - p) - avg = self._sum_expect_no_jump[i] + ( - self._sum_expect_jump[i] / (self.num_trajectories - 1) - ) - avg2 = self._sum2_expect_no_jump[i] + ( - self._sum2_expect_jump[i] / (self.num_trajectories - 1) - ) - - self.average_e_data[k] = list(avg) - - # mean(expect**2) - mean(expect)**2 can something be very small - # negative (-1e-15) which raise an error for float sqrt. - self.std_e_data[k] = list(np.sqrt(np.abs(avg2 - np.abs(avg**2)))) - - if self.runs_e_data: - self.runs_e_data[k].append(trajectory.e_data[k]) + def has_time_dependent_weight(self): + """Whether the total weight is time-dependent.""" + # np.ndim(None) returns zero, which is what we want + return np.ndim(self.rel_weight) > 0 or np.ndim(self.abs_weight) > 0 @property - def average_states(self): + def total_weight(self): """ - States averages as density matrices. 
+ Returns the total weight, either a single number or an array in case of + a time-dependent weight. If no absolute weight was set, this is only + the relative weight. If an absolute weight was set, this is the product + of the absolute and the relative weights. """ - if self._sum_states_no_jump is None: - if not (self.trajectories and self.trajectories[0].states): - return None - self._sum_states_no_jump = [ - qzero_like(self._to_dm(state)) - for state in self.trajectories[0].states - ] - self._sum_states_jump = [ - qzero_like(self._to_dm(state)) - for state in self.trajectories[0].states - ] - self.num_trajectories = 0 - for trajectory in self.trajectories: - self.num_trajectories += 1 - self._reduce_states(trajectory) - p = self.no_jump_prob - return [ - p * final_no_jump - + (1 - p) * final_jump / (self.num_trajectories - 1) - for final_no_jump, final_jump in zip( - self._sum_states_no_jump, self._sum_states_jump - ) - ] + if self.has_absolute_weight: + return self.abs_weight * self.rel_weight + return self.rel_weight @property - def average_final_state(self): + def _total_weight_tlist(self): """ - Last states of each trajectory averaged into a density matrix. + Returns the total weight as a function of time (i.e., as an array with + the same shape as the `tlist`) """ - if self._sum_final_states_no_jump is None: - if self.average_states is not None: - return self.average_states[-1] - p = self.no_jump_prob - return p * self._sum_final_states_no_jump + ( - ((1 - p) * self._sum_final_states_jump) - / (self.num_trajectories - 1) - ) - - def __add__(self, other): - raise NotImplemented + total_weight = self.total_weight + if self.has_time_dependent_weight: + return total_weight + return np.ones_like(self.times) * total_weight @property - def photocurrent(self): - """ - Average photocurrent or measurement of the evolution. 
- """ - cols = [[] for _ in range(self.num_c_ops)] - tlist = self.times - for collapses in self.collapse: - for t, which in collapses: - cols[which].append(t) - mesurement = [ - (1 - self.no_jump_prob) - / (self.num_trajectories - 1) - * np.histogram(cols[i], tlist)[0] - / np.diff(tlist) - for i in range(self.num_c_ops) - ] - return mesurement - - -class NmmcTrajectoryResult(McTrajectoryResult): - """ - Result class used by the :class:`.NonMarkovianMCSolver` for single - trajectories. Additionally stores the trace of the state along the - trajectory. - """ - - def __init__(self, e_ops, options, *args, **kwargs): - self._nm_solver = kwargs.pop("__nm_solver") - super().__init__(e_ops, options, *args, **kwargs) - self.trace = [] - - # This gets called during the Monte-Carlo simulation of the associated - # completely positive master equation. To obtain the state of the actual - # system, we simply multiply the provided state with the current martingale - # before storing it / computing expectation values. - def add(self, t, state): - if isket(state): - state = ket2dm(state) - mu = self._nm_solver.current_martingale() - super().add(t, state * mu) - self.trace.append(mu) - - add.__doc__ = Result.add.__doc__ - - -class NmmcResult(McResult): - """ - Class for storing the results of the non-Markovian Monte-Carlo solver. - - Parameters - ---------- - e_ops : :obj:`.Qobj`, :obj:`.QobjEvo`, function or list or dict of these - The ``e_ops`` parameter defines the set of values to record at - each time step ``t``. If an element is a :obj:`.Qobj` or - :obj:`.QobjEvo` the value recorded is the expectation value of that - operator given the state at ``t``. If the element is a function, ``f``, - the value recorded is ``f(t, state)``. - - The values are recorded in the ``.expect`` attribute of this result - object. ``.expect`` is a list, where each item contains the values - of the corresponding ``e_op``. - - options : :obj:`~SolverResultsOptions` - The options for this result class. 
- - solver : str or None - The name of the solver generating these results. - - stats : dict - The stats generated by the solver while producing these results. Note - that the solver may update the stats directly while producing results. - Must include a value for "num_collapse". - - kw : dict - Additional parameters specific to a result sub-class. - - Attributes - ---------- - average_trace : list - The average trace (i.e., averaged over all trajectories) at each time. - - std_trace : list - The standard deviation of the trace at each time. - - runs_trace : list of lists - For each recorded trajectory, the trace at each time. - Only present if ``keep_runs_results`` is set in the options. - """ - - def _post_init(self): - super()._post_init() - - self._sum_trace = None - self._sum2_trace = None - self.average_trace = [] - self.std_trace = [] - self.runs_trace = [] - - self.add_processor(self._add_trace) - - def _add_first_traj(self, trajectory): - super()._add_first_traj(trajectory) - self._sum_trace = np.zeros_like(trajectory.times) - self._sum2_trace = np.zeros_like(trajectory.times) - - def _add_trace(self, trajectory): - new_trace = np.array(trajectory.trace) - self._sum_trace += new_trace - self._sum2_trace += np.abs(new_trace) ** 2 - - avg = self._sum_trace / self.num_trajectories - avg2 = self._sum2_trace / self.num_trajectories - - self.average_trace = avg - self.std_trace = np.sqrt(np.abs(avg2 - np.abs(avg) ** 2)) - - if self.options["keep_runs_results"]: - self.runs_trace.append(trajectory.trace) - - @property - def trace(self): - """ - Refers to ``average_trace`` or ``runs_trace``, depending on whether - ``keep_runs_results`` is set in the options. 
- """ - return self.runs_trace or self.average_trace + def _final_weight(self): + total_weight = self.total_weight + if self.has_time_dependent_weight: + return total_weight[-1] + return total_weight diff --git a/qutip/solver/scattering.py b/qutip/solver/scattering.py index 23b5450fe9..80a8bd8855 100644 --- a/qutip/solver/scattering.py +++ b/qutip/solver/scattering.py @@ -11,6 +11,7 @@ # Contact: benbartlett@stanford.edu import numpy as np +from scipy.integrate import trapezoid from itertools import product, combinations_with_replacement from ..core import basis, tensor, zero_ket, Qobj, QobjEvo from .propagator import propagator, Propagator @@ -297,5 +298,5 @@ def scattering_probability(H, psi0, n_emissions, c_ops, tlist, # Iteratively integrate to obtain single value while probs.shape != (): - probs = np.trapz(probs, x=tlist) + probs = trapezoid(probs, x=tlist) return np.abs(probs) diff --git a/qutip/solver/sesolve.py b/qutip/solver/sesolve.py index 8d43988299..afe63e1d9c 100644 --- a/qutip/solver/sesolve.py +++ b/qutip/solver/sesolve.py @@ -2,16 +2,31 @@ This module provides solvers for the unitary Schrodinger equation. """ +# Required for Sphinx to follow autodoc_type_aliases +from __future__ import annotations + __all__ = ['sesolve', 'SESolver'] -import numpy as np +from numpy.typing import ArrayLike from time import time +from typing import Any, Callable from .. import Qobj, QobjEvo +from ..core import data as _data +from ..typing import QobjEvoLike, EopsLike from .solver_base import Solver, _solver_deprecation from ._feedback import _QobjFeedback, _DataFeedback - - -def sesolve(H, psi0, tlist, e_ops=None, args=None, options=None, **kwargs): +from . 
import Result + + +def sesolve( + H: QobjEvoLike, + psi0: Qobj, + tlist: ArrayLike, + e_ops: EopsLike | list[EopsLike] | dict[Any, EopsLike] = None, + args: dict[str, Any] = None, + options: dict[str, Any] = None, + **kwargs +) -> Result: """ Schrodinger equation evolution of a state vector or unitary matrix for a given Hamiltonian. @@ -48,12 +63,10 @@ def sesolve(H, psi0, tlist, e_ops=None, args=None, options=None, **kwargs): tlist : *list* / *array* list of times for :math:`t`. - e_ops : :obj:`.Qobj`, callable, or list, optional - Single operator or list of operators for which to evaluate - expectation values or callable or list of callable. - Callable signature must be, `f(t: float, state: Qobj)`. - See :func:`~qutip.core.expect.expect` for more detail of operator - expectation. + e_ops : :obj:`.Qobj`, callable, list or dict, optional + Single operator, or list or dict of operators, for which to evaluate + expectation values. Operator can be Qobj, QobjEvo or callables with the + signature `f(t: float, state: Qobj) -> Any`. args : dict, optional dictionary of parameters for time-dependent Hamiltonians @@ -69,7 +82,8 @@ def sesolve(H, psi0, tlist, e_ops=None, args=None, options=None, **kwargs): On `None` the states will be saved if no expectation operators are given. - | normalize_output : bool - | Normalize output state to hide ODE numerical errors. + | Normalize output state to hide ODE numerical errors. Only normalize + the state if the initial state is already normalized. - | progress_bar : str {'text', 'enhanced', 'tqdm', ''} | How to present the solver progress. 
'tqdm' uses the python module of the same name and raise an error @@ -138,7 +152,7 @@ class SESolver(Solver): 'method': 'adams', } - def __init__(self, H, *, options=None): + def __init__(self, H: Qobj | QobjEvo, *, options: dict[str, Any] = None): _time_start = time() if not isinstance(H, (Qobj, QobjEvo)): @@ -157,7 +171,7 @@ def _initialize_stats(self): return stats @property - def options(self): + def options(self) -> dict: """ Solver's options: @@ -188,11 +202,16 @@ def options(self): return self._options @options.setter - def options(self, new_options): + def options(self, new_options: dict[str, Any]): Solver.options.fset(self, new_options) @classmethod - def StateFeedback(cls, default=None, raw_data=False, prop=False): + def StateFeedback( + cls, + default: Qobj | _data.Data = None, + raw_data: bool = False, + prop: bool = False + ): """ State of the evolution to be used in a time-dependent operator. diff --git a/qutip/solver/sode/_noise.py b/qutip/solver/sode/_noise.py index e1b4592a49..eec07a1485 100644 --- a/qutip/solver/sode/_noise.py +++ b/qutip/solver/sode/_noise.py @@ -1,6 +1,6 @@ import numpy as np -__all__ = ["Wiener"] +__all__ = ["Wiener", "PreSetWiener"] class Wiener: @@ -10,31 +10,84 @@ class Wiener: def __init__(self, t0, dt, generator, shape): self.t0 = t0 self.dt = dt - self.generator = generator - self.t_end = t0 self.shape = shape - self.process = np.zeros((1,) + shape, dtype=float) + self.generator = generator + self.noise = np.zeros((0,) + shape, dtype=float) + self.last_W = np.zeros(shape[-1], dtype=float) + self.idx_last_0 = 0 - def _extend(self, t): - N_new_vals = int((t - self.t_end + self.dt*0.01) // self.dt) + def _extend(self, idx): + N_new_vals = idx - self.noise.shape[0] dW = self.generator.normal( 0, np.sqrt(self.dt), size=(N_new_vals,) + self.shape ) - W = self.process[-1, :, :] + np.cumsum(dW, axis=0) - self.process = np.concatenate((self.process, W), axis=0) - self.t_end = self.t0 + (self.process.shape[0] - 1) * self.dt + 
self.noise = np.concatenate((self.noise, dW), axis=0) def dW(self, t, N): - if t + N * self.dt > self.t_end: - self._extend(t + N * self.dt) - idx0 = int((t - self.t0 + self.dt * 0.01) // self.dt) - return np.diff(self.process[idx0:idx0 + N + 1, :, :], axis=0) + # Find the index of t. + # Rounded to the closest step, but only multiple of dt are expected. + idx0 = round((t - self.t0) / self.dt) + if idx0 + N - 1 >= self.noise.shape[0]: + self._extend(idx0 + N) + return self.noise[idx0:idx0 + N, :, :] def __call__(self, t): - if t > self.t_end: - self._extend(t) - idx = int((t - self.t0 + self.dt * 0.01) // self.dt) - return self.process[idx, 0, :] + """ + Return the Wiener process at the closest ``dt`` step to ``t``. + """ + # The Wiener process is not used directly in the evolution, so it's + # less optimized than the ``dW`` method. + + # Find the index of t. + # Rounded to the closest step, but only multiple of dt are expected. + idx = round((t - self.t0) / self.dt) + if idx >= self.noise.shape[0]: + self._extend(idx + 1) + + if self.idx_last_0 > idx: + # Before last call, reseting + self.idx_last_0 = 0 + self.last_W = np.zeros(self.shape[-1], dtype=float) + + self.last_W = self.last_W + np.sum( + self.noise[self.idx_last_0:idx+1, 0, :], axis=0 + ) + + self.idx_last_0 = idx + return self.last_W + + +class PreSetWiener(Wiener): + def __init__(self, noise, tlist, n_sc_ops, heterodyne, is_measurement): + if heterodyne: + if noise.shape != (n_sc_ops/2, 2, len(tlist)-1): + raise ValueError( + "Noise is not of the expected shape: " + f"{(n_sc_ops/2, 2, len(tlist)-1)}" + ) + noise = np.reshape(noise, (n_sc_ops, len(tlist)-1), "C") + else: + if noise.shape != (n_sc_ops, len(tlist)-1): + raise ValueError( + "Noise is not of the expected shape: " + f"{(n_sc_ops, len(tlist)-1)}" + ) + + self.t0 = tlist[0] + self.dt = tlist[1] - tlist[0] + self.shape = noise.shape[1:] + self.noise = noise.T[:, np.newaxis, :].copy() + self.last_W = np.zeros(self.shape[-1], dtype=float) + 
self.idx_last_0 = 0 + self.is_measurement = is_measurement + if self.is_measurement: + # Measurements is scaled as + dW / dt + self.noise *= self.dt + if heterodyne: + self.noise /= 2**0.5 + + def _extend(self, N): + raise ValueError("Requested time is outside the integration range.") class _Noise: diff --git a/qutip/solver/sode/_sode.pyx b/qutip/solver/sode/_sode.pyx index d528228d87..a38079686e 100644 --- a/qutip/solver/sode/_sode.pyx +++ b/qutip/solver/sode/_sode.pyx @@ -11,9 +11,11 @@ import numpy as np cdef class Euler: cdef _StochasticSystem system + cdef bint measurement_noise - def __init__(self, _StochasticSystem system): + def __init__(self, _StochasticSystem system, measurement_noise=False): self.system = system + self.measurement_noise = measurement_noise @cython.wraparound(False) def run( @@ -37,9 +39,16 @@ cdef class Euler: """ cdef int i cdef _StochasticSystem system = self.system + cdef list expect cdef Data a = system.drift(t, state) b = system.diffusion(t, state) + + if self.measurement_noise: + expect = system.expect(t, state) + for i in range(system.num_collapse): + dW[0, i] -= expect[i].real * dt + cdef Data new_state = _data.add(state, a, dt) for i in range(system.num_collapse): new_state = _data.add(new_state, b[i], dW[0, i]) @@ -72,6 +81,12 @@ cdef class Platen(Euler): cdef list d2 = system.diffusion(t, state) cdef Data Vt, out cdef list Vp, Vm + cdef list expect + + if self.measurement_noise: + expect = system.expect(t, state) + for i in range(system.num_collapse): + dW[0, i] -= expect[i].real * dt out = _data.mul(d1, 0.5) Vt = d1.copy() @@ -106,6 +121,9 @@ cdef class Platen(Euler): cdef class Explicit15(Euler): + def __init__(self, _StochasticSystem system): + self.system = system + @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) @@ -235,9 +253,11 @@ cdef class Explicit15(Euler): cdef class Milstein: cdef _StochasticSystem system + cdef bint measurement_noise - def __init__(self, _StochasticSystem system): - 
self.system = system + def __init__(self, _StochasticSystem system, measurement_noise=False): + self.system = system + self.measurement_noise = measurement_noise @cython.wraparound(False) def run(self, double t, Data state, double dt, double[:, :, ::1] dW, int ntraj): @@ -273,6 +293,11 @@ cdef class Milstein: iadd_dense(out, state, 1) iadd_dense(out, system.a(), dt) + if self.measurement_noise: + expect = system.expect(t, state) + for i in range(system.num_collapse): + dW[0, i] -= system.expect_i(i).real * dt + for i in range(num_ops): iadd_dense(out, system.bi(i), dW[0, i]) @@ -289,11 +314,17 @@ cdef class PredCorr: cdef Dense euler cdef double alpha, eta cdef _StochasticSystem system + cdef bint measurement_noise - def __init__(self, _StochasticSystem system, double alpha=0., double eta=0.5): + def __init__( + self, _StochasticSystem system, + double alpha=0., double eta=0.5, + measurement_noise=False + ): self.system = system self.alpha = alpha self.eta = eta + self.measurement_noise = measurement_noise @cython.wraparound(False) def run(self, double t, Data state, double dt, double[:, :, ::1] dW, int ntraj): @@ -324,6 +355,11 @@ cdef class PredCorr: system.set_state(t, state) + if self.measurement_noise: + expect = system.expect(t, state) + for i in range(system.num_collapse): + dW[0, i] -= system.expect_i(i).real * dt + imul_dense(out, 0.) 
iadd_dense(out, state, 1) iadd_dense(out, system.a(), dt * (1-alpha)) @@ -350,6 +386,10 @@ cdef class PredCorr: cdef class Taylor15(Milstein): + def __init__(self, _StochasticSystem system): + self.system = system + self.measurement_noise = False + @cython.boundscheck(False) @cython.wraparound(False) cdef Data step(self, double t, Dense state, double dt, double[:, :] dW, Dense out): @@ -398,17 +438,17 @@ cdef class Milstein_imp: cdef double prev_dt cdef dict imp_opt - def __init__(self, _StochasticSystem system, imp_method=None, imp_options={}): + def __init__(self, _StochasticSystem system, solve_method=None, solve_options={}): self.system = system self.prev_dt = 0 - if imp_method == "inv": + if solve_method == "inv": if not self.system.L.isconstant: raise TypeError("The 'inv' integration method requires that the system Hamiltonian or Liouvillian be constant.") self.use_inv = True self.imp_opt = {} else: self.use_inv = False - self.imp_opt = {"method": imp_method, "options": imp_options} + self.imp_opt = {"method": solve_method, "options": solve_options} @cython.wraparound(False) diff --git a/qutip/solver/sode/itotaylor.py b/qutip/solver/sode/itotaylor.py index 6ad4df1545..a4541206cd 100644 --- a/qutip/solver/sode/itotaylor.py +++ b/qutip/solver/sode/itotaylor.py @@ -17,8 +17,13 @@ class EulerSODE(_Explicit_Simple_Integrator): - Order: 0.5 """ + integrator_options = { + "dt": 0.001, + "tol": 1e-10, + } stepper = _sode.Euler N_dw = 1 + _stepper_options = ["measurement_noise"] class Milstein_SODE(_Explicit_Simple_Integrator): @@ -30,8 +35,13 @@ class Milstein_SODE(_Explicit_Simple_Integrator): - Order strong 1.0 """ + integrator_options = { + "dt": 0.001, + "tol": 1e-10, + } stepper = _sode.Milstein N_dw = 1 + _stepper_options = ["measurement_noise"] class Taylor1_5_SODE(_Explicit_Simple_Integrator): diff --git a/qutip/solver/sode/sode.py b/qutip/solver/sode/sode.py index e43e8f5ac1..d76b371374 100644 --- a/qutip/solver/sode/sode.py +++ b/qutip/solver/sode/sode.py 
@@ -3,7 +3,7 @@ from . import _sode from ..integrator.integrator import Integrator from ..stochastic import StochasticSolver, SMESolver -from ._noise import Wiener +from ._noise import Wiener, PreSetWiener __all__ = ["SIntegrator", "PlatenSODE", "PredCorr_SODE"] @@ -65,7 +65,24 @@ def set_state(self, t, state0, generator): """ self.t = t self.state = state0 - if isinstance(generator, Wiener): + stepper_opt = { + key: self.options[key] + for key in self._stepper_options + if key in self.options + } + + if isinstance(generator, PreSetWiener): + self.wiener = generator + if ( + generator.is_measurement + and "measurement_noise" not in self._stepper_options + ): + raise NotImplementedError( + f"{type(self).__name__} does not support running" + " the evolution from measurements." + ) + stepper_opt["measurement_noise"] = generator.is_measurement + elif isinstance(generator, Wiener): self.wiener = generator else: num_collapse = len(self.rhs.sc_ops) @@ -74,8 +91,8 @@ def set_state(self, t, state0, generator): (self.N_dw, num_collapse) ) self.rhs._register_feedback(self.wiener) - opt = [self.options[key] for key in self._stepper_options] - self.step_func = self.stepper(self.rhs(self.options), *opt).run + rhs = self.rhs(self.options) + self.step_func = self.stepper(rhs, **stepper_opt).run self._is_set = True def get_state(self, copy=True): @@ -228,9 +245,13 @@ class PlatenSODE(_Explicit_Simple_Integrator): - Order: strong 1, weak 2 """ - + integrator_options = { + "dt": 0.001, + "tol": 1e-10, + } stepper = _sode.Platen N_dw = 1 + _stepper_options = ["measurement_noise"] class PredCorr_SODE(_Explicit_Simple_Integrator): @@ -257,7 +278,7 @@ class PredCorr_SODE(_Explicit_Simple_Integrator): } stepper = _sode.PredCorr N_dw = 1 - _stepper_options = ["alpha", "eta"] + _stepper_options = ["alpha", "eta", "measurement_noise"] @property def options(self): diff --git a/qutip/solver/sode/ssystem.pxd b/qutip/solver/sode/ssystem.pxd index a09212c1db..6b78ebe9f8 100644 --- 
a/qutip/solver/sode/ssystem.pxd +++ b/qutip/solver/sode/ssystem.pxd @@ -13,10 +13,13 @@ cdef class _StochasticSystem: cpdef list diffusion(self, t, Data state) + cpdef list expect(self, t, Data state) + cpdef void set_state(self, double t, Dense state) except * cpdef Data a(self) cpdef Data bi(self, int i) + cpdef complex expect_i(self, int i) cpdef Data Libj(self, int i, int j) cpdef Data Lia(self, int i) cpdef Data L0bi(self, int i) diff --git a/qutip/solver/sode/ssystem.pyx b/qutip/solver/sode/ssystem.pyx index be15d4634a..88acefaea4 100644 --- a/qutip/solver/sode/ssystem.pyx +++ b/qutip/solver/sode/ssystem.pyx @@ -51,6 +51,12 @@ cdef class _StochasticSystem: """ raise NotImplementedError + cpdef list expect(self, t, Data state): + """ + Compute the expectation terms for the ``state`` at time ``t``. + """ + raise NotImplementedError + cpdef void set_state(self, double t, Dense state) except *: """ Initialize the set of derrivatives. @@ -69,6 +75,12 @@ cdef class _StochasticSystem: """ raise NotImplementedError + cpdef complex expect_i(self, int i): + """ + Expectation value of the ``i``th operator. 
+ """ + raise NotImplementedError + cpdef Data Libj(self, int i, int j): """ bi_n * d bj / dx_n @@ -144,7 +156,7 @@ cdef class StochasticClosedSystem(_StochasticSystem): cpdef list diffusion(self, t, Data state): cdef int i cdef QobjEvo c_op - out = [] + cdef list out = [] for i in range(self.num_collapse): c_op = self.c_ops[i] _out = c_op.matmul_data(t, state) @@ -153,6 +165,15 @@ cdef class StochasticClosedSystem(_StochasticSystem): out.append(_data.add(_out, state, -0.5 * expect)) return out + cpdef list expect(self, t, Data state): + cdef int i + cdef QobjEvo c_op + cdef list expect = [] + for i in range(self.num_collapse): + c_op = self.cpcd_ops[i] + expect.append(c_op.expect_data(t, state)) + return expect + def __reduce__(self): return ( StochasticClosedSystem.restore, @@ -210,7 +231,7 @@ cdef class StochasticOpenSystem(_StochasticSystem): cdef int i cdef QobjEvo c_op cdef complex expect - cdef out = [] + cdef list out = [] for i in range(self.num_collapse): c_op = self.c_ops[i] vec = c_op.matmul_data(t, state) @@ -218,6 +239,16 @@ cdef class StochasticOpenSystem(_StochasticSystem): out.append(_data.add(vec, state, -expect)) return out + cpdef list expect(self, t, Data state): + cdef int i + cdef QobjEvo c_op + cdef list expect = [] + for i in range(self.num_collapse): + c_op = self.c_ops[i] + vec = c_op.matmul_data(t, state) + expect.append(_data.trace_oper_ket(vec)) + return expect + cpdef void set_state(self, double t, Dense state) except *: cdef n, l self.t = t @@ -277,6 +308,16 @@ cdef class StochasticOpenSystem(_StochasticSystem): self._compute_b() return _dense_wrap(self._b[i, :]) + cpdef complex expect_i(self, int i): + if not self._is_set: + raise RuntimeError( + "Derrivatives set for ito taylor expansion need " + "to receive the state with `set_state`." 
+ ) + if not self._b_set: + self._compute_b() + return self.expect_Cv[i] + @cython.boundscheck(False) @cython.wraparound(False) cdef void _compute_b(self) except *: @@ -510,6 +551,13 @@ cdef class SimpleStochasticSystem(_StochasticSystem): out.append(self.c_ops[i].matmul_data(t, state)) return out + cpdef list expect(self, t, Data state): + cdef int i + cdef list expect = [] + for i in range(self.num_collapse): + expect.append(0j) + return expect + cpdef void set_state(self, double t, Dense state) except *: self.t = t self.state = state @@ -520,6 +568,9 @@ cdef class SimpleStochasticSystem(_StochasticSystem): cpdef Data bi(self, int i): return self.c_ops[i].matmul_data(self.t, self.state) + cpdef complex expect_i(self, int i): + return 0j + cpdef Data Libj(self, int i, int j): bj = self.c_ops[i].matmul_data(self.t, self.state) return self.c_ops[j].matmul_data(self.t, bj) diff --git a/qutip/solver/solver_base.py b/qutip/solver/solver_base.py index 60202b8b8e..b171c7ec4b 100644 --- a/qutip/solver/solver_base.py +++ b/qutip/solver/solver_base.py @@ -1,14 +1,23 @@ +# Required for Sphinx to follow autodoc_type_aliases +from __future__ import annotations + __all__ = ['Solver'] +from numpy.typing import ArrayLike +from numbers import Number +from typing import Any, Callable from .. import Qobj, QobjEvo, ket2dm from .options import _SolverOptions from ..core import stack_columns, unstack_columns +from .. 
import settings from .result import Result from .integrator import Integrator from ..ui.progressbar import progress_bars from ._feedback import _ExpectFeedback +from ..typing import EopsLike from time import time import warnings +import numpy as np class Solver: @@ -38,7 +47,7 @@ class Solver: "progress_kwargs": {"chunk_size": 10}, "store_final_state": False, "store_states": None, - "normalize_output": "ket", + "normalize_output": True, "method": "adams", } _resultclass = Result @@ -85,8 +94,28 @@ def _prepare_state(self, state): self._state_metadata = { 'dims': state._dims, - 'isherm': state.isherm and not (self.rhs.dims == state.dims) + # This is herm flag take for granted that the liouvillian keep + # hermiticity. But we do not check user passed super operator for + # anything other than dimensions. + 'isherm': not (self.rhs.dims == state.dims) and state._isherm, } + if state.isket: + norm = state.norm() + elif state._dims.issquare: + # Qobj.isoper does not differientiate between rectangular operators + # and normal ones. + norm = state.tr() + else: + norm = -1 + self._normalize_output = ( + self._options.get("normalize_output", False) + # Don't normalize output if input is not normalized. + # Use the settings atol instead of the solver one since the second + # refer to the ODE tolerance and some integrator do not use it. 
+ and np.abs(norm - 1) <= settings.core["atol"] + # Only ket and dm can be normalized + and (self.rhs.dims[1] == state.dims or state.shape[1] == 1) + ) if self.rhs.dims[1] == state.dims: return stack_columns(state.data) return state.data @@ -101,12 +130,22 @@ def _restore_state(self, data, *, copy=True): else: state = Qobj(data, **self._state_metadata, copy=copy) - if data.shape[1] == 1 and self._options['normalize_output']: - state = state * (1 / state.norm()) + if self._normalize_output: + if state.isoper: + state = state * (1 / state.tr()) + else: + state = state * (1 / state.norm()) return state - def run(self, state0, tlist, *, args=None, e_ops=None): + def run( + self, + state0: Qobj, + tlist: ArrayLike, + *, + e_ops: EopsLike | list[EopsLike] | dict[Any, EopsLike] = None, + args: dict[str, Any] = None, + ) -> Result: """ Do the evolution of the Quantum system. @@ -126,12 +165,12 @@ def run(self, state0, tlist, *, args=None, e_ops=None): evolution. Each times of the list must be increasing, but does not need to be uniformy distributed. - args : dict, optional {None} + args : dict, optional Change the ``args`` of the rhs for the evolution. - e_ops : list {None} - List of Qobj, QobjEvo or callable to compute the expectation - values. Function[s] must have the signature + e_ops : Qobj, QobjEvo, callable, list, or dict optional + Single, list or dict of Qobj, QobjEvo or callable to compute the + expectation values. Function[s] must have the signature f(t : float, state : Qobj) -> expect. Returns @@ -165,7 +204,7 @@ def run(self, state0, tlist, *, args=None, e_ops=None): # stats.update(_integrator.stats) return results - def start(self, state0, t0): + def start(self, state0: Qobj, t0: Number) -> None: """ Set the initial state and time for a step evolution. 
@@ -181,7 +220,13 @@ def start(self, state0, t0): self._integrator.set_state(t0, self._prepare_state(state0)) self.stats["preparation time"] += time() - _time_start - def step(self, t, *, args=None, copy=True): + def step( + self, + t: Number, + *, + args: dict[str, Any] = None, + copy: bool = True + ) -> Qobj: """ Evolve the state to ``t`` and return the state as a :obj:`.Qobj`. @@ -238,7 +283,7 @@ def sys_dims(self): return self.rhs.dims[0] @property - def options(self): + def options(self) -> dict[str, Any]: """ method: str Which ordinary differential equation integration method to use. @@ -278,7 +323,7 @@ def _parse_options(self, new_options, default, old_options): return included_options, extra_options @options.setter - def options(self, new_options): + def options(self, new_options: dict[str, Any]): if not hasattr(self, "_options"): self._options = {} if new_options is None: @@ -399,7 +444,7 @@ def add_integrator(cls, integrator, key): cls._avail_integrators[key] = integrator @classmethod - def ExpectFeedback(cls, operator, default=0.): + def ExpectFeedback(cls, operator: Qobj | QobjEvo, default: Any = 0.): """ Expectation value of the instantaneous state of the evolution to be used by a time-dependent operator. 
diff --git a/qutip/solver/steadystate.py b/qutip/solver/steadystate.py index 5993822bd7..9e2d0e6ecb 100644 --- a/qutip/solver/steadystate.py +++ b/qutip/solver/steadystate.py @@ -12,14 +12,16 @@ def _permute_wbm(L, b): - perm = scipy.sparse.csgraph.maximum_bipartite_matching(L.as_scipy()) + perm = np.argsort( + scipy.sparse.csgraph.maximum_bipartite_matching(L.as_scipy()) + ) L = _data.permute.indices(L, perm, None) b = _data.permute.indices(b, perm, None) return L, b def _permute_rcm(L, b): - perm = scipy.sparse.csgraph.reverse_cuthill_mckee(L.as_scipy()) + perm = np.argsort(scipy.sparse.csgraph.reverse_cuthill_mckee(L.as_scipy())) L = _data.permute.indices(L, perm, perm) b = _data.permute.indices(b, perm, None) return L, b, perm @@ -233,7 +235,6 @@ def _steadystate_direct(A, weight, **kw): else: warn("Only sparse solver use preconditioners.", RuntimeWarning) - method = kw.pop("method", None) steadystate = _data.solve(L, b, method, options=kw) @@ -243,7 +244,7 @@ def _steadystate_direct(A, weight, **kw): rho_ss = _data.column_unstack(steadystate, n) rho_ss = _data.add(rho_ss, rho_ss.adjoint()) * 0.5 - return Qobj(rho_ss, dims=A.dims[0], isherm=True) + return Qobj(rho_ss, dims=A._dims[0].oper, isherm=True) def _steadystate_eigen(L, **kw): @@ -258,9 +259,12 @@ def _steadystate_eigen(L, **kw): def _steadystate_svd(L, **kw): + N = L.shape[0] + n = int(N**0.5) u, s, vh = _data.svd(L.data, True) - vec = Qobj(_data.split_columns(vh.adjoint())[-1], dims=[L.dims[0],[1]]) - rho = vector_to_operator(vec) + vec = _data.split_columns(vh.adjoint())[-1] + rho = _data.column_unstack(vec, n) + rho = Qobj(rho, dims=L._dims[0].oper, isherm=True) return rho / rho.tr() @@ -305,7 +309,7 @@ def _steadystate_power(A, **kw): if use_rcm: y = _reverse_rcm(y, perm) - rho_ss = Qobj(_data.column_unstack(y, N**0.5), dims=A.dims[0]) + rho_ss = Qobj(_data.column_unstack(y, N**0.5), dims=A._dims[0].oper) rho_ss = rho_ss + rho_ss.dag() rho_ss = rho_ss / rho_ss.tr() rho_ss.isherm = True @@ -354,9 
+358,10 @@ def steadystate_floquet(H_0, c_ops, Op_t, w_d=1.0, n_it=3, sparse=False, - "mkl_spsolve" sparse solver by mkl. - Extensions to qutip, such as qutip-tensorflow, may provide their own solvers. - When ``H_0`` and ``c_ops`` use these data backends, see their documentation - for the names and details of additional solvers they may provide. + Extensions to qutip, such as qutip-tensorflow, may provide their own + solvers. When ``H_0`` and ``c_ops`` use these data backends, see their + documentation for the names and details of additional solvers they may + provide. **kwargs: Extra options to pass to the linear system solver. See the @@ -371,18 +376,20 @@ def steadystate_floquet(H_0, c_ops, Op_t, w_d=1.0, n_it=3, sparse=False, Notes ----- See: Sze Meng Tan, - https://copilot.caltech.edu/documents/16743/qousersguide.pdf, - Section (10.16) + https://painterlab.caltech.edu/wp-content/uploads/2019/06/qe_quantum_optics_toolbox.pdf, + Section (16) """ L_0 = liouvillian(H_0, c_ops) - L_m = L_p = 0.5 * liouvillian(Op_t) + L_m = 0.5 * liouvillian(Op_t) + L_p = 0.5 * liouvillian(Op_t) # L_p and L_m correspond to the positive and negative # frequency terms respectively. # They are independent in the model, so we keep both names. 
Id = qeye_like(L_0) - S = T = qzero_like(L_0) + S = qzero_like(L_0) + T = qzero_like(L_0) if isinstance(H_0.data, _data.CSR) and not sparse: L_0 = L_0.to("Dense") @@ -393,7 +400,7 @@ def steadystate_floquet(H_0, c_ops, Op_t, w_d=1.0, n_it=3, sparse=False, for n_i in np.arange(n_it, 0, -1): L = L_0 - 1j * n_i * w_d * Id + L_m @ S S.data = - _data.solve(L.data, L_p.data, solver, kwargs) - L = L_0 - 1j * n_i * w_d * Id + L_p @ T + L = L_0 + 1j * n_i * w_d * Id + L_p @ T T.data = - _data.solve(L.data, L_m.data, solver, kwargs) M_subs = L_0 + L_m @ S + L_p @ T diff --git a/qutip/solver/stochastic.py b/qutip/solver/stochastic.py index f39837c956..de4ddb8fde 100644 --- a/qutip/solver/stochastic.py +++ b/qutip/solver/stochastic.py @@ -1,39 +1,47 @@ __all__ = ["smesolve", "SMESolver", "ssesolve", "SSESolver"] +import numpy as np +from numpy.typing import ArrayLike +from numpy.random import SeedSequence +from typing import Any, Callable, Literal, overload +from functools import partial +from time import time +from collections.abc import Sequence +from .multitrajresult import MultiTrajResult from .sode.ssystem import StochasticOpenSystem, StochasticClosedSystem -from .result import MultiTrajResult, Result, ExpectOp -from .multitraj import MultiTrajSolver +from .sode._noise import PreSetWiener +from .result import Result, ExpectOp +from .multitraj import _MultiTrajRHS, MultiTrajSolver from .. 
import Qobj, QobjEvo from ..core.dimensions import Dimensions -import numpy as np -from functools import partial +from ..core import data as _data from .solver_base import _solver_deprecation -from ._feedback import _QobjFeedback, _DataFeedback, _WeinerFeedback +from ._feedback import _QobjFeedback, _DataFeedback, _WienerFeedback +from ..typing import QobjEvoLike, EopsLike class StochasticTrajResult(Result): def _post_init(self, m_ops=(), dw_factor=(), heterodyne=False): super()._post_init() - self.W = [] - self.m_ops = [] - self.m_expect = [] - self.dW_factor = dw_factor + self.noise = [] self.heterodyne = heterodyne - for op in m_ops: - f = self._e_op_func(op) - self.W.append([0.0]) - self.m_expect.append([]) - self.m_ops.append(ExpectOp(op, f, self.m_expect[-1].append)) - self.add_processor(self.m_ops[-1]._store) - - def add(self, t, state, noise): + if self.options["store_measurement"]: + self.m_ops = [] + self.m_expect = [] + self.dW_factor = dw_factor + for op in m_ops: + f = self._e_op_func(op) + self.m_expect.append([]) + self.m_ops.append(ExpectOp(op, f, self.m_expect[-1].append)) + self.add_processor(self.m_ops[-1]._store) + + def add(self, t, state, noise=None): super().add(t, state) - if noise is not None and self.options["store_measurement"]: - for i, dW in enumerate(noise): - self.W[i].append(self.W[i][-1] + dW) + if noise is not None: + self.noise.append(noise) @property - def wiener_process(self): + def wiener_process(self) -> np.typing.NDArray[float]: """ Wiener processes for each stochastic collapse operators. @@ -43,13 +51,17 @@ def wiener_process(self): (len(sc_ops), 2, len(tlist)) for heterodyne detection. 
""" - W = np.array(self.W) + W = np.zeros( + (self.noise[0].shape[0], len(self.times)), + dtype=np.float64 + ) + np.cumsum(np.array(self.noise).T, axis=1, out=W[:, 1:]) if self.heterodyne: W = W.reshape(-1, 2, W.shape[1]) return W @property - def dW(self): + def dW(self) -> np.typing.NDArray[float]: """ Wiener increment for each stochastic collapse operators. @@ -59,13 +71,13 @@ def dW(self): (len(sc_ops), 2, len(tlist)-1) for heterodyne detection. """ - dw = np.diff(self.W, axis=1) + noise = np.array(self.noise).T if self.heterodyne: - dw = dw.reshape(-1, 2, dw.shape[1]) - return dw + return noise.reshape(-1, 2, noise.shape[1]) + return noise @property - def measurement(self): + def measurement(self) -> np.typing.NDArray[float]: """ Measurements for each stochastic collapse operators. @@ -75,20 +87,41 @@ def measurement(self): (len(sc_ops), 2, len(tlist)-1) for heterodyne detection. """ - dts = np.diff(self.times) - m_expect = np.array(self.m_expect)[:, 1:] - noise = np.einsum( - "i,ij,j->ij", self.dW_factor, np.diff(self.W, axis=1), (1 / dts) + if not self.options["store_measurement"]: + return None + elif len(self.m_ops) == 0: + if self.heterodyne: + return np.empty(shape=(0, 2, len(self.times) - 1)) + else: + return np.empty(shape=(0, len(self.times) - 1)) + elif self.options["store_measurement"] == "start": + m_expect = np.array(self.m_expect)[:, :-1] + elif self.options["store_measurement"] == "middle": + m_expect = np.apply_along_axis( + lambda m: np.convolve(m, [0.5, 0.5], "valid"), + axis=1, arr=self.m_expect, + ) + elif self.options["store_measurement"] in ["end", True]: + m_expect = np.array(self.m_expect)[:, 1:] + else: + raise ValueError( + "store_measurement must be in {'start', 'middle', 'end', ''}, " + f"not {self.options['store_measurement']}" + ) + noise = np.array(self.noise).T + noise_scaled = np.einsum( + "i,ij,j->ij", self.dW_factor, noise, (1 / np.diff(self.times)) ) if self.heterodyne: m_expect = m_expect.reshape(-1, 2, m_expect.shape[1]) - 
noise = noise.reshape(-1, 2, noise.shape[1]) - return m_expect + noise + noise_scaled = noise_scaled.reshape(-1, 2, noise_scaled.shape[1]) + return m_expect + noise_scaled class StochasticResult(MultiTrajResult): - def _post_init(self): + def _post_init(self, heterodyne=False): super()._post_init() + self.heterodyne = heterodyne store_measurement = self.options["store_measurement"] keep_runs = self.options["keep_runs_results"] @@ -125,7 +158,7 @@ def _trajectories_attr(self, attr): return None @property - def measurement(self): + def measurement(self) -> np.typing.NDArray[float]: """ Measurements for each trajectories and stochastic collapse operators. @@ -138,7 +171,7 @@ def measurement(self): return self._trajectories_attr("measurement") @property - def dW(self): + def dW(self) -> np.typing.NDArray[float]: """ Wiener increment for each trajectories and stochastic collapse operators. @@ -152,7 +185,7 @@ def dW(self): return self._trajectories_attr("dW") @property - def wiener_process(self): + def wiener_process(self) -> np.typing.NDArray[float]: """ Wiener processes for each trajectories and stochastic collapse operators. 
@@ -165,8 +198,36 @@ def wiener_process(self): """ return self._trajectories_attr("wiener_process") + def merge(self, other: "StochasticResult", p: float = None) -> "StochasticResult": + if not isinstance(other, StochasticResult): + return NotImplemented + if self.stats["solver"] != other.stats["solver"]: + raise ValueError("Can't merge smesolve and ssesolve results") + if self.heterodyne != other.heterodyne: + raise ValueError("Can't merge heterodyne and homodyne results") + if p is not None: + raise ValueError( + "Stochastic solvers does not support custom weights" + ) + new = super().merge(other, p) -class _StochasticRHS: + if ( + self.options["store_measurement"] + and other.options["store_measurement"] + and not new.trajectories + ): + new._measurement = np.concatenate( + (self.measurement, other.measurement), axis=0 + ) + new._wiener_process = np.concatenate( + (self.wiener_process, other.wiener_process), axis=0 + ) + new._dW = np.concatenate((self.dW, other.dW), axis=0) + + return new + + +class _StochasticRHS(_MultiTrajRHS): """ In between object to store the stochastic system. 
@@ -181,7 +242,7 @@ class _StochasticRHS: def __init__(self, issuper, H, sc_ops, c_ops, heterodyne): if not isinstance(H, (Qobj, QobjEvo)) or not H.isoper: - raise TypeError("The Hamiltonian must be am operator") + raise TypeError("The Hamiltonian must be an operator") self.H = QobjEvo(H) if isinstance(sc_ops, (Qobj, QobjEvo)): @@ -232,22 +293,34 @@ def arguments(self, args): sc_op.arguments(args) def _register_feedback(self, val): - self.H._register_feedback({"wiener_process": val}, "stochatic solver") + self.H._register_feedback({"wiener_process": val}, "stochastic solver") for c_op in self.c_ops: c_op._register_feedback( - {"WeinerFeedback": val}, "stochatic solver" + {"WienerFeedback": val}, "stochastic solver" ) for sc_op in self.sc_ops: sc_op._register_feedback( - {"WeinerFeedback": val}, "stochatic solver" + {"WienerFeedback": val}, "stochastic solver" ) def smesolve( - H, rho0, tlist, c_ops=(), sc_ops=(), heterodyne=False, *, - e_ops=(), args={}, ntraj=500, options=None, - seeds=None, target_tol=None, timeout=None, **kwargs -): + H: QobjEvoLike, + rho0: Qobj, + tlist: ArrayLike, + c_ops: Qobj | QobjEvo | Sequence[QobjEvoLike] = (), + sc_ops: Qobj | QobjEvo | Sequence[QobjEvoLike] = (), + heterodyne: bool = False, + *, + e_ops: EopsLike | list[EopsLike] | dict[Any, EopsLike] = None, + args: dict[str, Any] = None, + ntraj: int = 500, + options: dict[str, Any] = None, + seeds: int | SeedSequence | Sequence[int | SeedSequence] = None, + target_tol: float | tuple[float, float] | list[tuple[float, float]] = None, + timeout: float = None, + **kwargs +) -> StochasticResult: """ Solve stochastic master equation. @@ -271,11 +344,10 @@ def smesolve( sc_ops : list of (:obj:`.QobjEvo`, :obj:`.QobjEvo` compatible format) List of stochastic collapse operators. - e_ops : : :class:`.qobj`, callable, or list, optional - Single operator or list of operators for which to evaluate - expectation values or callable or list of callable. 
- Callable signature must be, `f(t: float, state: Qobj)`. - See :func:`.expect` for more detail of operator expectation. + e_ops : :obj:`.Qobj`, callable, list or dict, optional + Single operator, or list or dict of operators, for which to evaluate + expectation values. Operator can be Qobj, QobjEvo or callables with the + signature `f(t: float, state: Qobj) -> Any`. args : dict, optional Dictionary of parameters for time-dependent Hamiltonians and @@ -319,14 +391,16 @@ def smesolve( | Whether or not to store the state vectors or density matrices. On `None` the states will be saved if no expectation operators are given. - - | store_measurement: bool - | Whether to store the measurement and wiener process for each - trajectories. + - | store_measurement: str, {'start', 'middle', 'end', ''} + | Whether and how to store the measurement for each trajectories. + 'start', 'middle', 'end' indicate when in the interval the + expectation value of the ``m_ops`` is taken. - | keep_runs_results : bool | Whether to store results from all trajectories or just store the averages. - | normalize_output : bool - | Normalize output state to hide ODE numerical errors. + | Normalize output state to hide ODE numerical errors. Only normalize + the state if the initial state is already normalized. - | progress_bar : str {'text', 'enhanced', 'tqdm', ''} | How to present the solver progress. 
'tqdm' uses the python module of the same name and raise an error @@ -361,6 +435,10 @@ def smesolve( """ options = _solver_deprecation(kwargs, options, "stoc") H = QobjEvo(H, args=args, tlist=tlist) + if not isinstance(sc_ops, Sequence): + sc_ops = [sc_ops] + if not isinstance(c_ops, Sequence): + c_ops = [c_ops] c_ops = [QobjEvo(c_op, args=args, tlist=tlist) for c_op in c_ops] sc_ops = [QobjEvo(c_op, args=args, tlist=tlist) for c_op in sc_ops] sol = SMESolver( @@ -373,10 +451,21 @@ def smesolve( def ssesolve( - H, psi0, tlist, sc_ops=(), heterodyne=False, *, - e_ops=(), args={}, ntraj=500, options=None, - seeds=None, target_tol=None, timeout=None, **kwargs -): + H: QobjEvoLike, + psi0: Qobj, + tlist: ArrayLike, + sc_ops: QobjEvoLike | Sequence[QobjEvoLike] = (), + heterodyne: bool = False, + *, + e_ops: EopsLike | list[EopsLike] | dict[Any, EopsLike] = None, + args: dict[str, Any] = None, + ntraj: int = 500, + options: dict[str, Any] = None, + seeds: int | SeedSequence | Sequence[int | SeedSequence] = None, + target_tol: float | tuple[float, float] | list[tuple[float, float]] = None, + timeout: float = None, + **kwargs +) -> StochasticResult: """ Solve stochastic Schrodinger equation. @@ -396,11 +485,10 @@ def ssesolve( sc_ops : list of (:obj:`.QobjEvo`, :obj:`.QobjEvo` compatible format) List of stochastic collapse operators. - e_ops : :class:`.qobj`, callable, or list, optional - Single operator or list of operators for which to evaluate - expectation values or callable or list of callable. - Callable signature must be, `f(t: float, state: Qobj)`. - See :func:`expect` for more detail of operator expectation. + e_ops : :obj:`.Qobj`, callable, list or dict, optional + Single operator, or list or dict of operators, for which to evaluate + expectation values. Operator can be Qobj, QobjEvo or callables with the + signature `f(t: float, state: Qobj) -> Any`. 
args : dict, optional Dictionary of parameters for time-dependent Hamiltonians and @@ -442,14 +530,16 @@ def ssesolve( | Whether or not to store the state vectors or density matrices. On `None` the states will be saved if no expectation operators are given. - - | store_measurement: bool - Whether to store the measurement and wiener process, or brownian - noise for each trajectories. + - | store_measurement: str, {'start', 'middle', 'end', ''} + | Whether and how to store the measurement for each trajectories. + 'start', 'middle', 'end' indicate when in the interval the + expectation value of the ``m_ops`` is taken. - | keep_runs_results : bool | Whether to store results from all trajectories or just store the averages. - | normalize_output : bool - | Normalize output state to hide ODE numerical errors. + | Normalize output state to hide ODE numerical errors. Only normalize + the state if the initial state is already normalized. - | progress_bar : str {'text', 'enhanced', 'tqdm', ''} | How to present the solver progress. 
'tqdm' uses the python module of the same name and raise an error @@ -484,6 +574,8 @@ def ssesolve( """ options = _solver_deprecation(kwargs, options, "stoc") H = QobjEvo(H, args=args, tlist=tlist) + if not isinstance(sc_ops, Sequence): + sc_ops = [sc_ops] sc_ops = [QobjEvo(c_op, args=args, tlist=tlist) for c_op in sc_ops] sol = SSESolver(H, sc_ops, options=options, heterodyne=heterodyne) return sol.run( @@ -498,10 +590,9 @@ class StochasticSolver(MultiTrajSolver): """ name = "StochasticSolver" - _resultclass = StochasticResult _avail_integrators = {} - system = None _open = None + solver_options = { "progress_bar": "text", "progress_kwargs": {"chunk_size": 10}, @@ -514,23 +605,50 @@ class StochasticSolver(MultiTrajSolver): "num_cpus": None, "bitgenerator": None, "method": "platen", - "store_measurement": False, + "store_measurement": "", } - def __init__(self, H, sc_ops, heterodyne, *, c_ops=(), options=None): - self.options = options + def _resultclass(self, e_ops, options, solver, stats): + return StochasticResult( + e_ops, + options, + solver=solver, + stats=stats, + heterodyne=self.heterodyne, + ) + + def _trajectory_resultclass(self, e_ops, options): + return StochasticTrajResult( + e_ops, + options, + m_ops=self.m_ops, + dw_factor=self.dW_factors, + heterodyne=self.heterodyne, + ) + + def _initialize_stats(self): + stats = super()._initialize_stats() + if self._open: + stats["solver"] = "Stochastic Master Equation Evolution" + else: + stats["solver"] = "Stochastic Schrodinger Equation Evolution" + return stats + + def __init__( + self, + H: Qobj | QobjEvo, + sc_ops: Sequence[Qobj | QobjEvo], + heterodyne: bool, + *, + c_ops: Sequence[Qobj | QobjEvo] = (), + options: dict[str, Any] = None, + ): self._heterodyne = heterodyne if self.name == "ssesolve" and c_ops: raise ValueError("c_ops are not supported by ssesolve.") rhs = _StochasticRHS(self._open, H, sc_ops, c_ops, heterodyne) - self.rhs = rhs - self.system = rhs - self.options = options - 
self.seed_sequence = np.random.SeedSequence() - self._integrator = self._get_integrator() - self._state_metadata = {} - self.stats = self._initialize_stats() + super().__init__(rhs, options=options) if heterodyne: self._m_ops = [] @@ -542,15 +660,15 @@ def __init__(self, H, sc_ops, heterodyne, *, c_ops=(), options=None): self._dW_factors = np.ones(len(sc_ops)) @property - def heterodyne(self): + def heterodyne(self) -> bool: return self._heterodyne @property - def m_ops(self): + def m_ops(self) -> list[QobjEvo | Qobj]: return self._m_ops @m_ops.setter - def m_ops(self, new_m_ops): + def m_ops(self, new_m_ops: list[QobjEvo | Qobj]): """ Measurements operators. @@ -597,11 +715,11 @@ def m_ops(self, new_m_ops): self._m_ops = new_m_ops @property - def dW_factors(self): + def dW_factors(self) -> np.typing.NDArray[float]: return self._dW_factors @dW_factors.setter - def dW_factors(self, new_dW_factors): + def dW_factors(self, new_dW_factors: np.typing.NDArray[float]): """ Scaling of the noise on the measurements. Default are ``1`` for homodyne and ``sqrt(1/2)`` for heterodyne. @@ -619,26 +737,158 @@ def dW_factors(self, new_dW_factors): ) self._dW_factors = new_dW_factors - def _run_one_traj(self, seed, state, tlist, e_ops): + def _integrate_one_traj(self, seed, tlist, result): + for t, state, noise in self._integrator.run(tlist): + result.add(t, self._restore_state(state, copy=False), noise) + return seed, result + + def run_from_experiment( + self, + state: Qobj, + tlist: ArrayLike, + noise: Sequence[float], + *, + args: dict[str, Any] = None, + e_ops: EopsLike | list[EopsLike] | dict[Any, EopsLike] = None, + measurement: bool = False, + ): """ - Run one trajectory and return the result. + Run a single trajectory from a given state and noise. + + Parameters + ---------- + state : Qobj + Initial state of the system. + + tlist : array_like + List of times for which to evaluate the state. The tlist must + increase uniformly. 
+ + noise : array_like + Noise for each time step and each stochastic collapse operators. + For homodyne detection, ``noise[i, t_idx]`` is the Wiener + increments between ``tlist[t_idx]`` and ``tlist[t_idx+1]`` for the + i-th sc_ops. + For heterodyne detection, an extra dimension is added for the pair + of measurement: ``noise[i, j, t_idx]``with ``j`` in ``{0,1}``. + + args : dict, optional + Arguments to pass to the Hamiltonian and collapse operators. + + e_ops : :obj:`.Qobj`, callable, list or dict, optional + Single operator, or list or dict of operators, for which to + evaluate expectation values. Operator can be Qobj, QobjEvo or + callables with the signature `f(t: float, state: Qobj) -> Any`. + + measurement : bool, default : False + Whether the passed noise is the Wiener increments ``dW`` (gaussian + noise with standard derivation of dt**0.5), or the measurement. + + Homodyne measurement is:: + + noise[i][t] = dW/dt + expect(sc_ops[i] + sc_ops[i].dag, state[t]) + + Heterodyne measurement is:: + + noise[i][0][t] = dW/dt * 2**0.5 + + expect(sc_ops[i] + sc_ops[i].dag, state[t]) + + noise[i][1][t] = dW/dt * 2**0.5 + -1j * expect(sc_ops[i] - sc_ops[i].dag, state[t]) + + Note that this function expects the expectation values to be taken + at the start of the time step, corresponding to the "start" setting + for the "store_measurements" option. + + Only available for limited integration methods. + + Returns + ------- + result : StochasticTrajResult + Result of the trajectory. + + Notes + ----- + Only default values of `m_ops` and `dW_factors` are supported. 
""" - result = StochasticTrajResult( - e_ops, - self.options, - m_ops=self.m_ops, - dw_factor=self.dW_factors, - heterodyne=self.heterodyne, + start_time = time() + self._argument(args) + stats = self._initialize_stats() + dt = tlist[1] - tlist[0] + if not np.allclose(dt, np.diff(tlist)): + raise ValueError("tlist must be evenly spaced.") + generator = PreSetWiener( + noise, tlist, len(self.rhs.sc_ops), self.heterodyne, measurement ) - generator = self._get_generator(seed) - self._integrator.set_state(tlist[0], state, generator) - state_t = self._restore_state(state, copy=False) - result.add(tlist[0], state_t, None) - for t in tlist[1:]: - t, state, noise = self._integrator.integrate(t, copy=False) - state_t = self._restore_state(state, copy=False) - result.add(t, state_t, noise) - return seed, result + state0 = self._prepare_state(state) + try: + old_dt = None + if "dt" in self._integrator.options: + old_dt = self._integrator.options["dt"] + self._integrator.options["dt"] = dt + mid_time = time() + result = self._initialize_run_one_traj( + None, state0, tlist, e_ops, generator=generator + ) + _, result = self._integrate_one_traj(None, tlist, result) + except Exception as err: + if old_dt is not None: + self._integrator.options["dt"] = old_dt + raise + + stats['preparation time'] += mid_time - start_time + stats['run time'] = time() - mid_time + result.stats.update(stats) + return result + + @overload + def step( + self, t: float, + *, + args: dict[str, Any], + copy: bool, + wiener_increment: Literal[False], + ) -> Qobj: ... + + @overload + def step( + self, t: float, + *, + args: dict[str, Any], + copy: bool, + wiener_increment: Literal[True], + ) -> tuple[Qobj, np.typing.NDArray[float]]: ... + + def step(self, t, *, args=None, copy=True, wiener_increment=False): + """ + Evolve the state to ``t`` and return the state as a :obj:`.Qobj`. + + Parameters + ---------- + t : double + Time to evolve to, must be higher than the last call. 
+ + args : dict, optional + Update the ``args`` of the system. + The change is effective from the beginning of the interval. + Changing ``args`` can slow the evolution. + + copy : bool, default: True + Whether to return a copy of the data or the data in the ODE solver. + + wiener_increment: bool, default: False + Whether to return ``dW`` in addition to the state. + """ + if not self._integrator._is_set: + raise RuntimeError("The `start` method must called first.") + self._argument(args) + _, state, dW = self._integrator.integrate(t, copy=False) + state = self._restore_state(state, copy=copy) + if wiener_increment: + if self.heterodyne: + dW = dW.reshape(-1, 2) + return state, dW + return state @classmethod def avail_integrators(cls): @@ -650,7 +900,7 @@ def avail_integrators(cls): } @property - def options(self): + def options(self) -> dict[str, Any]: """ Options for stochastic solver: @@ -663,8 +913,10 @@ def options(self): On `None` the states will be saved if no expectation operators are given. - store_measurement: bool, default: False - Whether to store the measurement for each trajectories. + store_measurement: str, {'start', 'middle', 'end', ''}, default: "" + Whether and how to store the measurement for each trajectories. + 'start', 'middle', 'end' indicate when in the interval the + expectation value of the ``m_ops`` is taken. Storing measurements will also store the wiener process, or brownian noise for each trajectories. @@ -709,17 +961,20 @@ def options(self): return self._options @options.setter - def options(self, new_options): + def options(self, new_options: dict[str, Any]): MultiTrajSolver.options.fset(self, new_options) @classmethod - def WeinerFeedback(cls, default=None): + def WienerFeedback( + cls, + default: Callable[[float], np.typing.NDArray[float]] = None, + ): """ - Weiner function of the trajectory argument for time dependent systems. + Wiener function of the trajectory argument for time dependent systems. 
When used as an args: - ``QobjEvo([op, func], args={"W": SMESolver.WeinerFeedback()})`` + ``QobjEvo([op, func], args={"W": SMESolver.WienerFeedback()})`` The ``func`` will receive a function as ``W`` that return an array of wiener processes values at ``t``. The wiener process for the i-th @@ -729,7 +984,7 @@ def WeinerFeedback(cls, default=None): .. note:: - WeinerFeedback can't be added to a running solver when updating + WienerFeedback can't be added to a running solver when updating arguments between steps: ``solver.step(..., args={})``. Parameters @@ -739,10 +994,14 @@ def WeinerFeedback(cls, default=None): When not passed, a function returning ``np.array([0])`` is used. """ - return _WeinerFeedback(default) + return _WienerFeedback(default) @classmethod - def StateFeedback(cls, default=None, raw_data=False): + def StateFeedback( + cls, + default: Qobj | _data.Data = None, + raw_data: bool = False + ): """ State of the evolution to be used in a time-dependent operator. @@ -809,7 +1068,7 @@ class SMESolver(StochasticSolver): "num_cpus": None, "bitgenerator": None, "method": "platen", - "store_measurement": False, + "store_measurement": "", } @@ -853,5 +1112,5 @@ class SSESolver(StochasticSolver): "num_cpus": None, "bitgenerator": None, "method": "platen", - "store_measurement": False, + "store_measurement": "", } diff --git a/qutip/tests/core/data/conftest.py b/qutip/tests/core/data/conftest.py index b4348eaa46..6b8b568e73 100644 --- a/qutip/tests/core/data/conftest.py +++ b/qutip/tests/core/data/conftest.py @@ -67,7 +67,7 @@ def random_scipy_csr(shape, density, sorted_): cols = np.random.choice(np.arange(shape[1]), nnz) sci = scipy.sparse.coo_matrix((data, (rows, cols)), shape=shape).tocsr() if not sorted_: - shuffle_indices_scipy_csr(sci) + sci = shuffle_indices_scipy_csr(sci) return sci diff --git a/qutip/tests/core/data/test_convert.py b/qutip/tests/core/data/test_convert.py index 917003dced..0cbb7aaf9c 100644 --- a/qutip/tests/core/data/test_convert.py +++ 
b/qutip/tests/core/data/test_convert.py @@ -68,7 +68,7 @@ def test_converters(from_, base, to_, dtype): dtype_types = list(data.to._str2type.values()) + list(data.to.dtypes) @pytest.mark.parametrize(['input', 'type_'], zip(dtype_names, dtype_types), ids=[str(dtype) for dtype in dtype_names]) -def test_parse_error(input, type_): +def test_parse(input, type_): assert data.to.parse(input) is type_ diff --git a/qutip/tests/core/data/test_mathematics.py b/qutip/tests/core/data/test_mathematics.py index 8d30aa9763..dc53bfbfeb 100644 --- a/qutip/tests/core/data/test_mathematics.py +++ b/qutip/tests/core/data/test_mathematics.py @@ -937,6 +937,17 @@ def op_numpy(self, matrix): ] +class TestSqrtm(UnaryOpMixin): + def op_numpy(self, matrix): + return scipy.linalg.sqrtm(matrix) + + shapes = shapes_square() + bad_shapes = shapes_not_square() + specialisations = [ + pytest.param(data.sqrtm_dense, Dense, Dense), + ] + + class TestTranspose(UnaryOpMixin): def op_numpy(self, matrix): return matrix.T diff --git a/qutip/tests/core/data/test_properties.py b/qutip/tests/core/data/test_properties.py index 88697ee152..e90b23a4cf 100644 --- a/qutip/tests/core/data/test_properties.py +++ b/qutip/tests/core/data/test_properties.py @@ -3,6 +3,8 @@ from qutip import data as _data from qutip import CoreOptions +from . 
import conftest +from qutip.core.data.dia import clean_dia @pytest.fixture(params=[_data.CSR, _data.Dense, _data.Dia], ids=["CSR", "Dense", "Dia"]) def datatype(request): @@ -177,3 +179,67 @@ def test_isdiag(self, shape, datatype): mat[1, 0] = 1 data = _data.to(datatype, _data.Dense(mat)) assert not _data.isdiag(data) + + +class TestIsEqual: + def op_numpy(self, left, right, atol, rtol): + return np.allclose(left.to_array(), right.to_array(), rtol, atol) + + def rand_dense(shape): + return conftest.random_dense(shape, False) + + def rand_diag(shape): + return conftest.random_diag(shape, 0.5, True) + + def rand_csr(shape): + return conftest.random_csr(shape, 0.5, True) + + @pytest.mark.parametrize("factory", [rand_dense, rand_diag, rand_csr]) + @pytest.mark.parametrize("shape", [(1, 20), (20, 20), (20, 2)]) + def test_same_shape(self, factory, shape): + atol = 1e-8 + rtol = 1e-6 + A = factory(shape) + B = factory(shape) + assert _data.isequal(A, A, atol, rtol) + assert _data.isequal(B, B, atol, rtol) + assert ( + _data.isequal(A, B, atol, rtol) == self.op_numpy(A, B, atol, rtol) + ) + + @pytest.mark.parametrize("factory", [rand_dense, rand_diag, rand_csr]) + @pytest.mark.parametrize("shapeA", [(1, 10), (9, 9), (10, 2)]) + @pytest.mark.parametrize("shapeB", [(1, 9), (10, 10), (10, 1)]) + def test_different_shape(self, factory, shapeA, shapeB): + A = factory(shapeA) + B = factory(shapeB) + assert not _data.isequal(A, B, np.inf, np.inf) + + @pytest.mark.parametrize("rtol", [1e-6, 100]) + @pytest.mark.parametrize("factory", [rand_dense, rand_diag, rand_csr]) + @pytest.mark.parametrize("shape", [(1, 20), (20, 20), (20, 2)]) + def test_rtol(self, factory, shape, rtol): + mat = factory(shape) + assert _data.isequal(mat + mat * (rtol / 10), mat, 1e-14, rtol) + assert not _data.isequal(mat * (1 + rtol * 10), mat, 1e-14, rtol) + + @pytest.mark.parametrize("atol", [1e-14, 1e-6, 100]) + @pytest.mark.parametrize("factory", [rand_dense, rand_diag, rand_csr]) + 
@pytest.mark.parametrize("shape", [(1, 20), (20, 20), (20, 2)]) + def test_atol(self, factory, shape, atol): + A = factory(shape) + B = factory(shape) + assert _data.isequal(A, A + B * (atol / 10), atol, 0) + assert not _data.isequal(A, A + B * (atol * 10), atol, 0) + + @pytest.mark.parametrize("shape", [(1, 20), (20, 20), (20, 2)]) + def test_csr_mismatch_sort(self, shape): + A = conftest.random_csr(shape, 0.5, False) + B = A.copy().sort_indices() + assert _data.isequal(A, B) + + @pytest.mark.parametrize("shape", [(1, 20), (20, 20), (20, 2)]) + def test_dia_mismatch_sort(self, shape): + A = conftest.random_diag(shape, 0.5, False) + B = clean_dia(A) + assert _data.isequal(A, B) diff --git a/qutip/tests/core/test_coefficient.py b/qutip/tests/core/test_coefficient.py index bb5204c611..ea87c122dd 100644 --- a/qutip/tests/core/test_coefficient.py +++ b/qutip/tests/core/test_coefficient.py @@ -230,7 +230,7 @@ def test_CoeffOptions(): base = "1 + 1. + 1j" options = [] options.append(CompilationOptions(accept_int=True)) - options.append(CompilationOptions(accept_float=False)) + options.append(CompilationOptions(accept_float=True)) options.append(CompilationOptions(static_types=True)) options.append(CompilationOptions(try_parse=False)) options.append(CompilationOptions(use_cython=False)) @@ -244,10 +244,12 @@ def test_CoeffOptions(): def test_warn_no_cython(): option = CompilationOptions(use_cython=False) WARN_MISSING_MODULE[0] = 1 - with pytest.warns( - UserWarning, match="`cython` and `filelock` are required" - ): + with pytest.warns(UserWarning) as warning: coefficient("t", compile_opt=option) + assert all( + module in warning[0].message.args[0] + for module in ["cython", "filelock", "setuptools"] + ) @pytest.mark.requires_cython @pytest.mark.parametrize(['codestring', 'args', 'reference'], [ @@ -381,9 +383,13 @@ def test_CoeffArray(order): assert derrs[i] == pytest.approx(0.0, abs=0.0001) -def test_CoeffFromScipy(): +@pytest.mark.parametrize('imag', [True, False]) +def 
test_CoeffFromScipyPPoly(imag): tlist = np.linspace(0, 1.01, 101) - y = np.exp((-1 + 1j) * tlist) + if imag: + y = np.exp(-1j * tlist) + else: + y = np.exp(-1 * tlist) coeff = coefficient(y, tlist=tlist, order=3) from_scipy = coefficient(interp.CubicSpline(tlist, y)) @@ -398,6 +404,24 @@ def test_CoeffFromScipy(): _assert_eq_over_interval(coeff, from_scipy, rtol=1e-8, inside=True) +@pytest.mark.parametrize('imag', [True, False]) +def test_CoeffFromScipyBSpline(imag): + tlist = np.linspace(-0.1, 1.1, 121) + if imag: + y = np.exp(-1j * tlist) + else: + y = np.exp(-1 * tlist) + + spline = interp.BSpline(tlist, y, 2) + + def func(t): + return complex(spline(t)) + + coverted = coefficient(spline) + raw_scipy = coefficient(func) + _assert_eq_over_interval(coverted, raw_scipy, rtol=1e-8, inside=True) + + @pytest.mark.parametrize('map_func', [ pytest.param(qutip.solver.parallel.parallel_map, id='parallel_map'), pytest.param(qutip.solver.parallel.loky_pmap, id='loky_pmap'), diff --git a/qutip/tests/core/test_expect.py b/qutip/tests/core/test_expect.py index 906880ceca..33dd1b5178 100644 --- a/qutip/tests/core/test_expect.py +++ b/qutip/tests/core/test_expect.py @@ -92,11 +92,10 @@ def test_operator_by_basis(self, operator, state, expected): def test_broadcast_operator_list(self, operators, state, expected): result = qutip.expect(operators, state) - expected_dtype = (np.float64 if all(op.isherm for op in operators) - else np.complex128) - assert isinstance(result, np.ndarray) - assert result.dtype == expected_dtype - assert list(result) == list(expected) + assert len(result) == len(operators) + for part, operator, expected_part in zip(result, operators, expected): + assert isinstance(part, float if operator.isherm else complex) + assert part == expected_part def test_broadcast_state_list(self, operator, states, expected): result = qutip.expect(operator, states) @@ -162,18 +161,8 @@ def test_compatibility_with_solver(solve): np.testing.assert_allclose(np.array(direct_), 
indirect_, atol=1e-12) -def test_no_real_attribute(monkeypatch): - """This tests ensures that expect still works even if the output of a - specialisation does not have the ``real`` attribute. This is the case for - the tensorflow and cupy data layers.""" - - def mocker_expect_return(oper, state): - """ - We simply return None which does not have the `real` attribute. - """ - return "object without .real" - - monkeypatch.setattr(_data, "expect", mocker_expect_return) - +def test_no_real_casting(monkeypatch): sz = qutip.sigmaz() # the choice of the matrix does not matter - assert "object without .real" == qutip.expect(sz, sz) + assert isinstance(qutip.expect(sz, sz), float) + with qutip.CoreOptions(auto_real_casting=False): + assert isinstance(qutip.expect(sz, sz), complex) diff --git a/qutip/tests/core/test_gates.py b/qutip/tests/core/test_gates.py index 00f47ffedd..27f01cdf9d 100644 --- a/qutip/tests/core/test_gates.py +++ b/qutip/tests/core/test_gates.py @@ -111,12 +111,18 @@ class TestCliffordGroup: Test a sufficient set of conditions to prove that we have a full Clifford group for a single qubit. """ - clifford = gates.qubit_clifford_group() + with qutip.CoreOptions(default_dtype="dia"): + clifford = gates.qubit_clifford_group() + pauli = [qutip.qeye(2), qutip.sigmax(), qutip.sigmay(), qutip.sigmaz()] def test_single_qubit_group_dimension_is_24(self): assert len(self.clifford) == 24 + def test_dtype(self): + for gate in self.clifford: + assert isinstance(gate.data, qutip.data.Dia) + def test_all_elements_different(self): clifford = [_remove_global_phase(gate) for gate in self.clifford] for i, gate in enumerate(clifford): @@ -124,7 +130,7 @@ def test_all_elements_different(self): # Big tolerance because we actually want to test the inverse. 
assert not np.allclose(gate.full(), other.full(), atol=1e-3) - @pytest.mark.parametrize("gate", gates.qubit_clifford_group()) + @pytest.mark.parametrize("gate", gates.qubit_clifford_group(dtype="dense")) def test_gate_normalises_pauli_group(self, gate): """ Test the fundamental definition of the Clifford group, i.e. that it @@ -133,6 +139,8 @@ def test_gate_normalises_pauli_group(self, gate): # Assert that each Clifford gate maps the set of Pauli gates back onto # itself (though not necessarily in order). This condition is no # stronger than simply considering each (gate, Pauli) pair separately. + assert gate._isherm == qutip.data.isherm(gate.data) + assert isinstance(gate.data, qutip.data.Dense) pauli_gates = [_remove_global_phase(x) for x in self.pauli] normalised = [_remove_global_phase(gate * pauli * gate.dag()) for pauli in self.pauli] @@ -142,3 +150,50 @@ def test_gate_normalises_pauli_group(self, gate): del pauli_gates[i] break assert len(pauli_gates) == 0 + + +@pytest.mark.parametrize("alias", [qutip.data.Dense, "CSR"]) +@pytest.mark.parametrize(["gate_func", "args"], [ + pytest.param(gates.cnot, (), id="cnot"), + pytest.param(gates.cy_gate, (), id="cy_gate"), + pytest.param(gates.cz_gate, (), id="cz_gate"), + pytest.param(gates.cs_gate, (), id="cs_gate"), + pytest.param(gates.ct_gate, (), id="ct_gate"), + pytest.param(gates.s_gate, (), id="s_gate"), + pytest.param(gates.t_gate, (), id="t_gate"), + pytest.param(gates.cphase, (np.pi,), id="cphase"), + pytest.param(gates.csign, (), id="csign"), + pytest.param(gates.fredkin, (), id="fredkin"), + pytest.param(gates.toffoli, (), id="toffoli"), + pytest.param(gates.rx, (np.pi,), id="rx"), + pytest.param(gates.ry, (np.pi,), id="ry 1"), + pytest.param(gates.ry, (4 * np.pi,), id="ry 0"), + pytest.param(gates.rz, (1,), id="rz"), + pytest.param(gates.sqrtnot, (), id="sqrtnot"), + pytest.param(gates.snot, (), id="snot"), + pytest.param(gates.phasegate, (0,), id="phasegate 0"), + pytest.param(gates.phasegate, (1,), 
id="phasegate 1"), + pytest.param(gates.qrot, (0, 0), id="qrot id"), + pytest.param(gates.qrot, (2*np.pi, np.pi), id="qrot 0 pi"), + pytest.param(gates.qrot, (np.pi, 0), id="qrot pi 0"), + pytest.param(gates.qrot, (np.pi, np.pi), id="qrot pi pi"), + pytest.param(gates.berkeley, (), id="berkeley"), + pytest.param(gates.swapalpha, (0,), id="swapalpha 0"), + pytest.param(gates.swapalpha, (1,), id="swapalpha 1"), + pytest.param(gates.swap, (), id="swap"), + pytest.param(gates.iswap, (), id="iswap"), + pytest.param(gates.sqrtswap, (), id="sqrtswap"), + pytest.param(gates.sqrtiswap, (), id="sqrtiswap"), + pytest.param(gates.molmer_sorensen, (0,), id="molmer_sorensen 0"), + pytest.param(gates.molmer_sorensen, (np.pi,), id="molmer_sorensen pi"), + pytest.param(gates.hadamard_transform, (), id="hadamard_transform"), + ]) +def test_metadata(gate_func, args, alias): + gate = gate_func(*args, dtype=alias) + dtype = qutip.data.to.parse(alias) + assert isinstance(gate.data, dtype) + assert gate._isherm == qutip.data.isherm(gate.data) + assert gate._isunitary == gate._calculate_isunitary() + with qutip.CoreOptions(default_dtype=alias): + gate = gate_func(*args) + assert isinstance(gate.data, dtype) diff --git a/qutip/tests/core/test_metrics.py b/qutip/tests/core/test_metrics.py index 575cafdc97..def3e6b1a9 100644 --- a/qutip/tests/core/test_metrics.py +++ b/qutip/tests/core/test_metrics.py @@ -439,22 +439,25 @@ def test_qubit_triangle(self, dimension): assert dnorm(A + B) <= dnorm(A) + dnorm(B) + 1e-7 @pytest.mark.repeat(3) - @pytest.mark.parametrize("generator", [ - pytest.param(rand_super_bcsz, id="super"), - pytest.param(rand_unitary, id="unitary"), - ]) - def test_force_solve(self, dimension, generator): - """ - Metrics: checks that special cases for dnorm agree with SDP solutions. 
- """ - A, B = generator(dimension), generator(dimension) + def test_unitary_case(self, dimension): + """Check that the diamond norm is one for unitary maps.""" + A, B = rand_unitary(dimension), rand_unitary(dimension) assert ( - dnorm(A, B, force_solve=False) + dnorm(A, B) == pytest.approx(dnorm(A, B, force_solve=True), abs=1e-5) ) @pytest.mark.repeat(3) - def test_cptp(self, dimension, sparse): + def test_cp_case(self, dimension): + """Check that the diamond norm is one for unitary maps.""" + A = rand_super_bcsz(dimension, enforce_tp=False) + assert ( + dnorm(A) + == pytest.approx(dnorm(A, force_solve=True), abs=1e-5) + ) + + @pytest.mark.repeat(3) + def test_cptp_case(self, dimension, sparse): """Check that the diamond norm is one for CPTP maps.""" A = rand_super_bcsz(dimension) assert A.iscptp diff --git a/qutip/tests/core/test_operators.py b/qutip/tests/core/test_operators.py index 712194934f..477d7da8a4 100644 --- a/qutip/tests/core/test_operators.py +++ b/qutip/tests/core/test_operators.py @@ -246,11 +246,18 @@ def _id_func(val): return "" +def _check_meta(object, dtype): + if not isinstance(object, qutip.Qobj): + [_check_meta(qobj, dtype) for qobj in object] + return + assert isinstance(object.data, dtype) + assert object._isherm == qutip.data.isherm(object.data) + assert object._isunitary == object._calculate_isunitary() + + # random object accept `str` and base.Data -# Obtain all valid dtype from `to` -dtype_names = list(qutip.data.to._str2type.keys()) + list(qutip.data.to.dtypes) -dtype_types = list(qutip.data.to._str2type.values()) + list(qutip.data.to.dtypes) -@pytest.mark.parametrize(['alias', 'dtype'], zip(dtype_names, dtype_types), +dtype_names = ["dense", "csr"] + list(qutip.data.to.dtypes) +@pytest.mark.parametrize('alias', dtype_names, ids=[str(dtype) for dtype in dtype_names]) @pytest.mark.parametrize(['func', 'args'], [ (qutip.qdiags, ([0, 1, 2], 1)), @@ -258,7 +265,13 @@ def _id_func(val): (qutip.spin_Jx, (1,)), (qutip.spin_Jy, (1,)), 
(qutip.spin_Jz, (1,)), + (qutip.spin_Jm, (1,)), (qutip.spin_Jp, (1,)), + (qutip.sigmax, ()), + (qutip.sigmay, ()), + (qutip.sigmaz, ()), + (qutip.sigmap, ()), + (qutip.sigmam, ()), (qutip.destroy, (5,)), (qutip.create, (5,)), (qutip.fdestroy, (5, 0)), @@ -273,25 +286,23 @@ def _id_func(val): (qutip.qutrit_ops, ()), (qutip.phase, (5,)), (qutip.charge, (5,)), + (qutip.charge, (0.5, -0.5, 2.)), (qutip.tunneling, (5,)), + (qutip.tunneling, (4, 2)), + (qutip.qft, (5,)), + (qutip.swap, (2, 2)), + (qutip.swap, (3, 2)), (qutip.enr_destroy, ([3, 3, 3], 4)), (qutip.enr_identity, ([3, 3, 3], 4)), ], ids=_id_func) -def test_operator_type(func, args, alias, dtype): +def test_operator_type(func, args, alias): object = func(*args, dtype=alias) - if isinstance(object, qutip.Qobj): - assert isinstance(object.data, dtype) - else: - for obj in object: - assert isinstance(obj.data, dtype) + dtype = qutip.data.to.parse(alias) + _check_meta(object, dtype) with qutip.CoreOptions(default_dtype=alias): object = func(*args) - if isinstance(object, qutip.Qobj): - assert isinstance(object.data, dtype) - else: - for obj in object: - assert isinstance(obj.data, dtype) + _check_meta(object, dtype) @pytest.mark.parametrize('dims', [8, 15, [2] * 4]) @@ -331,6 +342,7 @@ def test_qeye_like(dims, superrep, dtype): expected.superrep = superrep assert new == expected assert new.dtype is qutip.data.to.parse(dtype) + assert new._isherm opevo = qutip.QobjEvo(op) new = qutip.qeye_like(op) @@ -360,6 +372,7 @@ def test_qzero_like(dims, superrep, dtype): expected.superrep = superrep assert new == expected assert new.dtype is qutip.data.to.parse(dtype) + assert new._isherm opevo = qutip.QobjEvo(op) new = qutip.qzero_like(op) @@ -387,6 +400,7 @@ def test_fcreate_fdestroy(n_sites): assert qutip.commutator(c_1, d_0, 'anti') == zero_tensor assert qutip.commutator(identity, c_0) == zero_tensor + @pytest.mark.parametrize(['func', 'args'], [ (qutip.qzero, (None,)), (qutip.fock, (None,)), diff --git 
a/qutip/tests/core/test_qobj.py b/qutip/tests/core/test_qobj.py index 7aa7dc0b3b..6e3ec30ee9 100644 --- a/qutip/tests/core/test_qobj.py +++ b/qutip/tests/core/test_qobj.py @@ -442,6 +442,15 @@ def test_QobjEquals(): q2 = qutip.Qobj(-data) assert q1 != q2 + # data's entry are of order 1, + with qutip.CoreOptions(atol=10): + assert q1 == q2 + assert q1 != q2 * 100 + + with qutip.CoreOptions(rtol=10): + assert q1 == q2 + assert q1 == q2 * 100 + def test_QobjGetItem(): "qutip.Qobj getitem" @@ -552,6 +561,12 @@ def test_QobjDiagonals(): assert np.all(b == np.diag(data)) +def test_diag_type(): + assert qutip.sigmaz().diag().dtype == np.float64 + assert (1j * qutip.sigmaz()).diag().dtype == np.complex128 + with qutip.CoreOptions(auto_real_casting=False): + assert qutip.sigmaz().diag().dtype == np.complex128 + def test_QobjEigenEnergies(): "qutip.Qobj eigenenergies" data = np.eye(5) @@ -1123,21 +1138,11 @@ def test_trace(): assert sz.tr() == 0 -def test_no_real_attribute(monkeypatch): - """This tests ensures that trace still works even if the output of a - specialisation does not have the ``real`` attribute. This is the case for - the tensorflow and cupy data layers.""" - - def mocker_trace_return(oper): - """ - We simply return a string which does not have the `real` attribute. 
- """ - return "object without .real" - - monkeypatch.setattr(_data, "trace", mocker_trace_return) - - sz = qutip.sigmaz() # the choice of the matrix does not matter - assert "object without .real" == sz.tr() +def test_no_real_casting(): + sz = qutip.sigmaz() + assert isinstance(sz.tr(), float) + with qutip.CoreOptions(auto_real_casting=False): + assert isinstance(sz.tr(), complex) @pytest.mark.parametrize('inplace', [True, False], ids=['inplace', 'new']) @@ -1264,7 +1269,13 @@ def test_data_as(): assert "dia_matrix" in str(err.value) -@pytest.mark.parametrize('dtype', ["CSR", "Dense"]) +@pytest.mark.parametrize('dtype', ["CSR", "Dense", "Dia"]) def test_qobj_dtype(dtype): obj = qutip.qeye(2, dtype=dtype) - assert obj.dtype == qutip.data.to.parse(dtype) \ No newline at end of file + assert obj.dtype == qutip.data.to.parse(dtype) + + +@pytest.mark.parametrize('dtype', ["CSR", "Dense", "Dia"]) +def test_dtype_in_info_string(dtype): + obj = qutip.qeye(2, dtype=dtype) + assert dtype.lower() in str(obj).lower() diff --git a/qutip/tests/core/test_superop_reps.py b/qutip/tests/core/test_superop_reps.py index 80da2bd821..7190d0fe8c 100644 --- a/qutip/tests/core/test_superop_reps.py +++ b/qutip/tests/core/test_superop_reps.py @@ -175,39 +175,6 @@ def test_random_iscptp(self, superoperator): assert superoperator.iscptp assert superoperator.ishp - @pytest.mark.parametrize(['qobj', 'hp', 'cp', 'tp'], [ - pytest.param(sprepost(destroy(2), create(2)), True, True, False), - pytest.param(sprepost(destroy(2), destroy(2)), False, False, False), - pytest.param(qeye(2), True, True, True), - pytest.param(sigmax(), True, True, True), - pytest.param(tensor(sigmax(), qeye(2)), True, True, True), - pytest.param(0.5 * (to_super(tensor(sigmax(), qeye(2))) - + to_super(tensor(qeye(2), sigmay()))), - True, True, True, - id="linear combination of bipartite unitaries"), - pytest.param(Qobj(swap(), dims=[[[2],[2]]]*2, superrep='choi'), - True, False, True, - id="partial transpose map"), - 
pytest.param(Qobj(qeye(4)*0.9, dims=[[[2],[2]]]*2), True, True, False, - id="subnormalized map"), - pytest.param(basis(2, 0), False, False, False, id="ket"), - ]) - def test_known_iscptp(self, qobj, hp, cp, tp): - """ - Superoperator: ishp, iscp, istp and iscptp known cases. - """ - assert qobj.ishp == hp - assert qobj.iscp == cp - assert qobj.istp == tp - assert qobj.iscptp == (cp and tp) - - def test_choi_tr(self): - """ - Superoperator: Trace returned by to_choi matches docstring. - """ - for dims in range(2, 5): - assert abs(to_choi(identity(dims)).tr() - dims) < tol - # Conjugation by a creation operator a = create(2).dag() @@ -244,7 +211,6 @@ def test_choi_tr(self): pytest.param(ptr_swap, True, False, True, id="partial transpose map"), pytest.param(subnorm_map, True, True, False, id="subnorm map"), pytest.param(basis(2), False, False, False, id="not an operator"), - ]) def test_known_iscptp(self, qobj, shouldhp, shouldcp, shouldtp): """ diff --git a/qutip/tests/core/test_tensor.py b/qutip/tests/core/test_tensor.py index 10e493e46f..08556bf66e 100644 --- a/qutip/tests/core/test_tensor.py +++ b/qutip/tests/core/test_tensor.py @@ -255,3 +255,13 @@ def test_non_qubit_systems(self, dimensions): test = expand_operator(base_test, dims=dimensions, targets=targets) assert test.dims == expected.dims np.testing.assert_allclose(test.full(), expected.full()) + + def test_dtype(self): + expanded_qobj = expand_operator( + qutip.gates.cnot(), dims=[2, 2, 2], targets=[0, 1] + ).data + assert isinstance(expanded_qobj, qutip.data.CSR) + expanded_qobj = expand_operator( + qutip.gates.cnot(), dims=[2, 2, 2], targets=[0, 1], dtype="dense" + ).data + assert isinstance(expanded_qobj, qutip.data.Dense) diff --git a/qutip/tests/solver/test_correlation.py b/qutip/tests/solver/test_correlation.py index bf96b045da..1a58247e88 100644 --- a/qutip/tests/solver/test_correlation.py +++ b/qutip/tests/solver/test_correlation.py @@ -2,6 +2,7 @@ import functools from itertools import product 
import numpy as np +from scipy.integrate import trapezoid import qutip pytestmark = [pytest.mark.usefixtures("in_temporary_directory")] @@ -91,7 +92,7 @@ def test_spectrum_solver_equivalence_to_es(spectrum): def _trapz_2d(z, xy): """2D trapezium-method integration assuming a square grid.""" dx = xy[1] - xy[0] - return dx*dx * np.trapz(np.trapz(z, axis=0)) + return dx*dx * trapezoid(trapezoid(z, axis=0)) def _n_correlation(times, n): @@ -135,7 +136,7 @@ def _2ls_g2_0(H, c_ops): e_ops=[qutip.num(2)], args=_2ls_args).expect[0] integral_correlation = _trapz_2d(np.real(correlation), times) - integral_n_expectation = np.trapz(n_expectation, times) + integral_n_expectation = trapezoid(n_expectation, times) # Factor of two from negative time correlations. return 2 * integral_correlation / integral_n_expectation**2 diff --git a/qutip/tests/solver/test_integrator.py b/qutip/tests/solver/test_integrator.py index e87f334ba6..f7a05ff551 100644 --- a/qutip/tests/solver/test_integrator.py +++ b/qutip/tests/solver/test_integrator.py @@ -166,3 +166,26 @@ def test_concurent_usage(integrator): assert inter1.integrate(t)[1].to_array()[0, 0] == expected1 expected2 = pytest.approx(np.exp(-t/2), abs=1e-5) assert inter2.integrate(t)[1].to_array()[0, 0] == expected2 + +@pytest.mark.parametrize('integrator', + [IntegratorVern7, IntegratorVern9], + ids=["vern7", 'vern9'] +) +def test_pickling_vern_methods(integrator): + """Test whether VernN methods can be pickled and hence used in multiprocessing""" + opt = {'atol':1e-10, 'rtol':1e-7} + + sys = qutip.QobjEvo(0.5*qutip.qeye(1)) + inter = integrator(sys, opt) + inter.set_state(0, qutip.basis(1,0).data) + + import pickle + pickled = pickle.dumps(inter, -1) + recreated = pickle.loads(pickled) + recreated.set_state(0, qutip.basis(1,0).data) + + for t in np.linspace(0,1,6): + expected = pytest.approx(np.exp(t/2), abs=1e-5) + result1 = inter.integrate(t)[1].to_array()[0, 0] + result2 = recreated.integrate(t)[1].to_array()[0, 0] + assert result1 == 
result2 == expected diff --git a/qutip/tests/solver/test_mcsolve.py b/qutip/tests/solver/test_mcsolve.py index 717fc74fc1..3566e69b2a 100644 --- a/qutip/tests/solver/test_mcsolve.py +++ b/qutip/tests/solver/test_mcsolve.py @@ -3,7 +3,6 @@ import qutip from copy import copy from qutip.solver.mcsolve import mcsolve, MCSolver -from qutip.solver.solver_base import Solver def _return_constant(t, args): @@ -32,10 +31,11 @@ class StatesAndExpectOutputCase: """ size = 10 h = qutip.num(size) - state = qutip.basis(size, size-1) + pure_state = qutip.basis(size, size-1) + mixed_state = qutip.maximally_mixed_dm(size) times = np.linspace(0, 1, 101) e_ops = [qutip.num(size)] - ntraj = 2000 + ntraj = 500 def _assert_states(self, result, expected, tol): assert hasattr(result, 'states') @@ -53,13 +53,15 @@ def _assert_expect(self, result, expected, tol): np.testing.assert_allclose(test, expected_part, rtol=tol) @pytest.mark.parametrize("improved_sampling", [True, False]) - def test_states_and_expect(self, hamiltonian, args, c_ops, expected, tol, - improved_sampling): + def test_states_and_expect(self, hamiltonian, state, args, c_ops, + expected, tol, improved_sampling): options = {"store_states": True, "map": "serial", "improved_sampling": improved_sampling} - result = mcsolve(hamiltonian, self.state, self.times, args=args, + result = mcsolve(hamiltonian, state, self.times, args=args, c_ops=c_ops, e_ops=self.e_ops, ntraj=self.ntraj, - options=options, target_tol=0.05) + options=options, + # target_tol not supported for mixed initial state + target_tol=(0.05 if state.isket else None)) self._assert_expect(result, expected, tol) self._assert_states(result, expected, tol) @@ -71,8 +73,6 @@ class TestNoCollapse(StatesAndExpectOutputCase): """ def pytest_generate_tests(self, metafunc): tol = 1e-8 - expect = (qutip.expect(self.e_ops[0], self.state) - * np.ones_like(self.times)) hamiltonian_types = [ (self.h, "Qobj"), ([self.h], "list"), @@ -80,12 +80,22 @@ def pytest_generate_tests(self, 
metafunc): args={'constant': 0}), "QobjEvo"), (callable_qobj(self.h), "callable"), ] - cases = [pytest.param(hamiltonian, {}, [], [expect], tol, id=id) + cases = [pytest.param(hamiltonian, {}, [], tol, id=id) for hamiltonian, id in hamiltonian_types] metafunc.parametrize( - ['hamiltonian', 'args', 'c_ops', 'expected', 'tol'], + ['hamiltonian', 'args', 'c_ops', 'tol'], cases) + initial_state_types = [ + (self.pure_state, "pure"), + (self.mixed_state, "mixed"), + ] + expect = [qutip.expect(self.e_ops[0], state) * np.ones_like(self.times) + for state, _ in initial_state_types] + cases = [pytest.param(state, [exp], id=id) + for (state, id), exp in zip(initial_state_types, expect)] + metafunc.parametrize(['state', 'expected'], cases) + # Previously the "states_only" and "expect_only" tests were mixed in to # every other test case. We move them out into the simplest set so that # their behaviour remains tested, but isn't repeated as often to keep test @@ -93,20 +103,20 @@ def pytest_generate_tests(self, metafunc): # test cases, this is just testing the single-output behaviour. 
@pytest.mark.parametrize("improved_sampling", [True, False]) - def test_states_only(self, hamiltonian, args, c_ops, expected, tol, - improved_sampling): + def test_states_only(self, hamiltonian, state, args, c_ops, + expected, tol, improved_sampling): options = {"store_states": True, "map": "serial", "improved_sampling": improved_sampling} - result = mcsolve(hamiltonian, self.state, self.times, args=args, + result = mcsolve(hamiltonian, state, self.times, args=args, c_ops=c_ops, e_ops=[], ntraj=self.ntraj, options=options) self._assert_states(result, expected, tol) @pytest.mark.parametrize("improved_sampling", [True, False]) - def test_expect_only(self, hamiltonian, args, c_ops, expected, tol, - improved_sampling): + def test_expect_only(self, hamiltonian, state, args, c_ops, + expected, tol, improved_sampling): options = {'map': 'serial', "improved_sampling": improved_sampling} - result = mcsolve(hamiltonian, self.state, self.times, args=args, + result = mcsolve(hamiltonian, state, self.times, args=args, c_ops=c_ops, e_ops=self.e_ops, ntraj=self.ntraj, options=options) self._assert_expect(result, expected, tol) @@ -120,8 +130,6 @@ class TestConstantCollapse(StatesAndExpectOutputCase): def pytest_generate_tests(self, metafunc): tol = 0.25 coupling = 0.2 - expect = (qutip.expect(self.e_ops[0], self.state) - * np.exp(-coupling * self.times)) collapse_op = qutip.destroy(self.size) c_op_types = [ (np.sqrt(coupling)*collapse_op, {}, "constant"), @@ -129,12 +137,23 @@ def pytest_generate_tests(self, metafunc): (callable_qobj(collapse_op, _return_constant), {'constant': np.sqrt(coupling)}, "function"), ] - cases = [pytest.param(self.h, args, [c_op], [expect], tol, id=id) + cases = [pytest.param(self.h, args, [c_op], tol, id=id) for c_op, args, id in c_op_types] metafunc.parametrize( - ['hamiltonian', 'args', 'c_ops', 'expected', 'tol'], + ['hamiltonian', 'args', 'c_ops', 'tol'], cases) + initial_state_types = [ + (self.pure_state, "pure"), + (self.mixed_state, "mixed"), + 
] + expect = [(qutip.expect(self.e_ops[0], state) + * np.exp(-coupling * self.times)) + for state, _ in initial_state_types] + cases = [pytest.param(state, [exp], id=id) + for (state, id), exp in zip(initial_state_types, expect)] + metafunc.parametrize(['state', 'expected'], cases) + class TestTimeDependentCollapse(StatesAndExpectOutputCase): """ @@ -144,8 +163,6 @@ class TestTimeDependentCollapse(StatesAndExpectOutputCase): def pytest_generate_tests(self, metafunc): tol = 0.25 coupling = 0.2 - expect = (qutip.expect(self.e_ops[0], self.state) - * np.exp(-coupling * (1 - np.exp(-self.times)))) collapse_op = qutip.destroy(self.size) collapse_args = {'constant': np.sqrt(coupling), 'rate': 0.5} collapse_string = 'sqrt({} * exp(-t))'.format(coupling) @@ -153,12 +170,23 @@ def pytest_generate_tests(self, metafunc): ([collapse_op, _return_decay], collapse_args, "function"), ([collapse_op, collapse_string], {}, "string"), ] - cases = [pytest.param(self.h, args, [c_op], [expect], tol, id=id) + cases = [pytest.param(self.h, args, [c_op], tol, id=id) for c_op, args, id in c_op_types] metafunc.parametrize( - ['hamiltonian', 'args', 'c_ops', 'expected', 'tol'], + ['hamiltonian', 'args', 'c_ops', 'tol'], cases) + initial_state_types = [ + (self.pure_state, "pure"), + (self.mixed_state, "mixed"), + ] + expect = [(qutip.expect(self.e_ops[0], state) + * np.exp(-coupling * (1 - np.exp(-self.times)))) + for state, _ in initial_state_types] + cases = [pytest.param(state, [exp], id=id) + for (state, id), exp in zip(initial_state_types, expect)] + metafunc.parametrize(['state', 'expected'], cases) + def test_stored_collapse_operators_and_times(): """ @@ -180,16 +208,21 @@ def test_stored_collapse_operators_and_times(): @pytest.mark.parametrize("improved_sampling", [True, False]) @pytest.mark.parametrize("keep_runs_results", [True, False]) -def test_states_outputs(keep_runs_results, improved_sampling): +@pytest.mark.parametrize("mixed_initial_state", [True, False]) +def 
test_states_outputs(keep_runs_results, improved_sampling, + mixed_initial_state): # We're just testing the output value, so it's important whether certain # things are complex or real, but not what the magnitudes of constants are. focks = 5 - ntraj = 5 - a = qutip.tensor(qutip.destroy(focks), qutip.qeye(2)) - sm = qutip.tensor(qutip.qeye(focks), qutip.sigmam()) + ntraj = 13 + a = qutip.destroy(focks) & qutip.qeye(2) + sm = qutip.qeye(focks) & qutip.sigmam() H = 1j*a.dag()*sm + a H = H + H.dag() - state = qutip.basis([focks, 2], [0, 1]) + if mixed_initial_state: + state = qutip.maximally_mixed_dm(focks) & qutip.fock_dm(2, 1) + else: + state = qutip.basis([focks, 2], [0, 1]) times = np.linspace(0, 10, 21) c_ops = [a, sm] data = mcsolve(H, state, times, c_ops, ntraj=ntraj, @@ -201,6 +234,10 @@ def test_states_outputs(keep_runs_results, improved_sampling): assert isinstance(data.average_states[0], qutip.Qobj) assert data.average_states[0].norm() == pytest.approx(1.) assert data.average_states[0].isoper + if state.isket: + assert data.average_states[0] == qutip.ket2dm(state) + else: + assert data.average_states[0] == state assert isinstance(data.average_final_state, qutip.Qobj) assert data.average_final_state.norm() == pytest.approx(1.) @@ -223,9 +260,10 @@ def test_states_outputs(keep_runs_results, improved_sampling): assert data.runs_final_states[0].norm() == pytest.approx(1.) assert data.runs_final_states[0].isket - assert isinstance(data.steady_state(), qutip.Qobj) - assert data.steady_state().norm() == pytest.approx(1.) - assert data.steady_state().isoper + steady_state = data.steady_state() + assert isinstance(steady_state, qutip.Qobj) + assert steady_state.norm() == pytest.approx(1.) 
+ assert steady_state.isoper np.testing.assert_allclose(times, data.times) assert data.num_trajectories == ntraj @@ -238,16 +276,21 @@ def test_states_outputs(keep_runs_results, improved_sampling): @pytest.mark.parametrize("improved_sampling", [True, False]) @pytest.mark.parametrize("keep_runs_results", [True, False]) -def test_expectation_outputs(keep_runs_results, improved_sampling): +@pytest.mark.parametrize("mixed_initial_state", [True, False]) +def test_expectation_outputs(keep_runs_results, improved_sampling, + mixed_initial_state): # We're just testing the output value, so it's important whether certain # things are complex or real, but not what the magnitudes of constants are. focks = 5 - ntraj = 5 - a = qutip.tensor(qutip.destroy(focks), qutip.qeye(2)) - sm = qutip.tensor(qutip.qeye(focks), qutip.sigmam()) + ntraj = 13 + a = qutip.destroy(focks) & qutip.qeye(2) + sm = qutip.qeye(focks) & qutip.sigmam() H = 1j*a.dag()*sm + a H = H + H.dag() - state = qutip.basis([focks, 2], [0, 1]) + if mixed_initial_state: + state = qutip.maximally_mixed_dm(focks) & qutip.fock_dm(2, 1) + else: + state = qutip.basis([focks, 2], [0, 1]) times = np.linspace(0, 10, 5) c_ops = [a, sm] e_ops = [a.dag()*a, sm.dag()*sm, a] @@ -340,7 +383,7 @@ def test_bad_seed(self, improved_sampling): kwargs = {'c_ops': self.c_ops, 'ntraj': self.ntraj, "options": {"improved_sampling": improved_sampling}} with pytest.raises(ValueError): - first = mcsolve(*args, seeds=[1], **kwargs) + mcsolve(*args, seeds=[1], **kwargs) @pytest.mark.parametrize("improved_sampling", [True, False]) def test_generator(self, improved_sampling): @@ -374,12 +417,16 @@ def test_stepping(self): @pytest.mark.parametrize("improved_sampling", [True, False]) -def test_timeout(improved_sampling): +@pytest.mark.parametrize("mixed_initial_state", [True, False]) +def test_timeout(improved_sampling, mixed_initial_state): size = 10 ntraj = 1000 a = qutip.destroy(size) H = qutip.num(size) - state = qutip.basis(size, size-1) + if 
mixed_initial_state: + state = qutip.maximally_mixed_dm(size) + else: + state = qutip.basis(size, size-1) times = np.linspace(0, 1.0, 100) coupling = 0.5 n_th = 0.05 @@ -391,24 +438,52 @@ def test_timeout(improved_sampling): timeout=1e-6) assert res.stats['end_condition'] == 'timeout' - @pytest.mark.parametrize("improved_sampling", [True, False]) -def test_super_H(improved_sampling): +def test_target_tol(improved_sampling): size = 10 - ntraj = 1000 + ntraj = 100 a = qutip.destroy(size) H = qutip.num(size) state = qutip.basis(size, size-1) times = np.linspace(0, 1.0, 100) + coupling = 0.5 + n_th = 0.05 + c_ops = np.sqrt(coupling * (n_th + 1)) * a + e_ops = [qutip.num(size)] + + options = {'map': 'serial', "improved_sampling": improved_sampling} + + res = mcsolve(H, state, times, c_ops, e_ops, ntraj=ntraj, options=options, + target_tol = 0.5) + assert res.stats['end_condition'] == 'target tolerance reached' + + res = mcsolve(H, state, times, c_ops, e_ops, ntraj=ntraj, options=options, + target_tol = 1e-6) + assert res.stats['end_condition'] == 'ntraj reached' + +@pytest.mark.parametrize("improved_sampling", [True, False]) +@pytest.mark.parametrize("mixed_initial_state", [True, False]) +def test_super_H(improved_sampling, mixed_initial_state): + size = 10 + ntraj = 250 + a = qutip.destroy(size) + H = qutip.num(size) + if mixed_initial_state: + state = qutip.maximally_mixed_dm(size) + else: + state = qutip.basis(size, size-1) + times = np.linspace(0, 1.0, 100) # Arbitrary coupling and bath temperature. 
coupling = 0.5 n_th = 0.05 c_ops = np.sqrt(coupling * (n_th + 1)) * a e_ops = [qutip.num(size)] mc_expected = mcsolve(H, state, times, c_ops, e_ops, ntraj=ntraj, - target_tol=0.1, options={'map': 'serial'}) + target_tol=(0.1 if state.isket else None), + options={'map': 'serial', + "improved_sampling": improved_sampling}) mc = mcsolve(qutip.liouvillian(H), state, times, c_ops, e_ops, ntraj=ntraj, - target_tol=0.1, + target_tol=(0.1 if state.isket else None), options={'map': 'serial', "improved_sampling": improved_sampling}) np.testing.assert_allclose(mc_expected.expect[0], mc.expect[0], atol=0.65) @@ -451,15 +526,24 @@ def test_MCSolver_stepping(): assert state.isket +def _coeff_collapse(t, A): + if t == 0: + # New trajectory, was collapse list reset? + assert len(A) == 0 + if t > 2.75: + # End of the trajectory, was collapse list was filled? + assert len(A) != 0 + return (len(A) < 3) * 1.0 + + @pytest.mark.parametrize(["func", "kind"], [ pytest.param( lambda t, A: A-4, lambda: qutip.MCSolver.ExpectFeedback(qutip.num(10)), - # 7.+0j, id="expect" ), pytest.param( - lambda t, A: (len(A) < 3) * 1.0, + _coeff_collapse, lambda: qutip.MCSolver.CollapseFeedback(), id="collapse" ), @@ -472,9 +556,98 @@ def test_feedback(func, kind): solver = qutip.MCSolver( H, c_ops=[qutip.QobjEvo([a, func], args={"A": kind()})], - options={"map": "serial"} + options={"map": "serial", "max_step": 0.2} ) result = solver.run( - psi0,np.linspace(0, 3, 31), e_ops=[qutip.num(10)], ntraj=10 + psi0, np.linspace(0, 3, 31), e_ops=[qutip.num(10)], ntraj=10 ) assert np.all(result.expect[0] > 4. 
- tol) + + +@pytest.mark.parametrize(["initial_state", "ntraj"], [ + pytest.param(qutip.maximally_mixed_dm(2), 5, id="dm"), + pytest.param([(qutip.basis(2, 0), 0.3), (qutip.basis(2, 1), 0.7)], + 5, id="statelist"), + pytest.param([(qutip.basis(2, 0), 0.3), (qutip.basis(2, 1), 0.7)], + [4, 2], id="ntraj-spec"), + pytest.param([(qutip.basis(2, 0), 0.3), + ((qutip.basis(2, 0) + qutip.basis(2, 1)).unit(), 0.7)], + [4, 2], id="non-orthogonals"), +]) +@pytest.mark.parametrize("improved_sampling", [True, False]) +def test_mixed_averaging(improved_sampling, initial_state, ntraj): + # we will only check that the initial state of the result equals the + # intended initial state exactly + H = qutip.sigmax() + tlist = [0, 1] + L = qutip.sigmam() + + solver = qutip.MCSolver( + H, [L], options={'improved_sampling': improved_sampling}) + result = solver.run(initial_state, tlist, ntraj) + + if isinstance(initial_state, qutip.Qobj): + reference = initial_state + else: + reference = sum(p * psi.proj() for psi, p in initial_state) + + assert result.states[0] == reference + assert result.num_trajectories == np.sum(ntraj) + + assert hasattr(result, 'initial_states') + assert isinstance(result.initial_states, list) + assert all(isinstance(st, qutip.Qobj) for st in result.initial_states) + assert hasattr(result, 'ntraj_per_initial_state') + assert isinstance(result.ntraj_per_initial_state, list) + assert len(result.ntraj_per_initial_state) == len(result.initial_states) + if isinstance(ntraj, list): + assert result.ntraj_per_initial_state == ntraj + else: + assert sum(result.ntraj_per_initial_state) == ntraj + assert sum(result.runs_weights) == pytest.approx(1.) 
+ + +@pytest.mark.parametrize("improved_sampling", [True, False]) +@pytest.mark.parametrize("p", [0, 0.25, 0.5]) +def test_mixed_equals_merged(improved_sampling, p): + # Running mcsolve with mixed ICs should be the same as running mcsolve + # multiple times and merging the results afterwards + initial_state1 = qutip.basis(2, 1) + initial_state2 = (qutip.basis(2, 1) + qutip.basis(2, 0)).unit() + H = qutip.sigmax() + L = qutip.sigmam() + tlist = np.linspace(0, 2, 20) + ntraj = [3, 9] + + solver = qutip.MCSolver( + H, [L], options={'improved_sampling': improved_sampling}) + mixed_result = solver.run( + [(initial_state1, p), (initial_state2, 1 - p)], tlist, ntraj) + + # Reuse seeds, then results should be identical + seeds = mixed_result.seeds + if improved_sampling: + # For improved sampling, first two seeds are no-jump trajectories + seeds1 = seeds[0:1] + seeds[2:(ntraj[0]+1)] + seeds2 = seeds[1:2] + seeds[(ntraj[0]+1):] + else: + seeds1 = seeds[:ntraj[0]] + seeds2 = seeds[ntraj[0]:] + + pure_result1 = solver.run(initial_state1, tlist, ntraj[0], seeds=seeds1) + pure_result2 = solver.run(initial_state2, tlist, ntraj[1], seeds=seeds2) + merged_result = pure_result1.merge(pure_result2, p) + + assert mixed_result.num_trajectories == sum(ntraj) + assert merged_result.num_trajectories == sum(ntraj) + for state1, state2 in zip(mixed_result.states, merged_result.states): + assert state1 == state2 + + assert hasattr(mixed_result, 'initial_states') + assert isinstance(mixed_result.initial_states, list) + assert mixed_result.initial_states == [initial_state1, initial_state2] + assert hasattr(mixed_result, 'ntraj_per_initial_state') + assert isinstance(mixed_result.ntraj_per_initial_state, list) + assert mixed_result.ntraj_per_initial_state == ntraj + assert sum(mixed_result.runs_weights) == pytest.approx(1.) + assert sum(merged_result.runs_weights) == pytest.approx(1.) 
diff --git a/qutip/tests/solver/test_mesolve.py b/qutip/tests/solver/test_mesolve.py index 5a79d59dda..4c092f1690 100644 --- a/qutip/tests/solver/test_mesolve.py +++ b/qutip/tests/solver/test_mesolve.py @@ -206,7 +206,7 @@ def testME_TDDecayliouvillian(self, c_ops): def test_mesolve_normalization(self, state_type): # non-hermitean H causes state to evolve non-unitarily H = qutip.Qobj([[1, -0.1j], [-0.1j, 1]]) - H = qutip.sprepost(H, H) # ensure use of MeSolve + H = qutip.spre(H) + qutip.spost(H.dag()) # ensure use of MeSolve psi0 = qutip.basis(2, 0) options = {"normalize_output": True, "progress_bar": None} @@ -698,3 +698,15 @@ def f(t, A): solver = qutip.MESolver(H, c_ops=[a]) result = solver.run(psi0, np.linspace(0, 30, 301), e_ops=[qutip.num(N)]) assert np.all(result.expect[0] > 4. - tol) + + +@pytest.mark.parametrize( + 'rho0', + [qutip.sigmax(), qutip.sigmaz(), qutip.qeye(2)], + ids=["sigmax", "sigmaz", "tr=2"] +) +def test_non_normalized_dm(rho0): + H = qutip.QobjEvo(qutip.num(2)) + solver = qutip.MESolver(H, c_ops=[qutip.sigmaz()]) + result = solver.run(rho0, np.linspace(0, 1, 10), e_ops=[qutip.qeye(2)]) + np.testing.assert_allclose(result.expect[0], rho0.tr(), atol=1e-7) diff --git a/qutip/tests/solver/test_nm_mcsolve.py b/qutip/tests/solver/test_nm_mcsolve.py index e47b638e4b..bde9cad57f 100644 --- a/qutip/tests/solver/test_nm_mcsolve.py +++ b/qutip/tests/solver/test_nm_mcsolve.py @@ -7,13 +7,20 @@ from qutip.solver.nm_mcsolve import nm_mcsolve, NonMarkovianMCSolver -def test_agreement_with_mesolve_for_negative_rates(): +@pytest.mark.slow +@pytest.mark.parametrize("improved_sampling", [True, False]) +@pytest.mark.parametrize("mixed_initial_state", [True, False]) +def test_agreement_with_mesolve_for_negative_rates( + improved_sampling, mixed_initial_state): """ A rough test that nm_mcsolve agress with mesolve in the presence of negative rates. 
""" times = np.linspace(0, 0.25, 51) - psi0 = qutip.basis(2, 1) + if mixed_initial_state: + state0 = qutip.maximally_mixed_dm(2) + else: + state0 = qutip.basis(2, 1) a0 = qutip.destroy(2) H = a0.dag() * a0 e_ops = [ @@ -37,9 +44,9 @@ def test_agreement_with_mesolve_for_negative_rates(): [a0, gamma2], ] mc_result = nm_mcsolve( - H, psi0, times, ops_and_rates, - args=args, e_ops=e_ops, ntraj=2000, - options={"rtol": 1e-8}, + H, state0, times, ops_and_rates, + args=args, e_ops=e_ops, ntraj=1000 if improved_sampling else 2000, + options={"rtol": 1e-8, "improved_sampling": improved_sampling}, seeds=0, ) @@ -49,7 +56,7 @@ def test_agreement_with_mesolve_for_negative_rates(): [qutip.lindblad_dissipator(a0, a0), gamma2], ] me_result = qutip.mesolve( - H, psi0, times, d_ops, + H, state0, times, d_ops, args=args, e_ops=e_ops, ) @@ -103,12 +110,13 @@ def test_solver_pickleable(): "sin(t)", ] args = [ - None, + {}, {'constant': 1}, - None, + {}, ] for rate, arg in zip(rates, args): - solver = NonMarkovianMCSolver(H, [(L, rate)], args=arg) + op_and_rate = (L, qutip.coefficient(rate, args=arg)) + solver = NonMarkovianMCSolver(H, [op_and_rate]) jar = pickle.dumps(solver) loaded_solver = pickle.loads(jar) @@ -153,10 +161,11 @@ class StatesAndExpectOutputCase: """ size = 10 h = qutip.num(size) - state = qutip.basis(size, size-1) + pure_state = qutip.basis(size, size-1) + mixed_state = qutip.maximally_mixed_dm(size) times = np.linspace(0, 1, 101) e_ops = [qutip.num(size)] - ntraj = 2000 + ntraj = 500 def _assert_states(self, result, expected, tol): assert hasattr(result, 'states') @@ -173,15 +182,19 @@ def _assert_expect(self, result, expected, tol): for test, expected_part in zip(result.expect, expected): np.testing.assert_allclose(test, expected_part, rtol=tol) + @pytest.mark.parametrize("improved_sampling", [True, False]) def test_states_and_expect( - self, hamiltonian, args, ops_and_rates, expected, tol + self, hamiltonian, state, args, ops_and_rates, + expected, tol, 
improved_sampling ): - options = {"store_states": True, "map": "serial"} + options = {"store_states": True, "map": "serial", + "improved_sampling": improved_sampling} result = nm_mcsolve( - hamiltonian, self.state, self.times, args=args, + hamiltonian, state, self.times, args=args, ops_and_rates=ops_and_rates, e_ops=self.e_ops, ntraj=self.ntraj, options=options, - target_tol=0.05, + # target_tol not supported for mixed initial state + target_tol=(0.05 if state.isket else None) ) self._assert_expect(result, expected, tol) self._assert_states(result, expected, tol) @@ -195,10 +208,6 @@ class TestNoCollapse(StatesAndExpectOutputCase): def pytest_generate_tests(self, metafunc): tol = 1e-8 - expect = ( - qutip.expect(self.e_ops[0], self.state) - * np.ones_like(self.times) - ) hamiltonian_types = [ (self.h, "Qobj"), ([self.h], "list"), @@ -208,37 +217,49 @@ def pytest_generate_tests(self, metafunc): (callable_qobj(self.h), "callable"), ] cases = [ - pytest.param(hamiltonian, {}, [], [expect], tol, id=id) + pytest.param(hamiltonian, {}, [], tol, id=id) for hamiltonian, id in hamiltonian_types ] metafunc.parametrize([ - 'hamiltonian', 'args', 'ops_and_rates', 'expected', 'tol', + 'hamiltonian', 'args', 'ops_and_rates', 'tol', ], cases) + initial_state_types = [ + (self.pure_state, "pure"), + (self.mixed_state, "mixed"), + ] + expect = [qutip.expect(self.e_ops[0], state) * np.ones_like(self.times) + for state, _ in initial_state_types] + cases = [pytest.param(state, [exp], id=id) + for (state, id), exp in zip(initial_state_types, expect)] + metafunc.parametrize(['state', 'expected'], cases) + # Previously the "states_only" and "expect_only" tests were mixed in to # every other test case. We move them out into the simplest set so that # their behaviour remains tested, but isn't repeated as often to keep test # runtimes shorter. The known-good cases are still tested in the other # test cases, this is just testing the single-output behaviour. 
- def test_states_only( - self, hamiltonian, args, ops_and_rates, expected, tol - ): - options = {"store_states": True, "map": "serial"} + @pytest.mark.parametrize("improved_sampling", [True, False]) + def test_states_only(self, hamiltonian, state, args, ops_and_rates, + expected, tol, improved_sampling): + options = {"store_states": True, "map": "serial", + "improved_sampling": improved_sampling} result = nm_mcsolve( - hamiltonian, self.state, self.times, args=args, + hamiltonian, state, self.times, args=args, ops_and_rates=ops_and_rates, e_ops=[], ntraj=self.ntraj, options=options, ) self._assert_states(result, expected, tol) - def test_expect_only( - self, hamiltonian, args, ops_and_rates, expected, tol - ): + @pytest.mark.parametrize("improved_sampling", [True, False]) + def test_expect_only(self, hamiltonian, state, args, ops_and_rates, + expected, tol, improved_sampling): + options = {'map': 'serial', "improved_sampling": improved_sampling} result = nm_mcsolve( - hamiltonian, self.state, self.times, args=args, + hamiltonian, state, self.times, args=args, ops_and_rates=ops_and_rates, - e_ops=self.e_ops, ntraj=self.ntraj, options={'map': 'serial'}, + e_ops=self.e_ops, ntraj=self.ntraj, options=options, ) self._assert_expect(result, expected, tol) @@ -252,10 +273,6 @@ class TestConstantCollapse(StatesAndExpectOutputCase): def pytest_generate_tests(self, metafunc): tol = 0.25 rate = 0.2 - expect = ( - qutip.expect(self.e_ops[0], self.state) - * np.exp(-rate * self.times) - ) op = qutip.destroy(self.size) op_and_rate_types = [ ([op, rate], {}, "constant"), @@ -264,13 +281,24 @@ def pytest_generate_tests(self, metafunc): ([op, lambda t, w: rate], {"w": 1.0}, "function_with_args"), ] cases = [ - pytest.param(self.h, args, [op_and_rate], [expect], tol, id=id) + pytest.param(self.h, args, [op_and_rate], tol, id=id) for op_and_rate, args, id in op_and_rate_types ] metafunc.parametrize([ - 'hamiltonian', 'args', 'ops_and_rates', 'expected', 'tol', + 'hamiltonian', 
'args', 'ops_and_rates', 'tol', ], cases) + initial_state_types = [ + (self.pure_state, "pure"), + (self.mixed_state, "mixed"), + ] + expect = [(qutip.expect(self.e_ops[0], state) + * np.exp(-rate * self.times)) + for state, _ in initial_state_types] + cases = [pytest.param(state, [exp], id=id) + for (state, id), exp in zip(initial_state_types, expect)] + metafunc.parametrize(['state', 'expected'], cases) + class TestTimeDependentCollapse(StatesAndExpectOutputCase): """ @@ -281,10 +309,6 @@ class TestTimeDependentCollapse(StatesAndExpectOutputCase): def pytest_generate_tests(self, metafunc): tol = 0.25 coupling = 0.2 - expect = ( - qutip.expect(self.e_ops[0], self.state) - * np.exp(-coupling * (1 - np.exp(-self.times))) - ) op = qutip.destroy(self.size) rate_args = {'constant': coupling, 'rate': 0.5} rate_string = 'sqrt({} * exp(-t))'.format(coupling) @@ -293,13 +317,24 @@ def pytest_generate_tests(self, metafunc): ([op, _return_decay], rate_args, "function"), ] cases = [ - pytest.param(self.h, args, [op_and_rate], [expect], tol, id=id) + pytest.param(self.h, args, [op_and_rate], tol, id=id) for op_and_rate, args, id in op_and_rate_types ] metafunc.parametrize([ - 'hamiltonian', 'args', 'ops_and_rates', 'expected', 'tol', + 'hamiltonian', 'args', 'ops_and_rates', 'tol', ], cases) + initial_state_types = [ + (self.pure_state, "pure"), + (self.mixed_state, "mixed"), + ] + expect = [(qutip.expect(self.e_ops[0], state) + * np.exp(-coupling * (1 - np.exp(-self.times)))) + for state, _ in initial_state_types] + cases = [pytest.param(state, [exp], id=id) + for (state, id), exp in zip(initial_state_types, expect)] + metafunc.parametrize(['state', 'expected'], cases) + def test_stored_collapse_operators_and_times(): """ @@ -324,17 +359,23 @@ def test_stored_collapse_operators_and_times(): assert all(col in [0, 1] for col in result.col_which[0]) -@pytest.mark.parametrize('keep_runs_results', [True, False]) -def test_states_outputs(keep_runs_results): 
+@pytest.mark.parametrize("improved_sampling", [True, False]) +@pytest.mark.parametrize("keep_runs_results", [True, False]) +@pytest.mark.parametrize("mixed_initial_state", [True, False]) +def test_states_outputs(keep_runs_results, improved_sampling, + mixed_initial_state): # We're just testing the output value, so it's important whether certain # things are complex or real, but not what the magnitudes of constants are. focks = 5 - ntraj = 5 - a = qutip.tensor(qutip.destroy(focks), qutip.qeye(2)) - sm = qutip.tensor(qutip.qeye(focks), qutip.sigmam()) + ntraj = 13 + a = qutip.destroy(focks) & qutip.qeye(2) + sm = qutip.qeye(focks) & qutip.sigmam() H = 1j*a.dag()*sm + a H = H + H.dag() - state = qutip.basis([focks, 2], [0, 1]) + if mixed_initial_state: + state = qutip.maximally_mixed_dm(focks) & qutip.fock_dm(2, 1) + else: + state = qutip.basis([focks, 2], [0, 1]) times = np.linspace(0, 10, 21) ops_and_rates = [ (a, 1.0), @@ -348,40 +389,37 @@ def test_states_outputs(keep_runs_results): options={ "keep_runs_results": keep_runs_results, "map": "serial", - }, - ) + "improved_sampling": improved_sampling}) assert len(data.average_states) == len(times) assert isinstance(data.average_states[0], qutip.Qobj) assert data.average_states[0].norm() == pytest.approx(1.) assert data.average_states[0].isoper + if state.isket: + assert data.average_states[0] == qutip.ket2dm(state) + else: + assert data.average_states[0] == state assert isinstance(data.average_final_state, qutip.Qobj) assert data.average_final_state.norm() == pytest.approx(1.) 
assert data.average_final_state.isoper - assert isinstance(data.photocurrent[0][1], float) - assert isinstance(data.photocurrent[1][1], float) - assert ( - np.array(data.runs_photocurrent).shape - == (ntraj, total_ops, len(times)-1) - ) - if keep_runs_results: assert len(data.runs_states) == ntraj assert len(data.runs_states[0]) == len(times) assert isinstance(data.runs_states[0][0], qutip.Qobj) assert data.runs_states[0][0].norm() == pytest.approx(1.) - assert data.runs_states[0][0].isoper + assert data.runs_states[0][0].isket assert len(data.runs_final_states) == ntraj assert isinstance(data.runs_final_states[0], qutip.Qobj) assert data.runs_final_states[0].norm() == pytest.approx(1.) - assert data.runs_final_states[0].isoper + assert data.runs_final_states[0].isket - assert isinstance(data.steady_state(), qutip.Qobj) - assert data.steady_state().norm() == pytest.approx(1.) - assert data.steady_state().isoper + steady_state = data.steady_state() + assert isinstance(steady_state, qutip.Qobj) + assert steady_state.norm() == pytest.approx(1.) + assert steady_state.isoper np.testing.assert_allclose(times, data.times) assert data.num_trajectories == ntraj @@ -392,17 +430,23 @@ def test_states_outputs(keep_runs_results): assert data.stats['end_condition'] == "ntraj reached" -@pytest.mark.parametrize('keep_runs_results', [True, False]) -def test_expectation_outputs(keep_runs_results): +@pytest.mark.parametrize("improved_sampling", [True, False]) +@pytest.mark.parametrize("keep_runs_results", [True, False]) +@pytest.mark.parametrize("mixed_initial_state", [True, False]) +def test_expectation_outputs(keep_runs_results, improved_sampling, + mixed_initial_state): # We're just testing the output value, so it's important whether certain # things are complex or real, but not what the magnitudes of constants are. 
focks = 5 - ntraj = 5 - a = qutip.tensor(qutip.destroy(focks), qutip.qeye(2)) - sm = qutip.tensor(qutip.qeye(focks), qutip.sigmam()) + ntraj = 13 + a = qutip.destroy(focks) & qutip.qeye(2) + sm = qutip.qeye(focks) & qutip.sigmam() H = 1j*a.dag()*sm + a H = H + H.dag() - state = qutip.basis([focks, 2], [0, 1]) + if mixed_initial_state: + state = qutip.maximally_mixed_dm(focks) & qutip.fock_dm(2, 1) + else: + state = qutip.basis([focks, 2], [0, 1]) times = np.linspace(0, 10, 5) ops_and_rates = [ (a, 1.0), @@ -417,8 +461,7 @@ def test_expectation_outputs(keep_runs_results): options={ "keep_runs_results": keep_runs_results, "map": "serial", - }, - ) + "improved_sampling": improved_sampling}) assert isinstance(data.average_expect[0][1], float) assert isinstance(data.average_expect[1][1], float) assert isinstance(data.average_expect[2][1], complex) @@ -431,10 +474,6 @@ def test_expectation_outputs(keep_runs_results): assert isinstance(data.runs_expect[0][0][1], float) assert isinstance(data.runs_expect[1][0][1], float) assert isinstance(data.runs_expect[2][0][1], complex) - assert isinstance(data.photocurrent[0][0], float) - assert isinstance(data.photocurrent[1][0], float) - assert (np.array(data.runs_photocurrent).shape - == (ntraj, total_ops, len(times)-1)) np.testing.assert_allclose(times, data.times) assert data.num_trajectories == ntraj assert len(data.e_ops) == len(e_ops) @@ -521,9 +560,9 @@ def test_stepping(self): size = 10 a = qutip.destroy(size) H = qutip.num(size) - ops_and_rates = [(a, 'alpha')] + ops_and_rates = [(a, qutip.coefficient('alpha', args={'alpha': 0}))] mcsolver = NonMarkovianMCSolver( - H, ops_and_rates, args={'alpha': 0}, options={'map': 'serial'}, + H, ops_and_rates, options={'map': 'serial'}, ) mcsolver.start(qutip.basis(size, size-1), 0, seed=5) state_1 = mcsolver.step(1, args={'alpha': 1}) @@ -533,12 +572,17 @@ def test_stepping(self): assert state_1 == state_2 -def test_timeout(): +@pytest.mark.parametrize("improved_sampling", [True, 
False]) +@pytest.mark.parametrize("mixed_initial_state", [True, False]) +def test_timeout(improved_sampling, mixed_initial_state): size = 10 ntraj = 1000 a = qutip.destroy(size) H = qutip.num(size) - state = qutip.basis(size, size-1) + if mixed_initial_state: + state = qutip.maximally_mixed_dm(size) + else: + state = qutip.basis(size, size-1) times = np.linspace(0, 1.0, 100) coupling = 0.5 n_th = 0.05 @@ -548,17 +592,23 @@ def test_timeout(): e_ops = [qutip.num(size)] res = nm_mcsolve( H, state, times, ops_and_rates, e_ops, ntraj=ntraj, - options={'map': 'serial'}, timeout=1e-6, + options={'map': 'serial', "improved_sampling": improved_sampling}, + timeout=1e-6, ) assert res.stats['end_condition'] == 'timeout' -def test_super_H(): +@pytest.mark.parametrize("improved_sampling", [True, False]) +@pytest.mark.parametrize("mixed_initial_state", [True, False]) +def test_super_H(improved_sampling, mixed_initial_state): size = 10 - ntraj = 1000 + ntraj = 250 a = qutip.destroy(size) H = qutip.num(size) - state = qutip.basis(size, size-1) + if mixed_initial_state: + state = qutip.maximally_mixed_dm(size) + else: + state = qutip.basis(size, size-1) times = np.linspace(0, 1.0, 100) # Arbitrary coupling and bath temperature. 
coupling = 0.5 @@ -569,23 +619,24 @@ def test_super_H(): e_ops = [qutip.num(size)] mc_expected = nm_mcsolve( H, state, times, ops_and_rates, e_ops, ntraj=ntraj, - target_tol=0.1, options={'map': 'serial'}, + target_tol=(0.1 if state.isket else None), + options={'map': 'serial', "improved_sampling": improved_sampling}, ) mc = nm_mcsolve( qutip.liouvillian(H), state, times, ops_and_rates, e_ops, ntraj=ntraj, - target_tol=0.1, options={'map': 'serial'}, - ) - np.testing.assert_allclose(mc_expected.expect[0], mc.expect[0], atol=0.5) + target_tol=(0.1 if state.isket else None), + options={'map': 'serial', "improved_sampling": improved_sampling}) + np.testing.assert_allclose(mc_expected.expect[0], mc.expect[0], atol=0.65) def test_NonMarkovianMCSolver_run(): size = 10 + args = {'coupling': 0} ops_and_rates = [ - (qutip.destroy(size), 'coupling') + (qutip.destroy(size), qutip.coefficient('coupling', args=args)) ] - args = {'coupling': 0} H = qutip.num(size) - solver = NonMarkovianMCSolver(H, ops_and_rates, args=args) + solver = NonMarkovianMCSolver(H, ops_and_rates) solver.options = {'store_final_state': True} res = solver.run(qutip.basis(size, size-1), np.linspace(0, 5.0, 11), e_ops=[qutip.qeye(size)], args={'coupling': 1}) @@ -603,12 +654,12 @@ def test_NonMarkovianMCSolver_run(): def test_NonMarkovianMCSolver_stepping(): size = 10 + args = {'coupling': 0} ops_and_rates = [ - (qutip.destroy(size), 'coupling') + (qutip.destroy(size), qutip.coefficient('coupling', args=args)) ] - args = {'coupling': 0} H = qutip.num(size) - solver = NonMarkovianMCSolver(H, ops_and_rates, args=args) + solver = NonMarkovianMCSolver(H, ops_and_rates) solver.start(qutip.basis(size, size-1), 0, seed=0) state = solver.step(1) assert qutip.expect(qutip.qeye(size), state) == pytest.approx(1) @@ -645,3 +696,93 @@ def test_dynamic_arguments(): H, state, times, ops_and_rates, ntraj=25, args={"collapse": []}, ) assert all(len(collapses) <= 1 for collapses in mc.col_which) + + 
+@pytest.mark.parametrize(["initial_state", "ntraj"], [ + pytest.param(qutip.maximally_mixed_dm(2), 5, id="dm"), + pytest.param([(qutip.basis(2, 0), 0.3), (qutip.basis(2, 1), 0.7)], + 5, id="statelist"), + pytest.param([(qutip.basis(2, 0), 0.3), (qutip.basis(2, 1), 0.7)], + [4, 2], id="ntraj-spec"), + pytest.param([(qutip.basis(2, 0), 0.3), + ((qutip.basis(2, 0) + qutip.basis(2, 1)).unit(), 0.7)], + [4, 2], id="non-orthogonals"), +]) +@pytest.mark.parametrize("improved_sampling", [True, False]) +def test_mixed_averaging(improved_sampling, initial_state, ntraj): + # we will only check that the initial state of the result equals the + # intended initial state exactly + H = qutip.sigmax() + tlist = [0, 1] + L = qutip.sigmam() + rate = -1 + + solver = qutip.NonMarkovianMCSolver( + H, [(L, rate)], options={'improved_sampling': improved_sampling}) + result = solver.run(initial_state, tlist, ntraj) + + if isinstance(initial_state, qutip.Qobj): + reference = initial_state + else: + reference = sum(p * psi.proj() for psi, p in initial_state) + + assert result.states[0] == reference + assert result.num_trajectories == np.sum(ntraj) + + assert hasattr(result, 'initial_states') + assert isinstance(result.initial_states, list) + assert all(isinstance(st, qutip.Qobj) for st in result.initial_states) + assert hasattr(result, 'ntraj_per_initial_state') + assert isinstance(result.ntraj_per_initial_state, list) + assert len(result.ntraj_per_initial_state) == len(result.initial_states) + if isinstance(ntraj, list): + assert result.ntraj_per_initial_state == ntraj + else: + assert sum(result.ntraj_per_initial_state) == ntraj + + +@pytest.mark.parametrize("improved_sampling", [True, False]) +@pytest.mark.parametrize("p", [0, 0.25, 0.5]) +def test_mixed_equals_merged(improved_sampling, p): + # Running mcsolve with mixed ICs should be the same as running mcsolve + # multiple times and merging the results afterwards + initial_state1 = qutip.basis(2, 1) + initial_state2 = (qutip.basis(2, 
1) + qutip.basis(2, 0)).unit() + H = qutip.sigmax() + L = qutip.sigmam() + def rate_function(t): + return -1 + t + tlist = np.linspace(0, 2, 20) + ntraj = [3, 9] + + solver = qutip.NonMarkovianMCSolver( + H, [(L, qutip.coefficient(rate_function))], + options={'improved_sampling': improved_sampling}) + mixed_result = solver.run( + [(initial_state1, p), (initial_state2, 1 - p)], tlist, ntraj) + + # Reuse seeds, then results should be identical + seeds = mixed_result.seeds + if improved_sampling: + # For improved sampling, first two seeds are no-jump trajectories + seeds1 = seeds[0:1] + seeds[2:(ntraj[0]+1)] + seeds2 = seeds[1:2] + seeds[(ntraj[0]+1):] + else: + seeds1 = seeds[:ntraj[0]] + seeds2 = seeds[ntraj[0]:] + + pure_result1 = solver.run(initial_state1, tlist, ntraj[0], seeds=seeds1) + pure_result2 = solver.run(initial_state2, tlist, ntraj[1], seeds=seeds2) + merged_result = pure_result1.merge(pure_result2, p) + + assert mixed_result.num_trajectories == sum(ntraj) + assert merged_result.num_trajectories == sum(ntraj) + for state1, state2 in zip(mixed_result.states, merged_result.states): + assert state1 == state2 + + assert hasattr(mixed_result, 'initial_states') + assert isinstance(mixed_result.initial_states, list) + assert mixed_result.initial_states == [initial_state1, initial_state2] + assert hasattr(mixed_result, 'ntraj_per_initial_state') + assert isinstance(mixed_result.ntraj_per_initial_state, list) + assert mixed_result.ntraj_per_initial_state == ntraj diff --git a/qutip/tests/solver/test_propagator.py b/qutip/tests/solver/test_propagator.py index 5119fa0d6b..ffb335cbe3 100644 --- a/qutip/tests/solver/test_propagator.py +++ b/qutip/tests/solver/test_propagator.py @@ -1,4 +1,5 @@ import numpy as np +from scipy.integrate import trapezoid from qutip import (destroy, propagator, Propagator, propagator_steadystate, steadystate, tensor, qeye, basis, QobjEvo, sesolve, liouvillian) @@ -44,7 +45,7 @@ def testPropHOTd(): Htd = [H, [H, func]] U = propagator(Htd, 
1) ts = np.linspace(0, 1, 101) - U2 = (-1j * H * np.trapz(1 + func(ts), ts)).expm() + U2 = (-1j * H * trapezoid(1 + func(ts), ts)).expm() assert (U - U2).norm('max') < 1e-4 diff --git a/qutip/tests/solver/test_results.py b/qutip/tests/solver/test_results.py index a4830460ef..540f45fe06 100644 --- a/qutip/tests/solver/test_results.py +++ b/qutip/tests/solver/test_results.py @@ -2,7 +2,8 @@ import pytest import qutip -from qutip.solver.result import Result, MultiTrajResult, McResult +from qutip.solver.result import Result, TrajectoryResult +from qutip.solver.multitrajresult import MultiTrajResult, McResult, NmmcResult def fill_options(**kwargs): @@ -161,6 +162,42 @@ def test_repr_full(self): ">", ]) + def test_trajectory_result(self): + res = TrajectoryResult( + e_ops=qutip.num(5), + options=fill_options(store_states=True, store_final_state=True)) + for i in range(5): + res.add(i, qutip.basis(5, i)) + + assert not res.has_weight + assert not res.has_absolute_weight + assert not res.has_time_dependent_weight + assert res.total_weight == 1 + + res.add_absolute_weight(2) + res.add_absolute_weight(2) + assert res.has_weight and res.has_absolute_weight + assert not res.has_time_dependent_weight + assert res.total_weight == 4 + + res.add_relative_weight([1j ** i for i in range(5)]) + assert res.has_weight and res.has_absolute_weight + assert res.has_time_dependent_weight + np.testing.assert_array_equal(res.total_weight, + [4 * (1j ** i) for i in range(5)]) + + # weights do not modify states etc + assert res.states == [qutip.basis(5, i) for i in range(5)] + assert res.final_state == qutip.basis(5, 4) + np.testing.assert_array_equal(res.expect[0], range(5)) + + res = TrajectoryResult(e_ops=[], options=fill_options()) + res.add(0, qutip.fock_dm(2, 0)) + res.add_relative_weight(10) + assert res.has_weight + assert not (res.has_absolute_weight or res.has_time_dependent_weight) + assert res.total_weight == 10 + def e_op_num(t, state): """ An e_ops function that returns the 
ground state occupation. """ @@ -169,11 +206,16 @@ def e_op_num(t, state): class TestMultiTrajResult: def _fill_trajectories(self, multiresult, N, ntraj, - collapse=False, noise=0, dm=False): + collapse=False, noise=0, dm=False, + include_no_jump=False, rel_weights=None): + if rel_weights is None: + rel_weights = [None] * ntraj + # Fix the seed to avoid failing due to bad luck np.random.seed(1) - for _ in range(ntraj): - result = Result(multiresult._raw_ops, multiresult.options) + for k, w in enumerate(rel_weights): + result = TrajectoryResult(multiresult._raw_ops, + multiresult.options) result.collapse = [] for t in range(N): delta = 1 + noise * np.random.randn() @@ -185,14 +227,22 @@ def _fill_trajectories(self, multiresult, N, ntraj, result.collapse.append((t+0.1, 0)) result.collapse.append((t+0.2, 1)) result.collapse.append((t+0.3, 1)) + if include_no_jump and k == 0: + result.add_absolute_weight(0.25) + elif include_no_jump and k > 0: + result.add_relative_weight(0.75) + if w is not None: + result.add_relative_weight(w) + result.trace = w if multiresult.add((0, result)) <= 0: break - def _expect_check_types(self, multiresult): + def _check_types(self, multiresult): assert isinstance(multiresult.std_expect, list) assert isinstance(multiresult.average_e_data, dict) assert isinstance(multiresult.std_expect, list) assert isinstance(multiresult.average_e_data, dict) + assert isinstance(multiresult.runs_weights, list) if multiresult.trajectories: assert isinstance(multiresult.runs_expect, list) @@ -203,7 +253,8 @@ def _expect_check_types(self, multiresult): @pytest.mark.parametrize('keep_runs_results', [True, False]) @pytest.mark.parametrize('dm', [True, False]) - def test_McResult(self, dm, keep_runs_results): + @pytest.mark.parametrize('include_no_jump', [True, False]) + def test_McResult(self, dm, include_no_jump, keep_runs_results): N = 10 ntraj = 5 e_ops = [qutip.num(N), qutip.qeye(N)] @@ -211,11 +262,12 @@ def test_McResult(self, dm, keep_runs_results): m_res 
= McResult(e_ops, opt, stats={"num_collapse": 2}) m_res.add_end_condition(ntraj, None) - self._fill_trajectories(m_res, N, ntraj, collapse=True, dm=dm) + self._fill_trajectories(m_res, N, ntraj, collapse=True, + dm=dm, include_no_jump=include_no_jump) np.testing.assert_allclose(np.array(m_res.times), np.arange(N)) assert m_res.stats['end_condition'] == "ntraj reached" - self._expect_check_types(m_res) + self._check_types(m_res) assert np.all(np.array(m_res.col_which) < 2) assert isinstance(m_res.collapse, list) @@ -223,7 +275,50 @@ def test_McResult(self, dm, keep_runs_results): np.testing.assert_allclose(m_res.photocurrent[0], np.ones(N-1)) np.testing.assert_allclose(m_res.photocurrent[1], 2 * np.ones(N-1)) + @pytest.mark.parametrize(['include_no_jump', 'martingale', + 'result_trace', 'result_states'], [ + pytest.param(False, [[1.] * 10] * 5, [1.] * 10, + [qutip.fock_dm(10, i) for i in range(10)], + id='constant-martingale'), + pytest.param(True, [[1.] * 10] * 5, [1.] * 10, + [qutip.fock_dm(10, i) for i in range(10)], + id='constant-marting-no-jump'), + pytest.param(False, [[(j - 1) * np.sin(i) for i in range(10)] + for j in range(5)], + [np.sin(i) for i in range(10)], + [np.sin(i) * qutip.fock_dm(10, i) for i in range(10)], + id='timedep-marting'), + pytest.param(True, [[(j - 1) * np.sin(i) for i in range(10)] + for j in range(5)], + [(-0.25 + 1.5 * 0.75) * np.sin(i) for i in range(10)], + [(-0.25 + 1.5 * 0.75) * np.sin(i) * qutip.fock_dm(10, i) + for i in range(10)], + id='timedep-marting-no-jump'), + ]) + def test_NmmcResult(self, include_no_jump, martingale, + result_trace, result_states): + N = 10 + ntraj = 5 + m_res = NmmcResult([], fill_options(), stats={"num_collapse": 2}) + m_res.add_end_condition(ntraj, None) + self._fill_trajectories(m_res, N, ntraj, collapse=True, + include_no_jump=include_no_jump, + rel_weights=martingale) + + np.testing.assert_allclose(np.array(m_res.times), np.arange(N)) + assert m_res.stats['end_condition'] == "ntraj reached" + 
self._check_types(m_res) + + assert np.all(np.array(m_res.col_which) < 2) + assert isinstance(m_res.collapse, list) + assert len(m_res.col_which[0]) == len(m_res.col_times[0]) + + np.testing.assert_almost_equal(m_res.average_trace, result_trace) + for s1, s2 in zip(m_res.average_states, result_states): + assert s1 == s2 + @pytest.mark.parametrize('keep_runs_results', [True, False]) + @pytest.mark.parametrize('include_no_jump', [True, False]) @pytest.mark.parametrize(["e_ops", "results"], [ pytest.param(qutip.num(5), [np.arange(5)], id="single-e-op"), pytest.param( @@ -239,12 +334,16 @@ def test_McResult(self, dm, keep_runs_results): id="list-e-ops", ), ]) - def test_multitraj_expect(self, keep_runs_results, e_ops, results): + def test_multitraj_expect(self, keep_runs_results, include_no_jump, + e_ops, results): N = 5 ntraj = 25 - opt = fill_options(keep_runs_results=keep_runs_results) + opt = fill_options( + keep_runs_results=keep_runs_results, store_final_state=True + ) m_res = MultiTrajResult(e_ops, opt, stats={}) - self._fill_trajectories(m_res, N, ntraj, noise=0.01) + self._fill_trajectories(m_res, N, ntraj, noise=0.01, + include_no_jump=include_no_jump) for expect, expected in zip(m_res.average_expect, results): np.testing.assert_allclose(expect, expected, @@ -260,18 +359,20 @@ def test_multitraj_expect(self, keep_runs_results, e_ops, results): np.testing.assert_allclose(expect, expected, atol=1e-14, rtol=0.1) - self._expect_check_types(m_res) - + self._check_types(m_res) + assert m_res.average_final_state is not None assert m_res.stats['end_condition'] == "unknown" @pytest.mark.parametrize('keep_runs_results', [True, False]) + @pytest.mark.parametrize('include_no_jump', [True, False]) @pytest.mark.parametrize('dm', [True, False]) - def test_multitraj_state(self, keep_runs_results, dm): + def test_multitraj_state(self, keep_runs_results, include_no_jump, dm): N = 5 ntraj = 25 opt = fill_options(keep_runs_results=keep_runs_results) m_res = MultiTrajResult([], 
opt) - self._fill_trajectories(m_res, N, ntraj, dm=dm) + self._fill_trajectories(m_res, N, ntraj, dm=dm, + include_no_jump=include_no_jump) np.testing.assert_allclose(np.array(m_res.times), np.arange(N)) @@ -289,12 +390,14 @@ def test_multitraj_state(self, keep_runs_results, dm): assert m_res.runs_final_states[i] == expected @pytest.mark.parametrize('keep_runs_results', [True, False]) + @pytest.mark.parametrize('include_no_jump', [True, False]) @pytest.mark.parametrize('targettol', [ pytest.param(0.1, id='atol'), pytest.param([0.001, 0.1], id='rtol'), pytest.param([[0.001, 0.1], [0.1, 0]], id='tol_per_e_op'), ]) - def test_multitraj_targettol(self, keep_runs_results, targettol): + def test_multitraj_targettol(self, keep_runs_results, + include_no_jump, targettol): N = 10 ntraj = 1000 opt = fill_options( @@ -302,10 +405,11 @@ def test_multitraj_targettol(self, keep_runs_results, targettol): ) m_res = MultiTrajResult([qutip.num(N), qutip.qeye(N)], opt, stats={}) m_res.add_end_condition(ntraj, targettol) - self._fill_trajectories(m_res, N, ntraj, noise=0.1) + self._fill_trajectories(m_res, N, ntraj, noise=0.1, + include_no_jump=include_no_jump) assert m_res.stats['end_condition'] == "target tolerance reached" - assert m_res.num_trajectories <= 1000 + assert m_res.num_trajectories <= 500 def test_multitraj_steadystate(self): N = 5 @@ -329,20 +433,28 @@ def test_repr(self, keep_runs_results): if keep_runs_results: assert "Trajectories saved." 
in repr - @pytest.mark.parametrize('keep_runs_results', [True, False]) - def test_merge_result(self, keep_runs_results): + @pytest.mark.parametrize('keep_runs_results1', [True, False]) + @pytest.mark.parametrize('keep_runs_results2', [True, False]) + def test_merge_result(self, keep_runs_results1, keep_runs_results2): N = 10 opt = fill_options( - keep_runs_results=keep_runs_results, store_states=True + keep_runs_results=keep_runs_results1, store_states=True ) m_res1 = MultiTrajResult([qutip.num(10)], opt, stats={"run time": 1}) self._fill_trajectories(m_res1, N, 10, noise=0.1) + opt = fill_options( + keep_runs_results=keep_runs_results2, store_states=True + ) m_res2 = MultiTrajResult([qutip.num(10)], opt, stats={"run time": 2}) self._fill_trajectories(m_res2, N, 30, noise=0.1) merged_res = m_res1 + m_res2 assert merged_res.num_trajectories == 40 + assert len(merged_res.seeds) == 40 + assert len(merged_res.times) == 10 + assert len(merged_res.e_ops) == 1 + self._check_types(merged_res) np.testing.assert_allclose(merged_res.average_expect[0], np.arange(10), rtol=0.1) np.testing.assert_allclose( @@ -350,5 +462,103 @@ def test_merge_result(self, keep_runs_results): np.ones(N), rtol=0.1 ) - assert bool(merged_res.trajectories) == keep_runs_results + assert bool(merged_res.trajectories) == ( + keep_runs_results1 and keep_runs_results2 + ) assert merged_res.stats["run time"] == 3 + + def _random_ensemble(self, abs_weights=True, collapse=False, trace=False, + time_dep_weights=False, cls=MultiTrajResult): + dim = 10 + ntraj = 10 + tlist = [1, 2, 3] + + opt = fill_options( + keep_runs_results=False, store_states=True, store_final_state=True + ) + res = cls([qutip.num(dim)], opt, stats={"run time": 0, + "num_collapse": 2}) + + for _ in range(ntraj): + traj = TrajectoryResult(res._raw_ops, res.options) + seeds = np.random.randint(10_000, size=len(tlist)) + for t, seed in zip(tlist, seeds): + random_state = qutip.rand_ket(dim, seed=seed) + traj.add(t, random_state) + + if 
time_dep_weights and np.random.randint(2): + weights = np.random.rand(len(tlist)) + else: + weights = np.random.rand() + if abs_weights and np.random.randint(2): + traj.add_absolute_weight(weights) + else: + traj.add_relative_weight(weights) + + if collapse: + traj.collapse = [] + for _ in range(np.random.randint(5)): + traj.collapse.append( + (np.random.uniform(tlist[0], tlist[-1]), + np.random.randint(2))) + if trace: + traj.trace = np.random.rand(len(tlist)) + res.add((0, traj)) + + return res + + @pytest.mark.parametrize('abs_weights1', [True, False]) + @pytest.mark.parametrize('abs_weights2', [True, False]) + @pytest.mark.parametrize('p', [0, 0.1, 1, None]) + def test_merge_weights(self, abs_weights1, abs_weights2, p): + ensemble1 = self._random_ensemble(abs_weights1) + ensemble2 = self._random_ensemble(abs_weights2) + merged = ensemble1.merge(ensemble2, p=p) + + if p is None: + p = 0.5 + + np.testing.assert_almost_equal( + merged.expect[0], + p * ensemble1.expect[0] + (1 - p) * ensemble2.expect[0] + ) + + assert merged.final_state == ( + p * ensemble1.final_state + (1 - p) * ensemble2.final_state + ) + + for state1, state2, state in zip( + ensemble1.states, ensemble2.states, merged.states): + assert state == p * state1 + (1 - p) * state2 + + @pytest.mark.parametrize('p', [0, 0.1, 1, None]) + def test_merge_mcresult(self, p): + ensemble1 = self._random_ensemble(collapse=True, + time_dep_weights=False, cls=McResult) + ensemble2 = self._random_ensemble(collapse=True, + time_dep_weights=False, cls=McResult) + merged = ensemble1.merge(ensemble2, p=p) + + if p is None: + p = 0.5 + + assert merged.num_trajectories == len(merged.collapse) + + for c1, c2, c in zip(ensemble1.photocurrent, + ensemble2.photocurrent, + merged.photocurrent): + np.testing.assert_almost_equal(c, p * c1 + (1 - p) * c2) + + @pytest.mark.parametrize('p', [0, 0.1, 1, None]) + def test_merge_nmmcresult(self, p): + ensemble1 = self._random_ensemble( + collapse=True, trace=True, 
time_dep_weights=True, cls=NmmcResult) + ensemble2 = self._random_ensemble( + collapse=True, trace=True, time_dep_weights=True, cls=NmmcResult) + merged = ensemble1.merge(ensemble2, p=p) + + if p is None: + p = 0.5 + + np.testing.assert_almost_equal( + merged.trace, p * ensemble1.trace + (1 - p) * ensemble2.trace) diff --git a/qutip/tests/solver/test_sesolve.py b/qutip/tests/solver/test_sesolve.py index cfa5ee86ea..af247c0aec 100644 --- a/qutip/tests/solver/test_sesolve.py +++ b/qutip/tests/solver/test_sesolve.py @@ -301,9 +301,19 @@ def test_krylovsolve(always_compute_step): e_op.dims = H.dims tlist = np.linspace(0, 1, 11) ref = sesolve(H, psi0, tlist, e_ops=[e_op]).expect[0] - options = {"always_compute_step", always_compute_step} - krylov_sol = krylovsolve(H, psi0, tlist, 20, e_ops=[e_op]).expect[0] - np.testing.assert_allclose(ref, krylov_sol) + options = {"always_compute_step": always_compute_step} + krylov_sol = krylovsolve(H, psi0, tlist, 20, e_ops=[e_op], options=options) + np.testing.assert_allclose(ref, krylov_sol.expect[0]) + + +def test_krylovsolve_error(): + H = qutip.rand_herm(256, density=0.2) + psi0 = qutip.basis([256], [255]) + tlist = np.linspace(0, 1, 11) + options = {"min_step": 1e10} + with pytest.raises(ValueError) as err: + krylovsolve(H, psi0, tlist, 20, options=options) + assert "error with the minimum step" in str(err.value) def test_feedback(): diff --git a/qutip/tests/solver/test_sode_method.py b/qutip/tests/solver/test_sode_method.py index 356a2ce70a..5416f5335a 100644 --- a/qutip/tests/solver/test_sode_method.py +++ b/qutip/tests/solver/test_sode_method.py @@ -60,7 +60,7 @@ def _make_oper(kind, N): pytest.param("Euler", 0.5, {}, id="Euler"), pytest.param("Milstein", 1.0, {}, id="Milstein"), pytest.param("Milstein_imp", 1.0, {}, id="Milstein implicit"), - pytest.param("Milstein_imp", 1.0, {"imp_method": "inv"}, + pytest.param("Milstein_imp", 1.0, {"solve_method": "inv"}, id="Milstein implicit inv"), pytest.param("Platen", 1.0, {}, 
id="Platen"), pytest.param("PredCorr", 1.0, {}, id="PredCorr"), @@ -68,7 +68,7 @@ def _make_oper(kind, N): pytest.param("Taylor15", 1.5, {}, id="Taylor15"), pytest.param("Explicit15", 1.5, {}, id="Explicit15"), pytest.param("Taylor15_imp", 1.5, {}, id="Taylor15 implicit"), - pytest.param("Taylor15_imp", 1.5, {"imp_method": "inv"}, + pytest.param("Taylor15_imp", 1.5, {"solve_method": "inv"}, id="Taylor15 implicit inv"), ]) @pytest.mark.parametrize(['H', 'sc_ops'], [ @@ -79,7 +79,7 @@ def _make_oper(kind, N): pytest.param("qeye", ["qeye", "destroy", "destroy2"], id='3 sc_ops'), ]) def test_methods(H, sc_ops, method, order, kw): - if kw == {"imp_method": "inv"} and ("td" in H or "td" in sc_ops[0]): + if kw == {"solve_method": "inv"} and ("td" in H or "td" in sc_ops[0]): pytest.skip("inverse method only available for constant cases.") N = 5 H = _make_oper(H, N) diff --git a/qutip/tests/solver/test_steadystate.py b/qutip/tests/solver/test_steadystate.py index 161e904e95..43810caa44 100644 --- a/qutip/tests/solver/test_steadystate.py +++ b/qutip/tests/solver/test_steadystate.py @@ -4,6 +4,8 @@ import qutip import warnings from packaging import version as pac_version +from qutip.solver.steadystate import _permute_rcm, _permute_wbm +import qutip.core.data as _data @pytest.mark.parametrize(['method', 'kwargs'], [ @@ -194,42 +196,83 @@ def test_steadystate_floquet(sparse): Test the steadystate solution for a periodically driven system. 
""" - N_c = 20 - - a = qutip.destroy(N_c) - a_d = a.dag() - X_c = a + a_d + sz = qutip.sigmaz() + sx = qutip.sigmax() w_c = 1 - - A_l = 0.001 + A_l = 0.5 w_l = w_c gam = 0.01 - H = w_c * a_d * a + H = 0.5 * w_c * sz - H_t = [H, [X_c, lambda t, args: args["A_l"] * np.cos(args["w_l"] * t)]] + H_t = [H, [sx, lambda t, args: args["A_l"] * np.cos(args["w_l"] * t)]] - psi0 = qutip.fock(N_c, 0) + psi0 = qutip.basis(2, 0) args = {"A_l": A_l, "w_l": w_l} c_ops = [] - c_ops.append(np.sqrt(gam) * a) - - t_l = np.linspace(0, 20 / gam, 2000) + c_ops.append(np.sqrt(gam) * qutip.destroy(2).dag()) + T_max = 20 * 2 * np.pi / gam + t_l = np.linspace(0, T_max, 20000) expect_me = qutip.mesolve(H_t, psi0, t_l, - c_ops, [a_d * a], args=args).expect[0] + c_ops, [sz], args=args).expect[0] rho_ss = qutip.steadystate_floquet(H, c_ops, - A_l * X_c, w_l, n_it=3, sparse=sparse) - expect_ss = qutip.expect(a_d * a, rho_ss) + A_l * sx, w_l, n_it=3, sparse=sparse) + expect_ss = qutip.expect(sz, rho_ss) + + dt = T_max / len(t_l) + one_period = int(1/(w_l/(2*np.pi)) / dt) - np.testing.assert_allclose(expect_me[-20:], expect_ss, atol=1e-3) + average_ex = sum(expect_me[-one_period:]) / float(one_period) + + np.testing.assert_allclose(average_ex, expect_ss, atol=1e-2) assert rho_ss.tr() == pytest.approx(1, abs=1e-15) +def test_rcm(): + N = 5 + a = qutip.destroy(N, dtype="CSR") + I = qutip.qeye(N, dtype="CSR") + H = (a + a.dag() & I) + (I & a * a.dag()) + c_ops = [a & I, I & a] + L = qutip.liouvillian(H, c_ops).data + b = qutip.basis(N**4).data + + def bandwidth(mat): + return sum(scipy.linalg.bandwidth(mat.to_array())) + + # rcm should reduce bandwidth + assert bandwidth(L) > bandwidth(_permute_rcm(L, b)[0]) + + +def test_wbm(): + N = 5 + a = qutip.destroy(N, dtype="CSR") + I = qutip.qeye(N, dtype="CSR") + H = (a + a.dag() & I) + (I & a * a.dag()) + c_ops = [a & I, I & a] + L = qutip.liouvillian(H, c_ops).data + b = qutip.basis(N**4).data + + # shuffling the Liouvillian to ensure the diag is almost 
empty + perm = np.arange(N**4) + np.random.shuffle(perm) + L = _data.permute.indices(L, None, perm) + + def dia_dominance(mat): + mat = mat.to_array() + norm = np.sum(np.abs(mat)) + diag = np.sum(np.abs(np.diagonal(mat))) + return diag / norm + + # wbm increase diagonal dominance + assert dia_dominance(L) < dia_dominance(_permute_wbm(L, b)[0]) + + def test_bad_options_steadystate(): N = 4 a = qutip.destroy(N) diff --git a/qutip/tests/solver/test_stochastic.py b/qutip/tests/solver/test_stochastic.py index 32a42265b1..dc375c705b 100644 --- a/qutip/tests/solver/test_stochastic.py +++ b/qutip/tests/solver/test_stochastic.py @@ -15,6 +15,10 @@ def _make_system(N, system): gamma = 0.25 a = destroy(N) + if system == "no sc_ops": + H = a.dag() * a + sc_ops = [] + if system == "simple": H = a.dag() * a sc_ops = [np.sqrt(gamma) * a] @@ -39,7 +43,7 @@ def _make_system(N, system): @pytest.mark.parametrize("system", [ - "simple", "2 c_ops", "H td", "complex", "c_ops td", + "no sc_ops", "simple", "2 c_ops", "H td", "complex", "c_ops td", ]) @pytest.mark.parametrize("heterodyne", [True, False]) def test_smesolve(heterodyne, system): @@ -73,13 +77,15 @@ def test_smesolve(heterodyne, system): ) +@pytest.mark.parametrize("system", [ + "no sc_ops", "simple" +]) @pytest.mark.parametrize("heterodyne", [True, False]) @pytest.mark.parametrize("method", SMESolver.avail_integrators().keys()) -def test_smesolve_methods(method, heterodyne): +def test_smesolve_methods(method, heterodyne, system): tol = 0.05 N = 4 ntraj = 20 - system = "simple" H, sc_ops = _make_system(N, system) c_ops = [destroy(N)] @@ -138,7 +144,7 @@ def test_smesolve_methods(method, heterodyne): @pytest.mark.parametrize("system", [ - "simple", "2 c_ops", "H td", "complex", "c_ops td", + "no sc_ops", "simple", "2 c_ops", "H td", "complex", "c_ops td", ]) @pytest.mark.parametrize("heterodyne", [True, False]) def test_ssesolve(heterodyne, system): @@ -174,14 +180,16 @@ def test_ssesolve(heterodyne, system): assert res.dW is 
None +@pytest.mark.parametrize("system", [ + "no sc_ops", "simple" +]) @pytest.mark.parametrize("heterodyne", [True, False]) @pytest.mark.parametrize("method", SSESolver.avail_integrators().keys()) -def test_ssesolve_method(method, heterodyne): +def test_ssesolve_method(method, heterodyne, system): "Stochastic: smesolve: homodyne, time-dependent H" tol = 0.1 N = 4 ntraj = 20 - system = "simple" H, sc_ops = _make_system(N, system) psi0 = coherent(N, 0.5) @@ -281,10 +289,43 @@ def test_reuse_seeds(): @pytest.mark.parametrize("heterodyne", [True, False]) -def test_m_ops(heterodyne): +def test_measurements(heterodyne): N = 10 ntraj = 1 + H = num(N) + sc_ops = [destroy(N)] + psi0 = basis(N, N-1) + + times = np.linspace(0, 1.0, 11) + + solver = SMESolver(H, sc_ops, heterodyne=heterodyne) + + solver.options["store_measurement"] = "start" + res_start = solver.run(psi0, times, ntraj=ntraj, seeds=1) + + solver.options["store_measurement"] = "middle" + res_middle = solver.run(psi0, times, ntraj=ntraj, seeds=1) + + solver.options["store_measurement"] = "end" + res_end = solver.run(psi0, times, ntraj=ntraj, seeds=1) + + diff = np.sum(np.abs(res_end.measurement[0] - res_start.measurement[0])) + assert diff > 0.1 # Each measurement should be different by ~dt + np.testing.assert_allclose( + res_middle.measurement[0] * 2, + res_start.measurement[0] + res_end.measurement[0], + ) + + np.testing.assert_allclose( + np.diff(res_start.wiener_process[0][0]), res_start.dW[0][0] + ) + + +@pytest.mark.parametrize("heterodyne", [True, False]) +def test_m_ops(heterodyne): + N = 10 + H = num(N) sc_ops = [destroy(N), qeye(N)] psi0 = basis(N, N-1) @@ -294,7 +335,7 @@ def test_m_ops(heterodyne): times = np.linspace(0, 1.0, 51) - options = {"store_measurement": True,} + options = {"store_measurement": "end",} solver = SMESolver(H, sc_ops, heterodyne=heterodyne, options=options) solver.m_ops = m_ops @@ -322,7 +363,6 @@ def test_m_ops(heterodyne): def test_feedback(): - tol = 0.05 N = 10 ntraj = 2 @@ 
-334,18 +374,20 @@ def func(t, A, W): [destroy(N), func], args={ "A": SMESolver.ExpectFeedback(num(10)), - "W": SMESolver.WeinerFeedback() + "W": SMESolver.WienerFeedback() } )] psi0 = basis(N, N-3) - times = np.linspace(0, 10, 101) - options = {"map": "serial", "dt": 0.001} + times = np.linspace(0, 2, 101) + options = {"map": "serial", "dt": 0.0005} solver = SMESolver(H, sc_ops=sc_ops, heterodyne=False, options=options) results = solver.run(psi0, times, e_ops=[num(N)], ntraj=ntraj) - assert np.all(results.expect[0] > 6.-1e-6) + # If this was deterministic, it should never go under `6`. + # We add a tolerance ~dt due to the stochatic part. + assert np.all(results.expect[0] > 6. - 0.001) assert np.all(results.expect[0][-20:] < 6.7) @@ -380,3 +422,175 @@ def test_small_step_warnings(method): qeye(2), basis(2), [0, 0.0000001], [qeye(2)], options={"method": method} ) + + +@pytest.mark.parametrize("method", ["euler", "platen"]) +@pytest.mark.parametrize("heterodyne", [True, False]) +def test_run_from_experiment_close(method, heterodyne): + N = 5 + + H = num(N) + a = destroy(N) + sc_ops = [a, a @ a + (a @ a).dag()] + psi0 = basis(N, N-1) + tlist = np.linspace(0, 0.1, 501) + options = { + "store_measurement": "start", + "dt": tlist[1], + "store_states": True, + "method": method, + } + solver = SSESolver(H, sc_ops, heterodyne, options=options) + res_forward = solver.run(psi0, tlist, 1, e_ops=[H]) + res_backward = solver.run_from_experiment( + psi0, tlist, res_forward.dW[0], e_ops=[H] + ) + res_measure = solver.run_from_experiment( + psi0, tlist, res_forward.measurement[0], e_ops=[H], measurement=True + ) + + np.testing.assert_allclose( + res_backward.measurement, res_forward.measurement[0], atol=1e-10 + ) + np.testing.assert_allclose( + res_measure.measurement, res_forward.measurement[0], atol=1e-10 + ) + + np.testing.assert_allclose(res_backward.dW, res_forward.dW[0], atol=1e-10) + np.testing.assert_allclose(res_measure.dW, res_forward.dW[0], atol=1e-10) + + 
np.testing.assert_allclose( + res_backward.expect, res_forward.expect, atol=1e-10 + ) + np.testing.assert_allclose( + res_measure.expect, res_forward.expect, atol=1e-10 + ) + + +@pytest.mark.parametrize( + "method", ["euler", "milstein", "platen", "pred_corr"] +) +@pytest.mark.parametrize("heterodyne", [True, False]) +def test_run_from_experiment_open(method, heterodyne): + N = 10 + + H = num(N) + a = destroy(N) + sc_ops = [a, a.dag() * 0.1] + psi0 = basis(N, N-1) + tlist = np.linspace(0, 1, 251) + options = { + "store_measurement": "start", + "dt": tlist[1], + "store_states": True, + "method": method, + } + solver = SMESolver(H, sc_ops, heterodyne, options=options) + res_forward = solver.run(psi0, tlist, 1, e_ops=[H]) + res_backward = solver.run_from_experiment( + psi0, tlist, res_forward.dW[0], e_ops=[H] + ) + res_measure = solver.run_from_experiment( + psi0, tlist, res_forward.measurement[0], e_ops=[H], measurement=True + ) + + np.testing.assert_allclose( + res_backward.measurement, res_forward.measurement[0], atol=1e-10 + ) + np.testing.assert_allclose( + res_measure.measurement, res_forward.measurement[0], atol=1e-10 + ) + + np.testing.assert_allclose(res_backward.dW, res_forward.dW[0], atol=1e-10) + np.testing.assert_allclose(res_measure.dW, res_forward.dW[0], atol=1e-10) + + np.testing.assert_allclose( + res_backward.expect, res_forward.expect, atol=1e-10 + ) + np.testing.assert_allclose( + res_measure.expect, res_forward.expect, atol=1e-10 + ) + + +@pytest.mark.parametrize("store_measurement", [True, False]) +@pytest.mark.parametrize("keep_runs_results", [True, False]) +def test_merge_results(store_measurement, keep_runs_results): + # Running smesolve with mixed ICs should be the same as running smesolve + # multiple times and merging the results afterwards + initial_state1 = basis([2, 2], [1, 0]) + initial_state2 = basis([2, 2], [1, 1]) + H = qeye([2, 2]) + L = destroy(2) & qeye(2) + tlist = np.linspace(0, 1, 11) + e_ops = [num(2) & qeye(2), qeye(2) & 
num(2)] + + options = { + "store_measurement": True, + "keep_runs_results": True, + "store_states": True, + } + solver = SMESolver(H, [L], True, options=options) + result1 = solver.run(initial_state1, tlist, 5, e_ops=e_ops) + + options = { + "store_measurement": store_measurement, + "keep_runs_results": keep_runs_results, + "store_states": True, + } + solver = SMESolver(H, [L], True, options=options) + result2 = solver.run(initial_state2, tlist, 10, e_ops=e_ops) + + result_merged = result1 + result2 + assert len(result_merged.seeds) == 15 + if store_measurement: + assert ( + result_merged.average_states[0] == + (initial_state1.proj() + 2 * initial_state2.proj()).unit() + ) + np.testing.assert_allclose(result_merged.average_expect[0][0], 1) + np.testing.assert_allclose(result_merged.average_expect[1], 2/3) + + if store_measurement: + assert len(result_merged.measurement) == 15 + assert len(result_merged.dW) == 15 + assert all( + dw.shape == result_merged.dW[0].shape + for dw in result_merged.dW + ) + assert len(result_merged.wiener_process) == 15 + assert all( + w.shape == result_merged.wiener_process[0].shape + for w in result_merged.wiener_process + ) + +@pytest.mark.parametrize("open", [True, False]) +@pytest.mark.parametrize("heterodyne", [True, False]) +def test_step(open, heterodyne): + state0 = basis(5, 3) + kw = {} + if open: + SolverCls = SMESolver + state0 = state0.proj() + else: + SolverCls = SSESolver + + solver = SolverCls( + num(5), + sc_ops=[destroy(5), destroy(5)**2 / 10], + heterodyne=heterodyne, + options={"dt": 0.001}, + **kw + ) + solver.start(state0, t0=0) + state1 = solver.step(0.01) + assert state1.dims == state0.dims + assert state1.norm() == pytest.approx(1, abs=0.01) + state2, dW = solver.step(0.02, wiener_increment=True) + assert state2.dims == state0.dims + assert state2.norm() == pytest.approx(1, abs=0.01) + if heterodyne: + assert dW.shape == (2, 2) + assert abs(dW[0, 0]) < 0.5 # 5 sigmas + else: + assert dW.shape == (2,) + assert 
abs(dW[0]) < 0.5 # 5 sigmas diff --git a/qutip/tests/test_enr_state_operator.py b/qutip/tests/test_enr_state_operator.py index d5949eaea7..9257e3266e 100644 --- a/qutip/tests/test_enr_state_operator.py +++ b/qutip/tests/test_enr_state_operator.py @@ -152,3 +152,40 @@ def test_mesolve_ENR(): np.testing.assert_allclose(result_JC.expect[0], result_enr.expect[0], atol=1e-2) + + +def test_steadystate_ENR(): + # Ensure ENR states work with steadystate functions + # We compare the output to an exact truncation of the + # single-excitation Jaynes-Cummings model + eps = 2 * np.pi + omega_c = 2 * np.pi + g = 0.1 * omega_c + gam = 0.01 * omega_c + N_cut = 2 + + sz = qutip.sigmaz() & qutip.qeye(N_cut) + sm = qutip.destroy(2).dag() & qutip.qeye(N_cut) + a = qutip.qeye(2) & qutip.destroy(N_cut) + H_JC = (0.5 * eps * sz + omega_c * a.dag()*a + + g * (a * sm.dag() + a.dag() * sm)) + c_ops = [np.sqrt(gam) * a] + + result_JC = qutip.steadystate(H_JC, c_ops) + exp_sz_JC = qutip.expect(sz, result_JC) + + N_exc = 1 + dims = [2, N_cut] + d = qutip.enr_destroy(dims, N_exc) + sz = 2*d[0].dag()*d[0]-1 + b = d[0] + a = d[1] + H_enr = (eps * b.dag()*b + omega_c * a.dag() * a + + g * (b.dag() * a + a.dag() * b)) + c_ops = [np.sqrt(gam) * a] + + result_enr = qutip.steadystate(H_enr, c_ops) + exp_sz_enr = qutip.expect(sz, result_enr) + + np.testing.assert_allclose(exp_sz_JC, + exp_sz_enr, atol=1e-2) diff --git a/qutip/tests/test_progressbar.py b/qutip/tests/test_progressbar.py index d1fa275f17..3445ebd2a3 100644 --- a/qutip/tests/test_progressbar.py +++ b/qutip/tests/test_progressbar.py @@ -34,6 +34,30 @@ def test_progressbar(pbar): assert bar.total_time() > 0 +@pytest.mark.parametrize("pbar", bars) +def test_progressbar_too_few_update(pbar): + N = 5 + bar = progress_bars[pbar](N) + assert bar.total_time() < 0 + for _ in range(N-2): + time.sleep(0.01) + bar.update() + bar.finished() + assert bar.total_time() > 0 + + +@pytest.mark.parametrize("pbar", bars) +def 
test_progressbar_too_many_update(pbar): + N = 5 + bar = progress_bars[pbar](N) + assert bar.total_time() < 0 + for _ in range(N+2): + time.sleep(0.01) + bar.update() + bar.finished() + assert bar.total_time() > 0 + + @pytest.mark.parametrize("pbar", bars[1:]) def test_progressbar_has_print(pbar, capsys): N = 2 diff --git a/qutip/tests/test_random.py b/qutip/tests/test_random.py index 085e940448..d5a860d79d 100644 --- a/qutip/tests/test_random.py +++ b/qutip/tests/test_random.py @@ -6,6 +6,7 @@ from qutip import qeye, num, to_kraus, kraus_to_choi, CoreOptions, Qobj from qutip import data as _data +from qutip.core.dimensions import Space from qutip.random_objects import ( rand_herm, rand_unitary, @@ -22,8 +23,9 @@ 12, [8], [2, 2, 3], - [[2], [2]] -], ids=["int", "list", "tensor", "super"]) + [[2], [2]], + Space(3), +], ids=["int", "list", "tensor", "super", "Space"]) def dimensions(request): return request.param @@ -45,6 +47,8 @@ def _assert_density(qobj, density): def _assert_metadata(random_qobj, dims, dtype=None, super=False, ket=False): if isinstance(dims, int): dims = [dims] + elif isinstance(dims, Space): + dims = dims.as_list() N = np.prod(dims) if super and not isinstance(dims[0], list): target_dims_0 = [dims, dims] @@ -95,7 +99,10 @@ def test_rand_herm_Eigs(dimensions, density): """ Random Qobjs: Hermitian matrix - Eigs given """ - N = np.prod(dimensions) + if isinstance(dimensions, Space): + N = dimensions.size + else: + N = np.prod(dimensions) eigs = np.random.random(N) eigs /= np.sum(eigs) eigs.sort() @@ -141,8 +148,11 @@ def test_rand_dm(dimensions, kw, dtype, distribution): """ Random Qobjs: Density matrix """ - N = np.prod(dimensions) - print(N, kw) + if isinstance(dimensions, Space): + N = dimensions.size + else: + N = np.prod(dimensions) + if "eigenvalues" in kw: eigs = np.random.random(N) eigs /= np.sum(eigs) @@ -238,6 +248,8 @@ def test_rand_super_bcsz(dimensions, dtype, rank, superrep): random_qobj = rand_super_bcsz(dimensions, rank=rank, 
dtype=dtype, superrep=superrep) + if isinstance(dimensions, Space): + dimensions = dimensions.as_list() assert random_qobj.issuper with CoreOptions(atol=1e-9): assert random_qobj.iscptp diff --git a/qutip/tests/test_wigner.py b/qutip/tests/test_wigner.py index 93b5e114e8..d958183d6c 100644 --- a/qutip/tests/test_wigner.py +++ b/qutip/tests/test_wigner.py @@ -1,5 +1,6 @@ import pytest import numpy as np +from scipy.integrate import trapezoid import itertools from scipy.special import laguerre from numpy.random import rand @@ -634,7 +635,9 @@ def test_spin_q_function_normalized(spin, pure): phi = np.linspace(-np.pi, np.pi, 256, endpoint=True) Q, THETA, _ = qutip.spin_q_function(rho, theta, phi) - norm = d / (4 * np.pi) * np.trapz(np.trapz(Q * np.sin(THETA), theta), phi) + norm = d / (4 * np.pi) * trapezoid( + trapezoid(Q * np.sin(THETA), theta), phi + ) assert_allclose(norm, 1, atol=2e-4) @@ -657,7 +660,9 @@ def test_spin_wigner_normalized(spin, pure): phi = np.linspace(-np.pi, np.pi, 512, endpoint=True) W, THETA, PHI = qutip.spin_wigner(rho, theta, phi) - norm = np.trapz(np.trapz(W * np.sin(THETA) * np.sqrt(d / (4*np.pi)), theta), phi) + norm = trapezoid( + trapezoid(W * np.sin(THETA) * np.sqrt(d / (4*np.pi)), theta), phi + ) assert_almost_equal(norm, 1, decimal=4) @pytest.mark.parametrize(['spin'], [ @@ -684,6 +689,6 @@ def test_spin_wigner_overlap(spin, pure, n=5): state_overlap = (test_state*rho).tr().real W_state, _, _ = qutip.spin_wigner(test_state, theta, phi) - W_overlap = np.trapz( - np.trapz(W_state * W * np.sin(THETA), theta), phi).real + W_overlap = trapezoid( + trapezoid(W_state * W * np.sin(THETA), theta), phi).real assert_almost_equal(W_overlap, state_overlap, decimal=4) diff --git a/qutip/typing.py b/qutip/typing.py new file mode 100644 index 0000000000..88186d72bb --- /dev/null +++ b/qutip/typing.py @@ -0,0 +1,53 @@ +from typing import Sequence, Union, Any, Protocol, Callable, TypeVar +from numbers import Number, Real +import numpy as np +import 
scipy.interpolate + + +__all__ = ["QobjEvoLike", "CoefficientLike", "LayerType"] + + +class QEvoProtocol(Protocol): + def __call__(self, t: Real, **kwargs) -> "Qobj": + ... + + +class CoeffProtocol(Protocol): + def __call__(self, t: Real, **kwargs) -> Number: + ... + + +CoefficientLike = Union[ + "Coefficient", + float, + str, + CoeffProtocol, + np.ndarray, + scipy.interpolate.PPoly, + scipy.interpolate.BSpline, + Any, +] + + +QobjOrData = TypeVar("QobjOrData", "Qobj", "Data") + + +EopsLike = Union["Qobj", "QobjEvo", Callable[[float, "Qobj"], Any]] + + +ElementType = Union[QEvoProtocol, "Qobj", tuple["Qobj", CoefficientLike]] + + +QobjEvoLike = Union["Qobj", "QobjEvo", ElementType, Sequence[ElementType]] + + +LayerType = Union[str, type] + + +SpaceLike = Union[int, list[int], list[list[int]], "Space"] + + +DimensionLike = Union[ + list[SpaceLike, SpaceLike], + "Dimensions", +] diff --git a/qutip/ui/progressbar.py b/qutip/ui/progressbar.py index 1999d0bf8f..7bb8d8c962 100644 --- a/qutip/ui/progressbar.py +++ b/qutip/ui/progressbar.py @@ -44,7 +44,7 @@ def time_elapsed(self): return "%6.2fs" % (time.time() - self.t_start) def time_remaining_est(self, p): - if p > 0.0: + if 100 >= p > 0.0: t_r_est = (time.time() - self.t_start) * (100.0 - p) / p else: t_r_est = 0 diff --git a/qutip/utilities.py b/qutip/utilities.py index 2032b4be30..46a028a5da 100644 --- a/qutip/utilities.py +++ b/qutip/utilities.py @@ -108,7 +108,8 @@ def clebsch(j1, j2, j3, m1, m2, m3): C = np.sqrt((2.0 * j3 + 1.0)*_to_long(c_factor)) s_factors = np.zeros(((vmax + 1 - vmin), (int(j1 + j2 + j3))), np.int32) - sign = (-1) ** (vmin + j2 + m2) + # `S` and `C` are large integer,s if `sign` is a np.int32 it could oveflow + sign = int((-1) ** (vmin + j2 + m2)) for i,v in enumerate(range(vmin, vmax + 1)): factor = s_factors[i,:] _factorial_prod(j2 + j3 + m1 - v, factor) diff --git a/qutip/visualization.py b/qutip/visualization.py index b5f10f7dcf..048c0b57da 100644 --- a/qutip/visualization.py +++ 
b/qutip/visualization.py @@ -9,7 +9,6 @@ 'plot_spin_distribution', 'complex_array_to_rgb', 'plot_qubism', 'plot_schmidt'] -import warnings import itertools as it import numpy as np from numpy import pi, array, sin, cos, angle, log2, sqrt @@ -17,9 +16,8 @@ from packaging.version import parse as parse_version from . import ( - Qobj, isket, ket2dm, tensor, vector_to_operator, to_super, settings + Qobj, isket, ket2dm, tensor, vector_to_operator, settings ) -from .core.dimensions import flatten from .core.superop_reps import _to_superpauli, isqubitdims from .wigner import wigner from .matplotlib_utilities import complex_phase_cmap @@ -670,10 +668,54 @@ def _get_matrix_components(option, M, argument): f"{option} for {argument}") -def matrix_histogram(M, x_basis=None, y_basis=None, limits=None, - bar_style='real', color_limits=None, color_style='real', - options=None, *, cmap=None, colorbar=True, - fig=None, ax=None): +def sph2cart(r, theta, phi): + """spherical to cartesian transformation.""" + x = r * np.sin(theta) * np.cos(phi) + y = r * np.sin(theta) * np.sin(phi) + z = r * np.cos(theta) + return x, y, z + + +def sphview(ax): + """ + returns the camera position for 3D axes in spherical coordinates.""" + xlim = ax.get_xlim() + ylim = ax.get_ylim() + zlim = ax.get_zlim() + # Compute based on the plots xyz limits. + r = 0.5 * np.sqrt( + (xlim[1] - xlim[0]) ** 2 + + (ylim[1] - ylim[0]) ** 2 + + (zlim[1] - zlim[0]) ** 2 + ) + theta, phi = np.radians((90 - ax.elev, ax.azim)) + return r, theta, phi + + +def get_camera_position(ax): + """ + returns the camera position for 3D axes in cartesian coordinates + as a 3d numpy array. 
+ """ + r, theta, phi = sphview(ax) + return np.array(sph2cart(r, theta, phi), ndmin=3).T + + +def matrix_histogram( + M, + x_basis=None, + y_basis=None, + limits=None, + bar_style="real", + color_limits=None, + color_style="real", + options=None, + *, + cmap=None, + colorbar=True, + fig=None, + ax=None, +): """ Draw a histogram for the matrix M, with the given x and y labels and title. @@ -791,11 +833,20 @@ def matrix_histogram(M, x_basis=None, y_basis=None, limits=None, """ - # default options - default_opts = {'zticks': None, 'bars_spacing': 0.2, - 'bars_alpha': 1., 'bars_lw': 0.5, 'bars_edgecolor': 'k', - 'shade': True, 'azim': -35, 'elev': 35, 'stick': False, - 'cbar_pad': 0.04, 'cbar_to_z': False, 'threshold': None} + default_opts = { + "zticks": None, + "bars_spacing": 0.3, + "bars_alpha": 1.0, + "bars_lw": 0.7, + "bars_edgecolor": "k", + "shade": True, + "azim": -60, + "elev": 30, + "stick": False, + "cbar_pad": 0.04, + "cbar_to_z": False, + "threshold": None, + } # update default_opts from input options if options is None: @@ -804,8 +855,10 @@ def matrix_histogram(M, x_basis=None, y_basis=None, limits=None, if isinstance(options, dict): # check if keys in options dict are valid if set(options) - set(default_opts): - raise ValueError("invalid key(s) found in options: " - f"{', '.join(set(options) - set(default_opts))}") + raise ValueError( + "invalid key(s) found in options: " + f"{', '.join(set(options) - set(default_opts))}" + ) else: # updating default options default_opts.update(options) @@ -813,7 +866,7 @@ def matrix_histogram(M, x_basis=None, y_basis=None, limits=None, else: raise ValueError("options must be a dictionary") - fig, ax = _is_fig_and_ax(fig, ax, projection='3d') + fig, ax = _is_fig_and_ax(fig, ax, projection="3d") if not isinstance(M, list): Ms = [M] @@ -822,8 +875,7 @@ def matrix_histogram(M, x_basis=None, y_basis=None, limits=None, _equal_shape(Ms) - for i in range(len(Ms)): - M = Ms[i] + for i, M in enumerate(Ms): if isinstance(M, 
Qobj): if x_basis is None: x_basis = list(_cb_labels([M.shape[0]])[0]) @@ -832,10 +884,9 @@ def matrix_histogram(M, x_basis=None, y_basis=None, limits=None, # extract matrix data from Qobj M = M.full() - bar_M = _get_matrix_components(bar_style, M, 'bar_style') + bar_M = _get_matrix_components(bar_style, M, "bar_style") - if isinstance(limits, list) and \ - len(limits) == 2: + if isinstance(limits, list) and len(limits) == 2: z_min = limits[0] z_max = limits[1] else: @@ -846,19 +897,18 @@ def matrix_histogram(M, x_basis=None, y_basis=None, limits=None, z_min -= 0.1 z_max += 0.1 - color_M = _get_matrix_components(color_style, M, 'color_style') + color_M = _get_matrix_components(color_style, M, "color_style") - if isinstance(color_limits, list) and \ - len(color_limits) == 2: + if isinstance(color_limits, list) and len(color_limits) == 2: c_min = color_limits[0] c_max = color_limits[1] else: - if color_style == 'phase': + if color_style == "phase": c_min = -pi c_max = pi else: c_min = min(color_M) if i == 0 else min(min(color_M), c_min) - c_max = min(color_M) if i == 0 else max(max(color_M), c_max) + c_max = max(color_M) if i == 0 else max(max(color_M), c_max) if c_min == c_max: c_min -= 0.1 @@ -868,66 +918,93 @@ def matrix_histogram(M, x_basis=None, y_basis=None, limits=None, if cmap is None: # change later - if color_style == 'phase': + if color_style == "phase": cmap = _cyclic_cmap() else: cmap = _sequential_cmap() artist_list = list() + + ax.view_init(azim=options['azim'], elev=options['elev']) + + camera = get_camera_position(ax) for M in Ms: if isinstance(M, Qobj): M = M.full() - bar_M = _get_matrix_components(bar_style, M, 'bar_style') - color_M = _get_matrix_components(color_style, M, 'color_style') + bar_M = _get_matrix_components(bar_style, M, "bar_style") + color_M = _get_matrix_components(color_style, M, "color_style") n = np.size(M) xpos, ypos = np.meshgrid(range(M.shape[0]), range(M.shape[1])) xpos = xpos.T.flatten() + 0.5 ypos = ypos.T.flatten() + 0.5 
zpos = np.zeros(n) - dx = dy = (1 - options['bars_spacing']) * np.ones(n) + dx = dy = (1 - options["bars_spacing"]) * np.ones(n) colors = cmap(norm(color_M)) - colors[:, 3] = options['bars_alpha'] + colors[:, 3] = options["bars_alpha"] - if options['threshold'] is not None: - colors[:, 3] *= 1 * (bar_M >= options['threshold']) + if options["threshold"] is not None: + colors[:, 3] *= 1 * (bar_M >= options["threshold"]) - idx, = np.where(bar_M < options['threshold']) + (idx,) = np.where(bar_M < options["threshold"]) bar_M[idx] = 0 - artist = ax.bar3d(xpos, ypos, zpos, dx, dy, bar_M, color=colors, - edgecolors=options['bars_edgecolor'], - linewidths=options['bars_lw'], - shade=options['shade']) - artist_list.append([artist]) + temp_xpos = xpos.reshape(M.shape) + temp_ypos = ypos.reshape(M.shape) + temp_zpos = zpos.reshape(M.shape) + + # calculating z_order for each bar based on its position + # The sorting issue was fixed by making minor change to + # https://stackoverflow.com/questions/18602660/matplotlib-bar3d-clipping-problems + z_order = ( + np.multiply( + [ + temp_xpos, temp_ypos, temp_zpos], camera + ).sum(0).flatten() + ) + + for i, uxpos in enumerate(xpos): + artist = ax.bar3d( + uxpos, + ypos[i], + zpos[i], + dx[i], + dy[i], + bar_M[i], + color=colors[i], + edgecolors=options["bars_edgecolor"], + linewidths=options["bars_lw"], + shade=options["shade"], + ) + # Setting the z-order for rendering + artist._sort_zpos = z_order[i] + artist_list.append([artist]) if len(Ms) == 1: output = ax else: - output = animation.ArtistAnimation(fig, artist_list, interval=50, - blit=True, repeat_delay=1000) + output = animation.ArtistAnimation( + fig, artist_list, interval=50, blit=True, repeat_delay=1000 + ) # remove vertical lines on xz and yz plane - ax.yaxis._axinfo["grid"]['linewidth'] = 0 - ax.xaxis._axinfo["grid"]['linewidth'] = 0 + ax.yaxis._axinfo["grid"]["linewidth"] = 0 + ax.xaxis._axinfo["grid"]["linewidth"] = 0 # x axis - _update_xaxis(options['bars_spacing'], M, 
ax, x_basis) + _update_xaxis(options["bars_spacing"], M, ax, x_basis) # y axis - _update_yaxis(options['bars_spacing'], M, ax, y_basis) + _update_yaxis(options["bars_spacing"], M, ax, y_basis) # z axis - _update_zaxis(ax, z_min, z_max, options['zticks']) + _update_zaxis(ax, z_min, z_max, options["zticks"]) # stick to xz and yz plane - _stick_to_planes(options['stick'], - options['azim'], ax, M, - options['bars_spacing']) - ax.view_init(azim=options['azim'], elev=options['elev']) + _stick_to_planes(options["stick"], options["azim"], ax, M, options["bars_spacing"]) # removing margins _remove_margins(ax.xaxis) @@ -936,22 +1013,23 @@ def matrix_histogram(M, x_basis=None, y_basis=None, limits=None, # color axis if colorbar: - cax, kw = mpl.colorbar.make_axes(ax, shrink=.75, - pad=options['cbar_pad']) + cax, kw = mpl.colorbar.make_axes( + ax, shrink=0.75, pad=options["cbar_pad"]) cb = mpl.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm) - if color_style == 'real': - cb.set_label('real') - elif color_style == 'img': - cb.set_label('imaginary') - elif color_style == 'abs': - cb.set_label('absolute') + if color_style == "real": + cb.set_label("real") + elif color_style == "img": + cb.set_label("imaginary") + elif color_style == "abs": + cb.set_label("absolute") else: - cb.set_label('arg') + cb.set_label("arg") if color_limits is None: cb.set_ticks([-pi, -pi / 2, 0, pi / 2, pi]) cb.set_ticklabels( - (r'$-\pi$', r'$-\pi/2$', r'$0$', r'$\pi/2$', r'$\pi$')) + (r"$-\pi$", r"$-\pi/2$", r"$0$", r"$\pi/2$", r"$\pi$") + ) return fig, output diff --git a/setup.cfg b/setup.cfg index ecb3cc0ee4..dfd09c1964 100644 --- a/setup.cfg +++ b/setup.cfg @@ -9,7 +9,7 @@ license = BSD 3-Clause License license_files = LICENSE.txt project_urls = Bug Tracker = https://github.com/qutip/qutip/issues - Documentation = https://qutip.org/docs/latest/ + Documentation = https://qutip.readthedocs.io/en/stable/ Source Code = https://github.com/qutip/qutip classifiers = Development Status :: 2 - Pre-Alpha @@ 
-30,26 +30,26 @@ platforms = Linux, Mac OSX, Unix, Windows packages = find: include_package_data = True zip_safe = False +python_requires = >=3.10 install_requires = numpy>=1.22 - scipy>=1.8,<1.12 + scipy>=1.9 packaging setup_requires = - numpy>=1.19 - scipy>=1.8 + numpy>=2.0.0 + scipy>=1.9 cython>=0.29.20; python_version>='3.10' - cython>=0.29.20,<3.0.3; python_version<='3.9' packaging [options.packages.find] include = qutip* [options.extras_require] -graphics = matplotlib>=1.2.1 +graphics = matplotlib>=3.5 runtime_compilation = cython>=0.29.20; python_version>='3.10' - cython>=0.29.20,<3.0.3; python_version<='3.9' filelock + setuptools semidefinite = cvxpy>=1.0 cvxopt